Explorar o código

first working CI

Change-Id: I68c0cafba040debefd660c5213f21e6fc702a4a6
gejun hai 6 anos
pai
achega
7998155962
Modificáronse 100 ficheiros con 21337 adicións e 0 borrados
  1. 215 0
      COMAKE
  2. 2491 0
      Makefile
  3. 38 0
      base/allocator/type_profiler_control.cc
  4. 31 0
      base/allocator/type_profiler_control.h
  5. 92 0
      base/arena.cpp
  6. 71 0
      base/arena.h
  7. 82 0
      base/at_exit.cc
  8. 76 0
      base/at_exit.h
  9. 80 0
      base/atomic_ref_count.h
  10. 60 0
      base/atomic_sequence_num.h
  11. 316 0
      base/atomicops.h
  12. 307 0
      base/atomicops_internals_arm64_gcc.h
  13. 294 0
      base/atomicops_internals_arm_gcc.h
  14. 100 0
      base/atomicops_internals_atomicword_compat.h
  15. 106 0
      base/atomicops_internals_gcc.h
  16. 197 0
      base/atomicops_internals_mac.h
  17. 154 0
      base/atomicops_internals_mips_gcc.h
  18. 186 0
      base/atomicops_internals_tsan.h
  19. 100 0
      base/atomicops_internals_x86_gcc.cc
  20. 242 0
      base/atomicops_internals_x86_gcc.h
  21. 198 0
      base/atomicops_internals_x86_msvc.h
  22. 41 0
      base/auto_reset.h
  23. 52 0
      base/barrier_closure.cc
  24. 30 0
      base/barrier_closure.h
  25. 37 0
      base/base64.cc
  26. 24 0
      base/base64.h
  27. 34 0
      base/base_export.h
  28. 46 0
      base/base_paths.cc
  29. 55 0
      base/base_paths.h
  30. 25 0
      base/base_paths_android.h
  31. 24 0
      base/base_paths_mac.h
  32. 114 0
      base/base_paths_mac.mm
  33. 116 0
      base/base_paths_posix.cc
  34. 27 0
      base/base_paths_posix.h
  35. 68 0
      base/base_switches.cc
  36. 33 0
      base/base_switches.h
  37. 35 0
      base/basictypes.h
  38. 97 0
      base/big_endian.cc
  39. 102 0
      base/big_endian.h
  40. 511 0
      base/bind.h
  41. 14 0
      base/bind_helpers.cc
  42. 544 0
      base/bind_helpers.h
  43. 2789 0
      base/bind_internal.h
  44. 88 0
      base/bit_array.h
  45. 47 0
      base/bits.h
  46. 176 0
      base/build_config.h
  47. 25 0
      base/build_time.cc
  48. 25 0
      base/build_time.h
  49. 770 0
      base/callback.h
  50. 17 0
      base/callback_forward.h
  51. 42 0
      base/callback_helpers.cc
  52. 50 0
      base/callback_helpers.h
  53. 38 0
      base/callback_internal.cc
  54. 178 0
      base/callback_internal.h
  55. 406 0
      base/callback_list.h
  56. 272 0
      base/cancelable_callback.h
  57. 50 0
      base/class_name.cpp
  58. 45 0
      base/class_name.h
  59. 380 0
      base/comlog_sink.cc
  60. 152 0
      base/comlog_sink.h
  61. 442 0
      base/command_line.cc
  62. 194 0
      base/command_line.h
  63. 282 0
      base/compiler_specific.h
  64. 267 0
      base/containers/bounded_queue.h
  65. 40 0
      base/containers/case_ignored_flat_map.cpp
  66. 59 0
      base/containers/case_ignored_flat_map.h
  67. 399 0
      base/containers/doubly_buffered_data.h
  68. 471 0
      base/containers/flat_map.h
  69. 639 0
      base/containers/flat_map_inl.h
  70. 277 0
      base/containers/hash_tables.h
  71. 196 0
      base/containers/linked_list.h
  72. 310 0
      base/containers/mru_cache.h
  73. 182 0
      base/containers/pooled_map.h
  74. 157 0
      base/containers/scoped_ptr_hash_map.h
  75. 652 0
      base/containers/small_map.h
  76. 265 0
      base/containers/stack_container.h
  77. 244 0
      base/cpu.cc
  78. 90 0
      base/cpu.h
  79. 457 0
      base/crc32c.cc
  80. 52 0
      base/crc32c.h
  81. 23 0
      base/debug/alias.cc
  82. 21 0
      base/debug/alias.h
  83. 94 0
      base/debug/asan_invalid_access.cc
  84. 47 0
      base/debug/asan_invalid_access.h
  85. 202 0
      base/debug/crash_logging.cc
  86. 104 0
      base/debug/crash_logging.h
  87. 41 0
      base/debug/debugger.cc
  88. 44 0
      base/debug/debugger.h
  89. 255 0
      base/debug/debugger_posix.cc
  90. 32 0
      base/debug/dump_without_crashing.cc
  91. 27 0
      base/debug/dump_without_crashing.h
  92. 55 0
      base/debug/leak_annotations.h
  93. 138 0
      base/debug/leak_tracker.h
  94. 167 0
      base/debug/proc_maps_linux.cc
  95. 90 0
      base/debug/proc_maps_linux.h
  96. 219 0
      base/debug/profiler.cc
  97. 90 0
      base/debug/profiler.h
  98. 43 0
      base/debug/stack_trace.cc
  99. 112 0
      base/debug/stack_trace.h
  100. 843 0
      base/debug/stack_trace_posix.cc

+ 215 - 0
COMAKE

@@ -0,0 +1,215 @@
+#edit-mode: -*- python -*-
+#coding:gbk
+
+WORKROOT('../../../')
+
+import commands
+getRevCommand='$(shell git rev-parse --short HEAD)'
+
+CXXFLAGS('-O2 -g -pipe -Wall -W -Werror -fPIC -fstrict-aliasing -Wno-invalid-offsetof -Wno-unused-parameter -fno-omit-frame-pointer -std=c++0x -include brpc/config.h')
+CFLAGS('-O2 -g -pipe -Wall -W -Werror -fPIC -fstrict-aliasing -Wno-unused-parameter -fno-omit-frame-pointer')
+CPPFLAGS('-D__const__= -DNDEBUG -DUSE_SYMBOLIZE -DNO_TCMALLOC -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -DBRPC_REVISION=\\"%s\\"' % getRevCommand)
+
+INCPATHS('.')
+
+CONFIGS('third-64/gflags@gflags_2-0-0-100_PD_BL')
+CONFIGS('third-64/protobuf@protobuf_2-4-1-1100_PD_BL')
+CONFIGS('third-64/leveldb@leveldb_1-0-0-0_PD_BL')   # rpcz
+
+sources = [
+    "base/third_party/dmg_fp/g_fmt.cc",
+    "base/third_party/dmg_fp/dtoa_wrapper.cc",
+    "base/third_party/dmg_fp/dtoa.cc",
+    "base/third_party/dynamic_annotations/dynamic_annotations.c",
+    "base/third_party/icu/icu_utf.cc",
+    "base/third_party/superfasthash/superfasthash.c",
+    "base/third_party/modp_b64/modp_b64.cc",
+    "base/third_party/nspr/prtime.cc",
+    "base/third_party/symbolize/demangle.cc",
+    "base/third_party/symbolize/symbolize.cc",
+    "base/third_party/xdg_mime/xdgmime.c",
+    "base/third_party/xdg_mime/xdgmimealias.c",
+    "base/third_party/xdg_mime/xdgmimecache.c",
+    "base/third_party/xdg_mime/xdgmimeglob.c",
+    "base/third_party/xdg_mime/xdgmimeicon.c",
+    "base/third_party/xdg_mime/xdgmimeint.c",
+    "base/third_party/xdg_mime/xdgmimemagic.c",
+    "base/third_party/xdg_mime/xdgmimeparent.c",
+    "base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc",
+    "base/third_party/snappy/snappy-sinksource.cc",
+    "base/third_party/snappy/snappy-stubs-internal.cc",
+    "base/third_party/snappy/snappy.cc",
+    "base/third_party/murmurhash3/murmurhash3.cpp",
+    "base/allocator/type_profiler_control.cc",
+    "base/arena.cpp",
+    "base/at_exit.cc",
+    "base/atomicops_internals_x86_gcc.cc",
+    "base/barrier_closure.cc",
+    "base/base_paths.cc",
+    "base/base_paths_posix.cc",
+    "base/base64.cc",
+    "base/base_switches.cc",
+    "base/big_endian.cc",
+    "base/bind_helpers.cc",
+    "base/build_time.cc",
+    "base/callback_helpers.cc",
+    "base/callback_internal.cc",
+    "base/command_line.cc",
+    "base/cpu.cc",
+    "base/debug/alias.cc",
+    "base/debug/asan_invalid_access.cc",
+    "base/debug/crash_logging.cc",
+    "base/debug/debugger.cc",
+    "base/debug/debugger_posix.cc",
+    "base/debug/dump_without_crashing.cc",
+    "base/debug/proc_maps_linux.cc",
+    "base/debug/stack_trace.cc",
+    "base/debug/stack_trace_posix.cc",
+    "base/environment.cc",
+    "base/files/file.cc",
+    "base/files/file_posix.cc",
+    "base/files/file_enumerator.cc",
+    "base/files/file_enumerator_posix.cc",
+    "base/files/file_path.cc",
+    "base/files/file_path_constants.cc",
+    "base/files/memory_mapped_file.cc",
+    "base/files/memory_mapped_file_posix.cc",
+    "base/files/scoped_file.cc",
+    "base/files/scoped_temp_dir.cc",
+    "base/file_util.cc",
+    "base/file_util_linux.cc",
+    "base/file_util_posix.cc",
+    "base/guid.cc",
+    "base/guid_posix.cc",
+    "base/hash.cc",
+    "base/lazy_instance.cc",
+    "base/location.cc",
+    "base/md5.cc",
+    "base/memory/aligned_memory.cc",
+    "base/memory/ref_counted.cc",
+    "base/memory/ref_counted_memory.cc",
+    "base/memory/shared_memory_posix.cc",
+    "base/memory/singleton.cc",
+    "base/memory/weak_ptr.cc",
+    "base/nix/mime_util_xdg.cc",
+    "base/nix/xdg_util.cc",
+    "base/path_service.cc",
+    "base/posix/file_descriptor_shuffle.cc",
+    "base/posix/global_descriptors.cc",
+    "base/process/internal_linux.cc",
+    "base/process/kill.cc",
+    "base/process/kill_posix.cc",
+    "base/process/launch.cc",
+    "base/process/launch_posix.cc",
+    "base/process/memory.cc",
+    "base/process/memory_linux.cc",
+    "base/process/process_handle_linux.cc",
+    "base/process/process_handle_posix.cc",
+    "base/process/process_info_linux.cc",
+    "base/process/process_iterator.cc",
+    "base/process/process_iterator_linux.cc",
+    "base/process/process_linux.cc",
+    "base/process/process_metrics.cc",
+    "base/process/process_metrics_linux.cc",
+    "base/process/process_metrics_posix.cc",
+    "base/process/process_posix.cc",
+    "base/rand_util.cc",
+    "base/rand_util_posix.cc",
+    "base/fast_rand.cpp",
+    "base/safe_strerror_posix.cc",
+    "base/sha1_portable.cc",
+    "base/strings/latin1_string_conversions.cc",
+    "base/strings/nullable_string16.cc",
+    "base/strings/safe_sprintf.cc",
+    "base/strings/string16.cc",
+    "base/strings/string_number_conversions.cc",
+    "base/strings/string_split.cc",
+    "base/strings/string_piece.cc",
+    "base/strings/string_util.cc",
+    "base/strings/string_util_constants.cc",
+    "base/strings/stringprintf.cc",
+    "base/strings/sys_string_conversions_posix.cc",
+    "base/strings/utf_offset_string_conversions.cc",
+    "base/strings/utf_string_conversion_utils.cc",
+    "base/strings/utf_string_conversions.cc",
+    "base/synchronization/cancellation_flag.cc",
+    "base/synchronization/condition_variable_posix.cc",
+    "base/synchronization/waitable_event_posix.cc",
+    "base/sys_info.cc",
+    "base/sys_info_linux.cc",
+    "base/sys_info_posix.cc",
+    "base/threading/non_thread_safe_impl.cc",
+    "base/threading/platform_thread_linux.cc",
+    "base/threading/platform_thread_posix.cc",
+    "base/threading/simple_thread.cc",
+    "base/threading/thread_checker_impl.cc",
+    "base/threading/thread_collision_warner.cc",
+    "base/threading/thread_id_name_manager.cc",
+    "base/threading/thread_local_posix.cc",
+    "base/threading/thread_local_storage.cc",
+    "base/threading/thread_local_storage_posix.cc",
+    "base/threading/thread_restrictions.cc",
+    "base/threading/watchdog.cc",
+    "base/time/clock.cc",
+    "base/time/default_clock.cc",
+    "base/time/default_tick_clock.cc",
+    "base/time/tick_clock.cc",
+    "base/time/time.cc",
+    "base/time/time_posix.cc",
+    "base/version.cc",
+    "base/logging.cc",
+    
+    "base/class_name.cpp",
+    "base/errno.cpp", 
+    "base/find_cstr.cpp",
+    "base/status.cpp",
+    "base/string_printf.cpp",
+    "base/thread_local.cpp",
+    "base/unix_socket.cpp",
+    "base/endpoint.cpp",
+    "base/fd_utility.cpp",
+    "base/files/temp_file.cpp",
+    "base/files/file_watcher.cpp",
+    "base/time.cpp",
+    "base/zero_copy_stream_as_streambuf.cpp",
+    "base/crc32c.cc",
+    "base/containers/case_ignored_flat_map.cpp",
+    "base/iobuf.cpp"
+]
+
+LDFLAGS('-lpthread -lrt -lssl -lcrypto -ldl -lz')
+
+
+def GenProtoCpp(filelist):
+    protoc_bin=ENV.WorkRoot() + "/third-64/protobuf/bin/protoc"
+    import commands
+    for path in filelist.split(' '):
+        part = path.partition('/')
+        removed_ext = os.path.splitext(path)[0]
+        cpp_path = removed_ext + '.pb.cc'
+        cmd = protoc_bin + \
+              ' --proto_path=' + part[0] + \
+              ' --proto_path=' + ENV.WorkRoot() + '/third-64/protobuf/include/' + \
+              ' --cpp_out=. ' + path
+        commands.getoutput(cmd);
+        TARGET(cpp_path, Prefixes(path + ' ' + protoc_bin), ShellCommands(cmd), CleanFiles(''))
+
+GenProtoCpp(GLOB('./*.proto'))
+GenProtoCpp(GLOB('./brpc/*.proto'))
+GenProtoCpp(GLOB('./brpc/policy/*.proto'))
+
+StaticLibrary('base', Sources(' '.join(sources)))
+StaticLibrary('bvar',Sources(GLOB('bvar/*.cpp bvar/detail/*.cpp')))
+StaticLibrary('bthread', Sources(GLOB('bthread/*.cpp')))
+StaticLibrary('json2pb', Sources(GLOB('json2pb/*.cpp')))
+StaticLibrary('mcpack2pb', Sources(GLOB('mcpack2pb/*.cpp idl_options.pb.cc'), 
+              HeaderFiles(GLOB('mcpack2pb/*.h'))) - Sources('mcpack2pb/generator.cpp'))
+Application('protoc-gen-mcpack', Sources('mcpack2pb/generator.cpp'),
+            Libraries('libmcpack2pb.a libbase.a libbthread.a libbvar.a'))
+StaticLibrary('brpc',
+              Sources(GLOB('brpc/*.cpp brpc/policy/*.cpp brpc/builtin/*.cpp brpc/details/*.cpp brpc/*.pb.cc brpc/policy/*.pb.cc'))
+              - Sources('brpc/policy/baidu_naming_service.cpp brpc/policy/giano_authenticator.cpp'))
+
+TARGET('output/include', PhonyMode('true'), Prefixes('libbase.a libbvar.a libbthread.a libjson2pb.a libmcpack2pb.a libbrpc.a'),
+       ShellCommands("for dir in `find base bvar bthread brpc -type f -name \"*.h\" -exec dirname {} \\; | sort | uniq`; do mkdir -p output/include/$$dir && cp $$dir/*.h output/include/$$dir/; done; for dir in `find base bvar bthread brpc -type f -name \"*.hpp\" -exec dirname {} \\; | sort | uniq`; do mkdir -p output/include/$$dir && cp $$dir/*.hpp output/include/$$dir/; done"),
+       CleanFiles('output/include'))

+ 2491 - 0
Makefile

@@ -0,0 +1,2491 @@
+#COMAKE2 edit-mode: -*- Makefile -*-
+####################64Bit Mode####################
+ifeq ($(shell uname -m),x86_64)
+CC=gcc
+CXX=g++
+CXXFLAGS=-O2 \
+  -g \
+  -pipe \
+  -Wall \
+  -W \
+  -Werror \
+  -fPIC \
+  -fstrict-aliasing \
+  -Wno-invalid-offsetof \
+  -Wno-unused-parameter \
+  -fno-omit-frame-pointer \
+  -std=c++0x \
+  -include \
+  brpc/config.h
+CFLAGS=-O2 \
+  -g \
+  -pipe \
+  -Wall \
+  -W \
+  -Werror \
+  -fPIC \
+  -fstrict-aliasing \
+  -Wno-unused-parameter \
+  -fno-omit-frame-pointer
+CPPFLAGS=-D__const__= \
+  -DNDEBUG \
+  -DUSE_SYMBOLIZE \
+  -DNO_TCMALLOC \
+  -D__STDC_FORMAT_MACROS \
+  -D__STDC_LIMIT_MACROS \
+  -D__STDC_CONSTANT_MACROS \
+  -DBRPC_REVISION=\"$(shell \
+  git \
+  rev-parse \
+  --short \
+  HEAD)\"
+INCPATH=-I.
+DEP_INCPATH=-I../../../third-64/gflags \
+  -I../../../third-64/gflags/include \
+  -I../../../third-64/gflags/output \
+  -I../../../third-64/gflags/output/include \
+  -I../../../third-64/leveldb \
+  -I../../../third-64/leveldb/include \
+  -I../../../third-64/leveldb/output \
+  -I../../../third-64/leveldb/output/include \
+  -I../../../third-64/protobuf \
+  -I../../../third-64/protobuf/include \
+  -I../../../third-64/protobuf/output \
+  -I../../../third-64/protobuf/output/include
+
+#============ CCP vars ============
+CCHECK=@ccheck.py
+CCHECK_FLAGS=
+PCLINT=@pclint
+PCLINT_FLAGS=
+CCP=@ccp.py
+CCP_FLAGS=
+
+
+#COMAKE UUID
+COMAKE_MD5=278b132339335c67b6a9d91325f51a67  COMAKE
+
+
+.PHONY:all
+all:comake2_makefile_check idl_options.pb.cc brpc/builtin_service.pb.cc brpc/errno.pb.cc brpc/get_favicon.pb.cc brpc/get_js.pb.cc brpc/nshead_meta.pb.cc brpc/options.pb.cc brpc/rpc_dump.pb.cc brpc/rtmp.pb.cc brpc/span.pb.cc brpc/streaming_rpc_meta.pb.cc brpc/trackme.pb.cc brpc/policy/baidu_rpc_meta.pb.cc brpc/policy/hulu_pbrpc_meta.pb.cc brpc/policy/mongo.pb.cc brpc/policy/public_pbrpc_meta.pb.cc brpc/policy/sofa_pbrpc_meta.pb.cc libbase.a libbvar.a libbthread.a libjson2pb.a libmcpack2pb.a protoc-gen-mcpack libbrpc.a output/include 
+	@echo "[COMAKE:BUILD][Target:'all']"
+	@echo "make all done"
+
+.PHONY:comake2_makefile_check
+comake2_makefile_check:
+	@echo "[COMAKE:BUILD][Target:'comake2_makefile_check']"
+	#in case of error, update 'Makefile' by 'comake2'
+	@echo "$(COMAKE_MD5)">comake2.md5
+	@md5sum -c --status comake2.md5
+	@rm -f comake2.md5
+
+.PHONY:ccpclean
+ccpclean:
+	@echo "[COMAKE:BUILD][Target:'ccpclean']"
+	@echo "make ccpclean done"
+
+.PHONY:clean
+clean:ccpclean
+	@echo "[COMAKE:BUILD][Target:'clean']"
+	rm -rf libbase.a
+	rm -rf ./output/lib/libbase.a
+	rm -rf libbvar.a
+	rm -rf ./output/lib/libbvar.a
+	rm -rf libbthread.a
+	rm -rf ./output/lib/libbthread.a
+	rm -rf libjson2pb.a
+	rm -rf ./output/lib/libjson2pb.a
+	rm -rf libmcpack2pb.a
+	rm -rf ./output/lib/libmcpack2pb.a
+	rm -rf protoc-gen-mcpack
+	rm -rf ./output/bin/protoc-gen-mcpack
+	rm -rf libbrpc.a
+	rm -rf ./output/lib/libbrpc.a
+	rm -rf output/include
+	rm -rf base/third_party/dmg_fp/base_g_fmt.o
+	rm -rf base/third_party/dmg_fp/base_dtoa_wrapper.o
+	rm -rf base/third_party/dmg_fp/base_dtoa.o
+	rm -rf base/third_party/dynamic_annotations/base_dynamic_annotations.o
+	rm -rf base/third_party/icu/base_icu_utf.o
+	rm -rf base/third_party/superfasthash/base_superfasthash.o
+	rm -rf base/third_party/modp_b64/base_modp_b64.o
+	rm -rf base/third_party/nspr/base_prtime.o
+	rm -rf base/third_party/symbolize/base_demangle.o
+	rm -rf base/third_party/symbolize/base_symbolize.o
+	rm -rf base/third_party/xdg_mime/base_xdgmime.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimealias.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimecache.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimeglob.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimeicon.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimeint.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimemagic.o
+	rm -rf base/third_party/xdg_mime/base_xdgmimeparent.o
+	rm -rf base/third_party/xdg_user_dirs/base_xdg_user_dir_lookup.o
+	rm -rf base/third_party/snappy/base_snappy-sinksource.o
+	rm -rf base/third_party/snappy/base_snappy-stubs-internal.o
+	rm -rf base/third_party/snappy/base_snappy.o
+	rm -rf base/third_party/murmurhash3/base_murmurhash3.o
+	rm -rf base/allocator/base_type_profiler_control.o
+	rm -rf base/base_arena.o
+	rm -rf base/base_at_exit.o
+	rm -rf base/base_atomicops_internals_x86_gcc.o
+	rm -rf base/base_barrier_closure.o
+	rm -rf base/base_base_paths.o
+	rm -rf base/base_base_paths_posix.o
+	rm -rf base/base_base64.o
+	rm -rf base/base_base_switches.o
+	rm -rf base/base_big_endian.o
+	rm -rf base/base_bind_helpers.o
+	rm -rf base/base_build_time.o
+	rm -rf base/base_callback_helpers.o
+	rm -rf base/base_callback_internal.o
+	rm -rf base/base_command_line.o
+	rm -rf base/base_cpu.o
+	rm -rf base/debug/base_alias.o
+	rm -rf base/debug/base_asan_invalid_access.o
+	rm -rf base/debug/base_crash_logging.o
+	rm -rf base/debug/base_debugger.o
+	rm -rf base/debug/base_debugger_posix.o
+	rm -rf base/debug/base_dump_without_crashing.o
+	rm -rf base/debug/base_proc_maps_linux.o
+	rm -rf base/debug/base_stack_trace.o
+	rm -rf base/debug/base_stack_trace_posix.o
+	rm -rf base/base_environment.o
+	rm -rf base/files/base_file.o
+	rm -rf base/files/base_file_posix.o
+	rm -rf base/files/base_file_enumerator.o
+	rm -rf base/files/base_file_enumerator_posix.o
+	rm -rf base/files/base_file_path.o
+	rm -rf base/files/base_file_path_constants.o
+	rm -rf base/files/base_memory_mapped_file.o
+	rm -rf base/files/base_memory_mapped_file_posix.o
+	rm -rf base/files/base_scoped_file.o
+	rm -rf base/files/base_scoped_temp_dir.o
+	rm -rf base/base_file_util.o
+	rm -rf base/base_file_util_linux.o
+	rm -rf base/base_file_util_posix.o
+	rm -rf base/base_guid.o
+	rm -rf base/base_guid_posix.o
+	rm -rf base/base_hash.o
+	rm -rf base/base_lazy_instance.o
+	rm -rf base/base_location.o
+	rm -rf base/base_md5.o
+	rm -rf base/memory/base_aligned_memory.o
+	rm -rf base/memory/base_ref_counted.o
+	rm -rf base/memory/base_ref_counted_memory.o
+	rm -rf base/memory/base_shared_memory_posix.o
+	rm -rf base/memory/base_singleton.o
+	rm -rf base/memory/base_weak_ptr.o
+	rm -rf base/nix/base_mime_util_xdg.o
+	rm -rf base/nix/base_xdg_util.o
+	rm -rf base/base_path_service.o
+	rm -rf base/posix/base_file_descriptor_shuffle.o
+	rm -rf base/posix/base_global_descriptors.o
+	rm -rf base/process/base_internal_linux.o
+	rm -rf base/process/base_kill.o
+	rm -rf base/process/base_kill_posix.o
+	rm -rf base/process/base_launch.o
+	rm -rf base/process/base_launch_posix.o
+	rm -rf base/process/base_memory.o
+	rm -rf base/process/base_memory_linux.o
+	rm -rf base/process/base_process_handle_linux.o
+	rm -rf base/process/base_process_handle_posix.o
+	rm -rf base/process/base_process_info_linux.o
+	rm -rf base/process/base_process_iterator.o
+	rm -rf base/process/base_process_iterator_linux.o
+	rm -rf base/process/base_process_linux.o
+	rm -rf base/process/base_process_metrics.o
+	rm -rf base/process/base_process_metrics_linux.o
+	rm -rf base/process/base_process_metrics_posix.o
+	rm -rf base/process/base_process_posix.o
+	rm -rf base/base_rand_util.o
+	rm -rf base/base_rand_util_posix.o
+	rm -rf base/base_fast_rand.o
+	rm -rf base/base_safe_strerror_posix.o
+	rm -rf base/base_sha1_portable.o
+	rm -rf base/strings/base_latin1_string_conversions.o
+	rm -rf base/strings/base_nullable_string16.o
+	rm -rf base/strings/base_safe_sprintf.o
+	rm -rf base/strings/base_string16.o
+	rm -rf base/strings/base_string_number_conversions.o
+	rm -rf base/strings/base_string_split.o
+	rm -rf base/strings/base_string_piece.o
+	rm -rf base/strings/base_string_util.o
+	rm -rf base/strings/base_string_util_constants.o
+	rm -rf base/strings/base_stringprintf.o
+	rm -rf base/strings/base_sys_string_conversions_posix.o
+	rm -rf base/strings/base_utf_offset_string_conversions.o
+	rm -rf base/strings/base_utf_string_conversion_utils.o
+	rm -rf base/strings/base_utf_string_conversions.o
+	rm -rf base/synchronization/base_cancellation_flag.o
+	rm -rf base/synchronization/base_condition_variable_posix.o
+	rm -rf base/synchronization/base_waitable_event_posix.o
+	rm -rf base/base_sys_info.o
+	rm -rf base/base_sys_info_linux.o
+	rm -rf base/base_sys_info_posix.o
+	rm -rf base/threading/base_non_thread_safe_impl.o
+	rm -rf base/threading/base_platform_thread_linux.o
+	rm -rf base/threading/base_platform_thread_posix.o
+	rm -rf base/threading/base_simple_thread.o
+	rm -rf base/threading/base_thread_checker_impl.o
+	rm -rf base/threading/base_thread_collision_warner.o
+	rm -rf base/threading/base_thread_id_name_manager.o
+	rm -rf base/threading/base_thread_local_posix.o
+	rm -rf base/threading/base_thread_local_storage.o
+	rm -rf base/threading/base_thread_local_storage_posix.o
+	rm -rf base/threading/base_thread_restrictions.o
+	rm -rf base/threading/base_watchdog.o
+	rm -rf base/time/base_clock.o
+	rm -rf base/time/base_default_clock.o
+	rm -rf base/time/base_default_tick_clock.o
+	rm -rf base/time/base_tick_clock.o
+	rm -rf base/time/base_time.o
+	rm -rf base/time/base_time_posix.o
+	rm -rf base/base_version.o
+	rm -rf base/base_logging.o
+	rm -rf base/base_class_name.o
+	rm -rf base/base_errno.o
+	rm -rf base/base_find_cstr.o
+	rm -rf base/base_status.o
+	rm -rf base/base_string_printf.o
+	rm -rf base/base_thread_local.o
+	rm -rf base/base_unix_socket.o
+	rm -rf base/base_endpoint.o
+	rm -rf base/base_fd_utility.o
+	rm -rf base/files/base_temp_file.o
+	rm -rf base/files/base_file_watcher.o
+	rm -rf base/base_time.o
+	rm -rf base/base_zero_copy_stream_as_streambuf.o
+	rm -rf base/base_crc32c.o
+	rm -rf base/containers/base_case_ignored_flat_map.o
+	rm -rf base/base_iobuf.o
+	rm -rf bvar/bvar_collector.o
+	rm -rf bvar/bvar_default_variables.o
+	rm -rf bvar/bvar_gflag.o
+	rm -rf bvar/bvar_latency_recorder.o
+	rm -rf bvar/bvar_variable.o
+	rm -rf bvar/detail/bvar_percentile.o
+	rm -rf bvar/detail/bvar_sampler.o
+	rm -rf bthread/bthread_bthread.o
+	rm -rf bthread/bthread_butex.o
+	rm -rf bthread/bthread_cond.o
+	rm -rf bthread/bthread_context.o
+	rm -rf bthread/bthread_countdown_event.o
+	rm -rf bthread/bthread_errno.o
+	rm -rf bthread/bthread_execution_queue.o
+	rm -rf bthread/bthread_fd.o
+	rm -rf bthread/bthread_id.o
+	rm -rf bthread/bthread_interrupt_pthread.o
+	rm -rf bthread/bthread_key.o
+	rm -rf bthread/bthread_mutex.o
+	rm -rf bthread/bthread_stack.o
+	rm -rf bthread/bthread_sys_futex.o
+	rm -rf bthread/bthread_task_control.o
+	rm -rf bthread/bthread_task_group.o
+	rm -rf bthread/bthread_timer_thread.o
+	rm -rf json2pb/json2pb_encode_decode.o
+	rm -rf json2pb/json2pb_json_to_pb.o
+	rm -rf json2pb/json2pb_pb_to_json.o
+	rm -rf json2pb/json2pb_protobuf_map.o
+	rm -rf json2pb/json2pb_string_printf.o
+	rm -rf mcpack2pb/mcpack2pb_field_type.o
+	rm -rf mcpack2pb/mcpack2pb_mcpack2pb.o
+	rm -rf mcpack2pb/mcpack2pb_parser.o
+	rm -rf mcpack2pb/mcpack2pb_serializer.o
+	rm -rf mcpack2pb_idl_options.pb.o
+	rm -rf mcpack2pb/protoc-gen-mcpack_generator.o
+	rm -rf brpc/brpc_acceptor.o
+	rm -rf brpc/brpc_adaptive_connection_type.o
+	rm -rf brpc/brpc_amf.o
+	rm -rf brpc/brpc_bad_method_service.o
+	rm -rf brpc/brpc_channel.o
+	rm -rf brpc/brpc_compress.o
+	rm -rf brpc/brpc_controller.o
+	rm -rf brpc/brpc_esp_message.o
+	rm -rf brpc/brpc_event_dispatcher.o
+	rm -rf brpc/brpc_global.o
+	rm -rf brpc/brpc_http_header.o
+	rm -rf brpc/brpc_http_method.o
+	rm -rf brpc/brpc_http_status_code.o
+	rm -rf brpc/brpc_input_messenger.o
+	rm -rf brpc/brpc_load_balancer.o
+	rm -rf brpc/brpc_load_balancer_with_naming.o
+	rm -rf brpc/brpc_memcache.o
+	rm -rf brpc/brpc_naming_service_thread.o
+	rm -rf brpc/brpc_nshead_message.o
+	rm -rf brpc/brpc_nshead_pb_service_adaptor.o
+	rm -rf brpc/brpc_nshead_service.o
+	rm -rf brpc/brpc_parallel_channel.o
+	rm -rf brpc/brpc_partition_channel.o
+	rm -rf brpc/brpc_periodic_naming_service.o
+	rm -rf brpc/brpc_progressive_attachment.o
+	rm -rf brpc/brpc_protocol.o
+	rm -rf brpc/brpc_redis.o
+	rm -rf brpc/brpc_redis_command.o
+	rm -rf brpc/brpc_redis_reply.o
+	rm -rf brpc/brpc_reloadable_flags.o
+	rm -rf brpc/brpc_restful.o
+	rm -rf brpc/brpc_retry_policy.o
+	rm -rf brpc/brpc_rpc_dump.o
+	rm -rf brpc/brpc_rtmp.o
+	rm -rf brpc/brpc_selective_channel.o
+	rm -rf brpc/brpc_serialized_request.o
+	rm -rf brpc/brpc_server.o
+	rm -rf brpc/brpc_server_id.o
+	rm -rf brpc/brpc_socket.o
+	rm -rf brpc/brpc_socket_map.o
+	rm -rf brpc/brpc_span.o
+	rm -rf brpc/brpc_stream.o
+	rm -rf brpc/brpc_tcmalloc_extension.o
+	rm -rf brpc/brpc_trackme.o
+	rm -rf brpc/brpc_ts.o
+	rm -rf brpc/brpc_uri.o
+	rm -rf brpc/policy/brpc_baidu_rpc_protocol.o
+	rm -rf brpc/policy/brpc_consistent_hashing_load_balancer.o
+	rm -rf brpc/policy/brpc_dh.o
+	rm -rf brpc/policy/brpc_domain_naming_service.o
+	rm -rf brpc/policy/brpc_dynpart_load_balancer.o
+	rm -rf brpc/policy/brpc_esp_authenticator.o
+	rm -rf brpc/policy/brpc_esp_protocol.o
+	rm -rf brpc/policy/brpc_file_naming_service.o
+	rm -rf brpc/policy/brpc_gzip_compress.o
+	rm -rf brpc/policy/brpc_hasher.o
+	rm -rf brpc/policy/brpc_http_rpc_protocol.o
+	rm -rf brpc/policy/brpc_hulu_pbrpc_protocol.o
+	rm -rf brpc/policy/brpc_list_naming_service.o
+	rm -rf brpc/policy/brpc_locality_aware_load_balancer.o
+	rm -rf brpc/policy/brpc_memcache_binary_protocol.o
+	rm -rf brpc/policy/brpc_mongo_protocol.o
+	rm -rf brpc/policy/brpc_nova_pbrpc_protocol.o
+	rm -rf brpc/policy/brpc_nshead_mcpack_protocol.o
+	rm -rf brpc/policy/brpc_nshead_protocol.o
+	rm -rf brpc/policy/brpc_public_pbrpc_protocol.o
+	rm -rf brpc/policy/brpc_randomized_load_balancer.o
+	rm -rf brpc/policy/brpc_redis_protocol.o
+	rm -rf brpc/policy/brpc_remote_file_naming_service.o
+	rm -rf brpc/policy/brpc_round_robin_load_balancer.o
+	rm -rf brpc/policy/brpc_rtmp_protocol.o
+	rm -rf brpc/policy/brpc_snappy_compress.o
+	rm -rf brpc/policy/brpc_sofa_pbrpc_protocol.o
+	rm -rf brpc/policy/brpc_streaming_rpc_protocol.o
+	rm -rf brpc/policy/brpc_ubrpc2pb_protocol.o
+	rm -rf brpc/builtin/brpc_bthreads_service.o
+	rm -rf brpc/builtin/brpc_common.o
+	rm -rf brpc/builtin/brpc_connections_service.o
+	rm -rf brpc/builtin/brpc_dir_service.o
+	rm -rf brpc/builtin/brpc_flags_service.o
+	rm -rf brpc/builtin/brpc_flot_min_js.o
+	rm -rf brpc/builtin/brpc_get_favicon_service.o
+	rm -rf brpc/builtin/brpc_get_js_service.o
+	rm -rf brpc/builtin/brpc_health_service.o
+	rm -rf brpc/builtin/brpc_hotspots_service.o
+	rm -rf brpc/builtin/brpc_ids_service.o
+	rm -rf brpc/builtin/brpc_index_service.o
+	rm -rf brpc/builtin/brpc_jquery_min_js.o
+	rm -rf brpc/builtin/brpc_list_service.o
+	rm -rf brpc/builtin/brpc_pprof_perl.o
+	rm -rf brpc/builtin/brpc_pprof_service.o
+	rm -rf brpc/builtin/brpc_protobufs_service.o
+	rm -rf brpc/builtin/brpc_rpcz_service.o
+	rm -rf brpc/builtin/brpc_sockets_service.o
+	rm -rf brpc/builtin/brpc_sorttable_js.o
+	rm -rf brpc/builtin/brpc_status_service.o
+	rm -rf brpc/builtin/brpc_threads_service.o
+	rm -rf brpc/builtin/brpc_vars_service.o
+	rm -rf brpc/builtin/brpc_version_service.o
+	rm -rf brpc/builtin/brpc_viz_min_js.o
+	rm -rf brpc/builtin/brpc_vlog_service.o
+	rm -rf brpc/details/brpc_has_epollrdhup.o
+	rm -rf brpc/details/brpc_hpack.o
+	rm -rf brpc/details/brpc_http_message.o
+	rm -rf brpc/details/brpc_http_message_serializer.o
+	rm -rf brpc/details/brpc_http_parser.o
+	rm -rf brpc/details/brpc_method_status.o
+	rm -rf brpc/details/brpc_rtmp_utils.o
+	rm -rf brpc/details/brpc_ssl_helper.o
+	rm -rf brpc/details/brpc_usercode_backup_pool.o
+	rm -rf brpc/brpc_builtin_service.pb.o
+	rm -rf brpc/brpc_errno.pb.o
+	rm -rf brpc/brpc_get_favicon.pb.o
+	rm -rf brpc/brpc_get_js.pb.o
+	rm -rf brpc/brpc_nshead_meta.pb.o
+	rm -rf brpc/brpc_options.pb.o
+	rm -rf brpc/brpc_rpc_dump.pb.o
+	rm -rf brpc/brpc_rtmp.pb.o
+	rm -rf brpc/brpc_span.pb.o
+	rm -rf brpc/brpc_streaming_rpc_meta.pb.o
+	rm -rf brpc/brpc_trackme.pb.o
+	rm -rf brpc/policy/brpc_baidu_rpc_meta.pb.o
+	rm -rf brpc/policy/brpc_hulu_pbrpc_meta.pb.o
+	rm -rf brpc/policy/brpc_mongo.pb.o
+	rm -rf brpc/policy/brpc_public_pbrpc_meta.pb.o
+	rm -rf brpc/policy/brpc_sofa_pbrpc_meta.pb.o
+
+.PHONY:dist
+dist:
+	@echo "[COMAKE:BUILD][Target:'dist']"
+	tar czvf output.tar.gz output
+	@echo "make dist done"
+
+.PHONY:distclean
+distclean:clean
+	@echo "[COMAKE:BUILD][Target:'distclean']"
+	rm -f output.tar.gz
+	@echo "make distclean done"
+
+.PHONY:love
+love:
+	@echo "[COMAKE:BUILD][Target:'love']"
+	@echo "make love done"
+
+idl_options.pb.cc:./idl_options.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'idl_options.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./idl_options.proto
+
+brpc/builtin_service.pb.cc:./brpc/builtin_service.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin_service.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/builtin_service.proto
+
+brpc/errno.pb.cc:./brpc/errno.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/errno.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/errno.proto
+
+brpc/get_favicon.pb.cc:./brpc/get_favicon.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/get_favicon.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/get_favicon.proto
+
+brpc/get_js.pb.cc:./brpc/get_js.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/get_js.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/get_js.proto
+
+brpc/nshead_meta.pb.cc:./brpc/nshead_meta.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/nshead_meta.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/nshead_meta.proto
+
+brpc/options.pb.cc:./brpc/options.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/options.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/options.proto
+
+brpc/rpc_dump.pb.cc:./brpc/rpc_dump.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/rpc_dump.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/rpc_dump.proto
+
+brpc/rtmp.pb.cc:./brpc/rtmp.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/rtmp.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/rtmp.proto
+
+brpc/span.pb.cc:./brpc/span.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/span.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/span.proto
+
+brpc/streaming_rpc_meta.pb.cc:./brpc/streaming_rpc_meta.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/streaming_rpc_meta.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/streaming_rpc_meta.proto
+
+brpc/trackme.pb.cc:./brpc/trackme.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/trackme.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/trackme.proto
+
+brpc/policy/baidu_rpc_meta.pb.cc:./brpc/policy/baidu_rpc_meta.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/baidu_rpc_meta.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/policy/baidu_rpc_meta.proto
+
+brpc/policy/hulu_pbrpc_meta.pb.cc:./brpc/policy/hulu_pbrpc_meta.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/hulu_pbrpc_meta.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/policy/hulu_pbrpc_meta.proto
+
+brpc/policy/mongo.pb.cc:./brpc/policy/mongo.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/mongo.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/policy/mongo.proto
+
+brpc/policy/public_pbrpc_meta.pb.cc:./brpc/policy/public_pbrpc_meta.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/public_pbrpc_meta.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/policy/public_pbrpc_meta.proto
+
+brpc/policy/sofa_pbrpc_meta.pb.cc:./brpc/policy/sofa_pbrpc_meta.proto \
+  ../../..//third-64/protobuf/bin/protoc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/sofa_pbrpc_meta.pb.cc']"
+	../../..//third-64/protobuf/bin/protoc --proto_path=. --proto_path=../../..//third-64/protobuf/include/ --cpp_out=. ./brpc/policy/sofa_pbrpc_meta.proto
+
+libbase.a:base/third_party/dmg_fp/base_g_fmt.o \
+  base/third_party/dmg_fp/base_dtoa_wrapper.o \
+  base/third_party/dmg_fp/base_dtoa.o \
+  base/third_party/dynamic_annotations/base_dynamic_annotations.o \
+  base/third_party/icu/base_icu_utf.o \
+  base/third_party/superfasthash/base_superfasthash.o \
+  base/third_party/modp_b64/base_modp_b64.o \
+  base/third_party/nspr/base_prtime.o \
+  base/third_party/symbolize/base_demangle.o \
+  base/third_party/symbolize/base_symbolize.o \
+  base/third_party/xdg_mime/base_xdgmime.o \
+  base/third_party/xdg_mime/base_xdgmimealias.o \
+  base/third_party/xdg_mime/base_xdgmimecache.o \
+  base/third_party/xdg_mime/base_xdgmimeglob.o \
+  base/third_party/xdg_mime/base_xdgmimeicon.o \
+  base/third_party/xdg_mime/base_xdgmimeint.o \
+  base/third_party/xdg_mime/base_xdgmimemagic.o \
+  base/third_party/xdg_mime/base_xdgmimeparent.o \
+  base/third_party/xdg_user_dirs/base_xdg_user_dir_lookup.o \
+  base/third_party/snappy/base_snappy-sinksource.o \
+  base/third_party/snappy/base_snappy-stubs-internal.o \
+  base/third_party/snappy/base_snappy.o \
+  base/third_party/murmurhash3/base_murmurhash3.o \
+  base/allocator/base_type_profiler_control.o \
+  base/base_arena.o \
+  base/base_at_exit.o \
+  base/base_atomicops_internals_x86_gcc.o \
+  base/base_barrier_closure.o \
+  base/base_base_paths.o \
+  base/base_base_paths_posix.o \
+  base/base_base64.o \
+  base/base_base_switches.o \
+  base/base_big_endian.o \
+  base/base_bind_helpers.o \
+  base/base_build_time.o \
+  base/base_callback_helpers.o \
+  base/base_callback_internal.o \
+  base/base_command_line.o \
+  base/base_cpu.o \
+  base/debug/base_alias.o \
+  base/debug/base_asan_invalid_access.o \
+  base/debug/base_crash_logging.o \
+  base/debug/base_debugger.o \
+  base/debug/base_debugger_posix.o \
+  base/debug/base_dump_without_crashing.o \
+  base/debug/base_proc_maps_linux.o \
+  base/debug/base_stack_trace.o \
+  base/debug/base_stack_trace_posix.o \
+  base/base_environment.o \
+  base/files/base_file.o \
+  base/files/base_file_posix.o \
+  base/files/base_file_enumerator.o \
+  base/files/base_file_enumerator_posix.o \
+  base/files/base_file_path.o \
+  base/files/base_file_path_constants.o \
+  base/files/base_memory_mapped_file.o \
+  base/files/base_memory_mapped_file_posix.o \
+  base/files/base_scoped_file.o \
+  base/files/base_scoped_temp_dir.o \
+  base/base_file_util.o \
+  base/base_file_util_linux.o \
+  base/base_file_util_posix.o \
+  base/base_guid.o \
+  base/base_guid_posix.o \
+  base/base_hash.o \
+  base/base_lazy_instance.o \
+  base/base_location.o \
+  base/base_md5.o \
+  base/memory/base_aligned_memory.o \
+  base/memory/base_ref_counted.o \
+  base/memory/base_ref_counted_memory.o \
+  base/memory/base_shared_memory_posix.o \
+  base/memory/base_singleton.o \
+  base/memory/base_weak_ptr.o \
+  base/nix/base_mime_util_xdg.o \
+  base/nix/base_xdg_util.o \
+  base/base_path_service.o \
+  base/posix/base_file_descriptor_shuffle.o \
+  base/posix/base_global_descriptors.o \
+  base/process/base_internal_linux.o \
+  base/process/base_kill.o \
+  base/process/base_kill_posix.o \
+  base/process/base_launch.o \
+  base/process/base_launch_posix.o \
+  base/process/base_memory.o \
+  base/process/base_memory_linux.o \
+  base/process/base_process_handle_linux.o \
+  base/process/base_process_handle_posix.o \
+  base/process/base_process_info_linux.o \
+  base/process/base_process_iterator.o \
+  base/process/base_process_iterator_linux.o \
+  base/process/base_process_linux.o \
+  base/process/base_process_metrics.o \
+  base/process/base_process_metrics_linux.o \
+  base/process/base_process_metrics_posix.o \
+  base/process/base_process_posix.o \
+  base/base_rand_util.o \
+  base/base_rand_util_posix.o \
+  base/base_fast_rand.o \
+  base/base_safe_strerror_posix.o \
+  base/base_sha1_portable.o \
+  base/strings/base_latin1_string_conversions.o \
+  base/strings/base_nullable_string16.o \
+  base/strings/base_safe_sprintf.o \
+  base/strings/base_string16.o \
+  base/strings/base_string_number_conversions.o \
+  base/strings/base_string_split.o \
+  base/strings/base_string_piece.o \
+  base/strings/base_string_util.o \
+  base/strings/base_string_util_constants.o \
+  base/strings/base_stringprintf.o \
+  base/strings/base_sys_string_conversions_posix.o \
+  base/strings/base_utf_offset_string_conversions.o \
+  base/strings/base_utf_string_conversion_utils.o \
+  base/strings/base_utf_string_conversions.o \
+  base/synchronization/base_cancellation_flag.o \
+  base/synchronization/base_condition_variable_posix.o \
+  base/synchronization/base_waitable_event_posix.o \
+  base/base_sys_info.o \
+  base/base_sys_info_linux.o \
+  base/base_sys_info_posix.o \
+  base/threading/base_non_thread_safe_impl.o \
+  base/threading/base_platform_thread_linux.o \
+  base/threading/base_platform_thread_posix.o \
+  base/threading/base_simple_thread.o \
+  base/threading/base_thread_checker_impl.o \
+  base/threading/base_thread_collision_warner.o \
+  base/threading/base_thread_id_name_manager.o \
+  base/threading/base_thread_local_posix.o \
+  base/threading/base_thread_local_storage.o \
+  base/threading/base_thread_local_storage_posix.o \
+  base/threading/base_thread_restrictions.o \
+  base/threading/base_watchdog.o \
+  base/time/base_clock.o \
+  base/time/base_default_clock.o \
+  base/time/base_default_tick_clock.o \
+  base/time/base_tick_clock.o \
+  base/time/base_time.o \
+  base/time/base_time_posix.o \
+  base/base_version.o \
+  base/base_logging.o \
+  base/base_class_name.o \
+  base/base_errno.o \
+  base/base_find_cstr.o \
+  base/base_status.o \
+  base/base_string_printf.o \
+  base/base_thread_local.o \
+  base/base_unix_socket.o \
+  base/base_endpoint.o \
+  base/base_fd_utility.o \
+  base/files/base_temp_file.o \
+  base/files/base_file_watcher.o \
+  base/base_time.o \
+  base/base_zero_copy_stream_as_streambuf.o \
+  base/base_crc32c.o \
+  base/containers/base_case_ignored_flat_map.o \
+  base/base_iobuf.o
+	@echo "[COMAKE:BUILD][Target:'libbase.a']"
+	ar crs libbase.a base/third_party/dmg_fp/base_g_fmt.o \
+  base/third_party/dmg_fp/base_dtoa_wrapper.o \
+  base/third_party/dmg_fp/base_dtoa.o \
+  base/third_party/dynamic_annotations/base_dynamic_annotations.o \
+  base/third_party/icu/base_icu_utf.o \
+  base/third_party/superfasthash/base_superfasthash.o \
+  base/third_party/modp_b64/base_modp_b64.o \
+  base/third_party/nspr/base_prtime.o \
+  base/third_party/symbolize/base_demangle.o \
+  base/third_party/symbolize/base_symbolize.o \
+  base/third_party/xdg_mime/base_xdgmime.o \
+  base/third_party/xdg_mime/base_xdgmimealias.o \
+  base/third_party/xdg_mime/base_xdgmimecache.o \
+  base/third_party/xdg_mime/base_xdgmimeglob.o \
+  base/third_party/xdg_mime/base_xdgmimeicon.o \
+  base/third_party/xdg_mime/base_xdgmimeint.o \
+  base/third_party/xdg_mime/base_xdgmimemagic.o \
+  base/third_party/xdg_mime/base_xdgmimeparent.o \
+  base/third_party/xdg_user_dirs/base_xdg_user_dir_lookup.o \
+  base/third_party/snappy/base_snappy-sinksource.o \
+  base/third_party/snappy/base_snappy-stubs-internal.o \
+  base/third_party/snappy/base_snappy.o \
+  base/third_party/murmurhash3/base_murmurhash3.o \
+  base/allocator/base_type_profiler_control.o \
+  base/base_arena.o \
+  base/base_at_exit.o \
+  base/base_atomicops_internals_x86_gcc.o \
+  base/base_barrier_closure.o \
+  base/base_base_paths.o \
+  base/base_base_paths_posix.o \
+  base/base_base64.o \
+  base/base_base_switches.o \
+  base/base_big_endian.o \
+  base/base_bind_helpers.o \
+  base/base_build_time.o \
+  base/base_callback_helpers.o \
+  base/base_callback_internal.o \
+  base/base_command_line.o \
+  base/base_cpu.o \
+  base/debug/base_alias.o \
+  base/debug/base_asan_invalid_access.o \
+  base/debug/base_crash_logging.o \
+  base/debug/base_debugger.o \
+  base/debug/base_debugger_posix.o \
+  base/debug/base_dump_without_crashing.o \
+  base/debug/base_proc_maps_linux.o \
+  base/debug/base_stack_trace.o \
+  base/debug/base_stack_trace_posix.o \
+  base/base_environment.o \
+  base/files/base_file.o \
+  base/files/base_file_posix.o \
+  base/files/base_file_enumerator.o \
+  base/files/base_file_enumerator_posix.o \
+  base/files/base_file_path.o \
+  base/files/base_file_path_constants.o \
+  base/files/base_memory_mapped_file.o \
+  base/files/base_memory_mapped_file_posix.o \
+  base/files/base_scoped_file.o \
+  base/files/base_scoped_temp_dir.o \
+  base/base_file_util.o \
+  base/base_file_util_linux.o \
+  base/base_file_util_posix.o \
+  base/base_guid.o \
+  base/base_guid_posix.o \
+  base/base_hash.o \
+  base/base_lazy_instance.o \
+  base/base_location.o \
+  base/base_md5.o \
+  base/memory/base_aligned_memory.o \
+  base/memory/base_ref_counted.o \
+  base/memory/base_ref_counted_memory.o \
+  base/memory/base_shared_memory_posix.o \
+  base/memory/base_singleton.o \
+  base/memory/base_weak_ptr.o \
+  base/nix/base_mime_util_xdg.o \
+  base/nix/base_xdg_util.o \
+  base/base_path_service.o \
+  base/posix/base_file_descriptor_shuffle.o \
+  base/posix/base_global_descriptors.o \
+  base/process/base_internal_linux.o \
+  base/process/base_kill.o \
+  base/process/base_kill_posix.o \
+  base/process/base_launch.o \
+  base/process/base_launch_posix.o \
+  base/process/base_memory.o \
+  base/process/base_memory_linux.o \
+  base/process/base_process_handle_linux.o \
+  base/process/base_process_handle_posix.o \
+  base/process/base_process_info_linux.o \
+  base/process/base_process_iterator.o \
+  base/process/base_process_iterator_linux.o \
+  base/process/base_process_linux.o \
+  base/process/base_process_metrics.o \
+  base/process/base_process_metrics_linux.o \
+  base/process/base_process_metrics_posix.o \
+  base/process/base_process_posix.o \
+  base/base_rand_util.o \
+  base/base_rand_util_posix.o \
+  base/base_fast_rand.o \
+  base/base_safe_strerror_posix.o \
+  base/base_sha1_portable.o \
+  base/strings/base_latin1_string_conversions.o \
+  base/strings/base_nullable_string16.o \
+  base/strings/base_safe_sprintf.o \
+  base/strings/base_string16.o \
+  base/strings/base_string_number_conversions.o \
+  base/strings/base_string_split.o \
+  base/strings/base_string_piece.o \
+  base/strings/base_string_util.o \
+  base/strings/base_string_util_constants.o \
+  base/strings/base_stringprintf.o \
+  base/strings/base_sys_string_conversions_posix.o \
+  base/strings/base_utf_offset_string_conversions.o \
+  base/strings/base_utf_string_conversion_utils.o \
+  base/strings/base_utf_string_conversions.o \
+  base/synchronization/base_cancellation_flag.o \
+  base/synchronization/base_condition_variable_posix.o \
+  base/synchronization/base_waitable_event_posix.o \
+  base/base_sys_info.o \
+  base/base_sys_info_linux.o \
+  base/base_sys_info_posix.o \
+  base/threading/base_non_thread_safe_impl.o \
+  base/threading/base_platform_thread_linux.o \
+  base/threading/base_platform_thread_posix.o \
+  base/threading/base_simple_thread.o \
+  base/threading/base_thread_checker_impl.o \
+  base/threading/base_thread_collision_warner.o \
+  base/threading/base_thread_id_name_manager.o \
+  base/threading/base_thread_local_posix.o \
+  base/threading/base_thread_local_storage.o \
+  base/threading/base_thread_local_storage_posix.o \
+  base/threading/base_thread_restrictions.o \
+  base/threading/base_watchdog.o \
+  base/time/base_clock.o \
+  base/time/base_default_clock.o \
+  base/time/base_default_tick_clock.o \
+  base/time/base_tick_clock.o \
+  base/time/base_time.o \
+  base/time/base_time_posix.o \
+  base/base_version.o \
+  base/base_logging.o \
+  base/base_class_name.o \
+  base/base_errno.o \
+  base/base_find_cstr.o \
+  base/base_status.o \
+  base/base_string_printf.o \
+  base/base_thread_local.o \
+  base/base_unix_socket.o \
+  base/base_endpoint.o \
+  base/base_fd_utility.o \
+  base/files/base_temp_file.o \
+  base/files/base_file_watcher.o \
+  base/base_time.o \
+  base/base_zero_copy_stream_as_streambuf.o \
+  base/base_crc32c.o \
+  base/containers/base_case_ignored_flat_map.o \
+  base/base_iobuf.o
+	mkdir -p ./output/lib
+	cp -f libbase.a ./output/lib
+
+libbvar.a:bvar/bvar_collector.o \
+  bvar/bvar_default_variables.o \
+  bvar/bvar_gflag.o \
+  bvar/bvar_latency_recorder.o \
+  bvar/bvar_variable.o \
+  bvar/detail/bvar_percentile.o \
+  bvar/detail/bvar_sampler.o
+	@echo "[COMAKE:BUILD][Target:'libbvar.a']"
+	ar crs libbvar.a bvar/bvar_collector.o \
+  bvar/bvar_default_variables.o \
+  bvar/bvar_gflag.o \
+  bvar/bvar_latency_recorder.o \
+  bvar/bvar_variable.o \
+  bvar/detail/bvar_percentile.o \
+  bvar/detail/bvar_sampler.o
+	mkdir -p ./output/lib
+	cp -f libbvar.a ./output/lib
+
+libbthread.a:bthread/bthread_bthread.o \
+  bthread/bthread_butex.o \
+  bthread/bthread_cond.o \
+  bthread/bthread_context.o \
+  bthread/bthread_countdown_event.o \
+  bthread/bthread_errno.o \
+  bthread/bthread_execution_queue.o \
+  bthread/bthread_fd.o \
+  bthread/bthread_id.o \
+  bthread/bthread_interrupt_pthread.o \
+  bthread/bthread_key.o \
+  bthread/bthread_mutex.o \
+  bthread/bthread_stack.o \
+  bthread/bthread_sys_futex.o \
+  bthread/bthread_task_control.o \
+  bthread/bthread_task_group.o \
+  bthread/bthread_timer_thread.o
+	@echo "[COMAKE:BUILD][Target:'libbthread.a']"
+	ar crs libbthread.a bthread/bthread_bthread.o \
+  bthread/bthread_butex.o \
+  bthread/bthread_cond.o \
+  bthread/bthread_context.o \
+  bthread/bthread_countdown_event.o \
+  bthread/bthread_errno.o \
+  bthread/bthread_execution_queue.o \
+  bthread/bthread_fd.o \
+  bthread/bthread_id.o \
+  bthread/bthread_interrupt_pthread.o \
+  bthread/bthread_key.o \
+  bthread/bthread_mutex.o \
+  bthread/bthread_stack.o \
+  bthread/bthread_sys_futex.o \
+  bthread/bthread_task_control.o \
+  bthread/bthread_task_group.o \
+  bthread/bthread_timer_thread.o
+	mkdir -p ./output/lib
+	cp -f libbthread.a ./output/lib
+
+libjson2pb.a:json2pb/json2pb_encode_decode.o \
+  json2pb/json2pb_json_to_pb.o \
+  json2pb/json2pb_pb_to_json.o \
+  json2pb/json2pb_protobuf_map.o \
+  json2pb/json2pb_string_printf.o
+	@echo "[COMAKE:BUILD][Target:'libjson2pb.a']"
+	ar crs libjson2pb.a json2pb/json2pb_encode_decode.o \
+  json2pb/json2pb_json_to_pb.o \
+  json2pb/json2pb_pb_to_json.o \
+  json2pb/json2pb_protobuf_map.o \
+  json2pb/json2pb_string_printf.o
+	mkdir -p ./output/lib
+	cp -f libjson2pb.a ./output/lib
+
+libmcpack2pb.a:mcpack2pb/mcpack2pb_field_type.o \
+  mcpack2pb/mcpack2pb_mcpack2pb.o \
+  mcpack2pb/mcpack2pb_parser.o \
+  mcpack2pb/mcpack2pb_serializer.o \
+  mcpack2pb_idl_options.pb.o
+	@echo "[COMAKE:BUILD][Target:'libmcpack2pb.a']"
+	ar crs libmcpack2pb.a mcpack2pb/mcpack2pb_field_type.o \
+  mcpack2pb/mcpack2pb_mcpack2pb.o \
+  mcpack2pb/mcpack2pb_parser.o \
+  mcpack2pb/mcpack2pb_serializer.o \
+  mcpack2pb_idl_options.pb.o
+	mkdir -p ./output/lib
+	cp -f libmcpack2pb.a ./output/lib
+
+protoc-gen-mcpack:mcpack2pb/protoc-gen-mcpack_generator.o \
+  libmcpack2pb.a \
+  libbase.a \
+  libbthread.a \
+  libbvar.a
+	@echo "[COMAKE:BUILD][Target:'protoc-gen-mcpack']"
+	$(CXX) mcpack2pb/protoc-gen-mcpack_generator.o -Xlinker "-(" libmcpack2pb.a \
+  libbase.a \
+  libbthread.a \
+  libbvar.a ../../../third-64/gflags/lib/libgflags.a \
+  ../../../third-64/gflags/lib/libgflags_nothreads.a \
+  ../../../third-64/leveldb/lib/libleveldb.a \
+  ../../../third-64/protobuf/lib/libprotobuf-lite.a \
+  ../../../third-64/protobuf/lib/libprotobuf.a \
+  ../../../third-64/protobuf/lib/libprotoc.a -lpthread \
+  -lrt \
+  -lssl \
+  -lcrypto \
+  -ldl \
+  -lz -Xlinker "-)" -o protoc-gen-mcpack
+	mkdir -p ./output/bin
+	cp -f protoc-gen-mcpack ./output/bin
+
+libbrpc.a:brpc/brpc_acceptor.o \
+  brpc/brpc_adaptive_connection_type.o \
+  brpc/brpc_amf.o \
+  brpc/brpc_bad_method_service.o \
+  brpc/brpc_channel.o \
+  brpc/brpc_compress.o \
+  brpc/brpc_controller.o \
+  brpc/brpc_esp_message.o \
+  brpc/brpc_event_dispatcher.o \
+  brpc/brpc_global.o \
+  brpc/brpc_http_header.o \
+  brpc/brpc_http_method.o \
+  brpc/brpc_http_status_code.o \
+  brpc/brpc_input_messenger.o \
+  brpc/brpc_load_balancer.o \
+  brpc/brpc_load_balancer_with_naming.o \
+  brpc/brpc_memcache.o \
+  brpc/brpc_naming_service_thread.o \
+  brpc/brpc_nshead_message.o \
+  brpc/brpc_nshead_pb_service_adaptor.o \
+  brpc/brpc_nshead_service.o \
+  brpc/brpc_parallel_channel.o \
+  brpc/brpc_partition_channel.o \
+  brpc/brpc_periodic_naming_service.o \
+  brpc/brpc_progressive_attachment.o \
+  brpc/brpc_protocol.o \
+  brpc/brpc_redis.o \
+  brpc/brpc_redis_command.o \
+  brpc/brpc_redis_reply.o \
+  brpc/brpc_reloadable_flags.o \
+  brpc/brpc_restful.o \
+  brpc/brpc_retry_policy.o \
+  brpc/brpc_rpc_dump.o \
+  brpc/brpc_rtmp.o \
+  brpc/brpc_selective_channel.o \
+  brpc/brpc_serialized_request.o \
+  brpc/brpc_server.o \
+  brpc/brpc_server_id.o \
+  brpc/brpc_socket.o \
+  brpc/brpc_socket_map.o \
+  brpc/brpc_span.o \
+  brpc/brpc_stream.o \
+  brpc/brpc_tcmalloc_extension.o \
+  brpc/brpc_trackme.o \
+  brpc/brpc_ts.o \
+  brpc/brpc_uri.o \
+  brpc/policy/brpc_baidu_rpc_protocol.o \
+  brpc/policy/brpc_consistent_hashing_load_balancer.o \
+  brpc/policy/brpc_dh.o \
+  brpc/policy/brpc_domain_naming_service.o \
+  brpc/policy/brpc_dynpart_load_balancer.o \
+  brpc/policy/brpc_esp_authenticator.o \
+  brpc/policy/brpc_esp_protocol.o \
+  brpc/policy/brpc_file_naming_service.o \
+  brpc/policy/brpc_gzip_compress.o \
+  brpc/policy/brpc_hasher.o \
+  brpc/policy/brpc_http_rpc_protocol.o \
+  brpc/policy/brpc_hulu_pbrpc_protocol.o \
+  brpc/policy/brpc_list_naming_service.o \
+  brpc/policy/brpc_locality_aware_load_balancer.o \
+  brpc/policy/brpc_memcache_binary_protocol.o \
+  brpc/policy/brpc_mongo_protocol.o \
+  brpc/policy/brpc_nova_pbrpc_protocol.o \
+  brpc/policy/brpc_nshead_mcpack_protocol.o \
+  brpc/policy/brpc_nshead_protocol.o \
+  brpc/policy/brpc_public_pbrpc_protocol.o \
+  brpc/policy/brpc_randomized_load_balancer.o \
+  brpc/policy/brpc_redis_protocol.o \
+  brpc/policy/brpc_remote_file_naming_service.o \
+  brpc/policy/brpc_round_robin_load_balancer.o \
+  brpc/policy/brpc_rtmp_protocol.o \
+  brpc/policy/brpc_snappy_compress.o \
+  brpc/policy/brpc_sofa_pbrpc_protocol.o \
+  brpc/policy/brpc_streaming_rpc_protocol.o \
+  brpc/policy/brpc_ubrpc2pb_protocol.o \
+  brpc/builtin/brpc_bthreads_service.o \
+  brpc/builtin/brpc_common.o \
+  brpc/builtin/brpc_connections_service.o \
+  brpc/builtin/brpc_dir_service.o \
+  brpc/builtin/brpc_flags_service.o \
+  brpc/builtin/brpc_flot_min_js.o \
+  brpc/builtin/brpc_get_favicon_service.o \
+  brpc/builtin/brpc_get_js_service.o \
+  brpc/builtin/brpc_health_service.o \
+  brpc/builtin/brpc_hotspots_service.o \
+  brpc/builtin/brpc_ids_service.o \
+  brpc/builtin/brpc_index_service.o \
+  brpc/builtin/brpc_jquery_min_js.o \
+  brpc/builtin/brpc_list_service.o \
+  brpc/builtin/brpc_pprof_perl.o \
+  brpc/builtin/brpc_pprof_service.o \
+  brpc/builtin/brpc_protobufs_service.o \
+  brpc/builtin/brpc_rpcz_service.o \
+  brpc/builtin/brpc_sockets_service.o \
+  brpc/builtin/brpc_sorttable_js.o \
+  brpc/builtin/brpc_status_service.o \
+  brpc/builtin/brpc_threads_service.o \
+  brpc/builtin/brpc_vars_service.o \
+  brpc/builtin/brpc_version_service.o \
+  brpc/builtin/brpc_viz_min_js.o \
+  brpc/builtin/brpc_vlog_service.o \
+  brpc/details/brpc_has_epollrdhup.o \
+  brpc/details/brpc_hpack.o \
+  brpc/details/brpc_http_message.o \
+  brpc/details/brpc_http_message_serializer.o \
+  brpc/details/brpc_http_parser.o \
+  brpc/details/brpc_method_status.o \
+  brpc/details/brpc_rtmp_utils.o \
+  brpc/details/brpc_ssl_helper.o \
+  brpc/details/brpc_usercode_backup_pool.o \
+  brpc/brpc_builtin_service.pb.o \
+  brpc/brpc_errno.pb.o \
+  brpc/brpc_get_favicon.pb.o \
+  brpc/brpc_get_js.pb.o \
+  brpc/brpc_nshead_meta.pb.o \
+  brpc/brpc_options.pb.o \
+  brpc/brpc_rpc_dump.pb.o \
+  brpc/brpc_rtmp.pb.o \
+  brpc/brpc_span.pb.o \
+  brpc/brpc_streaming_rpc_meta.pb.o \
+  brpc/brpc_trackme.pb.o \
+  brpc/policy/brpc_baidu_rpc_meta.pb.o \
+  brpc/policy/brpc_hulu_pbrpc_meta.pb.o \
+  brpc/policy/brpc_mongo.pb.o \
+  brpc/policy/brpc_public_pbrpc_meta.pb.o \
+  brpc/policy/brpc_sofa_pbrpc_meta.pb.o
+	@echo "[COMAKE:BUILD][Target:'libbrpc.a']"
+	ar crs libbrpc.a brpc/brpc_acceptor.o \
+  brpc/brpc_adaptive_connection_type.o \
+  brpc/brpc_amf.o \
+  brpc/brpc_bad_method_service.o \
+  brpc/brpc_channel.o \
+  brpc/brpc_compress.o \
+  brpc/brpc_controller.o \
+  brpc/brpc_esp_message.o \
+  brpc/brpc_event_dispatcher.o \
+  brpc/brpc_global.o \
+  brpc/brpc_http_header.o \
+  brpc/brpc_http_method.o \
+  brpc/brpc_http_status_code.o \
+  brpc/brpc_input_messenger.o \
+  brpc/brpc_load_balancer.o \
+  brpc/brpc_load_balancer_with_naming.o \
+  brpc/brpc_memcache.o \
+  brpc/brpc_naming_service_thread.o \
+  brpc/brpc_nshead_message.o \
+  brpc/brpc_nshead_pb_service_adaptor.o \
+  brpc/brpc_nshead_service.o \
+  brpc/brpc_parallel_channel.o \
+  brpc/brpc_partition_channel.o \
+  brpc/brpc_periodic_naming_service.o \
+  brpc/brpc_progressive_attachment.o \
+  brpc/brpc_protocol.o \
+  brpc/brpc_redis.o \
+  brpc/brpc_redis_command.o \
+  brpc/brpc_redis_reply.o \
+  brpc/brpc_reloadable_flags.o \
+  brpc/brpc_restful.o \
+  brpc/brpc_retry_policy.o \
+  brpc/brpc_rpc_dump.o \
+  brpc/brpc_rtmp.o \
+  brpc/brpc_selective_channel.o \
+  brpc/brpc_serialized_request.o \
+  brpc/brpc_server.o \
+  brpc/brpc_server_id.o \
+  brpc/brpc_socket.o \
+  brpc/brpc_socket_map.o \
+  brpc/brpc_span.o \
+  brpc/brpc_stream.o \
+  brpc/brpc_tcmalloc_extension.o \
+  brpc/brpc_trackme.o \
+  brpc/brpc_ts.o \
+  brpc/brpc_uri.o \
+  brpc/policy/brpc_baidu_rpc_protocol.o \
+  brpc/policy/brpc_consistent_hashing_load_balancer.o \
+  brpc/policy/brpc_dh.o \
+  brpc/policy/brpc_domain_naming_service.o \
+  brpc/policy/brpc_dynpart_load_balancer.o \
+  brpc/policy/brpc_esp_authenticator.o \
+  brpc/policy/brpc_esp_protocol.o \
+  brpc/policy/brpc_file_naming_service.o \
+  brpc/policy/brpc_gzip_compress.o \
+  brpc/policy/brpc_hasher.o \
+  brpc/policy/brpc_http_rpc_protocol.o \
+  brpc/policy/brpc_hulu_pbrpc_protocol.o \
+  brpc/policy/brpc_list_naming_service.o \
+  brpc/policy/brpc_locality_aware_load_balancer.o \
+  brpc/policy/brpc_memcache_binary_protocol.o \
+  brpc/policy/brpc_mongo_protocol.o \
+  brpc/policy/brpc_nova_pbrpc_protocol.o \
+  brpc/policy/brpc_nshead_mcpack_protocol.o \
+  brpc/policy/brpc_nshead_protocol.o \
+  brpc/policy/brpc_public_pbrpc_protocol.o \
+  brpc/policy/brpc_randomized_load_balancer.o \
+  brpc/policy/brpc_redis_protocol.o \
+  brpc/policy/brpc_remote_file_naming_service.o \
+  brpc/policy/brpc_round_robin_load_balancer.o \
+  brpc/policy/brpc_rtmp_protocol.o \
+  brpc/policy/brpc_snappy_compress.o \
+  brpc/policy/brpc_sofa_pbrpc_protocol.o \
+  brpc/policy/brpc_streaming_rpc_protocol.o \
+  brpc/policy/brpc_ubrpc2pb_protocol.o \
+  brpc/builtin/brpc_bthreads_service.o \
+  brpc/builtin/brpc_common.o \
+  brpc/builtin/brpc_connections_service.o \
+  brpc/builtin/brpc_dir_service.o \
+  brpc/builtin/brpc_flags_service.o \
+  brpc/builtin/brpc_flot_min_js.o \
+  brpc/builtin/brpc_get_favicon_service.o \
+  brpc/builtin/brpc_get_js_service.o \
+  brpc/builtin/brpc_health_service.o \
+  brpc/builtin/brpc_hotspots_service.o \
+  brpc/builtin/brpc_ids_service.o \
+  brpc/builtin/brpc_index_service.o \
+  brpc/builtin/brpc_jquery_min_js.o \
+  brpc/builtin/brpc_list_service.o \
+  brpc/builtin/brpc_pprof_perl.o \
+  brpc/builtin/brpc_pprof_service.o \
+  brpc/builtin/brpc_protobufs_service.o \
+  brpc/builtin/brpc_rpcz_service.o \
+  brpc/builtin/brpc_sockets_service.o \
+  brpc/builtin/brpc_sorttable_js.o \
+  brpc/builtin/brpc_status_service.o \
+  brpc/builtin/brpc_threads_service.o \
+  brpc/builtin/brpc_vars_service.o \
+  brpc/builtin/brpc_version_service.o \
+  brpc/builtin/brpc_viz_min_js.o \
+  brpc/builtin/brpc_vlog_service.o \
+  brpc/details/brpc_has_epollrdhup.o \
+  brpc/details/brpc_hpack.o \
+  brpc/details/brpc_http_message.o \
+  brpc/details/brpc_http_message_serializer.o \
+  brpc/details/brpc_http_parser.o \
+  brpc/details/brpc_method_status.o \
+  brpc/details/brpc_rtmp_utils.o \
+  brpc/details/brpc_ssl_helper.o \
+  brpc/details/brpc_usercode_backup_pool.o \
+  brpc/brpc_builtin_service.pb.o \
+  brpc/brpc_errno.pb.o \
+  brpc/brpc_get_favicon.pb.o \
+  brpc/brpc_get_js.pb.o \
+  brpc/brpc_nshead_meta.pb.o \
+  brpc/brpc_options.pb.o \
+  brpc/brpc_rpc_dump.pb.o \
+  brpc/brpc_rtmp.pb.o \
+  brpc/brpc_span.pb.o \
+  brpc/brpc_streaming_rpc_meta.pb.o \
+  brpc/brpc_trackme.pb.o \
+  brpc/policy/brpc_baidu_rpc_meta.pb.o \
+  brpc/policy/brpc_hulu_pbrpc_meta.pb.o \
+  brpc/policy/brpc_mongo.pb.o \
+  brpc/policy/brpc_public_pbrpc_meta.pb.o \
+  brpc/policy/brpc_sofa_pbrpc_meta.pb.o
+	mkdir -p ./output/lib
+	cp -f libbrpc.a ./output/lib
+
+.PHONY:output/include
+output/include:libbase.a \
+  libbvar.a \
+  libbthread.a \
+  libjson2pb.a \
+  libmcpack2pb.a \
+  libbrpc.a
+	@echo "[COMAKE:BUILD][Target:'output/include']"
+	for dir in `find base bvar bthread brpc -type f -name "*.h" -exec dirname {} \; | sort | uniq`; do mkdir -p output/include/$$dir && cp $$dir/*.h output/include/$$dir/; done; for dir in `find base bvar bthread brpc -type f -name "*.hpp" -exec dirname {} \; | sort | uniq`; do mkdir -p output/include/$$dir && cp $$dir/*.hpp output/include/$$dir/; done
+
+base/third_party/dmg_fp/base_g_fmt.o:base/third_party/dmg_fp/g_fmt.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/dmg_fp/base_g_fmt.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/dmg_fp/base_g_fmt.o base/third_party/dmg_fp/g_fmt.cc
+
+base/third_party/dmg_fp/base_dtoa_wrapper.o:base/third_party/dmg_fp/dtoa_wrapper.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/dmg_fp/base_dtoa_wrapper.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/dmg_fp/base_dtoa_wrapper.o base/third_party/dmg_fp/dtoa_wrapper.cc
+
+base/third_party/dmg_fp/base_dtoa.o:base/third_party/dmg_fp/dtoa.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/dmg_fp/base_dtoa.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/dmg_fp/base_dtoa.o base/third_party/dmg_fp/dtoa.cc
+
+base/third_party/dynamic_annotations/base_dynamic_annotations.o:base/third_party/dynamic_annotations/dynamic_annotations.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/dynamic_annotations/base_dynamic_annotations.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/dynamic_annotations/base_dynamic_annotations.o base/third_party/dynamic_annotations/dynamic_annotations.c
+
+base/third_party/icu/base_icu_utf.o:base/third_party/icu/icu_utf.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/icu/base_icu_utf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/icu/base_icu_utf.o base/third_party/icu/icu_utf.cc
+
+base/third_party/superfasthash/base_superfasthash.o:base/third_party/superfasthash/superfasthash.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/superfasthash/base_superfasthash.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/superfasthash/base_superfasthash.o base/third_party/superfasthash/superfasthash.c
+
+base/third_party/modp_b64/base_modp_b64.o:base/third_party/modp_b64/modp_b64.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/modp_b64/base_modp_b64.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/modp_b64/base_modp_b64.o base/third_party/modp_b64/modp_b64.cc
+
+base/third_party/nspr/base_prtime.o:base/third_party/nspr/prtime.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/nspr/base_prtime.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/nspr/base_prtime.o base/third_party/nspr/prtime.cc
+
+base/third_party/symbolize/base_demangle.o:base/third_party/symbolize/demangle.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/symbolize/base_demangle.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/symbolize/base_demangle.o base/third_party/symbolize/demangle.cc
+
+base/third_party/symbolize/base_symbolize.o:base/third_party/symbolize/symbolize.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/symbolize/base_symbolize.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/symbolize/base_symbolize.o base/third_party/symbolize/symbolize.cc
+
+base/third_party/xdg_mime/base_xdgmime.o:base/third_party/xdg_mime/xdgmime.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmime.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmime.o base/third_party/xdg_mime/xdgmime.c
+
+base/third_party/xdg_mime/base_xdgmimealias.o:base/third_party/xdg_mime/xdgmimealias.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimealias.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimealias.o base/third_party/xdg_mime/xdgmimealias.c
+
+base/third_party/xdg_mime/base_xdgmimecache.o:base/third_party/xdg_mime/xdgmimecache.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimecache.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimecache.o base/third_party/xdg_mime/xdgmimecache.c
+
+base/third_party/xdg_mime/base_xdgmimeglob.o:base/third_party/xdg_mime/xdgmimeglob.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimeglob.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimeglob.o base/third_party/xdg_mime/xdgmimeglob.c
+
+base/third_party/xdg_mime/base_xdgmimeicon.o:base/third_party/xdg_mime/xdgmimeicon.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimeicon.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimeicon.o base/third_party/xdg_mime/xdgmimeicon.c
+
+base/third_party/xdg_mime/base_xdgmimeint.o:base/third_party/xdg_mime/xdgmimeint.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimeint.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimeint.o base/third_party/xdg_mime/xdgmimeint.c
+
+base/third_party/xdg_mime/base_xdgmimemagic.o:base/third_party/xdg_mime/xdgmimemagic.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimemagic.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimemagic.o base/third_party/xdg_mime/xdgmimemagic.c
+
+base/third_party/xdg_mime/base_xdgmimeparent.o:base/third_party/xdg_mime/xdgmimeparent.c
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_mime/base_xdgmimeparent.o']"
+	$(CC) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CFLAGS)  -o base/third_party/xdg_mime/base_xdgmimeparent.o base/third_party/xdg_mime/xdgmimeparent.c
+
+base/third_party/xdg_user_dirs/base_xdg_user_dir_lookup.o:base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/xdg_user_dirs/base_xdg_user_dir_lookup.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/xdg_user_dirs/base_xdg_user_dir_lookup.o base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc
+
+base/third_party/snappy/base_snappy-sinksource.o:base/third_party/snappy/snappy-sinksource.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/snappy/base_snappy-sinksource.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/snappy/base_snappy-sinksource.o base/third_party/snappy/snappy-sinksource.cc
+
+base/third_party/snappy/base_snappy-stubs-internal.o:base/third_party/snappy/snappy-stubs-internal.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/snappy/base_snappy-stubs-internal.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/snappy/base_snappy-stubs-internal.o base/third_party/snappy/snappy-stubs-internal.cc
+
+base/third_party/snappy/base_snappy.o:base/third_party/snappy/snappy.cc
+	@echo "[COMAKE:BUILD][Target:'base/third_party/snappy/base_snappy.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/snappy/base_snappy.o base/third_party/snappy/snappy.cc
+
+base/third_party/murmurhash3/base_murmurhash3.o:base/third_party/murmurhash3/murmurhash3.cpp
+	@echo "[COMAKE:BUILD][Target:'base/third_party/murmurhash3/base_murmurhash3.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/third_party/murmurhash3/base_murmurhash3.o base/third_party/murmurhash3/murmurhash3.cpp
+
+base/allocator/base_type_profiler_control.o:base/allocator/type_profiler_control.cc
+	@echo "[COMAKE:BUILD][Target:'base/allocator/base_type_profiler_control.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/allocator/base_type_profiler_control.o base/allocator/type_profiler_control.cc
+
+base/base_arena.o:base/arena.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_arena.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_arena.o base/arena.cpp
+
+base/base_at_exit.o:base/at_exit.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_at_exit.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_at_exit.o base/at_exit.cc
+
+base/base_atomicops_internals_x86_gcc.o:base/atomicops_internals_x86_gcc.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_atomicops_internals_x86_gcc.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_atomicops_internals_x86_gcc.o base/atomicops_internals_x86_gcc.cc
+
+base/base_barrier_closure.o:base/barrier_closure.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_barrier_closure.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_barrier_closure.o base/barrier_closure.cc
+
+base/base_base_paths.o:base/base_paths.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_base_paths.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_base_paths.o base/base_paths.cc
+
+base/base_base_paths_posix.o:base/base_paths_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_base_paths_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_base_paths_posix.o base/base_paths_posix.cc
+
+base/base_base64.o:base/base64.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_base64.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_base64.o base/base64.cc
+
+base/base_base_switches.o:base/base_switches.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_base_switches.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_base_switches.o base/base_switches.cc
+
+base/base_big_endian.o:base/big_endian.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_big_endian.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_big_endian.o base/big_endian.cc
+
+base/base_bind_helpers.o:base/bind_helpers.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_bind_helpers.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_bind_helpers.o base/bind_helpers.cc
+
+base/base_build_time.o:base/build_time.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_build_time.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_build_time.o base/build_time.cc
+
+base/base_callback_helpers.o:base/callback_helpers.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_callback_helpers.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_callback_helpers.o base/callback_helpers.cc
+
+base/base_callback_internal.o:base/callback_internal.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_callback_internal.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_callback_internal.o base/callback_internal.cc
+
+base/base_command_line.o:base/command_line.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_command_line.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_command_line.o base/command_line.cc
+
+base/base_cpu.o:base/cpu.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_cpu.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_cpu.o base/cpu.cc
+
+base/debug/base_alias.o:base/debug/alias.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_alias.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_alias.o base/debug/alias.cc
+
+base/debug/base_asan_invalid_access.o:base/debug/asan_invalid_access.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_asan_invalid_access.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_asan_invalid_access.o base/debug/asan_invalid_access.cc
+
+base/debug/base_crash_logging.o:base/debug/crash_logging.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_crash_logging.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_crash_logging.o base/debug/crash_logging.cc
+
+base/debug/base_debugger.o:base/debug/debugger.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_debugger.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_debugger.o base/debug/debugger.cc
+
+base/debug/base_debugger_posix.o:base/debug/debugger_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_debugger_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_debugger_posix.o base/debug/debugger_posix.cc
+
+base/debug/base_dump_without_crashing.o:base/debug/dump_without_crashing.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_dump_without_crashing.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_dump_without_crashing.o base/debug/dump_without_crashing.cc
+
+base/debug/base_proc_maps_linux.o:base/debug/proc_maps_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_proc_maps_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_proc_maps_linux.o base/debug/proc_maps_linux.cc
+
+base/debug/base_stack_trace.o:base/debug/stack_trace.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_stack_trace.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_stack_trace.o base/debug/stack_trace.cc
+
+base/debug/base_stack_trace_posix.o:base/debug/stack_trace_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/debug/base_stack_trace_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/debug/base_stack_trace_posix.o base/debug/stack_trace_posix.cc
+
+base/base_environment.o:base/environment.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_environment.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_environment.o base/environment.cc
+
+base/files/base_file.o:base/files/file.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file.o base/files/file.cc
+
+base/files/base_file_posix.o:base/files/file_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file_posix.o base/files/file_posix.cc
+
+base/files/base_file_enumerator.o:base/files/file_enumerator.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file_enumerator.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file_enumerator.o base/files/file_enumerator.cc
+
+base/files/base_file_enumerator_posix.o:base/files/file_enumerator_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file_enumerator_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file_enumerator_posix.o base/files/file_enumerator_posix.cc
+
+base/files/base_file_path.o:base/files/file_path.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file_path.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file_path.o base/files/file_path.cc
+
+base/files/base_file_path_constants.o:base/files/file_path_constants.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file_path_constants.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file_path_constants.o base/files/file_path_constants.cc
+
+base/files/base_memory_mapped_file.o:base/files/memory_mapped_file.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_memory_mapped_file.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_memory_mapped_file.o base/files/memory_mapped_file.cc
+
+base/files/base_memory_mapped_file_posix.o:base/files/memory_mapped_file_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_memory_mapped_file_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_memory_mapped_file_posix.o base/files/memory_mapped_file_posix.cc
+
+base/files/base_scoped_file.o:base/files/scoped_file.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_scoped_file.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_scoped_file.o base/files/scoped_file.cc
+
+base/files/base_scoped_temp_dir.o:base/files/scoped_temp_dir.cc
+	@echo "[COMAKE:BUILD][Target:'base/files/base_scoped_temp_dir.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_scoped_temp_dir.o base/files/scoped_temp_dir.cc
+
+base/base_file_util.o:base/file_util.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_file_util.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_file_util.o base/file_util.cc
+
+base/base_file_util_linux.o:base/file_util_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_file_util_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_file_util_linux.o base/file_util_linux.cc
+
+base/base_file_util_posix.o:base/file_util_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_file_util_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_file_util_posix.o base/file_util_posix.cc
+
+base/base_guid.o:base/guid.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_guid.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_guid.o base/guid.cc
+
+base/base_guid_posix.o:base/guid_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_guid_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_guid_posix.o base/guid_posix.cc
+
+base/base_hash.o:base/hash.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_hash.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_hash.o base/hash.cc
+
+base/base_lazy_instance.o:base/lazy_instance.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_lazy_instance.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_lazy_instance.o base/lazy_instance.cc
+
+base/base_location.o:base/location.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_location.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_location.o base/location.cc
+
+base/base_md5.o:base/md5.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_md5.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_md5.o base/md5.cc
+
+base/memory/base_aligned_memory.o:base/memory/aligned_memory.cc
+	@echo "[COMAKE:BUILD][Target:'base/memory/base_aligned_memory.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/memory/base_aligned_memory.o base/memory/aligned_memory.cc
+
+base/memory/base_ref_counted.o:base/memory/ref_counted.cc
+	@echo "[COMAKE:BUILD][Target:'base/memory/base_ref_counted.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/memory/base_ref_counted.o base/memory/ref_counted.cc
+
+base/memory/base_ref_counted_memory.o:base/memory/ref_counted_memory.cc
+	@echo "[COMAKE:BUILD][Target:'base/memory/base_ref_counted_memory.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/memory/base_ref_counted_memory.o base/memory/ref_counted_memory.cc
+
+base/memory/base_shared_memory_posix.o:base/memory/shared_memory_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/memory/base_shared_memory_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/memory/base_shared_memory_posix.o base/memory/shared_memory_posix.cc
+
+base/memory/base_singleton.o:base/memory/singleton.cc
+	@echo "[COMAKE:BUILD][Target:'base/memory/base_singleton.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/memory/base_singleton.o base/memory/singleton.cc
+
+base/memory/base_weak_ptr.o:base/memory/weak_ptr.cc
+	@echo "[COMAKE:BUILD][Target:'base/memory/base_weak_ptr.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/memory/base_weak_ptr.o base/memory/weak_ptr.cc
+
+base/nix/base_mime_util_xdg.o:base/nix/mime_util_xdg.cc
+	@echo "[COMAKE:BUILD][Target:'base/nix/base_mime_util_xdg.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/nix/base_mime_util_xdg.o base/nix/mime_util_xdg.cc
+
+base/nix/base_xdg_util.o:base/nix/xdg_util.cc
+	@echo "[COMAKE:BUILD][Target:'base/nix/base_xdg_util.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/nix/base_xdg_util.o base/nix/xdg_util.cc
+
+base/base_path_service.o:base/path_service.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_path_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_path_service.o base/path_service.cc
+
+base/posix/base_file_descriptor_shuffle.o:base/posix/file_descriptor_shuffle.cc
+	@echo "[COMAKE:BUILD][Target:'base/posix/base_file_descriptor_shuffle.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/posix/base_file_descriptor_shuffle.o base/posix/file_descriptor_shuffle.cc
+
+base/posix/base_global_descriptors.o:base/posix/global_descriptors.cc
+	@echo "[COMAKE:BUILD][Target:'base/posix/base_global_descriptors.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/posix/base_global_descriptors.o base/posix/global_descriptors.cc
+
+base/process/base_internal_linux.o:base/process/internal_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_internal_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_internal_linux.o base/process/internal_linux.cc
+
+base/process/base_kill.o:base/process/kill.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_kill.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_kill.o base/process/kill.cc
+
+base/process/base_kill_posix.o:base/process/kill_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_kill_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_kill_posix.o base/process/kill_posix.cc
+
+base/process/base_launch.o:base/process/launch.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_launch.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_launch.o base/process/launch.cc
+
+base/process/base_launch_posix.o:base/process/launch_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_launch_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_launch_posix.o base/process/launch_posix.cc
+
+base/process/base_memory.o:base/process/memory.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_memory.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_memory.o base/process/memory.cc
+
+base/process/base_memory_linux.o:base/process/memory_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_memory_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_memory_linux.o base/process/memory_linux.cc
+
+base/process/base_process_handle_linux.o:base/process/process_handle_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_handle_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_handle_linux.o base/process/process_handle_linux.cc
+
+base/process/base_process_handle_posix.o:base/process/process_handle_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_handle_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_handle_posix.o base/process/process_handle_posix.cc
+
+base/process/base_process_info_linux.o:base/process/process_info_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_info_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_info_linux.o base/process/process_info_linux.cc
+
+base/process/base_process_iterator.o:base/process/process_iterator.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_iterator.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_iterator.o base/process/process_iterator.cc
+
+base/process/base_process_iterator_linux.o:base/process/process_iterator_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_iterator_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_iterator_linux.o base/process/process_iterator_linux.cc
+
+base/process/base_process_linux.o:base/process/process_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_linux.o base/process/process_linux.cc
+
+base/process/base_process_metrics.o:base/process/process_metrics.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_metrics.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_metrics.o base/process/process_metrics.cc
+
+base/process/base_process_metrics_linux.o:base/process/process_metrics_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_metrics_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_metrics_linux.o base/process/process_metrics_linux.cc
+
+base/process/base_process_metrics_posix.o:base/process/process_metrics_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_metrics_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_metrics_posix.o base/process/process_metrics_posix.cc
+
+base/process/base_process_posix.o:base/process/process_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/process/base_process_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/process/base_process_posix.o base/process/process_posix.cc
+
+base/base_rand_util.o:base/rand_util.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_rand_util.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_rand_util.o base/rand_util.cc
+
+base/base_rand_util_posix.o:base/rand_util_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_rand_util_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_rand_util_posix.o base/rand_util_posix.cc
+
+base/base_fast_rand.o:base/fast_rand.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_fast_rand.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_fast_rand.o base/fast_rand.cpp
+
+base/base_safe_strerror_posix.o:base/safe_strerror_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_safe_strerror_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_safe_strerror_posix.o base/safe_strerror_posix.cc
+
+base/base_sha1_portable.o:base/sha1_portable.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_sha1_portable.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_sha1_portable.o base/sha1_portable.cc
+
+base/strings/base_latin1_string_conversions.o:base/strings/latin1_string_conversions.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_latin1_string_conversions.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_latin1_string_conversions.o base/strings/latin1_string_conversions.cc
+
+base/strings/base_nullable_string16.o:base/strings/nullable_string16.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_nullable_string16.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_nullable_string16.o base/strings/nullable_string16.cc
+
+base/strings/base_safe_sprintf.o:base/strings/safe_sprintf.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_safe_sprintf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_safe_sprintf.o base/strings/safe_sprintf.cc
+
+base/strings/base_string16.o:base/strings/string16.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_string16.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_string16.o base/strings/string16.cc
+
+base/strings/base_string_number_conversions.o:base/strings/string_number_conversions.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_string_number_conversions.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_string_number_conversions.o base/strings/string_number_conversions.cc
+
+base/strings/base_string_split.o:base/strings/string_split.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_string_split.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_string_split.o base/strings/string_split.cc
+
+base/strings/base_string_piece.o:base/strings/string_piece.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_string_piece.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_string_piece.o base/strings/string_piece.cc
+
+base/strings/base_string_util.o:base/strings/string_util.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_string_util.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_string_util.o base/strings/string_util.cc
+
+base/strings/base_string_util_constants.o:base/strings/string_util_constants.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_string_util_constants.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_string_util_constants.o base/strings/string_util_constants.cc
+
+base/strings/base_stringprintf.o:base/strings/stringprintf.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_stringprintf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_stringprintf.o base/strings/stringprintf.cc
+
+base/strings/base_sys_string_conversions_posix.o:base/strings/sys_string_conversions_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_sys_string_conversions_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_sys_string_conversions_posix.o base/strings/sys_string_conversions_posix.cc
+
+base/strings/base_utf_offset_string_conversions.o:base/strings/utf_offset_string_conversions.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_utf_offset_string_conversions.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_utf_offset_string_conversions.o base/strings/utf_offset_string_conversions.cc
+
+base/strings/base_utf_string_conversion_utils.o:base/strings/utf_string_conversion_utils.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_utf_string_conversion_utils.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_utf_string_conversion_utils.o base/strings/utf_string_conversion_utils.cc
+
+base/strings/base_utf_string_conversions.o:base/strings/utf_string_conversions.cc
+	@echo "[COMAKE:BUILD][Target:'base/strings/base_utf_string_conversions.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/strings/base_utf_string_conversions.o base/strings/utf_string_conversions.cc
+
+base/synchronization/base_cancellation_flag.o:base/synchronization/cancellation_flag.cc
+	@echo "[COMAKE:BUILD][Target:'base/synchronization/base_cancellation_flag.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/synchronization/base_cancellation_flag.o base/synchronization/cancellation_flag.cc
+
+base/synchronization/base_condition_variable_posix.o:base/synchronization/condition_variable_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/synchronization/base_condition_variable_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/synchronization/base_condition_variable_posix.o base/synchronization/condition_variable_posix.cc
+
+base/synchronization/base_waitable_event_posix.o:base/synchronization/waitable_event_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/synchronization/base_waitable_event_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/synchronization/base_waitable_event_posix.o base/synchronization/waitable_event_posix.cc
+
+base/base_sys_info.o:base/sys_info.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_sys_info.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_sys_info.o base/sys_info.cc
+
+base/base_sys_info_linux.o:base/sys_info_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_sys_info_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_sys_info_linux.o base/sys_info_linux.cc
+
+base/base_sys_info_posix.o:base/sys_info_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_sys_info_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_sys_info_posix.o base/sys_info_posix.cc
+
+base/threading/base_non_thread_safe_impl.o:base/threading/non_thread_safe_impl.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_non_thread_safe_impl.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_non_thread_safe_impl.o base/threading/non_thread_safe_impl.cc
+
+base/threading/base_platform_thread_linux.o:base/threading/platform_thread_linux.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_platform_thread_linux.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_platform_thread_linux.o base/threading/platform_thread_linux.cc
+
+base/threading/base_platform_thread_posix.o:base/threading/platform_thread_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_platform_thread_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_platform_thread_posix.o base/threading/platform_thread_posix.cc
+
+base/threading/base_simple_thread.o:base/threading/simple_thread.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_simple_thread.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_simple_thread.o base/threading/simple_thread.cc
+
+base/threading/base_thread_checker_impl.o:base/threading/thread_checker_impl.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_checker_impl.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_checker_impl.o base/threading/thread_checker_impl.cc
+
+base/threading/base_thread_collision_warner.o:base/threading/thread_collision_warner.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_collision_warner.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_collision_warner.o base/threading/thread_collision_warner.cc
+
+base/threading/base_thread_id_name_manager.o:base/threading/thread_id_name_manager.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_id_name_manager.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_id_name_manager.o base/threading/thread_id_name_manager.cc
+
+base/threading/base_thread_local_posix.o:base/threading/thread_local_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_local_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_local_posix.o base/threading/thread_local_posix.cc
+
+base/threading/base_thread_local_storage.o:base/threading/thread_local_storage.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_local_storage.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_local_storage.o base/threading/thread_local_storage.cc
+
+base/threading/base_thread_local_storage_posix.o:base/threading/thread_local_storage_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_local_storage_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_local_storage_posix.o base/threading/thread_local_storage_posix.cc
+
+base/threading/base_thread_restrictions.o:base/threading/thread_restrictions.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_thread_restrictions.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_thread_restrictions.o base/threading/thread_restrictions.cc
+
+base/threading/base_watchdog.o:base/threading/watchdog.cc
+	@echo "[COMAKE:BUILD][Target:'base/threading/base_watchdog.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/threading/base_watchdog.o base/threading/watchdog.cc
+
+# --- COMAKE-generated compile rules: base/time ---
+# Builds clock/tick-clock/time objects; note base/time/base_time.o here comes
+# from base/time/time.cc (distinct from base/base_time.o built from base/time.cpp).
+base/time/base_clock.o:base/time/clock.cc
+	@echo "[COMAKE:BUILD][Target:'base/time/base_clock.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/time/base_clock.o base/time/clock.cc
+
+base/time/base_default_clock.o:base/time/default_clock.cc
+	@echo "[COMAKE:BUILD][Target:'base/time/base_default_clock.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/time/base_default_clock.o base/time/default_clock.cc
+
+base/time/base_default_tick_clock.o:base/time/default_tick_clock.cc
+	@echo "[COMAKE:BUILD][Target:'base/time/base_default_tick_clock.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/time/base_default_tick_clock.o base/time/default_tick_clock.cc
+
+base/time/base_tick_clock.o:base/time/tick_clock.cc
+	@echo "[COMAKE:BUILD][Target:'base/time/base_tick_clock.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/time/base_tick_clock.o base/time/tick_clock.cc
+
+base/time/base_time.o:base/time/time.cc
+	@echo "[COMAKE:BUILD][Target:'base/time/base_time.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/time/base_time.o base/time/time.cc
+
+base/time/base_time_posix.o:base/time/time_posix.cc
+	@echo "[COMAKE:BUILD][Target:'base/time/base_time_posix.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/time/base_time_posix.o base/time/time_posix.cc
+
+# --- COMAKE-generated compile rules: base (misc) ---
+# Mixed .cc (chromium-base) and .cpp (project) sources, all compiled with the
+# same flag set. Generated — regenerate via COMAKE rather than hand-editing.
+base/base_version.o:base/version.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_version.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_version.o base/version.cc
+
+base/base_logging.o:base/logging.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_logging.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_logging.o base/logging.cc
+
+base/base_class_name.o:base/class_name.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_class_name.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_class_name.o base/class_name.cpp
+
+base/base_errno.o:base/errno.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_errno.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_errno.o base/errno.cpp
+
+base/base_find_cstr.o:base/find_cstr.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_find_cstr.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_find_cstr.o base/find_cstr.cpp
+
+base/base_status.o:base/status.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_status.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_status.o base/status.cpp
+
+base/base_string_printf.o:base/string_printf.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_string_printf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_string_printf.o base/string_printf.cpp
+
+base/base_thread_local.o:base/thread_local.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_thread_local.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_thread_local.o base/thread_local.cpp
+
+base/base_unix_socket.o:base/unix_socket.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_unix_socket.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_unix_socket.o base/unix_socket.cpp
+
+base/base_endpoint.o:base/endpoint.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_endpoint.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_endpoint.o base/endpoint.cpp
+
+base/base_fd_utility.o:base/fd_utility.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_fd_utility.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_fd_utility.o base/fd_utility.cpp
+
+base/files/base_temp_file.o:base/files/temp_file.cpp
+	@echo "[COMAKE:BUILD][Target:'base/files/base_temp_file.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_temp_file.o base/files/temp_file.cpp
+
+base/files/base_file_watcher.o:base/files/file_watcher.cpp
+	@echo "[COMAKE:BUILD][Target:'base/files/base_file_watcher.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/files/base_file_watcher.o base/files/file_watcher.cpp
+
+base/base_time.o:base/time.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_time.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_time.o base/time.cpp
+
+base/base_zero_copy_stream_as_streambuf.o:base/zero_copy_stream_as_streambuf.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_zero_copy_stream_as_streambuf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_zero_copy_stream_as_streambuf.o base/zero_copy_stream_as_streambuf.cpp
+
+base/base_crc32c.o:base/crc32c.cc
+	@echo "[COMAKE:BUILD][Target:'base/base_crc32c.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_crc32c.o base/crc32c.cc
+
+base/containers/base_case_ignored_flat_map.o:base/containers/case_ignored_flat_map.cpp
+	@echo "[COMAKE:BUILD][Target:'base/containers/base_case_ignored_flat_map.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/containers/base_case_ignored_flat_map.o base/containers/case_ignored_flat_map.cpp
+
+base/base_iobuf.o:base/iobuf.cpp
+	@echo "[COMAKE:BUILD][Target:'base/base_iobuf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o base/base_iobuf.o base/iobuf.cpp
+
+# --- COMAKE-generated compile rules: bvar (and bvar/detail) ---
+bvar/bvar_collector.o:bvar/collector.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/bvar_collector.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/bvar_collector.o bvar/collector.cpp
+
+bvar/bvar_default_variables.o:bvar/default_variables.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/bvar_default_variables.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/bvar_default_variables.o bvar/default_variables.cpp
+
+bvar/bvar_gflag.o:bvar/gflag.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/bvar_gflag.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/bvar_gflag.o bvar/gflag.cpp
+
+bvar/bvar_latency_recorder.o:bvar/latency_recorder.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/bvar_latency_recorder.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/bvar_latency_recorder.o bvar/latency_recorder.cpp
+
+bvar/bvar_variable.o:bvar/variable.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/bvar_variable.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/bvar_variable.o bvar/variable.cpp
+
+bvar/detail/bvar_percentile.o:bvar/detail/percentile.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/detail/bvar_percentile.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/detail/bvar_percentile.o bvar/detail/percentile.cpp
+
+bvar/detail/bvar_sampler.o:bvar/detail/sampler.cpp
+	@echo "[COMAKE:BUILD][Target:'bvar/detail/bvar_sampler.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bvar/detail/bvar_sampler.o bvar/detail/sampler.cpp
+
+# --- COMAKE-generated compile rules: bthread ---
+bthread/bthread_bthread.o:bthread/bthread.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_bthread.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_bthread.o bthread/bthread.cpp
+
+bthread/bthread_butex.o:bthread/butex.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_butex.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_butex.o bthread/butex.cpp
+
+bthread/bthread_cond.o:bthread/cond.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_cond.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_cond.o bthread/cond.cpp
+
+bthread/bthread_context.o:bthread/context.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_context.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_context.o bthread/context.cpp
+
+bthread/bthread_countdown_event.o:bthread/countdown_event.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_countdown_event.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_countdown_event.o bthread/countdown_event.cpp
+
+bthread/bthread_errno.o:bthread/errno.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_errno.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_errno.o bthread/errno.cpp
+
+bthread/bthread_execution_queue.o:bthread/execution_queue.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_execution_queue.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_execution_queue.o bthread/execution_queue.cpp
+
+bthread/bthread_fd.o:bthread/fd.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_fd.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_fd.o bthread/fd.cpp
+
+bthread/bthread_id.o:bthread/id.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_id.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_id.o bthread/id.cpp
+
+bthread/bthread_interrupt_pthread.o:bthread/interrupt_pthread.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_interrupt_pthread.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_interrupt_pthread.o bthread/interrupt_pthread.cpp
+
+bthread/bthread_key.o:bthread/key.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_key.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_key.o bthread/key.cpp
+
+bthread/bthread_mutex.o:bthread/mutex.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_mutex.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_mutex.o bthread/mutex.cpp
+
+bthread/bthread_stack.o:bthread/stack.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_stack.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_stack.o bthread/stack.cpp
+
+bthread/bthread_sys_futex.o:bthread/sys_futex.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_sys_futex.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_sys_futex.o bthread/sys_futex.cpp
+
+bthread/bthread_task_control.o:bthread/task_control.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_task_control.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_task_control.o bthread/task_control.cpp
+
+bthread/bthread_task_group.o:bthread/task_group.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_task_group.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_task_group.o bthread/task_group.cpp
+
+bthread/bthread_timer_thread.o:bthread/timer_thread.cpp
+	@echo "[COMAKE:BUILD][Target:'bthread/bthread_timer_thread.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o bthread/bthread_timer_thread.o bthread/timer_thread.cpp
+
+# --- COMAKE-generated compile rules: json2pb ---
+json2pb/json2pb_encode_decode.o:json2pb/encode_decode.cpp
+	@echo "[COMAKE:BUILD][Target:'json2pb/json2pb_encode_decode.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o json2pb/json2pb_encode_decode.o json2pb/encode_decode.cpp
+
+json2pb/json2pb_json_to_pb.o:json2pb/json_to_pb.cpp
+	@echo "[COMAKE:BUILD][Target:'json2pb/json2pb_json_to_pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o json2pb/json2pb_json_to_pb.o json2pb/json_to_pb.cpp
+
+json2pb/json2pb_pb_to_json.o:json2pb/pb_to_json.cpp
+	@echo "[COMAKE:BUILD][Target:'json2pb/json2pb_pb_to_json.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o json2pb/json2pb_pb_to_json.o json2pb/pb_to_json.cpp
+
+json2pb/json2pb_protobuf_map.o:json2pb/protobuf_map.cpp
+	@echo "[COMAKE:BUILD][Target:'json2pb/json2pb_protobuf_map.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o json2pb/json2pb_protobuf_map.o json2pb/protobuf_map.cpp
+
+json2pb/json2pb_string_printf.o:json2pb/string_printf.cpp
+	@echo "[COMAKE:BUILD][Target:'json2pb/json2pb_string_printf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o json2pb/json2pb_string_printf.o json2pb/string_printf.cpp
+
+# --- COMAKE-generated compile rules: mcpack2pb ---
+# NOTE(review): idl_options.pb.cc is referenced at the repo root (no directory
+# prefix) — presumably emitted there by a protoc step elsewhere in this
+# Makefile; confirm against the generation rule.
+mcpack2pb/mcpack2pb_field_type.o:mcpack2pb/field_type.cpp
+	@echo "[COMAKE:BUILD][Target:'mcpack2pb/mcpack2pb_field_type.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o mcpack2pb/mcpack2pb_field_type.o mcpack2pb/field_type.cpp
+
+mcpack2pb/mcpack2pb_mcpack2pb.o:mcpack2pb/mcpack2pb.cpp
+	@echo "[COMAKE:BUILD][Target:'mcpack2pb/mcpack2pb_mcpack2pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o mcpack2pb/mcpack2pb_mcpack2pb.o mcpack2pb/mcpack2pb.cpp
+
+mcpack2pb/mcpack2pb_parser.o:mcpack2pb/parser.cpp
+	@echo "[COMAKE:BUILD][Target:'mcpack2pb/mcpack2pb_parser.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o mcpack2pb/mcpack2pb_parser.o mcpack2pb/parser.cpp
+
+mcpack2pb/mcpack2pb_serializer.o:mcpack2pb/serializer.cpp
+	@echo "[COMAKE:BUILD][Target:'mcpack2pb/mcpack2pb_serializer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o mcpack2pb/mcpack2pb_serializer.o mcpack2pb/serializer.cpp
+
+mcpack2pb_idl_options.pb.o:idl_options.pb.cc
+	@echo "[COMAKE:BUILD][Target:'mcpack2pb_idl_options.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o mcpack2pb_idl_options.pb.o idl_options.pb.cc
+
+mcpack2pb/protoc-gen-mcpack_generator.o:mcpack2pb/generator.cpp
+	@echo "[COMAKE:BUILD][Target:'mcpack2pb/protoc-gen-mcpack_generator.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o mcpack2pb/protoc-gen-mcpack_generator.o mcpack2pb/generator.cpp
+
+# --- COMAKE-generated compile rules: brpc (core) ---
+# One explicit rule per brpc source; all share the same compiler invocation.
+brpc/brpc_acceptor.o:brpc/acceptor.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_acceptor.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_acceptor.o brpc/acceptor.cpp
+
+brpc/brpc_adaptive_connection_type.o:brpc/adaptive_connection_type.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_adaptive_connection_type.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_adaptive_connection_type.o brpc/adaptive_connection_type.cpp
+
+brpc/brpc_amf.o:brpc/amf.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_amf.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_amf.o brpc/amf.cpp
+
+brpc/brpc_bad_method_service.o:brpc/bad_method_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_bad_method_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_bad_method_service.o brpc/bad_method_service.cpp
+
+brpc/brpc_channel.o:brpc/channel.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_channel.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_channel.o brpc/channel.cpp
+
+brpc/brpc_compress.o:brpc/compress.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_compress.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_compress.o brpc/compress.cpp
+
+brpc/brpc_controller.o:brpc/controller.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_controller.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_controller.o brpc/controller.cpp
+
+brpc/brpc_esp_message.o:brpc/esp_message.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_esp_message.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_esp_message.o brpc/esp_message.cpp
+
+brpc/brpc_event_dispatcher.o:brpc/event_dispatcher.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_event_dispatcher.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_event_dispatcher.o brpc/event_dispatcher.cpp
+
+brpc/brpc_global.o:brpc/global.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_global.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_global.o brpc/global.cpp
+
+brpc/brpc_http_header.o:brpc/http_header.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_http_header.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_http_header.o brpc/http_header.cpp
+
+brpc/brpc_http_method.o:brpc/http_method.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_http_method.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_http_method.o brpc/http_method.cpp
+
+brpc/brpc_http_status_code.o:brpc/http_status_code.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_http_status_code.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_http_status_code.o brpc/http_status_code.cpp
+
+brpc/brpc_input_messenger.o:brpc/input_messenger.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_input_messenger.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_input_messenger.o brpc/input_messenger.cpp
+
+brpc/brpc_load_balancer.o:brpc/load_balancer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_load_balancer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_load_balancer.o brpc/load_balancer.cpp
+
+brpc/brpc_load_balancer_with_naming.o:brpc/load_balancer_with_naming.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_load_balancer_with_naming.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_load_balancer_with_naming.o brpc/load_balancer_with_naming.cpp
+
+brpc/brpc_memcache.o:brpc/memcache.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_memcache.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_memcache.o brpc/memcache.cpp
+
+brpc/brpc_naming_service_thread.o:brpc/naming_service_thread.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_naming_service_thread.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_naming_service_thread.o brpc/naming_service_thread.cpp
+
+brpc/brpc_nshead_message.o:brpc/nshead_message.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_nshead_message.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_nshead_message.o brpc/nshead_message.cpp
+
+brpc/brpc_nshead_pb_service_adaptor.o:brpc/nshead_pb_service_adaptor.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_nshead_pb_service_adaptor.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_nshead_pb_service_adaptor.o brpc/nshead_pb_service_adaptor.cpp
+
+brpc/brpc_nshead_service.o:brpc/nshead_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_nshead_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_nshead_service.o brpc/nshead_service.cpp
+
+brpc/brpc_parallel_channel.o:brpc/parallel_channel.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_parallel_channel.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_parallel_channel.o brpc/parallel_channel.cpp
+
+brpc/brpc_partition_channel.o:brpc/partition_channel.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_partition_channel.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_partition_channel.o brpc/partition_channel.cpp
+
+brpc/brpc_periodic_naming_service.o:brpc/periodic_naming_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_periodic_naming_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_periodic_naming_service.o brpc/periodic_naming_service.cpp
+
+brpc/brpc_progressive_attachment.o:brpc/progressive_attachment.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_progressive_attachment.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_progressive_attachment.o brpc/progressive_attachment.cpp
+
+brpc/brpc_protocol.o:brpc/protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_protocol.o brpc/protocol.cpp
+
+brpc/brpc_redis.o:brpc/redis.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_redis.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_redis.o brpc/redis.cpp
+
+brpc/brpc_redis_command.o:brpc/redis_command.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_redis_command.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_redis_command.o brpc/redis_command.cpp
+
+brpc/brpc_redis_reply.o:brpc/redis_reply.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_redis_reply.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_redis_reply.o brpc/redis_reply.cpp
+
+brpc/brpc_reloadable_flags.o:brpc/reloadable_flags.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_reloadable_flags.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_reloadable_flags.o brpc/reloadable_flags.cpp
+
+brpc/brpc_restful.o:brpc/restful.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_restful.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_restful.o brpc/restful.cpp
+
+brpc/brpc_retry_policy.o:brpc/retry_policy.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_retry_policy.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_retry_policy.o brpc/retry_policy.cpp
+
+brpc/brpc_rpc_dump.o:brpc/rpc_dump.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_rpc_dump.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_rpc_dump.o brpc/rpc_dump.cpp
+
+brpc/brpc_rtmp.o:brpc/rtmp.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_rtmp.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_rtmp.o brpc/rtmp.cpp
+
+brpc/brpc_selective_channel.o:brpc/selective_channel.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_selective_channel.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_selective_channel.o brpc/selective_channel.cpp
+
+brpc/brpc_serialized_request.o:brpc/serialized_request.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_serialized_request.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_serialized_request.o brpc/serialized_request.cpp
+
+brpc/brpc_server.o:brpc/server.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_server.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_server.o brpc/server.cpp
+
+brpc/brpc_server_id.o:brpc/server_id.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_server_id.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_server_id.o brpc/server_id.cpp
+
+brpc/brpc_socket.o:brpc/socket.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_socket.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_socket.o brpc/socket.cpp
+
+brpc/brpc_socket_map.o:brpc/socket_map.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_socket_map.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_socket_map.o brpc/socket_map.cpp
+
+brpc/brpc_span.o:brpc/span.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_span.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_span.o brpc/span.cpp
+
+brpc/brpc_stream.o:brpc/stream.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_stream.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_stream.o brpc/stream.cpp
+
+brpc/brpc_tcmalloc_extension.o:brpc/tcmalloc_extension.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_tcmalloc_extension.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_tcmalloc_extension.o brpc/tcmalloc_extension.cpp
+
+brpc/brpc_trackme.o:brpc/trackme.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_trackme.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_trackme.o brpc/trackme.cpp
+
+brpc/brpc_ts.o:brpc/ts.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_ts.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_ts.o brpc/ts.cpp
+
+brpc/brpc_uri.o:brpc/uri.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_uri.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_uri.o brpc/uri.cpp
+
+# --- COMAKE-generated compile rules: brpc/policy (protocols, load balancers, naming services) ---
+brpc/policy/brpc_baidu_rpc_protocol.o:brpc/policy/baidu_rpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_baidu_rpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_baidu_rpc_protocol.o brpc/policy/baidu_rpc_protocol.cpp
+
+brpc/policy/brpc_consistent_hashing_load_balancer.o:brpc/policy/consistent_hashing_load_balancer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_consistent_hashing_load_balancer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_consistent_hashing_load_balancer.o brpc/policy/consistent_hashing_load_balancer.cpp
+
+brpc/policy/brpc_dh.o:brpc/policy/dh.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_dh.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_dh.o brpc/policy/dh.cpp
+
+brpc/policy/brpc_domain_naming_service.o:brpc/policy/domain_naming_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_domain_naming_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_domain_naming_service.o brpc/policy/domain_naming_service.cpp
+
+brpc/policy/brpc_dynpart_load_balancer.o:brpc/policy/dynpart_load_balancer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_dynpart_load_balancer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_dynpart_load_balancer.o brpc/policy/dynpart_load_balancer.cpp
+
+brpc/policy/brpc_esp_authenticator.o:brpc/policy/esp_authenticator.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_esp_authenticator.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_esp_authenticator.o brpc/policy/esp_authenticator.cpp
+
+brpc/policy/brpc_esp_protocol.o:brpc/policy/esp_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_esp_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_esp_protocol.o brpc/policy/esp_protocol.cpp
+
+brpc/policy/brpc_file_naming_service.o:brpc/policy/file_naming_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_file_naming_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_file_naming_service.o brpc/policy/file_naming_service.cpp
+
+brpc/policy/brpc_gzip_compress.o:brpc/policy/gzip_compress.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_gzip_compress.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_gzip_compress.o brpc/policy/gzip_compress.cpp
+
+brpc/policy/brpc_hasher.o:brpc/policy/hasher.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_hasher.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_hasher.o brpc/policy/hasher.cpp
+
+brpc/policy/brpc_http_rpc_protocol.o:brpc/policy/http_rpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_http_rpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_http_rpc_protocol.o brpc/policy/http_rpc_protocol.cpp
+
+brpc/policy/brpc_hulu_pbrpc_protocol.o:brpc/policy/hulu_pbrpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_hulu_pbrpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_hulu_pbrpc_protocol.o brpc/policy/hulu_pbrpc_protocol.cpp
+
+brpc/policy/brpc_list_naming_service.o:brpc/policy/list_naming_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_list_naming_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_list_naming_service.o brpc/policy/list_naming_service.cpp
+
+brpc/policy/brpc_locality_aware_load_balancer.o:brpc/policy/locality_aware_load_balancer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_locality_aware_load_balancer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_locality_aware_load_balancer.o brpc/policy/locality_aware_load_balancer.cpp
+
+brpc/policy/brpc_memcache_binary_protocol.o:brpc/policy/memcache_binary_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_memcache_binary_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_memcache_binary_protocol.o brpc/policy/memcache_binary_protocol.cpp
+
+brpc/policy/brpc_mongo_protocol.o:brpc/policy/mongo_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_mongo_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_mongo_protocol.o brpc/policy/mongo_protocol.cpp
+
+brpc/policy/brpc_nova_pbrpc_protocol.o:brpc/policy/nova_pbrpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_nova_pbrpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_nova_pbrpc_protocol.o brpc/policy/nova_pbrpc_protocol.cpp
+
+brpc/policy/brpc_nshead_mcpack_protocol.o:brpc/policy/nshead_mcpack_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_nshead_mcpack_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_nshead_mcpack_protocol.o brpc/policy/nshead_mcpack_protocol.cpp
+
+brpc/policy/brpc_nshead_protocol.o:brpc/policy/nshead_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_nshead_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_nshead_protocol.o brpc/policy/nshead_protocol.cpp
+
+brpc/policy/brpc_public_pbrpc_protocol.o:brpc/policy/public_pbrpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_public_pbrpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_public_pbrpc_protocol.o brpc/policy/public_pbrpc_protocol.cpp
+
+brpc/policy/brpc_randomized_load_balancer.o:brpc/policy/randomized_load_balancer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_randomized_load_balancer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_randomized_load_balancer.o brpc/policy/randomized_load_balancer.cpp
+
+brpc/policy/brpc_redis_protocol.o:brpc/policy/redis_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_redis_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_redis_protocol.o brpc/policy/redis_protocol.cpp
+
+brpc/policy/brpc_remote_file_naming_service.o:brpc/policy/remote_file_naming_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_remote_file_naming_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_remote_file_naming_service.o brpc/policy/remote_file_naming_service.cpp
+
+brpc/policy/brpc_round_robin_load_balancer.o:brpc/policy/round_robin_load_balancer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_round_robin_load_balancer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_round_robin_load_balancer.o brpc/policy/round_robin_load_balancer.cpp
+
+brpc/policy/brpc_rtmp_protocol.o:brpc/policy/rtmp_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_rtmp_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_rtmp_protocol.o brpc/policy/rtmp_protocol.cpp
+
+brpc/policy/brpc_snappy_compress.o:brpc/policy/snappy_compress.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_snappy_compress.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_snappy_compress.o brpc/policy/snappy_compress.cpp
+
+brpc/policy/brpc_sofa_pbrpc_protocol.o:brpc/policy/sofa_pbrpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_sofa_pbrpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_sofa_pbrpc_protocol.o brpc/policy/sofa_pbrpc_protocol.cpp
+
+brpc/policy/brpc_streaming_rpc_protocol.o:brpc/policy/streaming_rpc_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_streaming_rpc_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_streaming_rpc_protocol.o brpc/policy/streaming_rpc_protocol.cpp
+
+brpc/policy/brpc_ubrpc2pb_protocol.o:brpc/policy/ubrpc2pb_protocol.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_ubrpc2pb_protocol.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_ubrpc2pb_protocol.o brpc/policy/ubrpc2pb_protocol.cpp
+
+brpc/builtin/brpc_bthreads_service.o:brpc/builtin/bthreads_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_bthreads_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_bthreads_service.o brpc/builtin/bthreads_service.cpp
+
+brpc/builtin/brpc_common.o:brpc/builtin/common.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_common.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_common.o brpc/builtin/common.cpp
+
+brpc/builtin/brpc_connections_service.o:brpc/builtin/connections_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_connections_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_connections_service.o brpc/builtin/connections_service.cpp
+
+brpc/builtin/brpc_dir_service.o:brpc/builtin/dir_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_dir_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_dir_service.o brpc/builtin/dir_service.cpp
+
+brpc/builtin/brpc_flags_service.o:brpc/builtin/flags_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_flags_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_flags_service.o brpc/builtin/flags_service.cpp
+
+brpc/builtin/brpc_flot_min_js.o:brpc/builtin/flot_min_js.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_flot_min_js.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_flot_min_js.o brpc/builtin/flot_min_js.cpp
+
+brpc/builtin/brpc_get_favicon_service.o:brpc/builtin/get_favicon_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_get_favicon_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_get_favicon_service.o brpc/builtin/get_favicon_service.cpp
+
+brpc/builtin/brpc_get_js_service.o:brpc/builtin/get_js_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_get_js_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_get_js_service.o brpc/builtin/get_js_service.cpp
+
+brpc/builtin/brpc_health_service.o:brpc/builtin/health_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_health_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_health_service.o brpc/builtin/health_service.cpp
+
+brpc/builtin/brpc_hotspots_service.o:brpc/builtin/hotspots_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_hotspots_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_hotspots_service.o brpc/builtin/hotspots_service.cpp
+
+brpc/builtin/brpc_ids_service.o:brpc/builtin/ids_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_ids_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_ids_service.o brpc/builtin/ids_service.cpp
+
+brpc/builtin/brpc_index_service.o:brpc/builtin/index_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_index_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_index_service.o brpc/builtin/index_service.cpp
+
+brpc/builtin/brpc_jquery_min_js.o:brpc/builtin/jquery_min_js.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_jquery_min_js.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_jquery_min_js.o brpc/builtin/jquery_min_js.cpp
+
+brpc/builtin/brpc_list_service.o:brpc/builtin/list_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_list_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_list_service.o brpc/builtin/list_service.cpp
+
+brpc/builtin/brpc_pprof_perl.o:brpc/builtin/pprof_perl.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_pprof_perl.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_pprof_perl.o brpc/builtin/pprof_perl.cpp
+
+brpc/builtin/brpc_pprof_service.o:brpc/builtin/pprof_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_pprof_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_pprof_service.o brpc/builtin/pprof_service.cpp
+
+brpc/builtin/brpc_protobufs_service.o:brpc/builtin/protobufs_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_protobufs_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_protobufs_service.o brpc/builtin/protobufs_service.cpp
+
+brpc/builtin/brpc_rpcz_service.o:brpc/builtin/rpcz_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_rpcz_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_rpcz_service.o brpc/builtin/rpcz_service.cpp
+
+brpc/builtin/brpc_sockets_service.o:brpc/builtin/sockets_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_sockets_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_sockets_service.o brpc/builtin/sockets_service.cpp
+
+brpc/builtin/brpc_sorttable_js.o:brpc/builtin/sorttable_js.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_sorttable_js.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_sorttable_js.o brpc/builtin/sorttable_js.cpp
+
+brpc/builtin/brpc_status_service.o:brpc/builtin/status_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_status_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_status_service.o brpc/builtin/status_service.cpp
+
+brpc/builtin/brpc_threads_service.o:brpc/builtin/threads_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_threads_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_threads_service.o brpc/builtin/threads_service.cpp
+
+brpc/builtin/brpc_vars_service.o:brpc/builtin/vars_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_vars_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_vars_service.o brpc/builtin/vars_service.cpp
+
+brpc/builtin/brpc_version_service.o:brpc/builtin/version_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_version_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_version_service.o brpc/builtin/version_service.cpp
+
+brpc/builtin/brpc_viz_min_js.o:brpc/builtin/viz_min_js.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_viz_min_js.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_viz_min_js.o brpc/builtin/viz_min_js.cpp
+
+brpc/builtin/brpc_vlog_service.o:brpc/builtin/vlog_service.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/builtin/brpc_vlog_service.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/builtin/brpc_vlog_service.o brpc/builtin/vlog_service.cpp
+
+brpc/details/brpc_has_epollrdhup.o:brpc/details/has_epollrdhup.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_has_epollrdhup.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_has_epollrdhup.o brpc/details/has_epollrdhup.cpp
+
+brpc/details/brpc_hpack.o:brpc/details/hpack.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_hpack.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_hpack.o brpc/details/hpack.cpp
+
+brpc/details/brpc_http_message.o:brpc/details/http_message.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_http_message.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_http_message.o brpc/details/http_message.cpp
+
+brpc/details/brpc_http_message_serializer.o:brpc/details/http_message_serializer.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_http_message_serializer.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_http_message_serializer.o brpc/details/http_message_serializer.cpp
+
+brpc/details/brpc_http_parser.o:brpc/details/http_parser.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_http_parser.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_http_parser.o brpc/details/http_parser.cpp
+
+brpc/details/brpc_method_status.o:brpc/details/method_status.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_method_status.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_method_status.o brpc/details/method_status.cpp
+
+brpc/details/brpc_rtmp_utils.o:brpc/details/rtmp_utils.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_rtmp_utils.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_rtmp_utils.o brpc/details/rtmp_utils.cpp
+
+brpc/details/brpc_ssl_helper.o:brpc/details/ssl_helper.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_ssl_helper.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_ssl_helper.o brpc/details/ssl_helper.cpp
+
+brpc/details/brpc_usercode_backup_pool.o:brpc/details/usercode_backup_pool.cpp
+	@echo "[COMAKE:BUILD][Target:'brpc/details/brpc_usercode_backup_pool.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/details/brpc_usercode_backup_pool.o brpc/details/usercode_backup_pool.cpp
+
+brpc/brpc_builtin_service.pb.o:brpc/builtin_service.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_builtin_service.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_builtin_service.pb.o brpc/builtin_service.pb.cc
+
+brpc/brpc_errno.pb.o:brpc/errno.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_errno.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_errno.pb.o brpc/errno.pb.cc
+
+brpc/brpc_get_favicon.pb.o:brpc/get_favicon.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_get_favicon.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_get_favicon.pb.o brpc/get_favicon.pb.cc
+
+brpc/brpc_get_js.pb.o:brpc/get_js.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_get_js.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_get_js.pb.o brpc/get_js.pb.cc
+
+brpc/brpc_nshead_meta.pb.o:brpc/nshead_meta.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_nshead_meta.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_nshead_meta.pb.o brpc/nshead_meta.pb.cc
+
+brpc/brpc_options.pb.o:brpc/options.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_options.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_options.pb.o brpc/options.pb.cc
+
+brpc/brpc_rpc_dump.pb.o:brpc/rpc_dump.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_rpc_dump.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_rpc_dump.pb.o brpc/rpc_dump.pb.cc
+
+brpc/brpc_rtmp.pb.o:brpc/rtmp.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_rtmp.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_rtmp.pb.o brpc/rtmp.pb.cc
+
+brpc/brpc_span.pb.o:brpc/span.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_span.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_span.pb.o brpc/span.pb.cc
+
+brpc/brpc_streaming_rpc_meta.pb.o:brpc/streaming_rpc_meta.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_streaming_rpc_meta.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_streaming_rpc_meta.pb.o brpc/streaming_rpc_meta.pb.cc
+
+brpc/brpc_trackme.pb.o:brpc/trackme.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/brpc_trackme.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/brpc_trackme.pb.o brpc/trackme.pb.cc
+
+brpc/policy/brpc_baidu_rpc_meta.pb.o:brpc/policy/baidu_rpc_meta.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_baidu_rpc_meta.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_baidu_rpc_meta.pb.o brpc/policy/baidu_rpc_meta.pb.cc
+
+brpc/policy/brpc_hulu_pbrpc_meta.pb.o:brpc/policy/hulu_pbrpc_meta.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_hulu_pbrpc_meta.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_hulu_pbrpc_meta.pb.o brpc/policy/hulu_pbrpc_meta.pb.cc
+
+brpc/policy/brpc_mongo.pb.o:brpc/policy/mongo.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_mongo.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_mongo.pb.o brpc/policy/mongo.pb.cc
+
+brpc/policy/brpc_public_pbrpc_meta.pb.o:brpc/policy/public_pbrpc_meta.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_public_pbrpc_meta.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_public_pbrpc_meta.pb.o brpc/policy/public_pbrpc_meta.pb.cc
+
+brpc/policy/brpc_sofa_pbrpc_meta.pb.o:brpc/policy/sofa_pbrpc_meta.pb.cc
+	@echo "[COMAKE:BUILD][Target:'brpc/policy/brpc_sofa_pbrpc_meta.pb.o']"
+	$(CXX) -c $(INCPATH) $(DEP_INCPATH) $(CPPFLAGS) $(CXXFLAGS)  -o brpc/policy/brpc_sofa_pbrpc_meta.pb.o brpc/policy/sofa_pbrpc_meta.pb.cc
+
+endif #ifeq ($(shell uname -m),x86_64)
+
+

+ 38 - 0
base/allocator/type_profiler_control.cc

@@ -0,0 +1,38 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/type_profiler_control.h"
+
+namespace base {
+namespace type_profiler {
+
+namespace {
+
+#if defined(TYPE_PROFILING)
+const bool kTypeProfilingEnabled = true;
+#else
+const bool kTypeProfilingEnabled = false;
+#endif
+
+bool g_enable_intercept = kTypeProfilingEnabled;
+
+}  // namespace
+
+// static
+void Controller::Stop() {
+  g_enable_intercept = false;
+}
+
+// static
+bool Controller::IsProfiling() {
+  return kTypeProfilingEnabled && g_enable_intercept;
+}
+
+// static
+void Controller::Restart() {
+  g_enable_intercept = kTypeProfilingEnabled;
+}
+
+}  // namespace type_profiler
+}  // namespace base

+ 31 - 0
base/allocator/type_profiler_control.h

@@ -0,0 +1,31 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
+#define BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
+
+#include "base/gtest_prod_util.h"
+
+namespace base {
+namespace type_profiler {
+
+// Static-only switchboard for the type-profiling allocator interception.
+class Controller {
+ public:
+  // Permanently turns off interception for the rest of the process
+  // (outside of tests; see Restart below).
+  static void Stop();
+  // Returns true iff the build enables type profiling AND it has not been
+  // stopped at runtime.
+  static bool IsProfiling();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(TypeProfilerTest,
+                           TestProfileNewWithoutProfiledDelete);
+
+  // Only for use by the friended unit test above.  Profiling should never
+  // be restarted in regular use.
+  static void Restart();
+};
+
+}  // namespace type_profiler
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_

+ 92 - 0
base/arena.cpp

@@ -0,0 +1,92 @@
+// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
+//
+// Do small memory allocations on continuous blocks.
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Fri Jun  5 18:25:40 CST 2015
+
+#include <stdlib.h>
+#include <algorithm>
+#include "base/arena.h"
+
+namespace base {
+
+ArenaOptions::ArenaOptions()
+    : initial_block_size(64)
+    , max_block_size(8192)
+{}
+
+Arena::Arena(const ArenaOptions& options)
+    : _cur_block(NULL)
+    , _isolated_blocks(NULL)
+    , _block_size(options.initial_block_size)
+    , _options(options) {
+}
+
+Arena::~Arena() {
+    while (_cur_block != NULL) {
+        Block* const saved_next = _cur_block->next;
+        free(_cur_block);
+        _cur_block = saved_next;
+    }
+    while (_isolated_blocks != NULL) {
+        Block* const saved_next = _isolated_blocks->next;
+        free(_isolated_blocks);
+        _isolated_blocks = saved_next;
+    }
+}
+
+void Arena::swap(Arena& other) {
+    std::swap(_cur_block, other._cur_block);
+    std::swap(_isolated_blocks, other._isolated_blocks);
+    std::swap(_block_size, other._block_size);
+    const ArenaOptions tmp = _options;
+    _options = other._options;
+    other._options = tmp;
+}
+
+void Arena::clear() {
+    // TODO(gejun): Reuse memory
+    Arena a;
+    swap(a);
+}
+
+void* Arena::allocate_new_block(size_t n) {
+    Block* b = (Block*)malloc(offsetof(Block, data) + n);
+    b->next = _isolated_blocks;
+    b->alloc_size = n;
+    b->size = n;
+    _isolated_blocks = b;
+    return b->data;
+}
+
+void* Arena::allocate_in_other_blocks(size_t n) {
+    if (n > _block_size / 4) { // put outlier on separate blocks.
+        return allocate_new_block(n);
+    }
+    // Waste the left space. At most 1/4 of allocated spaces are wasted.
+
+    // Grow the block size gradually.
+    if (_cur_block != NULL) {
+        _block_size = std::min(2 * _block_size, _options.max_block_size);
+    }
+    size_t new_size = _block_size;
+    if (new_size < n) {
+        new_size = n;
+    }
+    Block* b = (Block*)malloc(offsetof(Block, data) + new_size);
+    if (NULL == b) {
+        return NULL;
+    }
+    b->next = NULL;
+    b->alloc_size = n;
+    b->size = new_size;
+    if (_cur_block) {
+        _cur_block->next = _isolated_blocks;
+        _isolated_blocks = _cur_block;
+    }
+    _cur_block = b;
+    return b->data;
+}
+
+}  // namespace base

+ 71 - 0
base/arena.h

@@ -0,0 +1,71 @@
+// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
+//
+// Do small memory allocations on continuous blocks.
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Fri Jun  5 18:25:40 CST 2015
+
+#ifndef BRPC_BASE_ARENA_H
+#define BRPC_BASE_ARENA_H
+
+#include <stdint.h>
+#include "base/macros.h"
+
+namespace base {
+
+// Tuning knobs for Arena.
+struct ArenaOptions {
+    // Bytes requested from malloc for the first block.
+    size_t initial_block_size;
+    // Upper bound that block sizes may grow to.
+    size_t max_block_size;
+
+    // Constructed with default options.
+    ArenaOptions();
+};
+
+// Just a proof-of-concept, will be refactored in future CI.
+// Bump-pointer allocator: allocations are carved from malloc'ed blocks and
+// cannot be freed individually; all memory is released at once by the
+// destructor or clear().
+// NOTE(review): no locking visible here; presumably each Arena is used by a
+// single thread at a time -- confirm at call sites.
+class Arena {
+public:
+    explicit Arena(const ArenaOptions& options = ArenaOptions());
+    ~Arena();
+    // Exchange all blocks and options with the other arena.
+    void swap(Arena&);
+    // Returns `n' uninitialized bytes inside this arena, valid until the
+    // arena is destroyed or cleared.
+    void* allocate(size_t n);
+    void* allocate_aligned(size_t n);  // not implemented.
+    // Release all memory owned by this arena.
+    void clear();
+
+private:
+    DISALLOW_COPY_AND_ASSIGN(Arena);
+
+    // Header of one malloc'ed chunk; the payload follows in-place.
+    struct Block {
+        // Bytes still unused in this block.
+        uint32_t left_space() const { return size - alloc_size; }
+        
+        Block* next;          // singly-linked list
+        uint32_t alloc_size;  // bytes already handed out from data[]
+        uint32_t size;        // capacity of data[]
+        char data[0];         // zero-length array (GNU extension): payload
+    };
+
+    void* allocate_in_other_blocks(size_t n);
+    void* allocate_new_block(size_t n);
+    // Detach and return the head of the list rooted at `head'.
+    Block* pop_block(Block* & head) {
+        Block* saved_head = head;
+        head = head->next;
+        return saved_head;
+    }
+    
+    Block* _cur_block;        // block currently carved by allocate()
+    Block* _isolated_blocks;  // retired blocks and oversized allocations
+    size_t _block_size;       // size of the next block to request
+    ArenaOptions _options;
+};
+
+inline void* Arena::allocate(size_t n) {
+    // Fast path: bump the cursor inside the current block when it fits.
+    if (_cur_block != NULL && _cur_block->left_space() >= n) {
+        void* ret = _cur_block->data + _cur_block->alloc_size;
+        _cur_block->alloc_size += n;
+        return ret;
+    }
+    return allocate_in_other_blocks(n);
+}
+
+}  // namespace base
+
+#endif  // BRPC_BASE_ARENA_H

+ 82 - 0
base/at_exit.cc

@@ -0,0 +1,82 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+
+#include <stddef.h>
+#include <ostream>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Keep a stack of registered AtExitManagers.  We always operate on the most
+// recent, and we should never have more than one outside of testing (for a
+// statically linked version of this library).  Testing may use the shadow
+// version of the constructor, and if we are building a dynamic library we may
+// end up with multiple AtExitManagers on the same process.  We don't protect
+// this for thread-safe access, since it will only be modified in testing.
+static AtExitManager* g_top_manager = NULL;
+
+AtExitManager::AtExitManager() : next_manager_(g_top_manager) {
+// If multiple modules instantiate AtExitManagers they'll end up living in this
+// module... they have to coexist.
+#if !defined(COMPONENT_BUILD)
+  DCHECK(!g_top_manager);
+#endif
+  g_top_manager = this;
+}
+
+AtExitManager::~AtExitManager() {
+  if (!g_top_manager) {
+    NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
+    return;
+  }
+  DCHECK_EQ(this, g_top_manager);
+
+  // Run this manager's callbacks, then pop it so a previously shadowed
+  // manager (if any) becomes the active one again.
+  ProcessCallbacksNow();
+  g_top_manager = next_manager_;
+}
+
+// static
+void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
+  DCHECK(func);
+  // Adapt the C-style (func, param) pair to a Closure and reuse RegisterTask.
+  RegisterTask(base::Bind(func, param));
+}
+
+// static
+void AtExitManager::RegisterTask(base::Closure task) {
+  if (!g_top_manager) {
+    NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
+    return;
+  }
+
+  AutoLock lock(g_top_manager->lock_);
+  g_top_manager->stack_.push(task);
+}
+
+// static
+void AtExitManager::ProcessCallbacksNow() {
+  if (!g_top_manager) {
+    NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
+    return;
+  }
+
+  AutoLock lock(g_top_manager->lock_);
+
+  // Run tasks in LIFO order (most recently registered first).
+  // NOTE(review): lock_ is held while each task runs; a task that calls
+  // RegisterTask() would self-deadlock if base::Lock is non-reentrant --
+  // confirm against base/synchronization/lock.h.
+  while (!g_top_manager->stack_.empty()) {
+    base::Closure task = g_top_manager->stack_.top();
+    task.Run();
+    g_top_manager->stack_.pop();
+  }
+}
+
+// Shadow constructor: pushes this manager on top of any existing one.
+// Intended only for tests (see the header's ShadowingAtExitManager).
+AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
+  DCHECK(shadow || !g_top_manager);
+  g_top_manager = this;
+}
+
+}  // namespace base

+ 76 - 0
base/at_exit.h

@@ -0,0 +1,76 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AT_EXIT_H_
+#define BASE_AT_EXIT_H_
+
+#include <stack>
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// This class provides a facility similar to the CRT atexit(), except that
+// we control when the callbacks are executed. Under Windows for a DLL they
+// happen at a really bad time and under the loader lock. This facility is
+// mostly used by base::Singleton.
+//
+// The usage is simple. Early in the main() or WinMain() scope create an
+// AtExitManager object on the stack:
+// int main(...) {
+//    base::AtExitManager exit_manager;
+//    ... run the program ...
+// }
+// When the exit_manager object goes out of scope, all the registered
+// callbacks and singleton destructors will be called.
+
+class BASE_EXPORT AtExitManager {
+ public:
+  typedef void (*AtExitCallbackType)(void*);
+
+  AtExitManager();
+
+  // The dtor calls all the registered callbacks. Do not try to register more
+  // callbacks after this point.
+  ~AtExitManager();
+
+  // Registers the specified function to be called at exit. The prototype of
+  // the callback function is void func(void*).
+  static void RegisterCallback(AtExitCallbackType func, void* param);
+
+  // Registers the specified task to be called at exit.
+  static void RegisterTask(base::Closure task);
+
+  // Calls the functions registered with RegisterCallback in LIFO order. It
+  // is possible to register new callbacks after calling this function.
+  static void ProcessCallbacksNow();
+
+ protected:
+  // This constructor will allow this instance of AtExitManager to be created
+  // even if one already exists.  This should only be used for testing!
+  // AtExitManagers are kept on a global stack, and it will be removed during
+  // destruction.  This allows you to shadow another AtExitManager.
+  explicit AtExitManager(bool shadow);
+
+ private:
+  base::Lock lock_;              // Guards stack_.
+  std::stack<base::Closure> stack_;  // Pending callbacks, run in LIFO order.
+  AtExitManager* next_manager_;  // Stack of managers to allow shadowing.
+
+  DISALLOW_COPY_AND_ASSIGN(AtExitManager);
+};
+
+#if defined(UNIT_TEST)
+// Test-only manager that shadows (instead of replacing) the current one.
+class ShadowingAtExitManager : public AtExitManager {
+ public:
+  ShadowingAtExitManager() : AtExitManager(true) {}
+};
+#endif  // defined(UNIT_TEST)
+
+}  // namespace base
+
+#endif  // BASE_AT_EXIT_H_

+ 80 - 0
base/atomic_ref_count.h

@@ -0,0 +1,80 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a low level implementation of atomic semantics for reference
+// counting.  Please use base/memory/ref_counted.h directly instead.
+//
+// The implementation includes annotations to avoid some false positives
+// when using data race detection tools.
+
+#ifndef BASE_ATOMIC_REF_COUNT_H_
+#define BASE_ATOMIC_REF_COUNT_H_
+
+#include "base/atomicops.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+
+namespace base {
+
+typedef subtle::Atomic32 AtomicRefCount;
+
+// Increment a reference count by "increment", which must exceed 0.
+inline void AtomicRefCountIncN(volatile AtomicRefCount *ptr,
+                               AtomicRefCount increment) {
+  subtle::NoBarrier_AtomicIncrement(ptr, increment);
+}
+
+// Decrement a reference count by "decrement", which must exceed 0,
+// and return whether the result is non-zero.
+// Insert barriers to ensure that state written before the reference count
+// became zero will be visible to a thread that has just made the count zero.
+inline bool AtomicRefCountDecN(volatile AtomicRefCount *ptr,
+                               AtomicRefCount decrement) {
+  // The ANNOTATE_* pair tells race detectors that the decrement-to-zero
+  // establishes a happens-before edge, suppressing false positives.
+  ANNOTATE_HAPPENS_BEFORE(ptr);
+  bool res = (subtle::Barrier_AtomicIncrement(ptr, -decrement) != 0);
+  if (!res) {
+    ANNOTATE_HAPPENS_AFTER(ptr);
+  }
+  return res;
+}
+
+// Increment a reference count by 1.
+inline void AtomicRefCountInc(volatile AtomicRefCount *ptr) {
+  base::AtomicRefCountIncN(ptr, 1);
+}
+
+// Decrement a reference count by 1 and return whether the result is non-zero.
+// Insert barriers to ensure that state written before the reference count
+// became zero will be visible to a thread that has just made the count zero.
+inline bool AtomicRefCountDec(volatile AtomicRefCount *ptr) {
+  return base::AtomicRefCountDecN(ptr, 1);
+}
+
+// Return whether the reference count is one.  If the reference count is used
+// in the conventional way, a reference count of 1 implies that the current
+// thread owns the reference and no other thread shares it.  This call performs
+// the test for a reference count of one, and performs the memory barrier
+// needed for the owning thread to act on the object, knowing that it has
+// exclusive access to the object.
+inline bool AtomicRefCountIsOne(volatile AtomicRefCount *ptr) {
+  bool res = (subtle::Acquire_Load(ptr) == 1);
+  if (res) {
+    ANNOTATE_HAPPENS_AFTER(ptr);
+  }
+  return res;
+}
+
+// Return whether the reference count is zero.  With conventional object
+// reference counting, the object will be destroyed, so the reference count
+// should never be zero.  Hence this is generally used for a debug check.
+inline bool AtomicRefCountIsZero(volatile AtomicRefCount *ptr) {
+  bool res = (subtle::Acquire_Load(ptr) == 0);
+  if (res) {
+    ANNOTATE_HAPPENS_AFTER(ptr);
+  }
+  return res;
+}
+
+}  // namespace base
+
+#endif  // BASE_ATOMIC_REF_COUNT_H_

+ 60 - 0
base/atomic_sequence_num.h

@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ATOMIC_SEQUENCE_NUM_H_
+#define BASE_ATOMIC_SEQUENCE_NUM_H_
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+
+namespace base {
+
+class AtomicSequenceNumber;
+
+// Static (POD) AtomicSequenceNumber that MUST be used in global scope (or
+// non-function scope) ONLY. This implementation does not generate any static
+// initializer.  Note that it does not implement any constructor which means
+// that its fields are not initialized except when it is stored in the global
+// data section (.data in ELF). If you want to allocate an atomic sequence
+// number on the stack (or heap), please use the AtomicSequenceNumber class
+// declared below.
+class StaticAtomicSequenceNumber {
+ public:
+  inline int GetNext() {
+    return static_cast<int>(
+        base::subtle::NoBarrier_AtomicIncrement(&seq_, 1) - 1);
+  }
+
+ private:
+  friend class AtomicSequenceNumber;
+
+  inline void Reset() {
+    base::subtle::Release_Store(&seq_, 0);
+  }
+
+  base::subtle::Atomic32 seq_;
+};
+
+// AtomicSequenceNumber that can be stored and used safely (i.e. its fields are
+// always initialized as opposed to StaticAtomicSequenceNumber declared above).
+// Please use StaticAtomicSequenceNumber if you want to declare an atomic
+// sequence number in the global scope.
+class AtomicSequenceNumber {
+ public:
+  AtomicSequenceNumber() {
+    seq_.Reset();
+  }
+
+  inline int GetNext() {
+    return seq_.GetNext();
+  }
+
+ private:
+  StaticAtomicSequenceNumber seq_;
+  DISALLOW_COPY_AND_ASSIGN(AtomicSequenceNumber);
+};
+
+}  // namespace base
+
+#endif  // BASE_ATOMIC_SEQUENCE_NUM_H_

+ 316 - 0
base/atomicops.h

@@ -0,0 +1,316 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle.  If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain.  If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative.  You should assume only properties explicitly guaranteed by the
+// specifications in this file.  You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break.  If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines.  The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include <stdint.h>
+
+#include "base/build_config.h"
+#include "base/macros.h"
+
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace base {
+namespace subtle {
+
+typedef int32_t Atomic32;
+#ifdef ARCH_CPU_64_BITS
+// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__) || defined(OS_NACL)
+// NaCl's intptr_t is not actually 64-bits on 64-bit!
+// http://code.google.com/p/nativeclient/issues/detail?id=1162
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// These following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions.  "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef ARCH_CPU_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // ARCH_CPU_64_BITS
+
+}  // namespace subtle
+}  // namespace base
+
+// Include our platform specific implementation.
+#if defined(THREAD_SANITIZER)
+#include "base/atomicops_internals_tsan.h"
+#elif defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_msvc.h"
+#elif defined(OS_MACOSX)
+#include "base/atomicops_internals_mac.h"
+#elif defined(OS_NACL)
+#include "base/atomicops_internals_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
+#include "base/atomicops_internals_arm_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
+#include "base/atomicops_internals_arm64_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS_FAMILY)
+#include "base/atomicops_internals_mips_gcc.h"
+#else
+#error "Atomic operations are not supported on your platform"
+#endif
+
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(OS_MACOSX) || defined(OS_OPENBSD)
+#include "base/atomicops_internals_atomicword_compat.h"
+#endif
+
+// ========= Provide base::atomic<T> =========
+#if defined(BASE_CXX11_ENABLED)
+
+// gcc supports atomic thread fence since 4.8; check out
+// https://gcc.gnu.org/gcc-4.7/cxx0x_status.html and
+// https://gcc.gnu.org/gcc-4.8/cxx0x_status.html for more details
+#if !defined(__GNUC__) || (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 >= 40800)
+#include <atomic>
+#else 
+
+#if __GNUC__ * 10000 + __GNUC_MINOR__ * 100 >= 40500
+// gcc 4.5 renames cstdatomic to atomic
+// (https://gcc.gnu.org/gcc-4.5/changes.html)
+#include <atomic>
+#else
+#include <cstdatomic>
+#endif
+
+namespace std {
+
+BASE_FORCE_INLINE void atomic_thread_fence(memory_order v) {  // fallback for gcc < 4.8
+    switch (v) {
+    case memory_order_relaxed:  // no ordering requested: emit nothing
+        break;
+    case memory_order_consume:
+    case memory_order_acquire:
+    case memory_order_release:
+    case memory_order_acq_rel:
+        __asm__ __volatile__("" : : : "memory");  // compiler-only barrier
+        break;
+    case memory_order_seq_cst:
+        __asm__ __volatile__("mfence" : : : "memory");  // NOTE(review): x86-only -- confirm this branch never builds on other arches
+        break;
+    }
+}
+
+BASE_FORCE_INLINE void atomic_signal_fence(memory_order v) {  // vs. a signal handler, a compiler barrier suffices
+    if (v != memory_order_relaxed) {
+        __asm__ __volatile__("" : : : "memory");
+    }
+}
+
+}  // namespace std
+
+#endif  // __GNUC__
+
+namespace base {
+using ::std::memory_order;
+using ::std::memory_order_relaxed;
+using ::std::memory_order_consume;
+using ::std::memory_order_acquire;
+using ::std::memory_order_release;
+using ::std::memory_order_acq_rel;
+using ::std::memory_order_seq_cst;
+using ::std::atomic_thread_fence;
+using ::std::atomic_signal_fence;
+template <typename T> class atomic : public ::std::atomic<T> {
+public:
+    atomic() {}
+    atomic(T v) : ::std::atomic<T>(v) {}  // implicit conversion intended, mirroring std::atomic
+    atomic& operator=(T v) {
+        this->store(v);
+        return *this;
+    }
+private:
+    DISALLOW_COPY_AND_ASSIGN(atomic);
+    // Ensure base::atomic<T> adds no state beyond ::std::atomic<T>, so that
+    // compilation units compiled with and without C++11 (thus seeing the
+    // std:: or the boost:: definition) stay layout-compatible.
+    BAIDU_CASSERT(sizeof(T) == sizeof(::std::atomic<T>), size_must_match);
+};
+} // namespace base
+#else
+#include <boost/atomic.hpp>
+namespace base {
+using ::boost::memory_order;
+using ::boost::memory_order_relaxed;
+using ::boost::memory_order_consume;
+using ::boost::memory_order_acquire;
+using ::boost::memory_order_release;
+using ::boost::memory_order_acq_rel;
+using ::boost::memory_order_seq_cst;
+using ::boost::atomic_thread_fence;
+using ::boost::atomic_signal_fence;
+template <typename T> class atomic : public ::boost::atomic<T> {
+public:
+    atomic() {}
+    atomic(T v) : ::boost::atomic<T>(v) {}  // implicit conversion intended, mirroring boost::atomic
+    atomic& operator=(T v) {
+        this->store(v);
+        return *this;
+    }
+private:
+    DISALLOW_COPY_AND_ASSIGN(atomic);
+    // Ensure base::atomic<T> adds no state beyond ::boost::atomic<T>, so that
+    // compilation units compiled with and without C++11 (thus seeing the
+    // std:: or the boost:: definition) stay layout-compatible.
+    BAIDU_CASSERT(sizeof(T) == sizeof(::boost::atomic<T>), size_must_match);
+};
+} // namespace base
+#endif
+
+// static_atomic<> is a work-around for C++03 to declare global atomics
+// w/o constructing-order issues. It can also be used in C++11 though.
+// Example:
+//   base::static_atomic<int> g_counter = BASE_STATIC_ATOMIC_INIT(0);
+// Notice that to make static_atomic work for C++03, it cannot be
+// initialized by a constructor. Following code is wrong:
+//   base::static_atomic<int> g_counter(0); // Does not compile
+
+#define BASE_STATIC_ATOMIC_INIT(val) { (val) }
+
+namespace base {
+template <typename T> struct static_atomic {
+    T val;  // public & constructor-free so BASE_STATIC_ATOMIC_INIT brace-init works in C++03
+
+    // NOTE: the memory_order parameters must be present.
+    T load(memory_order o) { return ref().load(o); }
+    void store(T v, memory_order o) { return ref().store(v, o); }
+    T exchange(T v, memory_order o) { return ref().exchange(v, o); }
+    bool compare_exchange_weak(T& e, T d, memory_order o)
+    { return ref().compare_exchange_weak(e, d, o); }
+    bool compare_exchange_weak(T& e, T d, memory_order so, memory_order fo)
+    { return ref().compare_exchange_weak(e, d, so, fo); }
+    bool compare_exchange_strong(T& e, T d, memory_order o)
+    { return ref().compare_exchange_strong(e, d, o); }
+    bool compare_exchange_strong(T& e, T d, memory_order so, memory_order fo)
+    { return ref().compare_exchange_strong(e, d, so, fo); }
+    T fetch_add(T v, memory_order o) { return ref().fetch_add(v, o); }
+    T fetch_sub(T v, memory_order o) { return ref().fetch_sub(v, o); }
+    T fetch_and(T v, memory_order o) { return ref().fetch_and(v, o); }
+    T fetch_or(T v, memory_order o) { return ref().fetch_or(v, o); }
+    T fetch_xor(T v, memory_order o) { return ref().fetch_xor(v, o); }
+    static_atomic& operator=(T v) {  // seq_cst store, mirroring atomic<T>::operator=
+        store(v, memory_order_seq_cst);
+        return *this;
+    }
+private:
+    DISALLOW_ASSIGN(static_atomic);
+    BAIDU_CASSERT(sizeof(T) == sizeof(atomic<T>), size_must_match);
+    atomic<T>& ref() {
+        // Reinterpret val as atomic<T>; the BAIDU_CASSERT above pins equal sizes.
+        atomic<T>* p = reinterpret_cast<atomic<T>*>(&val);
+        return *p;
+    }
+};
+} // namespace base
+
+#endif  // BASE_ATOMICOPS_H_

+ 307 - 0
base/atomicops_internals_arm64_gcc.h

@@ -0,0 +1,307 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+// TODO(rmcilroy): Investigate whether we can use __sync__ intrinsics instead of
+//                 the hand coded assembly without introducing perf regressions.
+// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
+//                 exclusive load / store assembly instructions and do away with
+//                 the barriers.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+#if defined(OS_QNX)
+#include <sys/cpuinline.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+inline void MemoryBarrier() {  // "dmb ish": full data memory barrier, inner-shareable domain
+  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
+}
+
+// NoBarrier versions of the operation include "memory" in the clobber list.
+// This is not required for direct usage of the NoBarrier versions of the
+// operations. However this is required for correctness when they are used as
+// part of the Acquire or Release versions, to ensure that nothing from outside
+// the call is reordered between the operation and the memory barrier. This does
+// not change the code generated, so has no or minimal impact on the
+// NoBarrier operations.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"IJr" (old_value),
+      [new_value]"r" (new_value)
+    : "cc", "memory"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                       \n\t"
+    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]\n\t"
+    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"IJr" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {  // "stlr" = store-release
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %w[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {  // "ldar" = load-acquire
+  Atomic32 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %w[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"IJr" (old_value),
+      [new_value]"r" (new_value)
+    : "cc", "memory"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[result], %[ptr]                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                     \n\t"
+    "ldxr %[result], %[ptr]                 \n\t"
+    "add %[result], %[result], %[increment] \n\t"
+    "stxr %w[temp], %[result], %[ptr]       \n\t"
+    "cbnz %w[temp], 0b                      \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"IJr" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %x[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %x[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_

+ 294 - 0
base/atomicops_internals_arm_gcc.h

@@ -0,0 +1,294 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+#if defined(OS_QNX)
+#include <sys/cpuinline.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP, there is no memory barrier instruction at
+//   all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+//   writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+//   barrier (though writing to the co-processor will still work).
+//   However, on single core devices (e.g. Nexus One, or Nexus S),
+//   this instruction will take up to 200 ns, which is huge, even though
+//   it's completely un-needed on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+//   single or multi-core. However, the kernel provides a useful helper
+//   function at a fixed memory address (0xffff0fa0), which will always
+//   perform a memory barrier in the most efficient way. I.e. on single
+//   core devices, this is an empty function that exits immediately.
+//   On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+//   multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+//   are needed for correct execution. Always call the kernel helper, even
+//   when targeting ARMv5TE.
+//
+
+inline void MemoryBarrier() {
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // Note: This is a function call, which is also an implicit compiler barrier.
+  typedef void (*KernelMemoryBarrierFunc)();
+  ((KernelMemoryBarrierFunc)0xffff0fa0)();  // kernel "kuser" barrier helper (see comment above)
+#elif defined(OS_QNX)
+  __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+    defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  int reloop;
+  do {
+    // The following is equivalent to:
+    //
+    //   prev_value = LDREX(ptr)
+    //   reloop = 0
+    //   if (prev_value != old_value)
+    //      reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    mov %1, #0\n"
+                         "    cmp %0, %4\n"
+#ifdef __thumb2__
+                         "    it eq\n"
+#endif
+                         "    strexeq %1, %5, [%3]\n"
+                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(old_value), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return prev_value;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 value;
+  int reloop;
+  do {
+    // Equivalent to:
+    //
+    //  value = LDREX(ptr)
+    //  value += increment
+    //  reloop = STREX(ptr, value)
+    //
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    add %0, %0, %4\n"
+                         "    strex %1, %0, [%3]\n"
+                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(increment)
+                         : "cc", "memory");
+  } while (reloop);
+  return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  // TODO(digit): Investigate if it's possible to implement this with
+  // a single MemoryBarrier() operation between the LDREX and STREX.
+  // See http://crbug.com/246514
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  int reloop;
+  do {
+    // old_value = LDREX(ptr)
+    // reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("   ldrex %0, [%3]\n"
+                         "   strex %1, %4, [%3]\n"
+                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+      defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier, so there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success, and 1 on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,  // 0 on success, non-zero on failure
+                              Atomic32 new_value,
+                              volatile Atomic32* ptr) {
+  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);  // kuser helper; full barrier
+}
+
+}  // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value)
+      return prev_value;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value) {
+      // Always ensure acquire semantics.
+      MemoryBarrier();
+      return prev_value;
+    }
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  // This could be implemented as:
+  //    MemoryBarrier();
+  //    return NoBarrier_CompareAndSwap();
+  //
+  // But would use 3 barriers per successful CAS. To save performance,
+  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+  // - An early return due to (prev_value != old_value) performs
+  //   a memory barrier with no store, which is equivalent to the
+  //   generic implementation above.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#else
+#  error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed in case of 32-bit alignment of |ptr| values.
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_

+ 100 - 0
base/atomicops_internals_atomicword_compat.h

@@ -0,0 +1,100 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32_t,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(ARCH_CPU_64_BITS)
+
+namespace base {
+namespace subtle {
+
+// Every overload below reinterprets the AtomicWord pointer as an Atomic32
+// pointer and forwards to the corresponding Atomic32 operation (both types
+// are 32-bit integral types on these platforms).
+// NOTE(review): some overloads call the target unqualified while others use
+// the fully qualified base::subtle:: form -- presumably to steer overload
+// resolution to the Atomic32 versions; confirm before "normalizing" them.
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return base::subtle::Acquire_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return base::subtle::Release_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+  NoBarrier_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return base::subtle::Acquire_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return base::subtle::Release_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+  return NoBarrier_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return base::subtle::Acquire_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return base::subtle::Release_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+}   // namespace base::subtle
+}   // namespace base
+
+#endif  // !defined(ARCH_CPU_64_BITS)
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_

+ 106 - 0
base/atomicops_internals_gcc.h

@@ -0,0 +1,106 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, include base/atomicops.h
+// instead. This file is for platforms that use GCC intrinsics rather than
+// platform-specific assembly code for atomic operations.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// CAS: if *ptr == old_value, store new_value. Always returns the value *ptr
+// held before the operation. __sync_bool_compare_and_swap is a full barrier,
+// so in practice this is stronger than the "NoBarrier" name suggests.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (__sync_bool_compare_and_swap(ptr, old_value, new_value))
+      return old_value;
+    // CAS failed; re-read *ptr. Retry while the observed value equals
+    // old_value so we never report old_value for a swap that didn't happen.
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+// Atomically store new_value into *ptr, returning the previous value.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
+  return old_value;
+}
+
+// Relaxed increment, conservatively implemented via the barrier version.
+// NOTE(review): Barrier_AtomicIncrement is used before its definition in
+// this header; presumably a prior declaration comes from base/atomicops.h
+// -- confirm.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+// Atomically add |increment| to *ptr; returns the incremented value.
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  // Since NoBarrier_CompareAndSwap uses __sync_bool_compare_and_swap, which
+  // is a full memory barrier, none is needed here or below in Release.
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+// Plain store: atomic, but with no ordering guarantees.
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+// Full hardware memory barrier.
+inline void MemoryBarrier() {
+  __sync_synchronize();
+}
+
+// Store then barrier (legacy semantics of this API; not a C++11 acquire).
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+// Barrier then store: earlier accesses cannot be reordered after the store.
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+// Plain load: atomic, but with no ordering guarantees.
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+// Load then barrier: later accesses cannot be reordered before the load.
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+// Barrier then load (legacy semantics of this API).
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_GCC_H_
+

+ 197 - 0
base/atomicops_internals_mac.h

@@ -0,0 +1,197 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// Mac implementation built on the libkern OSAtomic primitives. CAS and
+// exchange are expressed as OSAtomicCompareAndSwap loops; "Barrier"
+// variants use the *Barrier flavors of the same calls.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_MAC_H_
+#define BASE_ATOMICOPS_INTERNALS_MAC_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace base {
+namespace subtle {
+
+// CAS without ordering guarantees: if *ptr == old_value, store new_value.
+// Always returns the value *ptr held before the operation.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32(old_value, new_value,
+                                 const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    // CAS failed; re-read. Retry if another thread put old_value back so a
+    // stale read is never reported as the "previous" value.
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+// Atomically store new_value into *ptr, returning the previous value.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+                                     const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+// Atomically add |increment| to *ptr; returns the incremented value.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+// Same as above, with a full barrier around the add.
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+  OSMemoryBarrier();
+}
+
+// CAS with a barrier (via the *Barrier flavor of the OSAtomic call).
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+                                        const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+// The lib kern interface does not distinguish between Acquire and Release
+// barriers, so the Release variant forwards to the Acquire one.
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+// Plain store: atomic, but with no ordering guarantees.
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+// Store then barrier (legacy semantics of this API; not a C++11 acquire).
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+// Barrier then store: earlier accesses cannot be reordered after the store.
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+// Plain load: atomic, but with no ordering guarantees.
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+// Load then barrier: later accesses cannot be reordered before the load.
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+// Barrier then load (legacy semantics of this API).
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform; mirrors the 32-bit operations
+// above using the OSAtomic*64 calls.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64(old_value, new_value,
+                                 reinterpret_cast<volatile int64_t*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+                                     reinterpret_cast<volatile int64_t*>(ptr)));
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return OSAtomicAdd64Barrier(increment,
+                              reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64Barrier(
+        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  // The lib kern interface does not distinguish between
+  // Acquire and Release memory barriers; they are equivalent.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif  // defined(__LP64__)
+
+}   // namespace base::subtle
+}   // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_MAC_H_

+ 154 - 0
base/atomicops_internals_mips_gcc.h

@@ -0,0 +1,154 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+// Atomic read-modify-write operations are built on MIPS ll/sc
+// (load-linked / store-conditional) retry loops.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %1, %2\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %2\n"  // temp = *ptr
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+// Increment with full barriers on both sides of the operation.
+// NOTE(review): MemoryBarrier() is used here before its definition below;
+// presumably a prior declaration comes from base/atomicops.h -- confirm.
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+// Plain store: atomic, but with no ordering guarantees.
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+// Full barrier via the MIPS "sync" instruction.
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+// Store then barrier (legacy semantics of this API; not a C++11 acquire).
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+// Barrier then store: earlier accesses cannot be reordered after the store.
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+// Plain load: atomic, but with no ordering guarantees.
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+// Load then barrier: later accesses cannot be reordered before the load.
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+// Barrier then load (legacy semantics of this API).
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} // namespace base::subtle
+} // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_

+ 186 - 0
base/atomicops_internals_tsan.h

@@ -0,0 +1,186 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+//
+// Every operation maps directly onto a __tsan_atomic* intrinsic with an
+// explicit memory order, so ThreadSanitizer can model the synchronization
+// precisely instead of reporting races on the raw memory accesses.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
+#define BASE_ATOMICOPS_INTERNALS_TSAN_H_
+
+#include <sanitizer/tsan_interface_atomic.h>
+
+namespace base {
+namespace subtle {
+
+// 32-bit operations.
+
+// Relaxed compare-exchange; returns the value observed in *ptr.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+                                       Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+                                       Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_release);
+}
+
+// fetch_add returns the old value, so |increment| is added back to return
+// the incremented value, matching this API's contract.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+// Relaxed store followed by a seq_cst fence: models this API's legacy
+// "store then full barrier" Acquire_Store semantics.
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+// Seq_cst fence followed by a relaxed load: models this API's legacy
+// "barrier then load" Release_Load semantics.
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+// 64-bit operations; mirror the 32-bit set above.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+                                       Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+                                       Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+// Full (sequentially consistent) fence.
+inline void MemoryBarrier() {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_

+ 100 - 0
base/atomicops_internals_x86_gcc.cc

@@ -0,0 +1,100 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <stdint.h>
+#include <string.h>
+
+#include "base/atomicops.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file.  If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
+// of the global offset table.  To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%ebx, %%edi\n"     \
+      "cpuid\n"                \
+      "xchg %%edi, %%ebx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%rbx, %%rdi\n"     \
+      "cpuid\n"                \
+      "xchg %%rdi, %%rbx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid)        // initialize the struct only on x86
+
+// Set the flags so that code will run correctly and conservatively, so even
+// if we haven't been initialized yet, we're probably single threaded, and our
+// default values should hopefully be pretty safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+  false,          // bug can't exist before process spawns multiple threads
+};
+
+namespace {
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+// Queries CPUID for the vendor string and family/model, and sets
+// has_amd_lock_mb_bug for the affected AMD Opteron Rev E parts.
+void AtomicOps_Internalx86CPUFeaturesInit() {
+  uint32_t eax;
+  uint32_t ebx;
+  uint32_t ecx;
+  uint32_t edx;
+
+  // Get vendor string (issue CPUID with eax = 0)
+  cpuid(eax, ebx, ecx, edx, 0);
+  char vendor[13];
+  memcpy(vendor, &ebx, 4);
+  memcpy(vendor + 4, &edx, 4);
+  memcpy(vendor + 8, &ecx, 4);
+  vendor[12] = 0;
+
+  // get feature flags in ecx/edx, and family/model in eax
+  cpuid(eax, ebx, ecx, edx, 1);
+
+  int family = (eax >> 8) & 0xf;        // family and model fields
+  int model = (eax >> 4) & 0xf;
+  if (family == 0xf) {                  // use extended family and model fields
+    family += (eax >> 20) & 0xff;
+    model += ((eax >> 16) & 0xf) << 4;
+  }
+
+  // Opteron Rev E has a bug in which on very rare occasions a locked
+  // instruction doesn't act as a read-acquire barrier if followed by a
+  // non-locked read-modify-write instruction.  Rev F has this bug in
+  // pre-release versions, but not in versions released to customers,
+  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
+      family == 15 &&
+      32 <= model && model <= 63) {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+  } else {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+  }
+}
+
+// Helper class whose constructor runs the feature detection.
+class AtomicOpsx86Initializer {
+ public:
+  AtomicOpsx86Initializer() {
+    AtomicOps_Internalx86CPUFeaturesInit();
+  }
+};
+
+// A global whose constructor gets the struct initialized at startup via
+// static initialization.
+AtomicOpsx86Initializer g_initer;
+
+}  // namespace
+
+#endif  // if x86
+
+#endif  // ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

+ 242 - 0
base/atomicops_internals_x86_gcc.h

@@ -0,0 +1,242 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+#include "base/base_export.h"
+
// This struct is not part of the public API of this module; clients may not
// use it.  (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86 CPU, filled in by a static initializer in
// atomicops_internals_x86_gcc.cc.  Values may not be correct before main()
// is run, but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
                            // after acquire compare-and-swap.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
+
// Prevents the compiler (not the CPU) from reordering memory accesses across
// this point.
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

// 32-bit low-level operations on any platform.

// Atomically: if (*ptr == old_value) *ptr = new_value.  Always returns the
// value *ptr held before the operation.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

// Atomically stores new_value into *ptr, returning the previous value.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

// Atomically adds increment to *ptr, returning the new (incremented) value.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

// Like NoBarrier_AtomicIncrement, but additionally guarantees barrier
// semantics even on CPUs with the AMD lock/mb erratum (see the .cc probe).
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  // Affected AMD parts need an explicit lfence for acquire semantics after a
  // locked instruction.
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

// A lock-prefixed cmpxchg already provides release semantics on x86, so this
// is identical to the no-barrier version.
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

// Full hardware memory barrier.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value; // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value; // An x86 store acts as a release barrier
                // for current AMD/Intel chips as of Jan 2008.
                // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
                         // for current AMD/Intel chips as of Jan 2008.
                         // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

}  // namespace subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

+ 198 - 0
base/atomicops_internals_x86_msvc.h

@@ -0,0 +1,198 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+#include <intrin.h>
+
+#include "base/macros.h"
+
#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// X64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

// Atomically: if (*ptr == old_value) *ptr = new_value.  Returns the value
// *ptr held before the operation.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = _InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

// Atomically stores new_value into *ptr, returning the previous value.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = _InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

// Atomically adds increment to *ptr and returns the incremented value.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return _InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

// There is no cheaper non-barrier increment available with the Interlocked
// intrinsics, so this is simply the barrier version.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
// Full hardware memory barrier.
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
#endif
}

// The Interlocked compare-exchange acts as a barrier in this implementation,
// so the acquire/release variants delegate to the no-barrier version.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
              // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
  // NOTE(review): unlike the GCC flavor there is no explicit compiler
  // barrier here; presumably MSVC's volatile semantics prevent compiler
  // reordering -- confirm.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // A plain x86 load; see the GCC flavor's
                          // Acquire_Load() comments on hardware ordering.
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

// The pointer-sized Interlocked intrinsics are used below, so Atomic64 must
// be pointer sized on this platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
    reinterpret_cast<volatile PVOID*>(ptr),
    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
    reinterpret_cast<volatile PVOID*>(ptr),
    reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
              // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value; // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}


#endif  // defined(_WIN64)

}  // namespace subtle
}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

+ 41 - 0
base/auto_reset.h

@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AUTO_RESET_H_
+#define BASE_AUTO_RESET_H_
+
+#include "base/basictypes.h"
+
+// base::AutoReset<> is useful for setting a variable to a new value only within
+// a particular scope. An base::AutoReset<> object resets a variable to its
+// original value upon destruction, making it an alternative to writing
+// "var = false;" or "var = old_val;" at all of a block's exit points.
+//
+// This should be obvious, but note that an base::AutoReset<> instance should
+// have a shorter lifetime than its scoped_variable, to prevent invalid memory
+// writes when the base::AutoReset<> object is destroyed.
+
+namespace base {
+
+template<typename T>
+class AutoReset {
+ public:
+  AutoReset(T* scoped_variable, T new_value)
+      : scoped_variable_(scoped_variable),
+        original_value_(*scoped_variable) {
+    *scoped_variable_ = new_value;
+  }
+
+  ~AutoReset() { *scoped_variable_ = original_value_; }
+
+ private:
+  T* scoped_variable_;
+  T original_value_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoReset);
+};
+
+}
+
+#endif  // BASE_AUTO_RESET_H_

+ 52 - 0
base/barrier_closure.cc

@@ -0,0 +1,52 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/barrier_closure.h"
+
+#include "base/atomic_ref_count.h"
+#include "base/bind.h"
+
namespace {

// Maintains state for a BarrierClosure: the number of outstanding
// invocations and the closure to run once they have all happened.
class BarrierInfo {
 public:
  BarrierInfo(int num_callbacks_left, const base::Closure& done_closure);
  // Called each time the returned barrier closure runs; the final call
  // invokes |done_closure_|.
  void Run();

 private:
  base::AtomicRefCount num_callbacks_left_;  // Atomic count of pending calls.
  base::Closure done_closure_;
};

BarrierInfo::BarrierInfo(int num_callbacks, const base::Closure& done_closure)
    : num_callbacks_left_(num_callbacks),
      done_closure_(done_closure) {
}

void BarrierInfo::Run() {
  // Running more times than |num_callbacks_left_| is a caller bug.
  DCHECK(!base::AtomicRefCountIsZero(&num_callbacks_left_));
  // AtomicRefCountDec presumably returns false once the count reaches zero
  // (TODO confirm against atomic_ref_count.h), i.e. the last caller runs the
  // done closure.  Reset() drops the closure's references promptly on this
  // same thread.
  if (!base::AtomicRefCountDec(&num_callbacks_left_)) {
    done_closure_.Run();
    done_closure_.Reset();
  }
}

}  // namespace
+
+namespace base {
+
+base::Closure BarrierClosure(int num_callbacks_left,
+                             const base::Closure& done_closure) {
+  DCHECK(num_callbacks_left >= 0);
+
+  if (num_callbacks_left == 0)
+    done_closure.Run();
+
+  return base::Bind(&BarrierInfo::Run,
+                    base::Owned(
+                        new BarrierInfo(num_callbacks_left, done_closure)));
+}
+
+}  // namespace base

+ 30 - 0
base/barrier_closure.h

@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BARRIER_CLOSURE_H_
+#define BASE_BARRIER_CLOSURE_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+
namespace base {

// BarrierClosure executes |done_closure| after it has been invoked
// |num_closures| times.
//
// If |num_closures| is 0, |done_closure| is executed immediately.
//
// BarrierClosure is thread-safe - the count of remaining closures is
// maintained as a base::AtomicRefCount. |done_closure| will be run on
// the thread that calls the final Run() on the returned closure.
//
// |done_closure| is also Reset() on the final calling thread but due to the
// refcounted nature of callbacks, it is hard to know what thread resources
// will be released on.
BASE_EXPORT base::Closure BarrierClosure(int num_closures,
                                         const base::Closure& done_closure);

}  // namespace base
+
+#endif  // BASE_BARRIER_CLOSURE_H_

+ 37 - 0
base/base64.cc

@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64.h"
+
+#include "third_party/modp_b64/modp_b64.h"
+
+namespace base {
+
+void Base64Encode(const StringPiece& input, std::string* output) {
+  std::string temp;
+  temp.resize(modp_b64_encode_len(input.size()));  // makes room for null byte
+
+  // modp_b64_encode_len() returns at least 1, so temp[0] is safe to use.
+  size_t output_size = modp_b64_encode(&(temp[0]), input.data(), input.size());
+
+  temp.resize(output_size);  // strips off null byte
+  output->swap(temp);
+}
+
+bool Base64Decode(const StringPiece& input, std::string* output) {
+  std::string temp;
+  temp.resize(modp_b64_decode_len(input.size()));
+
+  // does not null terminate result since result is binary data!
+  size_t input_size = input.size();
+  size_t output_size = modp_b64_decode(&(temp[0]), input.data(), input_size);
+  if (output_size == MODP_B64_ERROR)
+    return false;
+
+  temp.resize(output_size);
+  output->swap(temp);
+  return true;
+}
+
+}  // namespace base

+ 24 - 0
base/base64.h

@@ -0,0 +1,24 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE64_H__
+#define BASE_BASE64_H__
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
namespace base {

// Encodes |input| in base64 and stores the result in |*output|.
BASE_EXPORT void Base64Encode(const StringPiece& input, std::string* output);

// Decodes the base64 input string into |*output|.  Returns true if
// successful and false otherwise.  The output string is only modified if
// successful.
BASE_EXPORT bool Base64Decode(const StringPiece& input, std::string* output);

}  // namespace base
+
+#endif  // BASE_BASE64_H__

+ 34 - 0
base/base_export.h

@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_EXPORT_H_
+#define BASE_BASE_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __declspec(dllexport)
+#define BASE_EXPORT_PRIVATE __declspec(dllexport)
+#else
+#define BASE_EXPORT __declspec(dllimport)
+#define BASE_EXPORT_PRIVATE __declspec(dllimport)
+#endif  // defined(BASE_IMPLEMENTATION)
+
+#else  // defined(WIN32)
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __attribute__((visibility("default")))
+#define BASE_EXPORT_PRIVATE __attribute__((visibility("default")))
+#else
+#define BASE_EXPORT
+#define BASE_EXPORT_PRIVATE
+#endif  // defined(BASE_IMPLEMENTATION)
+#endif
+
+#else  // defined(COMPONENT_BUILD)
+#define BASE_EXPORT
+#define BASE_EXPORT_PRIVATE
+#endif
+
+#endif  // BASE_BASE_EXPORT_H_

+ 46 - 0
base/base_paths.cc

@@ -0,0 +1,46 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_paths.h"
+
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+
+namespace base {
+
+bool PathProvider(int key, FilePath* result) {
+  // NOTE: DIR_CURRENT is a special case in PathService::Get
+
+  switch (key) {
+    case DIR_EXE:
+      PathService::Get(FILE_EXE, result);
+      *result = result->DirName();
+      return true;
+    case DIR_MODULE:
+      PathService::Get(FILE_MODULE, result);
+      *result = result->DirName();
+      return true;
+    case DIR_TEMP:
+      if (!GetTempDir(result))
+        return false;
+      return true;
+    case base::DIR_HOME:
+      *result = GetHomeDir();
+      return true;
+    case DIR_TEST_DATA:
+      if (!PathService::Get(DIR_SOURCE_ROOT, result))
+        return false;
+      *result = result->Append(FILE_PATH_LITERAL("base"));
+      *result = result->Append(FILE_PATH_LITERAL("test"));
+      *result = result->Append(FILE_PATH_LITERAL("data"));
+      if (!PathExists(*result))  // We don't want to create this.
+        return false;
+      return true;
+    default:
+      return false;
+  }
+}
+
+}  // namespace base

+ 55 - 0
base/base_paths.h

@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_H_
+#define BASE_BASE_PATHS_H_
+
+// This file declares path keys for the base module.  These can be used with
+// the PathService to access various special directories and files.
+
+#include "base/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/base_paths_win.h"
+#elif defined(OS_MACOSX)
+#include "base/base_paths_mac.h"
+#elif defined(OS_ANDROID)
+#include "base/base_paths_android.h"
+#endif
+
+#if defined(OS_POSIX)
+#include "base/base_paths_posix.h"
+#endif
+
namespace base {

// Path keys understood by base::PathProvider (see base_paths.cc).  Values
// between PATH_START and PATH_END are reserved for the base module.
enum BasePathKey {
  PATH_START = 0,

  DIR_CURRENT,       // Current directory.
  DIR_EXE,           // Directory containing FILE_EXE.
  DIR_MODULE,        // Directory containing FILE_MODULE.
  DIR_TEMP,          // Temporary directory.
  DIR_HOME,          // User's root home directory. On Windows this will look
                     // like "C:\Users\you" (or on XP
                     // "C:\Documents and Settings\you") which isn't
                     // necessarily a great place to put files.
  FILE_EXE,          // Path and filename of the current executable.
  FILE_MODULE,       // Path and filename of the module containing the code for
                     // the PathService (which could differ from FILE_EXE if the
                     // PathService were compiled into a shared object, for
                     // example).
  DIR_SOURCE_ROOT,   // Returns the root of the source tree. This key is useful
                     // for tests that need to locate various resources. It
                     // should not be used outside of test code.
  DIR_USER_DESKTOP,  // The current user's Desktop.

  DIR_TEST_DATA,     // Used only for testing.

  PATH_END
};

}  // namespace base
+
+#endif  // BASE_BASE_PATHS_H_

+ 25 - 0
base/base_paths_android.h

@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_ANDROID_H_
+#define BASE_BASE_PATHS_ANDROID_H_
+
+// This file declares Android-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
namespace base {

// Android-specific path keys.  The starting value (300) keeps this range
// from colliding with the other platform key ranges declared in base_paths*.h.
enum {
  PATH_ANDROID_START = 300,

  DIR_ANDROID_APP_DATA,  // Directory where to put Android app's data.
  DIR_ANDROID_EXTERNAL_STORAGE,  // Android external storage directory.

  PATH_ANDROID_END
};

}  // namespace base
+
+#endif  // BASE_BASE_PATHS_ANDROID_H_

+ 24 - 0
base/base_paths_mac.h

@@ -0,0 +1,24 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_MAC_H_
+#define BASE_BASE_PATHS_MAC_H_
+
+// This file declares Mac-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
namespace base {

// Mac-specific path keys.  The starting value (200) keeps this range from
// colliding with the other platform key ranges declared in base_paths*.h.
enum {
  PATH_MAC_START = 200,

  DIR_APP_DATA,  // ~/Library/Application Support

  PATH_MAC_END
};

}  // namespace base
+
+#endif  // BASE_BASE_PATHS_MAC_H_

+ 114 - 0
base/base_paths_mac.mm

@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines base::PathProviderMac which replaces base::PathProviderPosix for Mac
+// in base/path_service.cc.
+
+#include <dlfcn.h>
+#import <Foundation/Foundation.h>
+#include <mach-o/dyld.h>
+
+#include "base/base_paths.h"
+#include "base/compiler_specific.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+#include "base/build_config.h"
+
namespace {

// Fills |*path| with the absolute path of the running executable.
void GetNSExecutablePath(base::FilePath* path) {
  DCHECK(path);
  // Executable path can have relative references ("..") depending on
  // how the app was launched.
  uint32_t executable_length = 0;
  // First call with a NULL buffer only reports the required buffer length.
  _NSGetExecutablePath(NULL, &executable_length);
  DCHECK_GT(executable_length, 1u);
  std::string executable_path;
  int rv = _NSGetExecutablePath(WriteInto(&executable_path, executable_length),
                                &executable_length);
  DCHECK_EQ(rv, 0);

  // _NSGetExecutablePath may return paths containing ./ or ../ which makes
  // FilePath::DirName() work incorrectly, convert it to absolute path so that
  // paths such as DIR_SOURCE_ROOT can work, since we expect absolute paths to
  // be returned here.
  *path = base::MakeAbsoluteFilePath(base::FilePath(executable_path));
}

// Returns true if the module for |address| is found. |path| will contain
// the path to the module. Note that |path| may not be absolute.
bool GetModulePathForAddress(base::FilePath* path,
                             const void* address) WARN_UNUSED_RESULT;

bool GetModulePathForAddress(base::FilePath* path, const void* address) {
  Dl_info info;
  // dladdr() returns 0 when |address| cannot be matched to a loaded image.
  if (dladdr(address, &info) == 0)
    return false;
  *path = base::FilePath(info.dli_fname);
  return true;
}

}  // namespace
+
namespace base {

// Mac/iOS path provider; replaces PathProviderPosix for the keys below.
// Fills |*result| and returns true on success.
bool PathProviderMac(int key, base::FilePath* result) {
  switch (key) {
    case base::FILE_EXE:
      GetNSExecutablePath(result);
      return true;
    case base::FILE_MODULE:
      // Resolve the image containing this very function.
      return GetModulePathForAddress(result,
          reinterpret_cast<const void*>(&base::PathProviderMac));
    case base::DIR_APP_DATA: {
      bool success = base::mac::GetUserDirectory(NSApplicationSupportDirectory,
                                                 result);
#if defined(OS_IOS)
      // On IOS, this directory does not exist unless it is created explicitly.
      if (success && !base::PathExists(*result))
        success = base::CreateDirectory(*result);
#endif  // defined(OS_IOS)
      return success;
    }
    case base::DIR_SOURCE_ROOT:
      // Go through PathService to catch overrides.
      if (!PathService::Get(base::FILE_EXE, result))
        return false;

      // Start with the executable's directory.
      *result = result->DirName();

#if !defined(OS_IOS)
      if (base::mac::AmIBundled()) {
        // The bundled app executables (Chromium, TestShell, etc) live five
        // levels down, eg:
        // src/xcodebuild/{Debug|Release}/Chromium.app/Contents/MacOS/Chromium
        *result = result->DirName().DirName().DirName().DirName().DirName();
      } else {
        // Unit tests execute two levels deep from the source root, eg:
        // src/xcodebuild/{Debug|Release}/base_unittests
        *result = result->DirName().DirName();
      }
#endif
      return true;
    case base::DIR_USER_DESKTOP:
#if defined(OS_IOS)
      // iOS does not have desktop directories.
      NOTIMPLEMENTED();
      return false;
#else
      return base::mac::GetUserDirectory(NSDesktopDirectory, result);
#endif
    case base::DIR_CACHE:
      // DIR_CACHE is declared in the platform path-key headers, not in
      // base_paths.h above.
      return base::mac::GetUserDirectory(NSCachesDirectory, result);
    default:
      return false;
  }
}

}  // namespace base

+ 116 - 0
base/base_paths_posix.cc

@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines base::PathProviderPosix, default path provider on POSIX OSes that
+// don't have their own base_paths_OS.cc implementation (i.e. all but Mac and
+// Android).
+
+#include <ostream>
+#include <string>
+
+#include "base/base_paths.h"
+#include "base/environment.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/nix/xdg_util.h"
+#include "base/path_service.h"
+#include "base/process/process_metrics.h"
+#include "base/build_config.h"
+
+#if defined(OS_FREEBSD)
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#elif defined(OS_SOLARIS)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+bool PathProviderPosix(int key, FilePath* result) {
+  FilePath path;
+  switch (key) {
+    case base::FILE_EXE:
+    case base::FILE_MODULE: {  // TODO(evanm): is this correct?
+#if defined(OS_LINUX)
+      FilePath bin_dir;
+      if (!ReadSymbolicLink(FilePath(kProcSelfExe), &bin_dir)) {
+        NOTREACHED() << "Unable to resolve " << kProcSelfExe << ".";
+        return false;
+      }
+      *result = bin_dir;
+      return true;
+#elif defined(OS_FREEBSD)
+      int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+      char bin_dir[PATH_MAX + 1];
+      size_t length = sizeof(bin_dir);
+      // Upon return, |length| is the number of bytes written to |bin_dir|
+      // including the string terminator.
+      int error = sysctl(name, 4, bin_dir, &length, NULL, 0);
+      if (error < 0 || length <= 1) {
+        NOTREACHED() << "Unable to resolve path.";
+        return false;
+      }
+      *result = FilePath(FilePath::StringType(bin_dir, length - 1));
+      return true;
+#elif defined(OS_SOLARIS)
+      char bin_dir[PATH_MAX + 1];
+      if (realpath(getexecname(), bin_dir) == NULL) {
+        NOTREACHED() << "Unable to resolve " << getexecname() << ".";
+        return false;
+      }
+      *result = FilePath(bin_dir);
+      return true;
+#elif defined(OS_OPENBSD)
+      // There is currently no way to get the executable path on OpenBSD
+      char* cpath;
+      if ((cpath = getenv("CHROME_EXE_PATH")) != NULL)
+        *result = FilePath(cpath);
+      else
+        *result = FilePath("/usr/local/chrome/chrome");
+      return true;
+#endif
+    }
+    case base::DIR_SOURCE_ROOT: {
+      // Allow passing this in the environment, for more flexibility in build
+      // tree configurations (sub-project builds, gyp --output_dir, etc.)
+      scoped_ptr<base::Environment> env(base::Environment::Create());
+      std::string cr_source_root;
+      if (env->GetVar("CR_SOURCE_ROOT", &cr_source_root)) {
+        path = FilePath(cr_source_root);
+        if (base::PathExists(path)) {
+          *result = path;
+          return true;
+        } else {
+          DLOG(WARNING) << "CR_SOURCE_ROOT is set, but it appears to not "
+                        << "point to a directory.";
+        }
+      }
+      // On POSIX, unit tests execute two levels deep from the source root.
+      // For example:  out/{Debug|Release}/net_unittest
+      if (PathService::Get(base::DIR_EXE, &path)) {
+        *result = path.DirName().DirName();
+        return true;
+      }
+
+      DLOG(ERROR) << "Couldn't find your source root.  "
+                  << "Try running from your chromium/src directory.";
+      return false;
+    }
+    case base::DIR_USER_DESKTOP:
+      *result = base::nix::GetXDGUserDirectory("DESKTOP", "Desktop");
+      return true;
+    case base::DIR_CACHE: {
+      scoped_ptr<base::Environment> env(base::Environment::Create());
+      FilePath cache_dir(base::nix::GetXDGDirectory(env.get(), "XDG_CACHE_HOME",
+                                                    ".cache"));
+      *result = cache_dir;
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace base

+ 27 - 0
base/base_paths_posix.h

@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_POSIX_H_
+#define BASE_BASE_PATHS_POSIX_H_
+
+// This file declares POSIX-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
namespace base {

// Path keys reserved for POSIX platforms, occupying the
// [PATH_POSIX_START, PATH_POSIX_END) range of PathService keys.
enum {
  PATH_POSIX_START = 400,

  // Directory in which cache data should be placed.  Note that this is
  // *not* where the browser cache lives, although the browser cache can be
  // a subdirectory of it.  Maps to $XDG_CACHE_HOME on Linux and to
  // ~/Library/Caches on Mac.
  DIR_CACHE = PATH_POSIX_START + 1,

  PATH_POSIX_END
};

}  // namespace base
+
+#endif  // BASE_BASE_PATHS_POSIX_H_

+ 68 - 0
base/base_switches.cc

@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_switches.h"
+
namespace switches {

// Switch names shared by all users of base, listed alphabetically.

// Turns off Breakpad crash reporting.
const char kDisableBreakpad[] = "disable-breakpad";

// Requests that crash reporting be turned on.  On platforms where helper
// processes cannot access the files needed to make this decision, this
// switch is generated internally.
const char kEnableCrashReporter[] = "enable-crash-reporter";

// Requests a full memory crash dump.
const char kFullMemoryCrashReport[] = "full-memory-crash-report";

// "1" forces low-end device mode; "2" auto-detects it; any other value
// (or an empty one) forces non-low-end device mode.
const char kLowEndDeviceMode[] = "low-end-device-mode";

// Suppresses all error dialogs when present.
const char kNoErrorDialogs[] = "noerrdialogs";

// Controls whether chrome://profiler contains timing information.  Enabled
// by default; a value of kProfilerTimingDisabledValue disables it, any
// other value enables it.
const char kProfilerTiming[] = "profiler-timing";

// The --profiler-timing value that disables timing information for
// chrome://profiler.
const char kProfilerTimingDisabledValue[] = "0";

// Marks the current process as a child spawned by a test framework that
// launches helper processes.
const char kTestChildProcess[] = "test-child-process";

// Sends a pretty-printed version of tracing info to the console.
const char kTraceToConsole[] = "trace-to-console";

// Default maximal active V-logging level; 0 is the default.  Positive
// values are normally used.
const char kV[] = "v";

// Per-module maximal V-logging levels overriding --v.  E.g.
// "my_module=2,foo*=3" changes the level for all code in "my_module.*" and
// "foo*.*" ("-inl" suffixes are disregarded when matching).  A pattern
// containing a forward or backward slash is tested against the whole
// pathname, not just the module: "*/foo/bar/*=2" covers all code in source
// files under a "foo/bar" directory.
const char kVModule[] = "vmodule";

// Waits up to 60 seconds for a debugger to attach to the process.
const char kWaitForDebugger[] = "wait-for-debugger";

#if defined(OS_POSIX)
// Turns on Breakpad crash reporting in a debug environment where crash
// reporting is typically compiled but disabled.
const char kEnableCrashReporterForTesting[] =
    "enable-crash-reporter-for-testing";
#endif

}  // namespace switches

+ 33 - 0
base/base_switches.h

@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the "base" command-line switches.
+
+#ifndef BASE_BASE_SWITCHES_H_
+#define BASE_BASE_SWITCHES_H_
+
+#include "base/build_config.h"
+
namespace switches {

// Command-line switch names shared by all users of base, in alphabetical
// order.  See base/base_switches.cc for what each switch does.
extern const char kDisableBreakpad[];
extern const char kEnableCrashReporter[];
extern const char kFullMemoryCrashReport[];
extern const char kLowEndDeviceMode[];
extern const char kNoErrorDialogs[];
extern const char kProfilerTiming[];
extern const char kProfilerTimingDisabledValue[];
extern const char kTestChildProcess[];
extern const char kTraceToConsole[];
extern const char kV[];
extern const char kVModule[];
extern const char kWaitForDebugger[];

// POSIX-only switches.
#if defined(OS_POSIX)
extern const char kEnableCrashReporterForTesting[];
#endif

}  // namespace switches
+
+#endif  // BASE_BASE_SWITCHES_H_

+ 35 - 0
base/basictypes.h

@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains definitions of our old basic integral types
+// ((u)int{8,16,32,64}) and further includes. I recommend that you use the C99
+// standard types instead, and include <stdint.h>/<stddef.h>/etc. as needed.
+// Note that the macros and macro-like constructs that were formerly defined in
+// this file are now available separately in base/macros.h.
+
+#ifndef BASE_BASICTYPES_H_
+#define BASE_BASICTYPES_H_
+
+#include <limits.h>  // So we can set the bounds of our types.
+#include <stddef.h>  // For size_t.
+#include <stdint.h>  // For intptr_t.
+
+#include "base/macros.h"
+#include "base/port.h"  // Types that only need exist on certain systems.
+
// DEPRECATED: Please use std::numeric_limits (from <limits>) instead.
//
// Defined via the standard <stdint.h>/<limits.h> limit macros (both headers
// are included above) rather than C-style casts of literals: converting an
// out-of-range value such as 0x80000000 to int32_t is implementation-defined,
// while INT32_MIN et al. are exact by definition.
const uint8_t  kuint8max  = UINT8_MAX;
const uint16_t kuint16max = UINT16_MAX;
const uint32_t kuint32max = UINT32_MAX;
const uint64_t kuint64max = UINT64_MAX;
const  int8_t  kint8min   = INT8_MIN;
const  int8_t  kint8max   = INT8_MAX;
const  int16_t kint16min  = INT16_MIN;
const  int16_t kint16max  = INT16_MAX;
const  int32_t kint32min  = INT32_MIN;
const  int32_t kint32max  = INT32_MAX;
const  int64_t kint64min  = INT64_MIN;
const  int64_t kint64max  = INT64_MAX;
+#endif  // BASE_BASICTYPES_H_

+ 97 - 0
base/big_endian.cc

@@ -0,0 +1,97 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/big_endian.h"
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+BigEndianReader::BigEndianReader(const char* buf, size_t len)
+    : ptr_(buf), end_(ptr_ + len) {}
+
+bool BigEndianReader::Skip(size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  ptr_ += len;
+  return true;
+}
+
+bool BigEndianReader::ReadBytes(void* out, size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  memcpy(out, ptr_, len);
+  ptr_ += len;
+  return true;
+}
+
+bool BigEndianReader::ReadPiece(base::StringPiece* out, size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  *out = base::StringPiece(ptr_, len);
+  ptr_ += len;
+  return true;
+}
+
+template<typename T>
+bool BigEndianReader::Read(T* value) {
+  if (ptr_ + sizeof(T) > end_)
+    return false;
+  ReadBigEndian<T>(ptr_, value);
+  ptr_ += sizeof(T);
+  return true;
+}
+
+bool BigEndianReader::ReadU8(uint8_t* value) {
+  return Read(value);
+}
+
+bool BigEndianReader::ReadU16(uint16_t* value) {
+  return Read(value);
+}
+
+bool BigEndianReader::ReadU32(uint32_t* value) {
+  return Read(value);
+}
+
+BigEndianWriter::BigEndianWriter(char* buf, size_t len)
+    : ptr_(buf), end_(ptr_ + len) {}
+
+bool BigEndianWriter::Skip(size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  ptr_ += len;
+  return true;
+}
+
+bool BigEndianWriter::WriteBytes(const void* buf, size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  memcpy(ptr_, buf, len);
+  ptr_ += len;
+  return true;
+}
+
+template<typename T>
+bool BigEndianWriter::Write(T value) {
+  if (ptr_ + sizeof(T) > end_)
+    return false;
+  WriteBigEndian<T>(ptr_, value);
+  ptr_ += sizeof(T);
+  return true;
+}
+
+bool BigEndianWriter::WriteU8(uint8_t value) {
+  return Write(value);
+}
+
+bool BigEndianWriter::WriteU16(uint16_t value) {
+  return Write(value);
+}
+
+bool BigEndianWriter::WriteU32(uint32_t value) {
+  return Write(value);
+}
+
+}  // namespace base

+ 102 - 0
base/big_endian.h

@@ -0,0 +1,102 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIG_ENDIAN_H_
+#define BASE_BIG_ENDIAN_H_
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
// Reads an integer (signed or unsigned) from |buf| in big-endian order.
// The loop is unrolled by the compiler at -O1 and above.
// NOTE(szym): glibc dns-canon.c and SpdyFrameBuilder use
// ntohs(*(uint16_t*)ptr), which is potentially unaligned and would raise
// SIGBUS on ARMv5 or earlier and on ARMv6-M.
template<typename T>
inline void ReadBigEndian(const char buf[], T* out) {
  T value = buf[0];
  size_t i = 1;
  while (i < sizeof(T)) {
    value <<= 8;
    // The cast to uint8_t keeps sign extension of |buf[i]| from clobbering
    // the bytes accumulated so far.
    value |= static_cast<uint8_t>(buf[i]);
    ++i;
  }
  *out = value;
}

// Writes an integer (signed or unsigned) |val| to |buf| in big-endian order.
// The loop is unrolled by the compiler at -O1 and above.
template<typename T>
inline void WriteBigEndian(char buf[], T val) {
  size_t i = sizeof(T);
  while (i-- > 0) {
    buf[i] = static_cast<char>(val & 0xFF);
    val >>= 8;
  }
}

// Single-byte specializations; these keep clang from warning about the
// (dead-code) shifts in the generic versions.
template<>
inline void ReadBigEndian<uint8_t>(const char buf[], uint8_t* out) {
  *out = static_cast<uint8_t>(buf[0]);
}

template<>
inline void WriteBigEndian<uint8_t>(char buf[], uint8_t val) {
  buf[0] = static_cast<char>(val);
}
+
// Allows reading integers in network order (big endian) while iterating over
// an underlying buffer. All the reading functions advance the internal pointer.
// The reader does not own the buffer; the caller must keep it alive for the
// reader's lifetime.
class BASE_EXPORT BigEndianReader {
 public:
  // |buf| must contain at least |len| readable bytes.
  BigEndianReader(const char* buf, size_t len);

  // Current read position within the buffer.
  const char* ptr() const { return ptr_; }
  // Bytes left to read.  NOTE(review): the pointer difference is narrowed to
  // int here, so buffers over INT_MAX bytes would overflow this — confirm
  // callers never pass such lengths.
  int remaining() const { return end_ - ptr_; }

  // Each of the following returns false (without advancing) when fewer than
  // the requested number of bytes remain.
  bool Skip(size_t len);
  bool ReadBytes(void* out, size_t len);
  // Creates a StringPiece in |out| that points to the underlying buffer.
  bool ReadPiece(base::StringPiece* out, size_t len);
  bool ReadU8(uint8_t* value);
  bool ReadU16(uint16_t* value);
  bool ReadU32(uint32_t* value);

 private:
  // Hidden to promote type safety.
  template<typename T>
  bool Read(T* v);

  const char* ptr_;   // Next byte to read.
  const char* end_;   // One past the last readable byte.
};
+
// Allows writing integers in network order (big endian) while iterating over
// an underlying buffer. All the writing functions advance the internal pointer.
// The writer does not own the buffer; the caller must keep it alive for the
// writer's lifetime.
class BASE_EXPORT BigEndianWriter {
 public:
  // |buf| must have room for at least |len| writable bytes.
  BigEndianWriter(char* buf, size_t len);

  // Current write position within the buffer.
  char* ptr() const { return ptr_; }
  // Bytes of space left.  NOTE(review): the pointer difference is narrowed to
  // int here, so buffers over INT_MAX bytes would overflow this — confirm
  // callers never pass such lengths.
  int remaining() const { return end_ - ptr_; }

  // Each of the following returns false (without advancing) when fewer than
  // the requested number of bytes of space remain.
  bool Skip(size_t len);
  bool WriteBytes(const void* buf, size_t len);
  bool WriteU8(uint8_t value);
  bool WriteU16(uint16_t value);
  bool WriteU32(uint32_t value);

 private:
  // Hidden to promote type safety.
  template<typename T>
  bool Write(T v);

  char* ptr_;   // Next byte to write.
  char* end_;   // One past the last writable byte.
};
+
+}  // namespace base
+
+#endif  // BASE_BIG_ENDIAN_H_

+ 511 - 0
base/bind.h

@@ -0,0 +1,511 @@
+// This file was GENERATED by command:
+//     pump.py bind.h.pump
+// DO NOT EDIT BY HAND!!!
+
+
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_H_
+#define BASE_BIND_H_
+
+#include "base/bind_internal.h"
+#include "base/callback_internal.h"
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// See base/callback.h for documentation.
+//
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// If you're reading the implementation, before proceeding further, you should
+// read the top comment of base/bind_internal.h for a definition of common
+// terms and concepts.
+//
+// RETURN TYPES
+//
+// Though Bind()'s result is meant to be stored in a Callback<> type, it
+// cannot actually return the exact type without requiring a large amount
+// of extra template specializations. The problem is that in order to
+// discern the correct specialization of Callback<>, Bind would need to
+// unwrap the function signature to determine the signature's arity, and
+// whether or not it is a method.
+//
+// Each unique combination of (arity, function_type, num_prebound) where
+// function_type is one of {function, method, const_method} would require
+// one specialization.  We eventually have to do a similar number of
+// specializations anyways in the implementation (see the Invoker<>,
+// classes).  However, it is avoidable in Bind if we return the result
+// via an indirection like we do below.
+//
+// TODO(ajwong): We might be able to avoid this now, but need to test.
+//
+// It is possible to move most of the COMPILE_ASSERT asserts into BindState<>,
+// but it feels a little nicer to have the asserts here so people do not
+// need to crack open bind_internal.h.  On the other hand, it makes Bind()
+// harder to read.
+
+namespace base {
+
// Bind() overload for a functor with no bound arguments.  (This expansion is
// generated by pump.py from bind.h.pump; edit the .pump file, not this one.)
template <typename Functor>
base::Callback<
    typename internal::BindState<
        typename internal::FunctorTraits<Functor>::RunnableType,
        typename internal::FunctorTraits<Functor>::RunType,
        void()>
            ::UnboundRunType>
Bind(Functor functor) {
  // Typedefs for how to store and run the functor.
  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
  typedef typename internal::FunctorTraits<Functor>::RunType RunType;

  typedef internal::BindState<RunnableType, RunType, void()> BindState;


  return Callback<typename BindState::UnboundRunType>(
      new BindState(internal::MakeRunnable(functor)));
}
+
// Bind() overload for a functor plus one bound argument.  (pump.py-generated;
// edit bind.h.pump rather than this expansion.)
template <typename Functor, typename P1>
base::Callback<
    typename internal::BindState<
        typename internal::FunctorTraits<Functor>::RunnableType,
        typename internal::FunctorTraits<Functor>::RunType,
        void(typename internal::CallbackParamTraits<P1>::StorageType)>
            ::UnboundRunType>
Bind(Functor functor, const P1& p1) {
  // Typedefs for how to store and run the functor.
  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
  typedef typename internal::FunctorTraits<Functor>::RunType RunType;

  // Use RunnableType::RunType instead of RunType above because the checks
  // below for bound references need to know what type the actual functor is
  // going to interpret the argument as.
  typedef internal::FunctionTraits<typename RunnableType::RunType>
      BoundFunctorTraits;

  // Do not allow binding a non-const reference parameter. Non-const reference
  // parameters are disallowed by the Google style guide.  Also, binding a
  // non-const reference parameter can make for subtle bugs because the
  // invoked function will receive a reference to the stored copy of the
  // argument and not the original.
  COMPILE_ASSERT(
      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ),
      do_not_bind_functions_with_nonconst_ref);

  // For methods, we need to be careful for parameter 1.  We do not require
  // a scoped_refptr because BindState<> itself takes care of AddRef() for
  // methods. We also disallow binding of an array as the method's target
  // object.
  COMPILE_ASSERT(
      internal::HasIsMethodTag<RunnableType>::value ||
          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
      p1_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
                     !is_array<P1>::value,
                 first_bound_argument_to_method_cannot_be_array);
  typedef internal::BindState<RunnableType, RunType,
      void(typename internal::CallbackParamTraits<P1>::StorageType)> BindState;


  return Callback<typename BindState::UnboundRunType>(
      new BindState(internal::MakeRunnable(functor), p1));
}
+
// Bind() overload for a functor plus two bound arguments.  (pump.py-generated;
// edit bind.h.pump rather than this expansion.)
template <typename Functor, typename P1, typename P2>
base::Callback<
    typename internal::BindState<
        typename internal::FunctorTraits<Functor>::RunnableType,
        typename internal::FunctorTraits<Functor>::RunType,
        void(typename internal::CallbackParamTraits<P1>::StorageType,
            typename internal::CallbackParamTraits<P2>::StorageType)>
            ::UnboundRunType>
Bind(Functor functor, const P1& p1, const P2& p2) {
  // Typedefs for how to store and run the functor.
  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
  typedef typename internal::FunctorTraits<Functor>::RunType RunType;

  // Use RunnableType::RunType instead of RunType above because the checks
  // below for bound references need to know what type the actual functor is
  // going to interpret the argument as.
  typedef internal::FunctionTraits<typename RunnableType::RunType>
      BoundFunctorTraits;

  // Do not allow binding a non-const reference parameter. Non-const reference
  // parameters are disallowed by the Google style guide.  Also, binding a
  // non-const reference parameter can make for subtle bugs because the
  // invoked function will receive a reference to the stored copy of the
  // argument and not the original.
  COMPILE_ASSERT(
      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A2Type>::value ),
      do_not_bind_functions_with_nonconst_ref);

  // For methods, we need to be careful for parameter 1.  We do not require
  // a scoped_refptr because BindState<> itself takes care of AddRef() for
  // methods. We also disallow binding of an array as the method's target
  // object.
  COMPILE_ASSERT(
      internal::HasIsMethodTag<RunnableType>::value ||
          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
      p1_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
                     !is_array<P1>::value,
                 first_bound_argument_to_method_cannot_be_array);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P2>::value,
                 p2_is_refcounted_type_and_needs_scoped_refptr);
  typedef internal::BindState<RunnableType, RunType,
      void(typename internal::CallbackParamTraits<P1>::StorageType,
      typename internal::CallbackParamTraits<P2>::StorageType)> BindState;


  return Callback<typename BindState::UnboundRunType>(
      new BindState(internal::MakeRunnable(functor), p1, p2));
}
+
// Bind() overload for a functor plus three bound arguments.
// (pump.py-generated; edit bind.h.pump rather than this expansion.)
template <typename Functor, typename P1, typename P2, typename P3>
base::Callback<
    typename internal::BindState<
        typename internal::FunctorTraits<Functor>::RunnableType,
        typename internal::FunctorTraits<Functor>::RunType,
        void(typename internal::CallbackParamTraits<P1>::StorageType,
            typename internal::CallbackParamTraits<P2>::StorageType,
            typename internal::CallbackParamTraits<P3>::StorageType)>
            ::UnboundRunType>
Bind(Functor functor, const P1& p1, const P2& p2, const P3& p3) {
  // Typedefs for how to store and run the functor.
  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
  typedef typename internal::FunctorTraits<Functor>::RunType RunType;

  // Use RunnableType::RunType instead of RunType above because the checks
  // below for bound references need to know what type the actual functor is
  // going to interpret the argument as.
  typedef internal::FunctionTraits<typename RunnableType::RunType>
      BoundFunctorTraits;

  // Do not allow binding a non-const reference parameter. Non-const reference
  // parameters are disallowed by the Google style guide.  Also, binding a
  // non-const reference parameter can make for subtle bugs because the
  // invoked function will receive a reference to the stored copy of the
  // argument and not the original.
  COMPILE_ASSERT(
      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A2Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A3Type>::value ),
      do_not_bind_functions_with_nonconst_ref);

  // For methods, we need to be careful for parameter 1.  We do not require
  // a scoped_refptr because BindState<> itself takes care of AddRef() for
  // methods. We also disallow binding of an array as the method's target
  // object.
  COMPILE_ASSERT(
      internal::HasIsMethodTag<RunnableType>::value ||
          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
      p1_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
                     !is_array<P1>::value,
                 first_bound_argument_to_method_cannot_be_array);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P2>::value,
                 p2_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P3>::value,
                 p3_is_refcounted_type_and_needs_scoped_refptr);
  typedef internal::BindState<RunnableType, RunType,
      void(typename internal::CallbackParamTraits<P1>::StorageType,
      typename internal::CallbackParamTraits<P2>::StorageType,
      typename internal::CallbackParamTraits<P3>::StorageType)> BindState;


  return Callback<typename BindState::UnboundRunType>(
      new BindState(internal::MakeRunnable(functor), p1, p2, p3));
}
+
// Bind() overload for a functor plus four bound arguments.
// (pump.py-generated; edit bind.h.pump rather than this expansion.)
template <typename Functor, typename P1, typename P2, typename P3, typename P4>
base::Callback<
    typename internal::BindState<
        typename internal::FunctorTraits<Functor>::RunnableType,
        typename internal::FunctorTraits<Functor>::RunType,
        void(typename internal::CallbackParamTraits<P1>::StorageType,
            typename internal::CallbackParamTraits<P2>::StorageType,
            typename internal::CallbackParamTraits<P3>::StorageType,
            typename internal::CallbackParamTraits<P4>::StorageType)>
            ::UnboundRunType>
Bind(Functor functor, const P1& p1, const P2& p2, const P3& p3, const P4& p4) {
  // Typedefs for how to store and run the functor.
  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
  typedef typename internal::FunctorTraits<Functor>::RunType RunType;

  // Use RunnableType::RunType instead of RunType above because the checks
  // below for bound references need to know what type the actual functor is
  // going to interpret the argument as.
  typedef internal::FunctionTraits<typename RunnableType::RunType>
      BoundFunctorTraits;

  // Do not allow binding a non-const reference parameter. Non-const reference
  // parameters are disallowed by the Google style guide.  Also, binding a
  // non-const reference parameter can make for subtle bugs because the
  // invoked function will receive a reference to the stored copy of the
  // argument and not the original.
  COMPILE_ASSERT(
      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A2Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A3Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A4Type>::value ),
      do_not_bind_functions_with_nonconst_ref);

  // For methods, we need to be careful for parameter 1.  We do not require
  // a scoped_refptr because BindState<> itself takes care of AddRef() for
  // methods. We also disallow binding of an array as the method's target
  // object.
  COMPILE_ASSERT(
      internal::HasIsMethodTag<RunnableType>::value ||
          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
      p1_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
                     !is_array<P1>::value,
                 first_bound_argument_to_method_cannot_be_array);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P2>::value,
                 p2_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P3>::value,
                 p3_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P4>::value,
                 p4_is_refcounted_type_and_needs_scoped_refptr);
  typedef internal::BindState<RunnableType, RunType,
      void(typename internal::CallbackParamTraits<P1>::StorageType,
      typename internal::CallbackParamTraits<P2>::StorageType,
      typename internal::CallbackParamTraits<P3>::StorageType,
      typename internal::CallbackParamTraits<P4>::StorageType)> BindState;


  return Callback<typename BindState::UnboundRunType>(
      new BindState(internal::MakeRunnable(functor), p1, p2, p3, p4));
}
+
// Bind() overload for a functor plus five bound arguments.
// (pump.py-generated; edit bind.h.pump rather than this expansion.)
template <typename Functor, typename P1, typename P2, typename P3, typename P4,
    typename P5>
base::Callback<
    typename internal::BindState<
        typename internal::FunctorTraits<Functor>::RunnableType,
        typename internal::FunctorTraits<Functor>::RunType,
        void(typename internal::CallbackParamTraits<P1>::StorageType,
            typename internal::CallbackParamTraits<P2>::StorageType,
            typename internal::CallbackParamTraits<P3>::StorageType,
            typename internal::CallbackParamTraits<P4>::StorageType,
            typename internal::CallbackParamTraits<P5>::StorageType)>
            ::UnboundRunType>
Bind(Functor functor, const P1& p1, const P2& p2, const P3& p3, const P4& p4,
    const P5& p5) {
  // Typedefs for how to store and run the functor.
  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
  typedef typename internal::FunctorTraits<Functor>::RunType RunType;

  // Use RunnableType::RunType instead of RunType above because the checks
  // below for bound references need to know what type the actual functor is
  // going to interpret the argument as.
  typedef internal::FunctionTraits<typename RunnableType::RunType>
      BoundFunctorTraits;

  // Do not allow binding a non-const reference parameter. Non-const reference
  // parameters are disallowed by the Google style guide.  Also, binding a
  // non-const reference parameter can make for subtle bugs because the
  // invoked function will receive a reference to the stored copy of the
  // argument and not the original.
  COMPILE_ASSERT(
      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A2Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A3Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A4Type>::value ||
          is_non_const_reference<typename BoundFunctorTraits::A5Type>::value ),
      do_not_bind_functions_with_nonconst_ref);

  // For methods, we need to be careful for parameter 1.  We do not require
  // a scoped_refptr because BindState<> itself takes care of AddRef() for
  // methods. We also disallow binding of an array as the method's target
  // object.
  COMPILE_ASSERT(
      internal::HasIsMethodTag<RunnableType>::value ||
          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
      p1_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
                     !is_array<P1>::value,
                 first_bound_argument_to_method_cannot_be_array);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P2>::value,
                 p2_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P3>::value,
                 p3_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P4>::value,
                 p4_is_refcounted_type_and_needs_scoped_refptr);
  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P5>::value,
                 p5_is_refcounted_type_and_needs_scoped_refptr);
  typedef internal::BindState<RunnableType, RunType,
      void(typename internal::CallbackParamTraits<P1>::StorageType,
      typename internal::CallbackParamTraits<P2>::StorageType,
      typename internal::CallbackParamTraits<P3>::StorageType,
      typename internal::CallbackParamTraits<P4>::StorageType,
      typename internal::CallbackParamTraits<P5>::StorageType)> BindState;


  return Callback<typename BindState::UnboundRunType>(
      new BindState(internal::MakeRunnable(functor), p1, p2, p3, p4, p5));
}
+
+// Bind() overload binding six arguments to |functor|; see base/callback.h
+// for user-facing documentation.
+template <typename Functor, typename P1, typename P2, typename P3, typename P4,
+    typename P5, typename P6>
+base::Callback<
+    typename internal::BindState<
+        typename internal::FunctorTraits<Functor>::RunnableType,
+        typename internal::FunctorTraits<Functor>::RunType,
+        void(typename internal::CallbackParamTraits<P1>::StorageType,
+            typename internal::CallbackParamTraits<P2>::StorageType,
+            typename internal::CallbackParamTraits<P3>::StorageType,
+            typename internal::CallbackParamTraits<P4>::StorageType,
+            typename internal::CallbackParamTraits<P5>::StorageType,
+            typename internal::CallbackParamTraits<P6>::StorageType)>
+            ::UnboundRunType>
+Bind(Functor functor, const P1& p1, const P2& p2, const P3& p3, const P4& p4,
+    const P5& p5, const P6& p6) {
+  // Typedefs for how to store and run the functor.
+  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
+  typedef typename internal::FunctorTraits<Functor>::RunType RunType;
+
+  // Use RunnableType::RunType instead of RunType above because our checks
+  // below for bound references need to know what the actual functor is
+  // going to interpret the argument as.
+  typedef internal::FunctionTraits<typename RunnableType::RunType>
+      BoundFunctorTraits;
+
+  // Do not allow binding a non-const reference parameter. Non-const reference
+  // parameters are disallowed by the Google style guide.  Also, binding a
+  // non-const reference parameter can make for subtle bugs because the
+  // invoked function will receive a reference to the stored copy of the
+  // argument and not the original.
+  COMPILE_ASSERT(
+      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A2Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A3Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A4Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A5Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A6Type>::value ),
+      do_not_bind_functions_with_nonconst_ref);
+
+  // For methods, we need to be careful for parameter 1.  We do not require
+  // a scoped_refptr because BindState<> itself takes care of AddRef() for
+  // methods. We also disallow binding of an array as the method's target
+  // object.
+  COMPILE_ASSERT(
+      internal::HasIsMethodTag<RunnableType>::value ||
+          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
+      p1_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
+                     !is_array<P1>::value,
+                 first_bound_argument_to_method_cannot_be_array);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P2>::value,
+                 p2_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P3>::value,
+                 p3_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P4>::value,
+                 p4_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P5>::value,
+                 p5_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P6>::value,
+                 p6_is_refcounted_type_and_needs_scoped_refptr);
+  // How the bound arguments are stored alongside the runnable.
+  typedef internal::BindState<RunnableType, RunType,
+      void(typename internal::CallbackParamTraits<P1>::StorageType,
+      typename internal::CallbackParamTraits<P2>::StorageType,
+      typename internal::CallbackParamTraits<P3>::StorageType,
+      typename internal::CallbackParamTraits<P4>::StorageType,
+      typename internal::CallbackParamTraits<P5>::StorageType,
+      typename internal::CallbackParamTraits<P6>::StorageType)> BindState;
+
+
+  return Callback<typename BindState::UnboundRunType>(
+      new BindState(internal::MakeRunnable(functor), p1, p2, p3, p4, p5, p6));
+}
+
+// Bind() overload binding seven arguments to |functor|; see base/callback.h
+// for user-facing documentation.
+template <typename Functor, typename P1, typename P2, typename P3, typename P4,
+    typename P5, typename P6, typename P7>
+base::Callback<
+    typename internal::BindState<
+        typename internal::FunctorTraits<Functor>::RunnableType,
+        typename internal::FunctorTraits<Functor>::RunType,
+        void(typename internal::CallbackParamTraits<P1>::StorageType,
+            typename internal::CallbackParamTraits<P2>::StorageType,
+            typename internal::CallbackParamTraits<P3>::StorageType,
+            typename internal::CallbackParamTraits<P4>::StorageType,
+            typename internal::CallbackParamTraits<P5>::StorageType,
+            typename internal::CallbackParamTraits<P6>::StorageType,
+            typename internal::CallbackParamTraits<P7>::StorageType)>
+            ::UnboundRunType>
+Bind(Functor functor, const P1& p1, const P2& p2, const P3& p3, const P4& p4,
+    const P5& p5, const P6& p6, const P7& p7) {
+  // Typedefs for how to store and run the functor.
+  typedef typename internal::FunctorTraits<Functor>::RunnableType RunnableType;
+  typedef typename internal::FunctorTraits<Functor>::RunType RunType;
+
+  // Use RunnableType::RunType instead of RunType above because our checks
+  // below for bound references need to know what the actual functor is
+  // going to interpret the argument as.
+  typedef internal::FunctionTraits<typename RunnableType::RunType>
+      BoundFunctorTraits;
+
+  // Do not allow binding a non-const reference parameter. Non-const reference
+  // parameters are disallowed by the Google style guide.  Also, binding a
+  // non-const reference parameter can make for subtle bugs because the
+  // invoked function will receive a reference to the stored copy of the
+  // argument and not the original.
+  COMPILE_ASSERT(
+      !(is_non_const_reference<typename BoundFunctorTraits::A1Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A2Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A3Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A4Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A5Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A6Type>::value ||
+          is_non_const_reference<typename BoundFunctorTraits::A7Type>::value ),
+      do_not_bind_functions_with_nonconst_ref);
+
+  // For methods, we need to be careful for parameter 1.  We do not require
+  // a scoped_refptr because BindState<> itself takes care of AddRef() for
+  // methods. We also disallow binding of an array as the method's target
+  // object.
+  COMPILE_ASSERT(
+      internal::HasIsMethodTag<RunnableType>::value ||
+          !internal::NeedsScopedRefptrButGetsRawPtr<P1>::value,
+      p1_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::HasIsMethodTag<RunnableType>::value ||
+                     !is_array<P1>::value,
+                 first_bound_argument_to_method_cannot_be_array);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P2>::value,
+                 p2_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P3>::value,
+                 p3_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P4>::value,
+                 p4_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P5>::value,
+                 p5_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P6>::value,
+                 p6_is_refcounted_type_and_needs_scoped_refptr);
+  COMPILE_ASSERT(!internal::NeedsScopedRefptrButGetsRawPtr<P7>::value,
+                 p7_is_refcounted_type_and_needs_scoped_refptr);
+  // How the bound arguments are stored alongside the runnable.
+  typedef internal::BindState<RunnableType, RunType,
+      void(typename internal::CallbackParamTraits<P1>::StorageType,
+      typename internal::CallbackParamTraits<P2>::StorageType,
+      typename internal::CallbackParamTraits<P3>::StorageType,
+      typename internal::CallbackParamTraits<P4>::StorageType,
+      typename internal::CallbackParamTraits<P5>::StorageType,
+      typename internal::CallbackParamTraits<P6>::StorageType,
+      typename internal::CallbackParamTraits<P7>::StorageType)> BindState;
+
+
+  return Callback<typename BindState::UnboundRunType>(
+      new BindState(internal::MakeRunnable(functor), p1, p2, p3, p4, p5, p6,
+          p7));
+}
+
+}  // namespace base
+
+#endif  // BASE_BIND_H_

+ 14 - 0
base/bind_helpers.cc

@@ -0,0 +1,14 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind_helpers.h"
+
+#include "base/callback.h"
+
+namespace base {
+
+// Deliberately empty; bound as a no-op Closure target (see bind_helpers.h).
+void DoNothing() {
+}
+}  // namespace base

+ 544 - 0
base/bind_helpers.h

@@ -0,0 +1,544 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This defines a set of argument wrappers and related factory methods that
+// can be used specify the refcounting and reference semantics of arguments
+// that are bound by the Bind() function in base/bind.h.
+//
+// It also defines a set of simple functions and utilities that people want
+// when using Callback<> and Bind().
+//
+//
+// ARGUMENT BINDING WRAPPERS
+//
+// The wrapper functions are base::Unretained(), base::Owned(), base::Passed(),
+// base::ConstRef(), and base::IgnoreResult().
+//
+// Unretained() allows Bind() to bind a non-refcounted class, and to disable
+// refcounting on arguments that are refcounted objects.
+//
+// Owned() transfers ownership of an object to the Callback resulting from
+// bind; the object will be deleted when the Callback is deleted.
+//
+// Passed() is for transferring movable-but-not-copyable types (eg. scoped_ptr)
+// through a Callback. Logically, this signifies a destructive transfer of
+// the state of the argument into the target function.  Invoking
+// Callback::Run() twice on a Callback that was created with a Passed()
+// argument will CHECK() because the first invocation would have already
+// transferred ownership to the target function.
+//
+// ConstRef() allows binding a constant reference to an argument rather
+// than a copy.
+//
+// IgnoreResult() is used to adapt a function or Callback with a return type to
+// one with a void return. This is most useful if you have a function with,
+// say, a pesky ignorable bool return that you want to use with PostTask or
+// something else that expect a Callback with a void return.
+//
+// EXAMPLE OF Unretained():
+//
+//   class Foo {
+//    public:
+//     void func() { cout << "Foo:f" << endl; }
+//   };
+//
+//   // In some function somewhere.
+//   Foo foo;
+//   Closure foo_callback =
+//       Bind(&Foo::func, Unretained(&foo));
+//   foo_callback.Run();  // Prints "Foo:f".
+//
+// Without the Unretained() wrapper on |&foo|, the above call would fail
+// to compile because Foo does not support the AddRef() and Release() methods.
+//
+//
+// EXAMPLE OF Owned():
+//
+//   void foo(int* arg) { cout << *arg << endl }
+//
+//   int* pn = new int(1);
+//   Closure foo_callback = Bind(&foo, Owned(pn));
+//
+//   foo_callback.Run();  // Prints "1"
+//   foo_callback.Run();  // Prints "1"
+//   *n = 2;
+//   foo_callback.Run();  // Prints "2"
+//
+//   foo_callback.Reset();  // |pn| is deleted.  Also will happen when
+//                          // |foo_callback| goes out of scope.
+//
+// Without Owned(), someone would have to know to delete |pn| when the last
+// reference to the Callback is deleted.
+//
+//
+// EXAMPLE OF ConstRef():
+//
+//   void foo(int arg) { cout << arg << endl }
+//
+//   int n = 1;
+//   Closure no_ref = Bind(&foo, n);
+//   Closure has_ref = Bind(&foo, ConstRef(n));
+//
+//   no_ref.Run();  // Prints "1"
+//   has_ref.Run();  // Prints "1"
+//
+//   n = 2;
+//   no_ref.Run();  // Prints "1"
+//   has_ref.Run();  // Prints "2"
+//
+// Note that because ConstRef() takes a reference on |n|, |n| must outlive all
+// its bound callbacks.
+//
+//
+// EXAMPLE OF IgnoreResult():
+//
+//   int DoSomething(int arg) { cout << arg << endl; }
+//
+//   // Assign to a Callback with a void return type.
+//   Callback<void(int)> cb = Bind(IgnoreResult(&DoSomething));
+//   cb->Run(1);  // Prints "1".
+//
+//   // Prints "1" on |ml|.
+//   ml->PostTask(FROM_HERE, Bind(IgnoreResult(&DoSomething), 1);
+//
+//
+// EXAMPLE OF Passed():
+//
+//   void TakesOwnership(scoped_ptr<Foo> arg) { }
+//   scoped_ptr<Foo> CreateFoo() { return scoped_ptr<Foo>(new Foo()); }
+//
+//   scoped_ptr<Foo> f(new Foo());
+//
+//   // |cb| is given ownership of Foo(). |f| is now NULL.
+//   // You can use f.Pass() in place of &f, but it's more verbose.
+//   Closure cb = Bind(&TakesOwnership, Passed(&f));
+//
+//   // Run was never called so |cb| still owns Foo() and deletes
+//   // it on Reset().
+//   cb.Reset();
+//
+//   // |cb| is given a new Foo created by CreateFoo().
+//   cb = Bind(&TakesOwnership, Passed(CreateFoo()));
+//
+//   // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
+//   // no longer owns Foo() and, if reset, would not delete Foo().
+//   cb.Run();  // Foo() is now transferred to |arg| and deleted.
+//   cb.Run();  // This CHECK()s since Foo() already been used once.
+//
+// Passed() is particularly useful with PostTask() when you are transferring
+// ownership of an argument into a task, but don't necessarily know if the
+// task will always be executed. This can happen if the task is cancellable
+// or if it is posted to a MessageLoopProxy.
+//
+//
+// SIMPLE FUNCTIONS AND UTILITIES.
+//
+//   DoNothing() - Useful for creating a Closure that does nothing when called.
+//   DeletePointer<T>() - Useful for creating a Closure that will delete a
+//                        pointer when invoked. Only use this when necessary.
+//                        In most cases MessageLoop::DeleteSoon() is a better
+//                        fit.
+
+#ifndef BASE_BIND_HELPERS_H_
+#define BASE_BIND_HELPERS_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "base/type_traits.h"
+
+namespace base {
+namespace internal {
+
+// Use the Substitution Failure Is Not An Error (SFINAE) trick to inspect T
+// for the existence of AddRef() and Release() functions of the correct
+// signature.
+//
+// http://en.wikipedia.org/wiki/Substitution_failure_is_not_an_error
+// http://stackoverflow.com/questions/257288/is-it-possible-to-write-a-c-template-to-check-for-a-functions-existence
+// http://stackoverflow.com/questions/4358584/sfinae-approach-comparison
+// http://stackoverflow.com/questions/1966362/sfinae-to-check-for-inherited-member-functions
+//
+// The last link in particular show the method used below.
+//
+// For SFINAE to work with inherited methods, we need to pull some extra tricks
+// with multiple inheritance.  In the more standard formulation, the overloads
+// of Check would be:
+//
+//   template <typename C>
+//   Yes NotTheCheckWeWant(Helper<&C::TargetFunc>*);
+//
+//   template <typename C>
+//   No NotTheCheckWeWant(...);
+//
+//   static const bool value = sizeof(NotTheCheckWeWant<T>(0)) == sizeof(Yes);
+//
+// The problem here is that template resolution will not match
+// C::TargetFunc if TargetFunc does not exist directly in C.  That is, if
+// TargetFunc in inherited from an ancestor, &C::TargetFunc will not match,
+// |value| will be false.  This formulation only checks for whether or
+// not TargetFunc exist directly in the class being introspected.
+//
+// To get around this, we play a dirty trick with multiple inheritance.
+// First, We create a class BaseMixin that declares each function that we
+// want to probe for.  Then we create a class Base that inherits from both T
+// (the class we wish to probe) and BaseMixin.  Note that the function
+// signature in BaseMixin does not need to match the signature of the function
+// we are probing for; thus it's easiest to just use void(void).
+//
+// Now, if TargetFunc exists somewhere in T, then &Base::TargetFunc has an
+// ambiguous resolution between BaseMixin and T.  This lets us write the
+// following:
+//
+//   template <typename C>
+//   No GoodCheck(Helper<&C::TargetFunc>*);
+//
+//   template <typename C>
+//   Yes GoodCheck(...);
+//
+//   static const bool value = sizeof(GoodCheck<Base>(0)) == sizeof(Yes);
+//
+// Notice here that the variadic version of GoodCheck() returns Yes here
+// instead of No like the previous one. Also notice that we calculate |value|
+// by specializing GoodCheck() on Base instead of T.
+//
+// We've reversed the roles of the variadic, and Helper overloads.
+// GoodCheck(Helper<&C::TargetFunc>*), when C = Base, fails to be a valid
+// substitution if T::TargetFunc exists. Thus GoodCheck<Base>(0) will resolve
+// to the variadic version if T has TargetFunc.  If T::TargetFunc does not
+// exist, then &C::TargetFunc is not ambiguous, and the overload resolution
+// will prefer GoodCheck(Helper<&C::TargetFunc>*).
+//
+// This method of SFINAE will correctly probe for inherited names, but it cannot
+// typecheck those names.  It's still a good enough sanity check though.
+//
+// Works on gcc-4.2, gcc-4.4, and Visual Studio 2008.
+//
+// TODO(ajwong): Move to ref_counted.h or type_traits.h when we've vetted
+// this works well.
+//
+// TODO(ajwong): Make this check for Release() as well.
+// See http://crbug.com/82038.
+// SFINAE probe (mechanism described in the comment above): |value| is true
+// iff T declares an AddRef() method, possibly inherited.
+template <typename T>
+class SupportsAddRefAndRelease {
+  typedef char Yes[1];
+  typedef char No[2];
+
+  struct BaseMixin {
+    void AddRef();
+  };
+
+// MSVC warns when you try to use Base if T has a private destructor, the
+// common pattern for refcounted types. It does this even though no attempt to
+// instantiate Base is made.  We disable the warning for this definition.
+#if defined(OS_WIN)
+#pragma warning(push)
+#pragma warning(disable:4624)
+#endif
+  struct Base : public T, public BaseMixin {
+  };
+#if defined(OS_WIN)
+#pragma warning(pop)
+#endif
+
+  template <void(BaseMixin::*)(void)> struct Helper {};
+
+  // Substitutes only when &C::AddRef unambiguously names BaseMixin::AddRef,
+  // i.e. when T itself has no AddRef(); otherwise the variadic overload wins.
+  template <typename C>
+  static No& Check(Helper<&C::AddRef>*);
+
+  template <typename >
+  static Yes& Check(...);
+
+ public:
+  static const bool value = sizeof(Check<Base>(0)) == sizeof(Yes);
+};
+
+// Helpers to assert that arguments of a refcounted type are bound with a
+// scoped_refptr.
+template <bool IsClasstype, typename T>
+struct UnsafeBindtoRefCountedArgHelper : false_type {
+};
+
+template <typename T>
+struct UnsafeBindtoRefCountedArgHelper<true, T>
+    : integral_constant<bool, SupportsAddRefAndRelease<T>::value> {
+};
+
+// True when T is a raw pointer to a class supporting AddRef(), i.e. an
+// unsafe bind of a refcounted argument without a scoped_refptr.
+template <typename T>
+struct UnsafeBindtoRefCountedArg : false_type {
+};
+
+template <typename T>
+struct UnsafeBindtoRefCountedArg<T*>
+    : UnsafeBindtoRefCountedArgHelper<is_class<T>::value, T> {
+};
+
+// Detects whether T declares a nested IsMethod type.  Runnables use that tag
+// to request method-style handling (refcounting/WeakPtr semantics for the
+// first argument); only the tag's existence matters, not its value.
+template <typename T>
+class HasIsMethodTag {
+  typedef char Yes[1];
+  typedef char No[2];
+
+  template <typename U>
+  static Yes& Check(typename U::IsMethod*);
+
+  template <typename U>
+  static No& Check(...);
+
+ public:
+  static const bool value = sizeof(Check<T>(0)) == sizeof(Yes);
+};
+
+// Holds a raw pointer without taking any reference; produced by Unretained().
+template <typename T>
+class UnretainedWrapper {
+ public:
+  explicit UnretainedWrapper(T* o) : ptr_(o) {}
+  T* get() const { return ptr_; }
+ private:
+  T* ptr_;
+};
+
+// Holds a pointer to the caller's object so the callback sees the original
+// rather than a stored copy; produced by ConstRef().  The referenced object
+// must outlive all callbacks bound to it (see the ConstRef() example above).
+template <typename T>
+class ConstRefWrapper {
+ public:
+  explicit ConstRefWrapper(const T& o) : ptr_(&o) {}
+  const T& get() const { return *ptr_; }
+ private:
+  const T* ptr_;
+};
+
+// Marker wrapper produced by IgnoreResult(); Bind() uses it to discard the
+// functor's return value.
+template <typename T>
+struct IgnoreResultHelper {
+  explicit IgnoreResultHelper(T functor) : functor_(functor) {}
+
+  T functor_;
+};
+
+// NOTE(review): this specialization stores a reference to the Callback, so
+// the wrapped Callback must outlive the helper — normally true because the
+// helper is consumed by Bind() within the same expression.
+template <typename T>
+struct IgnoreResultHelper<Callback<T> > {
+  explicit IgnoreResultHelper(const Callback<T>& functor) : functor_(functor) {}
+
+  const Callback<T>& functor_;
+};
+
+// An alternate implementation is to avoid the destructive copy, and instead
+// specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
+// a class that is essentially a scoped_ptr<>.
+//
+// The current implementation has the benefit though of leaving ParamTraits<>
+// fully in callback_internal.h as well as avoiding type conversions during
+// storage.
+template <typename T>
+class OwnedWrapper {
+ public:
+  explicit OwnedWrapper(T* o) : ptr_(o) {}
+  ~OwnedWrapper() { delete ptr_; }
+  T* get() const { return ptr_; }
+  // Destructive copy: ownership transfers to the new wrapper and |other| is
+  // left NULL (auto_ptr-style; |ptr_| is mutable to permit this).
+  OwnedWrapper(const OwnedWrapper& other) {
+    ptr_ = other.ptr_;
+    other.ptr_ = NULL;
+  }
+
+ private:
+  mutable T* ptr_;
+};
+
+// PassedWrapper is a copyable adapter for a scoper that ignores const.
+//
+// It is needed to get around the fact that Bind() takes a const reference to
+// all its arguments.  Because Bind() takes a const reference to avoid
+// unnecessary copies, it is incompatible with movable-but-not-copyable
+// types; doing a destructive "move" of the type into Bind() would violate
+// the const correctness.
+//
+// This conundrum cannot be solved without either C++11 rvalue references or
+// a O(2^n) blowup of Bind() templates to handle each combination of regular
+// types and movable-but-not-copyable types.  Thus we introduce a wrapper type
+// that is copyable to transmit the correct type information down into
+// BindState<>. Ignoring const in this type makes sense because it is only
+// created when we are explicitly trying to do a destructive move.
+//
+// Two notes:
+//  1) PassedWrapper supports any type that has a "Pass()" function.
+//     This is intentional. The whitelisting of which specific types we
+//     support is maintained by CallbackParamTraits<>.
+//  2) is_valid_ is distinct from NULL because it is valid to bind a "NULL"
+//     scoper to a Callback and allow the Callback to execute once.
+template <typename T>
+class PassedWrapper {
+ public:
+  explicit PassedWrapper(T scoper) : is_valid_(true), scoper_(scoper.Pass()) {}
+  PassedWrapper(const PassedWrapper& other)
+      : is_valid_(other.is_valid_), scoper_(other.scoper_.Pass()) {
+  }
+  // Destructively moves the scoper out.  Valid exactly once; a second call
+  // CHECK()s.
+  T Pass() const {
+    CHECK(is_valid_);
+    is_valid_ = false;
+    return scoper_.Pass();
+  }
+
+ private:
+  mutable bool is_valid_;
+  mutable T scoper_;
+};
+
+// Unwrap the stored parameters for the wrappers above.  The primary template
+// forwards plain stored values by const reference; each specialization
+// converts its wrapper back into the form the target function expects.
+template <typename T>
+struct UnwrapTraits {
+  typedef const T& ForwardType;
+  static ForwardType Unwrap(const T& o) { return o; }
+};
+
+template <typename T>
+struct UnwrapTraits<UnretainedWrapper<T> > {
+  typedef T* ForwardType;
+  static ForwardType Unwrap(UnretainedWrapper<T> unretained) {
+    return unretained.get();
+  }
+};
+
+template <typename T>
+struct UnwrapTraits<ConstRefWrapper<T> > {
+  typedef const T& ForwardType;
+  static ForwardType Unwrap(ConstRefWrapper<T> const_ref) {
+    return const_ref.get();
+  }
+};
+
+// scoped_refptr<> is forwarded as a raw pointer; the stored refptr keeps the
+// object alive for the duration of the call.
+template <typename T>
+struct UnwrapTraits<scoped_refptr<T> > {
+  typedef T* ForwardType;
+  static ForwardType Unwrap(const scoped_refptr<T>& o) { return o.get(); }
+};
+
+template <typename T>
+struct UnwrapTraits<WeakPtr<T> > {
+  typedef const WeakPtr<T>& ForwardType;
+  static ForwardType Unwrap(const WeakPtr<T>& o) { return o; }
+};
+
+template <typename T>
+struct UnwrapTraits<OwnedWrapper<T> > {
+  typedef T* ForwardType;
+  static ForwardType Unwrap(const OwnedWrapper<T>& o) {
+    return o.get();
+  }
+};
+
+// Passed arguments are moved out of the wrapper; usable only once.
+template <typename T>
+struct UnwrapTraits<PassedWrapper<T> > {
+  typedef T ForwardType;
+  static T Unwrap(PassedWrapper<T>& o) {
+    return o.Pass();
+  }
+};
+
+// Utility for handling different refcounting semantics in the Bind()
+// function.  Only a method's first argument, bound as a raw pointer, is
+// actually AddRef()/Release()d; every other case is a no-op.
+template <bool is_method, typename T>
+struct MaybeRefcount;
+
+// Non-method bind: never touch refcounts.
+template <typename T>
+struct MaybeRefcount<false, T> {
+  static void AddRef(const T&) {}
+  static void Release(const T&) {}
+};
+
+template <typename T, size_t n>
+struct MaybeRefcount<false, T[n]> {
+  static void AddRef(const T*) {}
+  static void Release(const T*) {}
+};
+
+template <typename T>
+struct MaybeRefcount<true, T> {
+  static void AddRef(const T&) {}
+  static void Release(const T&) {}
+};
+
+// Method target bound as a raw pointer: Bind() keeps the object alive.
+template <typename T>
+struct MaybeRefcount<true, T*> {
+  static void AddRef(T* o) { o->AddRef(); }
+  static void Release(T* o) { o->Release(); }
+};
+
+// No need to additionally AddRef() and Release() since we are storing a
+// scoped_refptr<> inside the storage object already.
+template <typename T>
+struct MaybeRefcount<true, scoped_refptr<T> > {
+  static void AddRef(const scoped_refptr<T>& o) {}
+  static void Release(const scoped_refptr<T>& o) {}
+};
+
+template <typename T>
+struct MaybeRefcount<true, const T*> {
+  static void AddRef(const T* o) { o->AddRef(); }
+  static void Release(const T* o) { o->Release(); }
+};
+
+// IsWeakMethod is a helper that determine if we are binding a WeakPtr<> to a
+// method.  It is used internally by Bind() to select the correct
+// InvokeHelper that will no-op itself in the event the WeakPtr<> for
+// the target object is invalidated.
+//
+// P1 should be the type of the object that will be the receiver of the
+// method.
+template <bool IsMethod, typename P1>
+struct IsWeakMethod : public false_type {};
+
+template <typename T>
+struct IsWeakMethod<true, WeakPtr<T> > : public true_type {};
+
+template <typename T>
+struct IsWeakMethod<true, ConstRefWrapper<WeakPtr<T> > > : public true_type {};
+
+}  // namespace internal
+
+// Disables refcounting of |o|; see "EXAMPLE OF Unretained()" above.  The
+// caller must guarantee |o| outlives the callback.
+template <typename T>
+static inline internal::UnretainedWrapper<T> Unretained(T* o) {
+  return internal::UnretainedWrapper<T>(o);
+}
+
+// Binds a reference to |o| instead of a copy; |o| must outlive the callback
+// (see "EXAMPLE OF ConstRef()" above).
+template <typename T>
+static inline internal::ConstRefWrapper<T> ConstRef(const T& o) {
+  return internal::ConstRefWrapper<T>(o);
+}
+
+// Transfers ownership of |o| to the resulting Callback; |o| is deleted when
+// the Callback is (see "EXAMPLE OF Owned()" above).
+template <typename T>
+static inline internal::OwnedWrapper<T> Owned(T* o) {
+  return internal::OwnedWrapper<T>(o);
+}
+
+// We offer 2 syntaxes for calling Passed().  The first takes a temporary and
+// is best suited for use with the return value of a function. The second
+// takes a pointer to the scoper and is just syntactic sugar to avoid having
+// to write Passed(scoper.Pass()).
+template <typename T>
+static inline internal::PassedWrapper<T> Passed(T scoper) {
+  return internal::PassedWrapper<T>(scoper.Pass());
+}
+template <typename T>
+static inline internal::PassedWrapper<T> Passed(T* scoper) {
+  return internal::PassedWrapper<T>(scoper->Pass());
+}
+
+// Adapts a functor with a return value so Bind() produces a void-returning
+// Callback (see "EXAMPLE OF IgnoreResult()" above).
+template <typename T>
+static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
+  return internal::IgnoreResultHelper<T>(data);
+}
+
+// Overload for adapting an existing Callback the same way.
+template <typename T>
+static inline internal::IgnoreResultHelper<Callback<T> >
+IgnoreResult(const Callback<T>& data) {
+  return internal::IgnoreResultHelper<Callback<T> >(data);
+}
+
+// No-op function; defined (empty) in bind_helpers.cc.  Useful as a Closure
+// target that should do nothing.
+BASE_EXPORT void DoNothing();
+
+// Deletes |obj| when invoked.  Per the file comment, prefer
+// MessageLoop::DeleteSoon() where it fits.
+template<typename T>
+void DeletePointer(T* obj) {
+  delete obj;
+}
+
+}  // namespace base
+
+#endif  // BASE_BIND_HELPERS_H_

+ 2789 - 0
base/bind_internal.h

@@ -0,0 +1,2789 @@
+// This file was GENERATED by command:
+//     pump.py bind_internal.h.pump
+// DO NOT EDIT BY HAND!!!
+
+
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_INTERNAL_H_
+#define BASE_BIND_INTERNAL_H_
+
+#include "base/bind_helpers.h"
+#include "base/callback_internal.h"
+#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
+#include "base/memory/weak_ptr.h"
+#include "base/type_traits.h"
+#include "base/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/bind_internal_win.h"
+#endif
+
+namespace base {
+namespace internal {
+
+// See base/callback.h for user documentation.
+//
+//
+// CONCEPTS:
+//  Runnable -- A type (really a type class) that has a single Run() method
+//              and a RunType typedef that corresponds to the type of Run().
+//              A Runnable can declare that it should be treated like a method
+//              call by including a typedef named IsMethod.  The value of
+//              this typedef is NOT inspected, only the existence.  When a
+//              Runnable declares itself a method, Bind() will enforce special
+//              refcounting + WeakPtr handling semantics for the first
+//              parameter which is expected to be an object.
+//  Functor -- A copyable type representing something that should be called.
+//             All function pointers, Callback<>, and Runnables are functors
+//             even if the invocation syntax differs.
+//  RunType -- A function type (as opposed to function _pointer_ type) for
+//             a Run() function.  Usually just a convenience typedef.
+//  (Bound)ArgsType -- A function type that is being (ab)used to store the
+//                     types of set of arguments.  The "return" type is always
+//                     void here.  We use this hack so that we do not need
+//                     a new type name for each arity of type. (eg.,
+//                     BindState1, BindState2).  This makes forward
+//                     declarations and friending much much easier.
+//
+// Types:
+//  RunnableAdapter<> -- Wraps the various "function" pointer types into an
+//                       object that adheres to the Runnable interface.
+//                       There are |3*ARITY| RunnableAdapter types.
+//  FunctionTraits<> -- Type traits that unwrap a function signature into a
+//                      a set of easier to use typedefs.  Used mainly for
+//                      compile time asserts.
+//                      There are |ARITY| FunctionTraits types.
+//  ForceVoidReturn<> -- Helper class for translating function signatures to
+//                       equivalent forms with a "void" return type.
+//                       There are |ARITY| ForceVoidReturn types.
+//  FunctorTraits<> -- Type traits used to determine the correct RunType and
+//                     RunnableType for a Functor.  This is where function
+//                     signature adapters are applied.
+//                     There are |ARITY| FunctorTraits types.
+//  MakeRunnable<> -- Takes a Functor and returns an object in the Runnable
+//                    type class that represents the underlying Functor.
+//                    There are |O(1)| MakeRunnable types.
+//  InvokeHelper<> -- Takes a Runnable + arguments and actually invokes it.
+//                    Handles the differing syntaxes needed for WeakPtr<>
+//                    support, and for ignoring return values.  This is
+//                    separate from Invoker to avoid creating multiple
+//                    versions of Invoker<> which grows at O(n^2) with the
+//                    arity.
+//                    There are |k*ARITY| InvokeHelper types.
+//  Invoker<> -- Unwraps the curried parameters and executes the Runnable.
+//               There are |(ARITY^2 + ARITY)/2| Invoker types.
+//  BindState<> -- Stores the curried parameters, and is the main entry point
+//                 into the Bind() system, doing most of the type resolution.
+//                 There are ARITY BindState types.
+
+// RunnableAdapter<>
+//
+// The RunnableAdapter<> templates provide a uniform interface for invoking
+// a function pointer, method pointer, or const method pointer. The adapter
+// exposes a Run() method with an appropriate signature. Using this wrapper
+// allows for writing code that supports all three pointer types without
+// undue repetition.  Without it, a lot of code would need to be repeated 3
+// times.
+//
+// For method pointers and const method pointers the first argument to Run()
+// is considered to be the receiver of the method.  This is similar to STL's
+// mem_fun().
+//
+// This class also exposes a RunType typedef that is the function type of the
+// Run() function.
+//
+// If and only if the wrapper contains a method or const method pointer, an
+// IsMethod typedef is exposed.  The existence of this typedef (NOT the value)
+// marks that the wrapper should be considered a method wrapper.
+
+// Primary template; only the specializations below (plain function pointers,
+// method pointers, and const method pointers, arity 0 through 7) are ever
+// instantiated.  NOTE: this file is generated by pump.py -- change
+// bind_internal.h.pump instead of editing these expansions by hand.
+template <typename Functor>
+class RunnableAdapter;
+
+// Function: Arity 0.
+template <typename R>
+class RunnableAdapter<R(*)()> {
+ public:
+  typedef R (RunType)();
+
+  explicit RunnableAdapter(R(*function)())
+      : function_(function) {
+  }
+
+  R Run() {
+    return function_();
+  }
+
+ private:
+  R (*function_)();
+};
+
+// Method: Arity 0.
+template <typename R, typename T>
+class RunnableAdapter<R(T::*)()> {
+ public:
+  typedef R (RunType)(T*);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)())
+      : method_(method) {
+  }
+
+  R Run(T* object) {
+    return (object->*method_)();
+  }
+
+ private:
+  R (T::*method_)();
+};
+
+// Const Method: Arity 0.
+template <typename R, typename T>
+class RunnableAdapter<R(T::*)() const> {
+ public:
+  typedef R (RunType)(const T*);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)() const)
+      : method_(method) {
+  }
+
+  R Run(const T* object) {
+    return (object->*method_)();
+  }
+
+ private:
+  R (T::*method_)() const;
+};
+
+// Function: Arity 1.
+template <typename R, typename A1>
+class RunnableAdapter<R(*)(A1)> {
+ public:
+  typedef R (RunType)(A1);
+
+  explicit RunnableAdapter(R(*function)(A1))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1) {
+    return function_(CallbackForward(a1));
+  }
+
+ private:
+  R (*function_)(A1);
+};
+
+// Method: Arity 1.
+template <typename R, typename T, typename A1>
+class RunnableAdapter<R(T::*)(A1)> {
+ public:
+  typedef R (RunType)(T*, A1);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1) {
+    return (object->*method_)(CallbackForward(a1));
+  }
+
+ private:
+  R (T::*method_)(A1);
+};
+
+// Const Method: Arity 1.
+template <typename R, typename T, typename A1>
+class RunnableAdapter<R(T::*)(A1) const> {
+ public:
+  typedef R (RunType)(const T*, A1);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1) {
+    return (object->*method_)(CallbackForward(a1));
+  }
+
+ private:
+  R (T::*method_)(A1) const;
+};
+
+// Function: Arity 2.
+template <typename R, typename A1, typename A2>
+class RunnableAdapter<R(*)(A1, A2)> {
+ public:
+  typedef R (RunType)(A1, A2);
+
+  explicit RunnableAdapter(R(*function)(A1, A2))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2) {
+    return function_(CallbackForward(a1), CallbackForward(a2));
+  }
+
+ private:
+  R (*function_)(A1, A2);
+};
+
+// Method: Arity 2.
+template <typename R, typename T, typename A1, typename A2>
+class RunnableAdapter<R(T::*)(A1, A2)> {
+ public:
+  typedef R (RunType)(T*, A1, A2);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2));
+  }
+
+ private:
+  R (T::*method_)(A1, A2);
+};
+
+// Const Method: Arity 2.
+template <typename R, typename T, typename A1, typename A2>
+class RunnableAdapter<R(T::*)(A1, A2) const> {
+ public:
+  typedef R (RunType)(const T*, A1, A2);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2));
+  }
+
+ private:
+  R (T::*method_)(A1, A2) const;
+};
+
+// Function: Arity 3.
+template <typename R, typename A1, typename A2, typename A3>
+class RunnableAdapter<R(*)(A1, A2, A3)> {
+ public:
+  typedef R (RunType)(A1, A2, A3);
+
+  explicit RunnableAdapter(R(*function)(A1, A2, A3))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3) {
+    return function_(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3));
+  }
+
+ private:
+  R (*function_)(A1, A2, A3);
+};
+
+// Method: Arity 3.
+template <typename R, typename T, typename A1, typename A2, typename A3>
+class RunnableAdapter<R(T::*)(A1, A2, A3)> {
+ public:
+  typedef R (RunType)(T*, A1, A2, A3);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3);
+};
+
+// Const Method: Arity 3.
+template <typename R, typename T, typename A1, typename A2, typename A3>
+class RunnableAdapter<R(T::*)(A1, A2, A3) const> {
+ public:
+  typedef R (RunType)(const T*, A1, A2, A3);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3) const;
+};
+
+// Function: Arity 4.
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class RunnableAdapter<R(*)(A1, A2, A3, A4)> {
+ public:
+  typedef R (RunType)(A1, A2, A3, A4);
+
+  explicit RunnableAdapter(R(*function)(A1, A2, A3, A4))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4) {
+    return function_(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4));
+  }
+
+ private:
+  R (*function_)(A1, A2, A3, A4);
+};
+
+// Method: Arity 4.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4)> {
+ public:
+  typedef R (RunType)(T*, A1, A2, A3, A4);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4);
+};
+
+// Const Method: Arity 4.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4) const> {
+ public:
+  typedef R (RunType)(const T*, A1, A2, A3, A4);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4) const;
+};
+
+// Function: Arity 5.
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+class RunnableAdapter<R(*)(A1, A2, A3, A4, A5)> {
+ public:
+  typedef R (RunType)(A1, A2, A3, A4, A5);
+
+  explicit RunnableAdapter(R(*function)(A1, A2, A3, A4, A5))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5) {
+    return function_(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5));
+  }
+
+ private:
+  R (*function_)(A1, A2, A3, A4, A5);
+};
+
+// Method: Arity 5.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4, typename A5>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4, A5)> {
+ public:
+  typedef R (RunType)(T*, A1, A2, A3, A4, A5);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4, A5))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4, A5);
+};
+
+// Const Method: Arity 5.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4, typename A5>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4, A5) const> {
+ public:
+  typedef R (RunType)(const T*, A1, A2, A3, A4, A5);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4, A5) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4, A5) const;
+};
+
+// Function: Arity 6.
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+class RunnableAdapter<R(*)(A1, A2, A3, A4, A5, A6)> {
+ public:
+  typedef R (RunType)(A1, A2, A3, A4, A5, A6);
+
+  explicit RunnableAdapter(R(*function)(A1, A2, A3, A4, A5, A6))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5,
+      typename CallbackParamTraits<A6>::ForwardType a6) {
+    return function_(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6));
+  }
+
+ private:
+  R (*function_)(A1, A2, A3, A4, A5, A6);
+};
+
+// Method: Arity 6.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4, A5, A6)> {
+ public:
+  typedef R (RunType)(T*, A1, A2, A3, A4, A5, A6);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4, A5, A6))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5,
+      typename CallbackParamTraits<A6>::ForwardType a6) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4, A5, A6);
+};
+
+// Const Method: Arity 6.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4, A5, A6) const> {
+ public:
+  typedef R (RunType)(const T*, A1, A2, A3, A4, A5, A6);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4, A5, A6) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5,
+      typename CallbackParamTraits<A6>::ForwardType a6) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4, A5, A6) const;
+};
+
+// Function: Arity 7.
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+class RunnableAdapter<R(*)(A1, A2, A3, A4, A5, A6, A7)> {
+ public:
+  typedef R (RunType)(A1, A2, A3, A4, A5, A6, A7);
+
+  explicit RunnableAdapter(R(*function)(A1, A2, A3, A4, A5, A6, A7))
+      : function_(function) {
+  }
+
+  R Run(typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5,
+      typename CallbackParamTraits<A6>::ForwardType a6,
+      typename CallbackParamTraits<A7>::ForwardType a7) {
+    return function_(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6), CallbackForward(a7));
+  }
+
+ private:
+  R (*function_)(A1, A2, A3, A4, A5, A6, A7);
+};
+
+// Method: Arity 7.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4, A5, A6, A7)> {
+ public:
+  typedef R (RunType)(T*, A1, A2, A3, A4, A5, A6, A7);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4, A5, A6, A7))
+      : method_(method) {
+  }
+
+  R Run(T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5,
+      typename CallbackParamTraits<A6>::ForwardType a6,
+      typename CallbackParamTraits<A7>::ForwardType a7) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6), CallbackForward(a7));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4, A5, A6, A7);
+};
+
+// Const Method: Arity 7.
+template <typename R, typename T, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7>
+class RunnableAdapter<R(T::*)(A1, A2, A3, A4, A5, A6, A7) const> {
+ public:
+  typedef R (RunType)(const T*, A1, A2, A3, A4, A5, A6, A7);
+  typedef true_type IsMethod;
+
+  explicit RunnableAdapter(R(T::*method)(A1, A2, A3, A4, A5, A6, A7) const)
+      : method_(method) {
+  }
+
+  R Run(const T* object, typename CallbackParamTraits<A1>::ForwardType a1,
+      typename CallbackParamTraits<A2>::ForwardType a2,
+      typename CallbackParamTraits<A3>::ForwardType a3,
+      typename CallbackParamTraits<A4>::ForwardType a4,
+      typename CallbackParamTraits<A5>::ForwardType a5,
+      typename CallbackParamTraits<A6>::ForwardType a6,
+      typename CallbackParamTraits<A7>::ForwardType a7) {
+    return (object->*method_)(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6), CallbackForward(a7));
+  }
+
+ private:
+  R (T::*method_)(A1, A2, A3, A4, A5, A6, A7) const;
+};
+
+
+// FunctionTraits<>
+//
+// Breaks a function signature apart into typedefs for easier introspection.
+// There is one specialization for each arity from 0 through 7.
+template <typename Sig>
+struct FunctionTraits;
+
+template <typename R>
+struct FunctionTraits<R()> {
+  typedef R ReturnType;
+};
+
+template <typename R, typename A1>
+struct FunctionTraits<R(A1)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+};
+
+template <typename R, typename A1, typename A2>
+struct FunctionTraits<R(A1, A2)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+  typedef A2 A2Type;
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+struct FunctionTraits<R(A1, A2, A3)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+  typedef A2 A2Type;
+  typedef A3 A3Type;
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+struct FunctionTraits<R(A1, A2, A3, A4)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+  typedef A2 A2Type;
+  typedef A3 A3Type;
+  typedef A4 A4Type;
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+struct FunctionTraits<R(A1, A2, A3, A4, A5)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+  typedef A2 A2Type;
+  typedef A3 A3Type;
+  typedef A4 A4Type;
+  typedef A5 A5Type;
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+struct FunctionTraits<R(A1, A2, A3, A4, A5, A6)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+  typedef A2 A2Type;
+  typedef A3 A3Type;
+  typedef A4 A4Type;
+  typedef A5 A5Type;
+  typedef A6 A6Type;
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+struct FunctionTraits<R(A1, A2, A3, A4, A5, A6, A7)> {
+  typedef R ReturnType;
+  typedef A1 A1Type;
+  typedef A2 A2Type;
+  typedef A3 A3Type;
+  typedef A4 A4Type;
+  typedef A5 A5Type;
+  typedef A6 A6Type;
+  typedef A7 A7Type;
+};
+
+
+// ForceVoidReturn<>
+//
+// Set of templates that support forcing the function return type to void.
+// Used by FunctorTraits<IgnoreResultHelper<T> > to discard return values.
+// There is one specialization for each arity from 0 through 7.
+template <typename Sig>
+struct ForceVoidReturn;
+
+template <typename R>
+struct ForceVoidReturn<R()> {
+  typedef void(RunType)();
+};
+
+template <typename R, typename A1>
+struct ForceVoidReturn<R(A1)> {
+  typedef void(RunType)(A1);
+};
+
+template <typename R, typename A1, typename A2>
+struct ForceVoidReturn<R(A1, A2)> {
+  typedef void(RunType)(A1, A2);
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+struct ForceVoidReturn<R(A1, A2, A3)> {
+  typedef void(RunType)(A1, A2, A3);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+struct ForceVoidReturn<R(A1, A2, A3, A4)> {
+  typedef void(RunType)(A1, A2, A3, A4);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+struct ForceVoidReturn<R(A1, A2, A3, A4, A5)> {
+  typedef void(RunType)(A1, A2, A3, A4, A5);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+struct ForceVoidReturn<R(A1, A2, A3, A4, A5, A6)> {
+  typedef void(RunType)(A1, A2, A3, A4, A5, A6);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+struct ForceVoidReturn<R(A1, A2, A3, A4, A5, A6, A7)> {
+  typedef void(RunType)(A1, A2, A3, A4, A5, A6, A7);
+};
+
+
+// FunctorTraits<>
+//
+// See description at top of file.
+
+// Default case: treat T as a function/method pointer type and adapt it
+// with RunnableAdapter<>.
+template <typename T>
+struct FunctorTraits {
+  typedef RunnableAdapter<T> RunnableType;
+  typedef typename RunnableType::RunType RunType;
+};
+
+// IgnoreResultHelper: same underlying runnable, but with the RunType's
+// return type forced to void so the functor's result is dropped.
+template <typename T>
+struct FunctorTraits<IgnoreResultHelper<T> > {
+  typedef typename FunctorTraits<T>::RunnableType RunnableType;
+  typedef typename ForceVoidReturn<
+      typename RunnableType::RunType>::RunType RunType;
+};
+
+// Callback<>: already in the Runnable type class, so it is used as-is.
+template <typename T>
+struct FunctorTraits<Callback<T> > {
+  typedef Callback<T> RunnableType;
+  typedef typename Callback<T>::RunType RunType;
+};
+
+
+// MakeRunnable<>
+//
+// Converts a passed in functor to a RunnableType using type inference.
+
+// Default case: wrap a raw function/method pointer in a RunnableAdapter<>.
+template <typename T>
+typename FunctorTraits<T>::RunnableType MakeRunnable(const T& t) {
+  return RunnableAdapter<T>(t);
+}
+
+// IgnoreResultHelper: unwrap and convert the underlying functor.
+template <typename T>
+typename FunctorTraits<T>::RunnableType
+MakeRunnable(const IgnoreResultHelper<T>& t) {
+  return MakeRunnable(t.functor_);
+}
+
+// Callback<>: already a Runnable, so return it by const reference.
+// Binding a null callback is a programming error.
+template <typename T>
+const typename FunctorTraits<Callback<T> >::RunnableType&
+MakeRunnable(const Callback<T>& t) {
+  DCHECK(!t.is_null());
+  return t;
+}
+
+
+// InvokeHelper<>
+//
+// There are 3 logical InvokeHelper<> specializations: normal, void-return,
+// WeakCalls.
+//
+// The normal type just calls the underlying runnable.
+//
+// We need an InvokeHelper to handle void return types in order to support
+// IgnoreResult().  Normally, if the Runnable's RunType had a void return,
+// the template system would just accept "return functor.Run()" ignoring
+// the fact that a void function is being used with return. This piece of
+// sugar breaks though when the Runnable's RunType is not void.  Thus, we
+// need a partial specialization to change the syntax to drop the "return"
+// from the invocation call.
+//
+// WeakCalls similarly need special syntax that is applied to the first
+// argument to check if they should no-op themselves.
+// Primary template; specialized below on IsWeakCall, on whether ReturnType
+// is void, and on arity.  Note that only void-returning specializations
+// exist for weak calls: when the WeakPtr<> has been invalidated the call is
+// skipped entirely, so there would be no value to return.
+template <bool IsWeakCall, typename ReturnType, typename Runnable,
+          typename ArgsType>
+struct InvokeHelper;
+
+template <typename ReturnType, typename Runnable>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void()>  {
+  static ReturnType MakeItSo(Runnable runnable) {
+    return runnable.Run();
+  }
+};
+
+template <typename Runnable>
+struct InvokeHelper<false, void, Runnable,
+    void()>  {
+  static void MakeItSo(Runnable runnable) {
+    runnable.Run();
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1) {
+    return runnable.Run(CallbackForward(a1));
+  }
+};
+
+template <typename Runnable,typename A1>
+struct InvokeHelper<false, void, Runnable,
+    void(A1)>  {
+  static void MakeItSo(Runnable runnable, A1 a1) {
+    runnable.Run(CallbackForward(a1));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr) {
+    // No-op if the weak pointer has been invalidated.
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get());
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1, typename A2>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1, A2)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1, A2 a2) {
+    return runnable.Run(CallbackForward(a1), CallbackForward(a2));
+  }
+};
+
+template <typename Runnable,typename A1, typename A2>
+struct InvokeHelper<false, void, Runnable,
+    void(A1, A2)>  {
+  static void MakeItSo(Runnable runnable, A1 a1, A2 a2) {
+    runnable.Run(CallbackForward(a1), CallbackForward(a2));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr, typename A2>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr, A2)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, A2 a2) {
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get(), CallbackForward(a2));
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1, typename A2,
+    typename A3>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1, A2, A3)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3) {
+    return runnable.Run(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3));
+  }
+};
+
+template <typename Runnable,typename A1, typename A2, typename A3>
+struct InvokeHelper<false, void, Runnable,
+    void(A1, A2, A3)>  {
+  static void MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3) {
+    runnable.Run(CallbackForward(a1), CallbackForward(a2), CallbackForward(a3));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr, typename A2, typename A3>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr, A2, A3)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, A2 a2, A3 a3) {
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get(), CallbackForward(a2), CallbackForward(a3));
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1, typename A2,
+    typename A3, typename A4>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1, A2, A3, A4)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4) {
+    return runnable.Run(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4));
+  }
+};
+
+template <typename Runnable,typename A1, typename A2, typename A3, typename A4>
+struct InvokeHelper<false, void, Runnable,
+    void(A1, A2, A3, A4)>  {
+  static void MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4) {
+    runnable.Run(CallbackForward(a1), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr, typename A2, typename A3,
+    typename A4>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr, A2, A3, A4)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, A2 a2, A3 a3,
+      A4 a4) {
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get(), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4));
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1, typename A2,
+    typename A3, typename A4, typename A5>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1, A2, A3, A4, A5)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4,
+      A5 a5) {
+    return runnable.Run(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5));
+  }
+};
+
+template <typename Runnable,typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+struct InvokeHelper<false, void, Runnable,
+    void(A1, A2, A3, A4, A5)>  {
+  static void MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) {
+    runnable.Run(CallbackForward(a1), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4), CallbackForward(a5));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr, typename A2, typename A3,
+    typename A4, typename A5>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr, A2, A3, A4, A5)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, A2 a2, A3 a3,
+      A4 a4, A5 a5) {
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get(), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4), CallbackForward(a5));
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1, typename A2,
+    typename A3, typename A4, typename A5, typename A6>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1, A2, A3, A4, A5, A6)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4,
+      A5 a5, A6 a6) {
+    return runnable.Run(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6));
+  }
+};
+
+template <typename Runnable,typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+struct InvokeHelper<false, void, Runnable,
+    void(A1, A2, A3, A4, A5, A6)>  {
+  static void MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5,
+      A6 a6) {
+    runnable.Run(CallbackForward(a1), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4), CallbackForward(a5), CallbackForward(a6));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr, typename A2, typename A3,
+    typename A4, typename A5, typename A6>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr, A2, A3, A4, A5, A6)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, A2 a2, A3 a3,
+      A4 a4, A5 a5, A6 a6) {
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get(), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4), CallbackForward(a5), CallbackForward(a6));
+  }
+};
+
+template <typename ReturnType, typename Runnable,typename A1, typename A2,
+    typename A3, typename A4, typename A5, typename A6, typename A7>
+struct InvokeHelper<false, ReturnType, Runnable,
+    void(A1, A2, A3, A4, A5, A6, A7)>  {
+  static ReturnType MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4,
+      A5 a5, A6 a6, A7 a7) {
+    return runnable.Run(CallbackForward(a1), CallbackForward(a2),
+        CallbackForward(a3), CallbackForward(a4), CallbackForward(a5),
+        CallbackForward(a6), CallbackForward(a7));
+  }
+};
+
+template <typename Runnable,typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+struct InvokeHelper<false, void, Runnable,
+    void(A1, A2, A3, A4, A5, A6, A7)>  {
+  static void MakeItSo(Runnable runnable, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5,
+      A6 a6, A7 a7) {
+    runnable.Run(CallbackForward(a1), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4), CallbackForward(a5), CallbackForward(a6),
+        CallbackForward(a7));
+  }
+};
+
+template <typename Runnable, typename BoundWeakPtr, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7>
+struct InvokeHelper<true, void, Runnable,
+    void(BoundWeakPtr, A2, A3, A4, A5, A6, A7)>  {
+  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, A2 a2, A3 a3,
+      A4 a4, A5 a5, A6 a6, A7 a7) {
+    if (!weak_ptr.get()) {
+      return;
+    }
+    runnable.Run(weak_ptr.get(), CallbackForward(a2), CallbackForward(a3),
+        CallbackForward(a4), CallbackForward(a5), CallbackForward(a6),
+        CallbackForward(a7));
+  }
+};
+
+#if !defined(_MSC_VER)
+
+template <typename ReturnType, typename Runnable, typename ArgsType>
+struct InvokeHelper<true, ReturnType, Runnable, ArgsType> {
+  // WeakCalls are only supported for functions with a void return type.
+  // Otherwise, the function result would be undefined if the the WeakPtr<>
+  // is invalidated.
+  COMPILE_ASSERT(is_void<ReturnType>::value,
+                 weak_ptrs_can_only_bind_to_methods_without_return_values);
+};
+
+#endif
+
+// Invoker<>
+//
+// See description at the top of the file.
+template <int NumBound, typename Storage, typename RunType>
+struct Invoker;
+
+// Arity 0 -> 0.
+template <typename StorageType, typename R>
+struct Invoker<0, StorageType, R()> {
+  typedef R(RunType)(BindStateBase*);
+
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void()>
+               ::MakeItSo(storage->runnable_);
+  }
+};
+
+// Arity 1 -> 1.
+template <typename StorageType, typename R,typename X1>
+struct Invoker<0, StorageType, R(X1)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType);
+
+  typedef R(UnboundRunType)(X1);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1));
+  }
+};
+
+// Arity 1 -> 0.
+template <typename StorageType, typename R,typename X1>
+struct Invoker<1, StorageType, R(X1)> {
+  typedef R(RunType)(BindStateBase*);
+
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1));
+  }
+};
+
+// Arity 2 -> 2.
+template <typename StorageType, typename R,typename X1, typename X2>
+struct Invoker<0, StorageType, R(X1, X2)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType,
+      typename CallbackParamTraits<X2>::ForwardType);
+
+  typedef R(UnboundRunType)(X1, X2);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1,
+      typename CallbackParamTraits<X2>::ForwardType x2) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1,
+               typename CallbackParamTraits<X2>::ForwardType x2)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2));
+  }
+};
+
+// Arity 2 -> 1.
+template <typename StorageType, typename R,typename X1, typename X2>
+struct Invoker<1, StorageType, R(X1, X2)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X2>::ForwardType);
+
+  typedef R(UnboundRunType)(X2);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X2>::ForwardType x2) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X2>::ForwardType x2)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2));
+  }
+};
+
+// Arity 2 -> 0.
+template <typename StorageType, typename R,typename X1, typename X2>
+struct Invoker<2, StorageType, R(X1, X2)> {
+  typedef R(RunType)(BindStateBase*);
+
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2));
+  }
+};
+
+// Arity 3 -> 3.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3>
+struct Invoker<0, StorageType, R(X1, X2, X3)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType);
+
+  typedef R(UnboundRunType)(X1, X2, X3);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3));
+  }
+};
+
+// Arity 3 -> 2.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3>
+struct Invoker<1, StorageType, R(X1, X2, X3)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType);
+
+  typedef R(UnboundRunType)(X2, X3);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3));
+  }
+};
+
+// Arity 3 -> 1.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3>
+struct Invoker<2, StorageType, R(X1, X2, X3)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X3>::ForwardType);
+
+  typedef R(UnboundRunType)(X3);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X3>::ForwardType x3) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X3>::ForwardType x3)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3));
+  }
+};
+
+// Arity 3 -> 0.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3>
+struct Invoker<3, StorageType, R(X1, X2, X3)> {
+  typedef R(RunType)(BindStateBase*);
+
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3));
+  }
+};
+
+// Arity 4 -> 4.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4>
+struct Invoker<0, StorageType, R(X1, X2, X3, X4)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType);
+
+  typedef R(UnboundRunType)(X1, X2, X3, X4);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4));
+  }
+};
+
+// Arity 4 -> 3.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4>
+struct Invoker<1, StorageType, R(X1, X2, X3, X4)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType);
+
+  typedef R(UnboundRunType)(X2, X3, X4);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4));
+  }
+};
+
+// Arity 4 -> 2.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4>
+struct Invoker<2, StorageType, R(X1, X2, X3, X4)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType);
+
+  typedef R(UnboundRunType)(X3, X4);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4));
+  }
+};
+
+// Arity 4 -> 1.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4>
+struct Invoker<3, StorageType, R(X1, X2, X3, X4)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X4>::ForwardType);
+
+  typedef R(UnboundRunType)(X4);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X4>::ForwardType x4) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X4>::ForwardType x4)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4));
+  }
+};
+
+// Arity 4 -> 0.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4>
+struct Invoker<4, StorageType, R(X1, X2, X3, X4)> {
+  typedef R(RunType)(BindStateBase*);
+
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4));
+  }
+};
+
+// Arity 5 -> 5.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5>
+struct Invoker<0, StorageType, R(X1, X2, X3, X4, X5)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType);
+
+  typedef R(UnboundRunType)(X1, X2, X3, X4, X5);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5));
+  }
+};
+
+// Arity 5 -> 4.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5>
+struct Invoker<1, StorageType, R(X1, X2, X3, X4, X5)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType);
+
+  typedef R(UnboundRunType)(X2, X3, X4, X5);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5));
+  }
+};
+
+// Arity 5 -> 3.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5>
+struct Invoker<2, StorageType, R(X1, X2, X3, X4, X5)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType);
+
+  typedef R(UnboundRunType)(X3, X4, X5);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5));
+  }
+};
+
+// Arity 5 -> 2.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5>
+struct Invoker<3, StorageType, R(X1, X2, X3, X4, X5)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType);
+
+  typedef R(UnboundRunType)(X4, X5);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5));
+  }
+};
+
+// Arity 5 -> 1: four arguments live in the BindState; one (x5) is
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5>
+struct Invoker<4, StorageType, R(X1, X2, X3, X4, X5)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X5>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X5);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X5>::ForwardType x5) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X5>::ForwardType x5)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5));
+  }
+};
+
+// Arity 5 -> 0: all five arguments live in the BindState; Run() takes
+// nothing from the caller.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5>
+struct Invoker<5, StorageType, R(X1, X2, X3, X4, X5)> {
+  // Type-erased signature stored in the Callback: only the BindState,
+  // since every argument is already bound.
+  typedef R(RunType)(BindStateBase*);
+
+  // Signature exposed to Callback users: no unbound arguments remain.
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+    typedef typename StorageType::Bound5UnwrapTraits Bound5UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    typename Bound5UnwrapTraits::ForwardType x5 =
+        Bound5UnwrapTraits::Unwrap(storage->p5_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename Bound5UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5));
+  }
+};
+
+// Arity 6 -> 6: nothing is bound; all six arguments are supplied by the
+// caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<0, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X1, X2, X3, X4, X5, X6);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    // No arguments were bound, so everything is forwarded straight through
+    // to the stored runnable via InvokeHelper (selected on IsWeakCall).
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 6 -> 5: one argument lives in the BindState; five (x2..x6) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<1, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X2, X3, X4, X5, X6);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    // Unwrap the bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 6 -> 4: two arguments live in the BindState; four (x3..x6) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<2, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X3, X4, X5, X6);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 6 -> 3: three arguments live in the BindState; three (x4..x6) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<3, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X4, X5, X6);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 6 -> 2: four arguments live in the BindState; two (x5, x6) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<4, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X5, X6);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 6 -> 1: five arguments live in the BindState; one (x6) is
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<5, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X6>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X6);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X6>::ForwardType x6) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+    typedef typename StorageType::Bound5UnwrapTraits Bound5UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    typename Bound5UnwrapTraits::ForwardType x5 =
+        Bound5UnwrapTraits::Unwrap(storage->p5_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename Bound5UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X6>::ForwardType x6)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 6 -> 0: all six arguments live in the BindState; Run() takes
+// nothing from the caller.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6>
+struct Invoker<6, StorageType, R(X1, X2, X3, X4, X5, X6)> {
+  // Type-erased signature stored in the Callback: only the BindState,
+  // since every argument is already bound.
+  typedef R(RunType)(BindStateBase*);
+
+  // Signature exposed to Callback users: no unbound arguments remain.
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+    typedef typename StorageType::Bound5UnwrapTraits Bound5UnwrapTraits;
+    typedef typename StorageType::Bound6UnwrapTraits Bound6UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    typename Bound5UnwrapTraits::ForwardType x5 =
+        Bound5UnwrapTraits::Unwrap(storage->p5_);
+    typename Bound6UnwrapTraits::ForwardType x6 =
+        Bound6UnwrapTraits::Unwrap(storage->p6_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename Bound5UnwrapTraits::ForwardType,
+               typename Bound6UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6));
+  }
+};
+
+// Arity 7 -> 7: nothing is bound; all seven arguments are supplied by the
+// caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<0, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X1>::ForwardType,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X1, X2, X3, X4, X5, X6, X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X1>::ForwardType x1,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+
+    // No arguments were bound, so everything is forwarded straight through
+    // to the stored runnable via InvokeHelper (selected on IsWeakCall).
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename CallbackParamTraits<X1>::ForwardType x1,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 6: one argument lives in the BindState; six (x2..x7) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<1, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X2>::ForwardType,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X2, X3, X4, X5, X6, X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X2>::ForwardType x2,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+
+    // Unwrap the bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X2>::ForwardType x2,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 5: two arguments live in the BindState; five (x3..x7) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<2, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X3>::ForwardType,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X3, X4, X5, X6, X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X3>::ForwardType x3,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X3>::ForwardType x3,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 4: three arguments live in the BindState; four (x4..x7) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<3, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X4>::ForwardType,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X4, X5, X6, X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X4>::ForwardType x4,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X4>::ForwardType x4,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 3: four arguments live in the BindState; three (x5..x7) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<4, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X5>::ForwardType,
+      typename CallbackParamTraits<X6>::ForwardType,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X5, X6, X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X5>::ForwardType x5,
+      typename CallbackParamTraits<X6>::ForwardType x6,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X5>::ForwardType x5,
+               typename CallbackParamTraits<X6>::ForwardType x6,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 2: five arguments live in the BindState; two (x6, x7) are
+// supplied by the caller at Run() time.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<5, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  // Type-erased signature stored in the Callback: the BindState plus each
+  // still-unbound argument, passed via CallbackParamTraits::ForwardType.
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X6>::ForwardType,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  // Signature exposed to Callback users: the unbound arguments only.
+  typedef R(UnboundRunType)(X6, X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X6>::ForwardType x6,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    // Unchecked static downcast; `base` is assumed to be the StorageType
+    // this Invoker was instantiated with.
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+    typedef typename StorageType::Bound5UnwrapTraits Bound5UnwrapTraits;
+
+    // Unwrap each bound argument out of the BindState via its unwrap traits.
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    typename Bound5UnwrapTraits::ForwardType x5 =
+        Bound5UnwrapTraits::Unwrap(storage->p5_);
+    // InvokeHelper is selected on StorageType::IsWeakCall; it forwards the
+    // full argument list (bound + unbound) to the stored runnable.
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename Bound5UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X6>::ForwardType x6,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 1.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<6, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  typedef R(RunType)(BindStateBase*,
+      typename CallbackParamTraits<X7>::ForwardType);
+
+  typedef R(UnboundRunType)(X7);
+
+  static R Run(BindStateBase* base,
+      typename CallbackParamTraits<X7>::ForwardType x7) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+    typedef typename StorageType::Bound5UnwrapTraits Bound5UnwrapTraits;
+    typedef typename StorageType::Bound6UnwrapTraits Bound6UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    typename Bound5UnwrapTraits::ForwardType x5 =
+        Bound5UnwrapTraits::Unwrap(storage->p5_);
+    typename Bound6UnwrapTraits::ForwardType x6 =
+        Bound6UnwrapTraits::Unwrap(storage->p6_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename Bound5UnwrapTraits::ForwardType,
+               typename Bound6UnwrapTraits::ForwardType,
+               typename CallbackParamTraits<X7>::ForwardType x7)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+// Arity 7 -> 0.
+template <typename StorageType, typename R,typename X1, typename X2,
+    typename X3, typename X4, typename X5, typename X6, typename X7>
+struct Invoker<7, StorageType, R(X1, X2, X3, X4, X5, X6, X7)> {
+  typedef R(RunType)(BindStateBase*);
+
+  typedef R(UnboundRunType)();
+
+  static R Run(BindStateBase* base) {
+    StorageType* storage = static_cast<StorageType*>(base);
+
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    typedef typename StorageType::Bound1UnwrapTraits Bound1UnwrapTraits;
+    typedef typename StorageType::Bound2UnwrapTraits Bound2UnwrapTraits;
+    typedef typename StorageType::Bound3UnwrapTraits Bound3UnwrapTraits;
+    typedef typename StorageType::Bound4UnwrapTraits Bound4UnwrapTraits;
+    typedef typename StorageType::Bound5UnwrapTraits Bound5UnwrapTraits;
+    typedef typename StorageType::Bound6UnwrapTraits Bound6UnwrapTraits;
+    typedef typename StorageType::Bound7UnwrapTraits Bound7UnwrapTraits;
+
+    typename Bound1UnwrapTraits::ForwardType x1 =
+        Bound1UnwrapTraits::Unwrap(storage->p1_);
+    typename Bound2UnwrapTraits::ForwardType x2 =
+        Bound2UnwrapTraits::Unwrap(storage->p2_);
+    typename Bound3UnwrapTraits::ForwardType x3 =
+        Bound3UnwrapTraits::Unwrap(storage->p3_);
+    typename Bound4UnwrapTraits::ForwardType x4 =
+        Bound4UnwrapTraits::Unwrap(storage->p4_);
+    typename Bound5UnwrapTraits::ForwardType x5 =
+        Bound5UnwrapTraits::Unwrap(storage->p5_);
+    typename Bound6UnwrapTraits::ForwardType x6 =
+        Bound6UnwrapTraits::Unwrap(storage->p6_);
+    typename Bound7UnwrapTraits::ForwardType x7 =
+        Bound7UnwrapTraits::Unwrap(storage->p7_);
+    return InvokeHelper<StorageType::IsWeakCall::value, R,
+           typename StorageType::RunnableType,
+           void(typename Bound1UnwrapTraits::ForwardType,
+               typename Bound2UnwrapTraits::ForwardType,
+               typename Bound3UnwrapTraits::ForwardType,
+               typename Bound4UnwrapTraits::ForwardType,
+               typename Bound5UnwrapTraits::ForwardType,
+               typename Bound6UnwrapTraits::ForwardType,
+               typename Bound7UnwrapTraits::ForwardType)>
+               ::MakeItSo(storage->runnable_, CallbackForward(x1),
+                   CallbackForward(x2), CallbackForward(x3),
+                   CallbackForward(x4), CallbackForward(x5),
+                   CallbackForward(x6), CallbackForward(x7));
+  }
+};
+
+
+// BindState<>
+//
+// This stores all the state passed into Bind() and is also where most
+// of the template resolution magic occurs.
+//
+// Runnable is the functor we are binding arguments to.
+// RunType is type of the Run() function that the Invoker<> should use.
+// Normally, this is the same as the RunType of the Runnable, but it can
+// be different if an adapter like IgnoreResult() has been used.
+//
+// BoundArgsType contains the storage type for all the bound arguments by
+// (ab)using a function type.
+//
+// Each specialization below stores its N bound arguments as members
+// p1_..pN_. Note MaybeRefcount AddRef/Release is applied to p1_ only --
+// presumably because for method binds the first bound argument is the
+// receiver object, the one argument whose lifetime Bind() manages
+// (NOTE(review): confirm against the MaybeRefcount/HasIsMethodTag
+// definitions in bind_helpers.h). Destruction is virtual via
+// BindStateBase, so Callback<> can delete through the base pointer.
+template <typename Runnable, typename RunType, typename BoundArgsType>
+struct BindState;
+
+template <typename Runnable, typename RunType>
+struct BindState<Runnable, RunType, void()> : public BindStateBase {
+  typedef Runnable RunnableType;
+  // No bound arguments, hence never a weak-pointer-cancelled call.
+  typedef false_type IsWeakCall;
+  typedef Invoker<0, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+  explicit BindState(const Runnable& runnable)
+      : runnable_(runnable) {
+  }
+
+  virtual ~BindState() {  }
+
+  RunnableType runnable_;
+};
+
+template <typename Runnable, typename RunType, typename P1>
+struct BindState<Runnable, RunType, void(P1)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<1, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1)
+      : runnable_(runnable),
+        p1_(p1) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+};
+
+template <typename Runnable, typename RunType, typename P1, typename P2>
+struct BindState<Runnable, RunType, void(P1, P2)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<2, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+  typedef UnwrapTraits<P2> Bound2UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1, const P2& p2)
+      : runnable_(runnable),
+        p1_(p1),
+        p2_(p2) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+  P2 p2_;
+};
+
+template <typename Runnable, typename RunType, typename P1, typename P2,
+    typename P3>
+struct BindState<Runnable, RunType, void(P1, P2, P3)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<3, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+  typedef UnwrapTraits<P2> Bound2UnwrapTraits;
+  typedef UnwrapTraits<P3> Bound3UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1, const P2& p2, const P3& p3)
+      : runnable_(runnable),
+        p1_(p1),
+        p2_(p2),
+        p3_(p3) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+  P2 p2_;
+  P3 p3_;
+};
+
+template <typename Runnable, typename RunType, typename P1, typename P2,
+    typename P3, typename P4>
+struct BindState<Runnable, RunType, void(P1, P2, P3,
+    P4)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<4, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+  typedef UnwrapTraits<P2> Bound2UnwrapTraits;
+  typedef UnwrapTraits<P3> Bound3UnwrapTraits;
+  typedef UnwrapTraits<P4> Bound4UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1, const P2& p2, const P3& p3,
+      const P4& p4)
+      : runnable_(runnable),
+        p1_(p1),
+        p2_(p2),
+        p3_(p3),
+        p4_(p4) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+  P2 p2_;
+  P3 p3_;
+  P4 p4_;
+};
+
+template <typename Runnable, typename RunType, typename P1, typename P2,
+    typename P3, typename P4, typename P5>
+struct BindState<Runnable, RunType, void(P1, P2, P3, P4,
+    P5)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<5, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+  typedef UnwrapTraits<P2> Bound2UnwrapTraits;
+  typedef UnwrapTraits<P3> Bound3UnwrapTraits;
+  typedef UnwrapTraits<P4> Bound4UnwrapTraits;
+  typedef UnwrapTraits<P5> Bound5UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1, const P2& p2, const P3& p3,
+      const P4& p4, const P5& p5)
+      : runnable_(runnable),
+        p1_(p1),
+        p2_(p2),
+        p3_(p3),
+        p4_(p4),
+        p5_(p5) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+  P2 p2_;
+  P3 p3_;
+  P4 p4_;
+  P5 p5_;
+};
+
+template <typename Runnable, typename RunType, typename P1, typename P2,
+    typename P3, typename P4, typename P5, typename P6>
+struct BindState<Runnable, RunType, void(P1, P2, P3, P4, P5,
+    P6)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<6, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+  typedef UnwrapTraits<P2> Bound2UnwrapTraits;
+  typedef UnwrapTraits<P3> Bound3UnwrapTraits;
+  typedef UnwrapTraits<P4> Bound4UnwrapTraits;
+  typedef UnwrapTraits<P5> Bound5UnwrapTraits;
+  typedef UnwrapTraits<P6> Bound6UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1, const P2& p2, const P3& p3,
+      const P4& p4, const P5& p5, const P6& p6)
+      : runnable_(runnable),
+        p1_(p1),
+        p2_(p2),
+        p3_(p3),
+        p4_(p4),
+        p5_(p5),
+        p6_(p6) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+  P2 p2_;
+  P3 p3_;
+  P4 p4_;
+  P5 p5_;
+  P6 p6_;
+};
+
+template <typename Runnable, typename RunType, typename P1, typename P2,
+    typename P3, typename P4, typename P5, typename P6, typename P7>
+struct BindState<Runnable, RunType, void(P1, P2, P3, P4, P5, P6,
+    P7)> : public BindStateBase {
+  typedef Runnable RunnableType;
+  typedef IsWeakMethod<HasIsMethodTag<Runnable>::value, P1> IsWeakCall;
+  typedef Invoker<7, BindState, RunType> InvokerType;
+  typedef typename InvokerType::UnboundRunType UnboundRunType;
+
+  // Convenience typedefs for bound argument types.
+  typedef UnwrapTraits<P1> Bound1UnwrapTraits;
+  typedef UnwrapTraits<P2> Bound2UnwrapTraits;
+  typedef UnwrapTraits<P3> Bound3UnwrapTraits;
+  typedef UnwrapTraits<P4> Bound4UnwrapTraits;
+  typedef UnwrapTraits<P5> Bound5UnwrapTraits;
+  typedef UnwrapTraits<P6> Bound6UnwrapTraits;
+  typedef UnwrapTraits<P7> Bound7UnwrapTraits;
+
+  BindState(const Runnable& runnable, const P1& p1, const P2& p2, const P3& p3,
+      const P4& p4, const P5& p5, const P6& p6, const P7& p7)
+      : runnable_(runnable),
+        p1_(p1),
+        p2_(p2),
+        p3_(p3),
+        p4_(p4),
+        p5_(p5),
+        p6_(p6),
+        p7_(p7) {
+    MaybeRefcount<HasIsMethodTag<Runnable>::value, P1>::AddRef(p1_);
+  }
+
+  virtual ~BindState() {    MaybeRefcount<HasIsMethodTag<Runnable>::value,
+      P1>::Release(p1_);  }
+
+  RunnableType runnable_;
+  P1 p1_;
+  P2 p2_;
+  P3 p3_;
+  P4 p4_;
+  P5 p5_;
+  P6 p6_;
+  P7 p7_;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_BIND_INTERNAL_H_

+ 88 - 0
base/bit_array.h

@@ -0,0 +1,88 @@
+// Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
+//
+// Provide functions to get/set bits of an integral array. These functions
+// are not threadsafe because operations on different bits may modify a same
+// integer.
+//
+// Author: Ge,Jun (gejun@base.com)
+// Date: Tue Feb 25 23:43:39 CST 2014
+#ifndef BASE_BIT_ARRAY_H
+#define BASE_BIT_ARRAY_H
+
+#include <stdint.h>
+#include <stdlib.h>     // malloc (bit_array_malloc)
+#include <string.h>     // memset (bit_array_clear)
+#include <algorithm>    // std::min (bit_array_first1)
+
+namespace base {
+
// Allocate an array with capacity for at least |nbit| bits.
// The bits are left uninitialized; call bit_array_clear() before relying
// on their values. Returns NULL when |nbit| is 0 (or when malloc fails).
// The caller owns the returned memory and must release it with free().
inline uint64_t* bit_array_malloc(size_t nbit)
{
    if (nbit == 0) {
        return NULL;
    }
    // Round up to whole 64-bit words; each word occupies 8 bytes.
    // (Note this is words * 8, not nbit / 8.)
    const size_t nword = (nbit + 63) / 64;
    return (uint64_t*)malloc(nword * 8);
}
+
// Reset bits [0, nbit) of |array| to 0. Bits at positions >= nbit in the
// last touched word are preserved.
inline void bit_array_clear(uint64_t* array, size_t nbit)
{
    // Zero the fully-covered 64-bit words in one shot.
    const size_t nword = nbit / 64;
    memset(array, 0, nword * 8);
    // Clear only the low |nrem| bits of the trailing partial word (if any).
    const size_t nrem = nbit - nword * 64;
    if (nrem != 0) {
        array[nword] &= ~((((uint64_t)1) << nrem) - 1);
    }
}
+
// Turn on bit |i| of |array|, i.e. bit (i % 64) of word (i / 64).
// Not threadsafe: setting two bits of the same word may race.
inline void bit_array_set(uint64_t* array, size_t i)
{
    array[i / 64] |= (((uint64_t)1) << (i % 64));
}
+
// Turn off bit |i| of |array|, i.e. bit (i % 64) of word (i / 64).
// Not threadsafe: clearing two bits of the same word may race.
inline void bit_array_unset(uint64_t* array, size_t i)
{
    array[i / 64] &= ~(((uint64_t)1) << (i % 64));
}
+
// Test bit |i| of |array|. Returns the isolated bit in place (non-zero iff
// the bit is set) -- NOT normalized to 0/1; compare against 0, not 1.
inline uint64_t bit_array_get(const uint64_t* array, size_t i)
{
    return array[i / 64] & (((uint64_t)1) << (i % 64));
}
+
// Find the index of the first 1-bit in bits [begin, end) of |array|.
// Returns |end| if all bits in the range are 0. Requires begin <= end.
// This function is of O(end - begin) complexity.
inline size_t bit_array_first1(const uint64_t* array, size_t begin, size_t end)
{
    size_t off1 = (begin >> 6);
    const size_t first = (off1 << 6);
    if (first != begin) {
        // |begin| is not 64-aligned: mask off the bits below |begin| in the
        // first (partial) word before scanning it.
        const uint64_t v =
            array[off1] & ~((((uint64_t)1) << (begin - first)) - 1);
        if (v) {
            // FIX: use __builtin_ctzll, not __builtin_ctzl -- `unsigned
            // long` is 32 bits on 32-bit targets (ARCH_CPU_32_BITS is
            // supported), so ctzl would truncate the 64-bit word.
            // min() caps the result at |end| for bits past the range.
            return std::min(first + __builtin_ctzll(v), end);
        }
        ++off1;
    }

    // Scan the fully-covered words.
    const size_t off2 = (end >> 6);
    for (size_t i = off1; i < off2; ++i) {
        if (array[i]) {
            return i * 64 + __builtin_ctzll(array[i]);
        }
    }
    // Trailing partial word: a hit at or past |end| is reported as |end|.
    const size_t last = (off2 << 6);
    if (last != end && array[off2]) {
        return std::min(last + __builtin_ctzll(array[off2]), end);
    }
    return end;
}
+
+}  // end namespace base
+
+#endif  // BASE_BIT_ARRAY_H

+ 47 - 0
base/bits.h

@@ -0,0 +1,47 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines some bit utilities.
+
+#ifndef BASE_BITS_H_
+#define BASE_BITS_H_
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace base {
+namespace bits {
+
+// Returns the integer i such as 2^i <= n < 2^(i+1)
+inline int Log2Floor(uint32_t n) {
+  if (n == 0)
+    return -1;
+  int log = 0;
+  uint32_t value = n;
+  for (int i = 4; i >= 0; --i) {
+    int shift = (1 << i);
+    uint32_t x = value >> shift;
+    if (x != 0) {
+      value = x;
+      log += shift;
+    }
+  }
+  DCHECK_EQ(value, 1u);
+  return log;
+}
+
+// Returns the integer i such as 2^(i-1) < n <= 2^i
+inline int Log2Ceiling(uint32_t n) {
+  if (n == 0) {
+    return -1;
+  } else {
+    // Log2Floor returns -1 for 0, so the following works correctly for n=1.
+    return 1 + Log2Floor(n - 1);
+  }
+}
+
+}  // namespace bits
+}  // namespace base
+
+#endif  // BASE_BITS_H_

+ 176 - 0
base/build_config.h

@@ -0,0 +1,176 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file adds defines about the platform we're currently building on.
+//  Operating System:
+//    OS_WIN / OS_MACOSX / OS_LINUX / OS_POSIX (MACOSX or LINUX) /
+//    OS_NACL (NACL_SFI or NACL_NONSFI) / OS_NACL_SFI / OS_NACL_NONSFI
+//  Compiler:
+//    COMPILER_MSVC / COMPILER_GCC
+//  Processor:
+//    ARCH_CPU_X86 / ARCH_CPU_X86_64 / ARCH_CPU_X86_FAMILY (X86 or X86_64)
+//    ARCH_CPU_32_BITS / ARCH_CPU_64_BITS
+
+#ifndef BASE_BUILD_CONFIG_H_
+#define BASE_BUILD_CONFIG_H_
+
+// A set of macros to use for platform detection.
+#if defined(__native_client__)
+// __native_client__ must be first, so that other OS_ defines are not set.
+#define OS_NACL 1
+// OS_NACL comes in two sandboxing technology flavors, SFI or Non-SFI.
+// PNaCl toolchain defines __native_client_nonsfi__ macro in Non-SFI build
+// mode, while it does not in SFI build mode.
+#if defined(__native_client_nonsfi__)
+#define OS_NACL_NONSFI
+#else
+#define OS_NACL_SFI
+#endif
+#elif defined(ANDROID)
+#define OS_ANDROID 1
+#elif defined(__APPLE__)
+// only include TargetConditions after testing ANDROID as some android builds
+// on mac don't have this header available and it's not needed unless the target
+// is really mac/ios.
+#include <TargetConditionals.h>
+#define OS_MACOSX 1
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#define OS_IOS 1
+#endif  // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#elif defined(__linux__)
+#define OS_LINUX 1
+// include a system header to pull in features.h for glibc/uclibc macros.
+#include <unistd.h>
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+// we really are using glibc, not uClibc pretending to be glibc
+#define LIBC_GLIBC 1
+#endif
+#elif defined(_WIN32)
+#define OS_WIN 1
+#define TOOLKIT_VIEWS 1
+#elif defined(__FreeBSD__)
+#define OS_FREEBSD 1
+#elif defined(__OpenBSD__)
+#define OS_OPENBSD 1
+#elif defined(__sun)
+#define OS_SOLARIS 1
+#elif defined(__QNXNTO__)
+#define OS_QNX 1
+#else
+#error Please add support for your platform in base/build_config.h
+#endif
+
+#if defined(USE_OPENSSL_CERTS) && defined(USE_NSS_CERTS)
+#error Cannot use both OpenSSL and NSS for certificates
+#endif
+
+// For access to standard BSD features, use OS_BSD instead of a
+// more specific macro.
+#if defined(OS_FREEBSD) || defined(OS_OPENBSD)
+#define OS_BSD 1
+#endif
+
+// For access to standard POSIXish features, use OS_POSIX instead of a
+// more specific macro.
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_FREEBSD) ||     \
+    defined(OS_OPENBSD) || defined(OS_SOLARIS) || defined(OS_ANDROID) ||  \
+    defined(OS_NACL) || defined(OS_QNX)
+#define OS_POSIX 1
+#endif
+
+// Use tcmalloc
+#if (defined(OS_WIN) || defined(OS_LINUX) || defined(OS_ANDROID)) && \
+    !defined(NO_TCMALLOC)
+#define USE_TCMALLOC 1
+#endif
+
+// Compiler detection.
+#if defined(__GNUC__)
+#define COMPILER_GCC 1
+#elif defined(_MSC_VER)
+#define COMPILER_MSVC 1
+#else
+#error Please add support for your compiler in base/build_config.h
+#endif
+
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define ARCH_CPU_X86_FAMILY 1
+#define ARCH_CPU_X86_64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define ARCH_CPU_X86_FAMILY 1
+#define ARCH_CPU_X86 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__ARMEL__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARMEL 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__aarch64__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARM64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__pnacl__)
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__MIPSEL__)
+#if defined(__LP64__)
+#define ARCH_CPU_MIPS64_FAMILY 1
+#define ARCH_CPU_MIPS64EL 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#else
+#define ARCH_CPU_MIPS_FAMILY 1
+#define ARCH_CPU_MIPSEL 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#endif
+#else
+#error Please add support for your architecture in base/build_config.h
+#endif
+
+// Type detection for wchar_t.
+#if defined(OS_WIN)
+#define WCHAR_T_IS_UTF16
+#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
+    defined(__WCHAR_MAX__) && \
+    (__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
+#define WCHAR_T_IS_UTF32
+#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
+    defined(__WCHAR_MAX__) && \
+    (__WCHAR_MAX__ == 0x7fff || __WCHAR_MAX__ == 0xffff)
+// On Posix, we'll detect short wchar_t, but projects aren't guaranteed to
+// compile in this mode (in particular, Chrome doesn't). This is intended for
+// other projects using base who manage their own dependencies and make sure
+// short wchar works for them.
+#define WCHAR_T_IS_UTF16
+#else
+#error Please add support for your compiler in base/build_config.h
+#endif
+
+#if defined(OS_ANDROID)
+// The compiler thinks std::string::const_iterator and "const char*" are
+// equivalent types.
+#define STD_STRING_ITERATOR_IS_CHAR_POINTER
+// The compiler thinks base::string16::const_iterator and "char16*" are
+// equivalent types.
+#define BASE_STRING16_ITERATOR_IS_CHAR16_POINTER
+#endif
+
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+#define BASE_CXX11_ENABLED 1
+#endif
+
+#if !defined(BASE_CXX11_ENABLED)
+// Pre-C++11 fallback so sources can use `nullptr` uniformly.
+// NOTE(review): defining a macro named `nullptr` is fragile -- the
+// preprocessor rewrites every later occurrence of the token, including in
+// any header included afterwards, and NULL lacks nullptr_t's
+// overload-resolution behavior. Confirm no translation unit is built with
+// a compiler that supports nullptr without reporting C++11 here.
+#define nullptr NULL
+#endif
+
+#endif  // BASE_BUILD_CONFIG_H_

+ 25 - 0
base/build_time.cc

@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/build_time.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+
+Time GetBuildTime() {
+  Time integral_build_time;
+  // The format of __DATE__ and __TIME__ is specified by the ANSI C Standard,
+  // section 6.8.8.
+  //
+  // __DATE__ is exactly "Mmm DD YYYY".
+  // __TIME__ is exactly "hh:mm:ss".
+  const char kDateTime[] = __DATE__ " " __TIME__ " PST";
+  bool result = Time::FromString(kDateTime, &integral_build_time);
+  DCHECK(result);
+  return integral_build_time;
+}
+
+}  // namespace base

+ 25 - 0
base/build_time.h

@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BUILD_TIME_
+#define BASE_BUILD_TIME_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// GetBuildTime returns the time at which the current binary was built.
+//
+// This uses the __DATE__ and __TIME__ macros, which don't trigger a rebuild
+// when they change. However, official builds will always be rebuilt from
+// scratch.
+//
+// Also, since __TIME__ doesn't include a timezone, this value should only be
+// considered accurate to a day.
+Time BASE_EXPORT GetBuildTime();
+
+}  // namespace base
+
+#endif  // BASE_BUILD_TIME_

+ 770 - 0
base/callback.h

@@ -0,0 +1,770 @@
+// This file was GENERATED by command:
+//     pump.py callback.h.pump
+// DO NOT EDIT BY HAND!!!
+
+
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_H_
+#define BASE_CALLBACK_H_
+
+#include "base/callback_forward.h"
+#include "base/callback_internal.h"
+#include "base/type_traits.h"
+
+// NOTE: Header files that do not require the full definition of Callback or
+// Closure should #include "base/callback_forward.h" instead of this file.
+
+// -----------------------------------------------------------------------------
+// Introduction
+// -----------------------------------------------------------------------------
+//
+// The templated Callback class is a generalized function object. Together
+// with the Bind() function in bind.h, they provide a type-safe method for
+// performing partial application of functions.
+//
+// Partial application (or "currying") is the process of binding a subset of
+// a function's arguments to produce another function that takes fewer
+// arguments. This can be used to pass around a unit of delayed execution,
+// much like lexical closures are used in other languages. For example, it
+// is used in Chromium code to schedule tasks on different MessageLoops.
+//
+// A callback with no unbound input parameters (base::Callback<void(void)>)
+// is called a base::Closure. Note that this is NOT the same as what other
+// languages refer to as a closure -- it does not retain a reference to its
+// enclosing environment.
+//
+// MEMORY MANAGEMENT AND PASSING
+//
+// The Callback objects themselves should be passed by const-reference, and
+// stored by copy. They internally store their state via a refcounted class
+// and thus do not need to be deleted.
+//
+// The reason to pass via a const-reference is to avoid unnecessary
+// AddRef/Release pairs to the internal state.
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for basic stuff
+// -----------------------------------------------------------------------------
+//
+// BINDING A BARE FUNCTION
+//
+//   int Return5() { return 5; }
+//   base::Callback<int(void)> func_cb = base::Bind(&Return5);
+//   LOG(INFO) << func_cb.Run();  // Prints 5.
+//
+// BINDING A CLASS METHOD
+//
+//   The first argument to bind is the member function to call, the second is
+//   the object on which to call it.
+//
+//   class Ref : public base::RefCountedThreadSafe<Ref> {
+//    public:
+//     int Foo() { return 3; }
+//     void PrintBye() { LOG(INFO) << "bye."; }
+//   };
+//   scoped_refptr<Ref> ref = new Ref();
+//   base::Callback<void(void)> ref_cb = base::Bind(&Ref::Foo, ref);
+//   LOG(INFO) << ref_cb.Run();  // Prints out 3.
+//
+//   By default the object must support RefCounted or you will get a compiler
+//   error. If you're passing between threads, be sure it's
+//   RefCountedThreadSafe! See "Advanced binding of member functions" below if
+//   you don't want to use reference counting.
+//
+// RUNNING A CALLBACK
+//
+//   Callbacks can be run with their "Run" method, which has the same
+//   signature as the template argument to the callback.
+//
+//   void DoSomething(const base::Callback<void(int, std::string)>& callback) {
+//     callback.Run(5, "hello");
+//   }
+//
+//   Callbacks can be run more than once (they don't get deleted or marked when
+//   run). However, this precludes using base::Passed (see below).
+//
+//   void DoSomething(const base::Callback<double(double)>& callback) {
+//     double myresult = callback.Run(3.14159);
+//     myresult += callback.Run(2.71828);
+//   }
+//
+// PASSING UNBOUND INPUT PARAMETERS
+//
+//   Unbound parameters are specified at the time a callback is Run(). They are
+//   specified in the Callback template type:
+//
+//   void MyFunc(int i, const std::string& str) {}
+//   base::Callback<void(int, const std::string&)> cb = base::Bind(&MyFunc);
+//   cb.Run(23, "hello, world");
+//
+// PASSING BOUND INPUT PARAMETERS
+//
+//   Bound parameters are specified when you create the callback as arguments
+//   to Bind(). They will be passed to the function and the Run()ner of the
+//   callback doesn't see those values or even know that the function it's
+//   calling.
+//
+//   void MyFunc(int i, const std::string& str) {}
+//   base::Callback<void(void)> cb = base::Bind(&MyFunc, 23, "hello world");
+//   cb.Run();
+//
+//   A callback with no unbound input parameters (base::Callback<void(void)>)
+//   is called a base::Closure. So we could have also written:
+//
+//   base::Closure cb = base::Bind(&MyFunc, 23, "hello world");
+//
+//   When calling member functions, bound parameters just go after the object
+//   pointer.
+//
+//   base::Closure cb = base::Bind(&MyClass::MyFunc, this, 23, "hello world");
+//
+// PARTIAL BINDING OF PARAMETERS
+//
+//   You can specify some parameters when you create the callback, and specify
+//   the rest when you execute the callback.
+//
+//   void MyFunc(int i, const std::string& str) {}
+//   base::Callback<void(const std::string&)> cb = base::Bind(&MyFunc, 23);
+//   cb.Run("hello world");
+//
+//   When calling a function bound parameters are first, followed by unbound
+//   parameters.
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for advanced binding
+// -----------------------------------------------------------------------------
+//
+// BINDING A CLASS METHOD WITH WEAK POINTERS
+//
+//   base::Bind(&MyClass::Foo, GetWeakPtr());
+//
+//   The callback will not be run if the object has already been destroyed.
+//   DANGER: weak pointers are not threadsafe, so don't use this
+//   when passing between threads!
+//
+// BINDING A CLASS METHOD WITH MANUAL LIFETIME MANAGEMENT
+//
+//   base::Bind(&MyClass::Foo, base::Unretained(this));
+//
+//   This disables all lifetime management on the object. You're responsible
+//   for making sure the object is alive at the time of the call. You break it,
+//   you own it!
+//
+// BINDING A CLASS METHOD AND HAVING THE CALLBACK OWN THE CLASS
+//
+//   MyClass* myclass = new MyClass;
+//   base::Bind(&MyClass::Foo, base::Owned(myclass));
+//
+//   The object will be deleted when the callback is destroyed, even if it's
+//   not run (like if you post a task during shutdown). Potentially useful for
+//   "fire and forget" cases.
+//
+// IGNORING RETURN VALUES
+//
+//   Sometimes you want to call a function that returns a value in a callback
+//   that doesn't expect a return value.
+//
+//   int DoSomething(int arg) { cout << arg << endl; }
+//   base::Callback<void(int)> cb =
+//       base::Bind(base::IgnoreResult(&DoSomething));
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for binding parameters to Bind()
+// -----------------------------------------------------------------------------
+//
+// Bound parameters are specified as arguments to Bind() and are passed to the
+// function. A callback with no parameters or no unbound parameters is called a
+// Closure (base::Callback<void(void)> and base::Closure are the same thing).
+//
+// PASSING PARAMETERS OWNED BY THE CALLBACK
+//
+//   void Foo(int* arg) { cout << *arg << endl; }
+//   int* pn = new int(1);
+//   base::Closure foo_callback = base::Bind(&foo, base::Owned(pn));
+//
+//   The parameter will be deleted when the callback is destroyed, even if it's
+//   not run (like if you post a task during shutdown).
+//
+// PASSING PARAMETERS AS A scoped_ptr
+//
+//   void TakesOwnership(scoped_ptr<Foo> arg) {}
+//   scoped_ptr<Foo> f(new Foo);
+//   // f becomes null during the following call.
+//   base::Closure cb = base::Bind(&TakesOwnership, base::Passed(&f));
+//
+//   Ownership of the parameter will be with the callback until the it is run,
+//   when ownership is passed to the callback function. This means the callback
+//   can only be run once. If the callback is never run, it will delete the
+//   object when it's destroyed.
+//
+// PASSING PARAMETERS AS A scoped_refptr
+//
+//   void TakesOneRef(scoped_refptr<Foo> arg) {}
+//   scoped_refptr<Foo> f(new Foo)
+//   base::Closure cb = base::Bind(&TakesOneRef, f);
+//
+//   This should "just work." The closure will take a reference as long as it
+//   is alive, and another reference will be taken for the called function.
+//
+// PASSING PARAMETERS BY REFERENCE
+//
+//   Const references are *copied* unless ConstRef is used. Example:
+//
+//   void foo(const int& arg) { printf("%d %p\n", arg, &arg); }
+//   int n = 1;
+//   base::Closure has_copy = base::Bind(&foo, n);
+//   base::Closure has_ref = base::Bind(&foo, base::ConstRef(n));
+//   n = 2;
+//   foo(n);                        // Prints "2 0xaaaaaaaaaaaa"
+//   has_copy.Run();                // Prints "1 0xbbbbbbbbbbbb"
+//   has_ref.Run();                 // Prints "2 0xaaaaaaaaaaaa"
+//
+//   Normally parameters are copied in the closure. DANGER: ConstRef stores a
+//   const reference instead, referencing the original parameter. This means
+//   that you must ensure the object outlives the callback!
+//
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// WHERE IS THIS DESIGN FROM:
+//
+// The design Callback and Bind is heavily influenced by C++'s
+// tr1::function/tr1::bind, and by the "Google Callback" system used inside
+// Google.
+//
+//
+// HOW THE IMPLEMENTATION WORKS:
+//
+// There are three main components to the system:
+//   1) The Callback classes.
+//   2) The Bind() functions.
+//   3) The arguments wrappers (e.g., Unretained() and ConstRef()).
+//
+// The Callback classes represent a generic function pointer. Internally,
+// it stores a refcounted piece of state that represents the target function
+// and all its bound parameters.  Each Callback specialization has a templated
+// constructor that takes an BindState<>*.  In the context of the constructor,
+// the static type of this BindState<> pointer uniquely identifies the
+// function it is representing, all its bound parameters, and a Run() method
+// that is capable of invoking the target.
+//
+// Callback's constructor takes the BindState<>* that has the full static type
+// and erases the target function type as well as the types of the bound
+// parameters.  It does this by storing a pointer to the specific Run()
+// function, and upcasting the state of BindState<>* to a
+// BindStateBase*. This is safe as long as this BindStateBase pointer
+// is only used with the stored Run() pointer.
+//
+// The BindState<> objects are created inside the Bind() functions.
+// These functions, along with a set of internal templates, are responsible for
+//
+//  - Unwrapping the function signature into return type, and parameters
+//  - Determining the number of parameters that are bound
+//  - Creating the BindState storing the bound parameters
+//  - Performing compile-time asserts to avoid error-prone behavior
+//  - Returning an Callback<> with an arity matching the number of unbound
+//    parameters and that knows the correct refcounting semantics for the
+//    target object if we are binding a method.
+//
+// The Bind functions do the above using type-inference, and template
+// specializations.
+//
+// By default Bind() will store copies of all bound parameters, and attempt
+// to refcount a target object if the function being bound is a class method.
+// These copies are created even if the function takes parameters as const
+// references. (Binding to non-const references is forbidden, see bind.h.)
+//
+// To change this behavior, we introduce a set of argument wrappers
+// (e.g., Unretained(), and ConstRef()).  These are simple container templates
+// that are passed by value, and wrap a pointer to argument.  See the
+// file-level comment in base/bind_helpers.h for more info.
+//
+// These types are passed to the Unwrap() functions, and the MaybeRefcount()
+// functions respectively to modify the behavior of Bind().  The Unwrap()
+// and MaybeRefcount() functions change behavior by doing partial
+// specialization based on whether or not a parameter is a wrapper type.
+//
+// ConstRef() is similar to tr1::cref.  Unretained() is specific to Chromium.
+//
+//
+// WHY NOT TR1 FUNCTION/BIND?
+//
+// Direct use of tr1::function and tr1::bind was considered, but ultimately
+// rejected because of the number of copy constructors invocations involved
+// in the binding of arguments during construction, and the forwarding of
+// arguments during invocation.  These copies will no longer be an issue in
+// C++0x because C++0x will support rvalue reference allowing for the compiler
+// to avoid these copies.  However, waiting for C++0x is not an option.
+//
+// Measured with valgrind on gcc version 4.4.3 (Ubuntu 4.4.3-4ubuntu5), the
+// tr1::bind call itself will invoke a non-trivial copy constructor three times
+// for each bound parameter.  Also, when passing a tr1::function, each
+// bound argument will be copied again.
+//
+// In addition to the copies taken at binding and invocation, copying a
+// tr1::function causes a copy to be made of all the bound parameters and
+// state.
+//
+// Furthermore, in Chromium, it is desirable for the Callback to take a
+// reference on a target object when representing a class method call.  This
+// is not supported by tr1.
+//
+// Lastly, tr1::function and tr1::bind has a more general and flexible API.
+// This includes things like argument reordering by use of
+// tr1::bind::placeholder, support for non-const reference parameters, and some
+// limited amount of subtyping of the tr1::function object (e.g.,
+// tr1::function<int(int)> is convertible to tr1::function<void(int)>).
+//
+// These are not features that are required in Chromium. Some of them, such as
+// allowing for reference parameters, and subtyping of functions, may actually
+// become a source of errors. Removing support for these features actually
+// allows for a simpler implementation, and a terser Currying API.
+//
+//
+// WHY NOT GOOGLE CALLBACKS?
+//
+// The Google callback system also does not support refcounting.  Furthermore,
+// its implementation has a number of strange edge cases with respect to type
+// conversion of its arguments.  In particular, the argument's constness must
+// at times match exactly the function signature, or the type-inference might
+// break.  Given the above, writing a custom solution was easier.
+//
+//
+// MISSING FUNCTIONALITY
+//  - Invoking the return of Bind.  Bind(&foo).Run() does not work;
+//  - Binding arrays to functions that take a non-const pointer.
+//    Example:
+//      void Foo(const char* ptr);
+//      void Bar(char* ptr);
+//      Bind(&Foo, "test");
+//      Bind(&Bar, "test");  // This fails because ptr is not const.
+
+namespace base {
+
+// First, we forward declare the Callback class template. This informs the
+// compiler that the template only has 1 type parameter which is the function
+// signature that the Callback is representing.
+//
+// After this, create template specializations for 0-7 parameters. Note that
+// even though the template typelist grows, the specialization still
+// only has one type: the function signature.
+//
+// If you are thinking of forward declaring Callback in your own header file,
+// please include "base/callback_forward.h" instead.
+// Primary template, intentionally left undefined: only the per-arity
+// function-signature specializations below (0..7 arguments) are instantiable.
+template <typename Sig>
+class Callback;
+
+namespace internal {
+// Defined in bind_internal.h; stores the bound target and arguments created
+// by Bind().
+template <typename Runnable, typename RunType, typename BoundArgsType>
+struct BindState;
+}  // namespace internal
+
+// Pump-generated specialization: callback taking zero arguments.
+template <typename R>
+class Callback<R(void)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)();
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run() const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get());
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*);
+
+};
+
+// Pump-generated specialization: callback taking one argument.
+template <typename R, typename A1>
+class Callback<R(A1)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType);
+
+};
+
+// Pump-generated specialization: callback taking two arguments.
+template <typename R, typename A1, typename A2>
+class Callback<R(A1, A2)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1, A2);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1,
+        typename internal::CallbackParamTraits<A2>::ForwardType a2) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1),
+             internal::CallbackForward(a2));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType,
+          typename internal::CallbackParamTraits<A2>::ForwardType);
+
+};
+
+// Pump-generated specialization: callback taking three arguments.
+template <typename R, typename A1, typename A2, typename A3>
+class Callback<R(A1, A2, A3)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1, A2, A3);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1,
+        typename internal::CallbackParamTraits<A2>::ForwardType a2,
+        typename internal::CallbackParamTraits<A3>::ForwardType a3) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1),
+             internal::CallbackForward(a2),
+             internal::CallbackForward(a3));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType,
+          typename internal::CallbackParamTraits<A2>::ForwardType,
+          typename internal::CallbackParamTraits<A3>::ForwardType);
+
+};
+
+// Pump-generated specialization: callback taking four arguments.
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class Callback<R(A1, A2, A3, A4)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1, A2, A3, A4);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1,
+        typename internal::CallbackParamTraits<A2>::ForwardType a2,
+        typename internal::CallbackParamTraits<A3>::ForwardType a3,
+        typename internal::CallbackParamTraits<A4>::ForwardType a4) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1),
+             internal::CallbackForward(a2),
+             internal::CallbackForward(a3),
+             internal::CallbackForward(a4));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType,
+          typename internal::CallbackParamTraits<A2>::ForwardType,
+          typename internal::CallbackParamTraits<A3>::ForwardType,
+          typename internal::CallbackParamTraits<A4>::ForwardType);
+
+};
+
+// Pump-generated specialization: callback taking five arguments.
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+class Callback<R(A1, A2, A3, A4, A5)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1, A2, A3, A4, A5);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1,
+        typename internal::CallbackParamTraits<A2>::ForwardType a2,
+        typename internal::CallbackParamTraits<A3>::ForwardType a3,
+        typename internal::CallbackParamTraits<A4>::ForwardType a4,
+        typename internal::CallbackParamTraits<A5>::ForwardType a5) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1),
+             internal::CallbackForward(a2),
+             internal::CallbackForward(a3),
+             internal::CallbackForward(a4),
+             internal::CallbackForward(a5));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType,
+          typename internal::CallbackParamTraits<A2>::ForwardType,
+          typename internal::CallbackParamTraits<A3>::ForwardType,
+          typename internal::CallbackParamTraits<A4>::ForwardType,
+          typename internal::CallbackParamTraits<A5>::ForwardType);
+
+};
+
+// Pump-generated specialization: callback taking six arguments.
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+class Callback<R(A1, A2, A3, A4, A5, A6)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1, A2, A3, A4, A5, A6);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1,
+        typename internal::CallbackParamTraits<A2>::ForwardType a2,
+        typename internal::CallbackParamTraits<A3>::ForwardType a3,
+        typename internal::CallbackParamTraits<A4>::ForwardType a4,
+        typename internal::CallbackParamTraits<A5>::ForwardType a5,
+        typename internal::CallbackParamTraits<A6>::ForwardType a6) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1),
+             internal::CallbackForward(a2),
+             internal::CallbackForward(a3),
+             internal::CallbackForward(a4),
+             internal::CallbackForward(a5),
+             internal::CallbackForward(a6));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType,
+          typename internal::CallbackParamTraits<A2>::ForwardType,
+          typename internal::CallbackParamTraits<A3>::ForwardType,
+          typename internal::CallbackParamTraits<A4>::ForwardType,
+          typename internal::CallbackParamTraits<A5>::ForwardType,
+          typename internal::CallbackParamTraits<A6>::ForwardType);
+
+};
+
+// Pump-generated specialization: callback taking seven arguments (the
+// maximum arity supported by this generated header).
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+class Callback<R(A1, A2, A3, A4, A5, A6, A7)> : public internal::CallbackBase {
+ public:
+  typedef R(RunType)(A1, A2, A3, A4, A5, A6, A7);
+
+  Callback() : CallbackBase(NULL) { }
+
+  // Note that this constructor CANNOT be explicit, and that Bind() CANNOT
+  // return the exact Callback<> type.  See base/bind.h for details.
+  template <typename Runnable, typename BindRunType, typename BoundArgsType>
+  Callback(internal::BindState<Runnable, BindRunType,
+           BoundArgsType>* bind_state)
+      : CallbackBase(bind_state) {
+
+    // Force the assignment to a local variable of PolymorphicInvoke
+    // so the compiler will typecheck that the passed in Run() method has
+    // the correct type.
+    PolymorphicInvoke invoke_func =
+        &internal::BindState<Runnable, BindRunType, BoundArgsType>
+            ::InvokerType::Run;
+    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  bool Equals(const Callback& other) const {
+    return CallbackBase::Equals(other);
+  }
+
+  R Run(typename internal::CallbackParamTraits<A1>::ForwardType a1,
+        typename internal::CallbackParamTraits<A2>::ForwardType a2,
+        typename internal::CallbackParamTraits<A3>::ForwardType a3,
+        typename internal::CallbackParamTraits<A4>::ForwardType a4,
+        typename internal::CallbackParamTraits<A5>::ForwardType a5,
+        typename internal::CallbackParamTraits<A6>::ForwardType a6,
+        typename internal::CallbackParamTraits<A7>::ForwardType a7) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
+
+    return f(bind_state_.get(), internal::CallbackForward(a1),
+             internal::CallbackForward(a2),
+             internal::CallbackForward(a3),
+             internal::CallbackForward(a4),
+             internal::CallbackForward(a5),
+             internal::CallbackForward(a6),
+             internal::CallbackForward(a7));
+  }
+
+ private:
+  typedef R(*PolymorphicInvoke)(
+      internal::BindStateBase*,
+          typename internal::CallbackParamTraits<A1>::ForwardType,
+          typename internal::CallbackParamTraits<A2>::ForwardType,
+          typename internal::CallbackParamTraits<A3>::ForwardType,
+          typename internal::CallbackParamTraits<A4>::ForwardType,
+          typename internal::CallbackParamTraits<A5>::ForwardType,
+          typename internal::CallbackParamTraits<A6>::ForwardType,
+          typename internal::CallbackParamTraits<A7>::ForwardType);
+
+};
+
+
+// Syntactic sugar to make Callback<void(void)> easier to declare since it
+// will be used in a lot of APIs with delayed execution.
+typedef Callback<void(void)> Closure;
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_H_

+ 17 - 0
base/callback_forward.h

@@ -0,0 +1,17 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_FORWARD_H_
+#define BASE_CALLBACK_FORWARD_H_
+
+namespace base {
+
+// Forward declaration only; include "base/callback.h" for the definition.
+template <typename Sig>
+class Callback;
+
+// A Closure is a Callback with no unbound arguments and no return value.
+typedef Callback<void(void)> Closure;
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_FORWARD_H_

+ 42 - 0
base/callback_helpers.cc

@@ -0,0 +1,42 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_helpers.h"
+
+#include "base/callback.h"
+
+namespace base {
+
// Default-constructed runner holds a null closure; nothing runs at scope exit.
ScopedClosureRunner::ScopedClosureRunner() {
}

// Holds a copy of |closure| and will run it when this object is destroyed.
ScopedClosureRunner::ScopedClosureRunner(const Closure& closure)
    : closure_(closure) {
}

// Runs the held closure (if any) on scope exit.
ScopedClosureRunner::~ScopedClosureRunner() {
  if (!closure_.is_null())
    closure_.Run();
}

// Runs and drops the current closure, leaving the runner empty.
void ScopedClosureRunner::Reset() {
  Closure old_closure = Release();
  if (!old_closure.is_null())
    old_closure.Run();
}

// Replaces the held closure with |closure| and runs the previous one.
// Note the new closure is installed *before* the old one runs, so a
// re-entrant call observes the new state.
void ScopedClosureRunner::Reset(const Closure& closure) {
  Closure old_closure = Release();
  closure_ = closure;
  if (!old_closure.is_null())
    old_closure.Run();
}

// Hands the closure back to the caller without running it; the runner
// becomes empty.
Closure ScopedClosureRunner::Release() {
  Closure result = closure_;
  closure_.Reset();
  return result;
}
+
+}  // namespace base

+ 50 - 0
base/callback_helpers.h

@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This defines helpful methods for dealing with Callbacks.  Because Callbacks
+// are implemented using templates, with a class per callback signature, adding
+// methods to Callback<> itself is unattractive (lots of extra code gets
+// generated).  Instead, consider adding methods here.
+//
+// ResetAndReturn(&cb) is like cb.Reset() but allows executing a callback (via a
+// copy) after the original callback is Reset().  This can be handy if Run()
+// reads/writes the variable holding the Callback.
+
+#ifndef BASE_CALLBACK_HELPERS_H_
+#define BASE_CALLBACK_HELPERS_H_
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+template <typename Sig>
+base::Callback<Sig> ResetAndReturn(base::Callback<Sig>* cb) {
+  base::Callback<Sig> ret(*cb);
+  cb->Reset();
+  return ret;
+}
+
// ScopedClosureRunner is akin to scoped_ptr for Closures. It ensures that the
// Closure is executed and deleted no matter how the current scope exits.
class BASE_EXPORT ScopedClosureRunner {
 public:
  // Constructs an empty runner; nothing runs at destruction.
  ScopedClosureRunner();
  // Runs |closure| when this object goes out of scope.
  explicit ScopedClosureRunner(const Closure& closure);
  ~ScopedClosureRunner();

  // Runs (and clears) the current closure immediately.
  void Reset();
  // Runs the current closure, then replaces it with |closure|.
  void Reset(const Closure& closure);
  // Relinquishes the closure to the caller without running it.
  Closure Release() WARN_UNUSED_RESULT;

 private:
  // Closure to run at destruction; null when empty or released.
  Closure closure_;

  DISALLOW_COPY_AND_ASSIGN(ScopedClosureRunner);
};
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_HELPERS_H_

+ 38 - 0
base/callback_internal.cc

@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_internal.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
// A null callback is one that refers to no bind state at all.
bool CallbackBase::is_null() const {
  return bind_state_.get() == NULL;
}

void CallbackBase::Reset() {
  polymorphic_invoke_ = NULL;
  // NULL the bind_state_ last, since it may be holding the last ref to whatever
  // object owns us, and we may be deleted after that.
  bind_state_ = NULL;
}

// Equal iff both the bind state and the invoke function match; two null
// callbacks therefore compare equal.
bool CallbackBase::Equals(const CallbackBase& other) const {
  return bind_state_.get() == other.bind_state_.get() &&
         polymorphic_invoke_ == other.polymorphic_invoke_;
}

CallbackBase::CallbackBase(BindStateBase* bind_state)
    : bind_state_(bind_state),
      polymorphic_invoke_(NULL) {
  // A freshly created bind state must not be shared yet: the caller hands us
  // sole ownership of it.
  DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
}

CallbackBase::~CallbackBase() {
}
+
+}  // namespace internal
+}  // namespace base

+ 178 - 0
base/callback_internal.h

@@ -0,0 +1,178 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions and classes that help the
+// implementation, and management of the Callback objects.
+
+#ifndef BASE_CALLBACK_INTERNAL_H_
+#define BASE_CALLBACK_INTERNAL_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+
+template <typename T>
+class ScopedVector;
+
+namespace base {
+namespace internal {
+
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments.  It
// behaves as an existential type that is used by a corresponding
// DoInvoke function to perform the function execution.  This allows
// us to shield the Callback class from the types of the bound argument via
// "type erasure."
class BindStateBase : public RefCountedThreadSafe<BindStateBase> {
 protected:
  friend class RefCountedThreadSafe<BindStateBase>;
  virtual ~BindStateBase() {}
};

// Holds the Callback methods that don't require specialization to reduce
// template bloat.
class BASE_EXPORT CallbackBase {
 public:
  // Returns true if Callback is null (doesn't refer to anything).
  bool is_null() const;

  // Returns the Callback into an uninitialized state.
  void Reset();

 protected:
  // In C++, it is safe to cast function pointers to function pointers of
  // another type. It is not okay to use void*. We create a InvokeFuncStorage
  // that can store our function pointer, and then cast it back to
  // the original type on usage.
  typedef void(*InvokeFuncStorage)(void);

  // Returns true if this callback equals |other|. |other| may be null.
  bool Equals(const CallbackBase& other) const;

  // Allow initializing of |bind_state_| via the constructor to avoid default
  // initialization of the scoped_refptr.  We do not also initialize
  // |polymorphic_invoke_| here because doing a normal assignment in the
  // derived Callback templates makes for much nicer compiler errors.
  explicit CallbackBase(BindStateBase* bind_state);

  // Force the destructor to be instantiated inside this translation unit so
  // that our subclasses will not get inlined versions.  Avoids more template
  // bloat.
  ~CallbackBase();

  scoped_refptr<BindStateBase> bind_state_;
  InvokeFuncStorage polymorphic_invoke_;
};

// A helper template to determine if given type is non-const move-only-type,
// i.e. if a value of the given type should be passed via .Pass() in a
// destructive way.
template <typename T> struct IsMoveOnlyType {
  // SFINAE probe: this overload is viable only when U declares the marker
  // typedef MoveOnlyTypeForCPP03 (the convention used by movable scopers).
  template <typename U>
  static YesType Test(const typename U::MoveOnlyTypeForCPP03*);

  // Fallback overload for all other types.
  template <typename U>
  static NoType Test(...);

  // sizeof() of the selected overload's return type tells us which one
  // matched without ever calling Test(); const types are excluded since
  // they cannot be destructively moved from.
  static const bool value = sizeof(Test<T>(0)) == sizeof(YesType) &&
                            !is_const<T>::value;
};
+
// This is a typetraits object that's used to take an argument type, and
// extract a suitable type for storing and forwarding arguments.
//
// In particular, it strips off references, and converts arrays to
// pointers for storage; and it avoids accidentally trying to create a
// "reference of a reference" if the argument is a reference type.
//
// This array type becomes an issue for storage because we are passing bound
// parameters by const reference. In this case, we end up passing an actual
// array type in the initializer list which C++ does not allow.  This will
// break passing of C-string literals.
template <typename T, bool is_move_only = IsMoveOnlyType<T>::value>
struct CallbackParamTraits {
  typedef const T& ForwardType;
  typedef T StorageType;
};

// The Storage should almost be impossible to trigger unless someone manually
// specifies type of the bind parameters.  However, in case they do,
// this will guard against us accidentally storing a reference parameter.
//
// The ForwardType should only be used for unbound arguments.
template <typename T>
struct CallbackParamTraits<T&, false> {
  typedef T& ForwardType;
  typedef T StorageType;  // reference stripped for storage
};

// Note that for array types, we implicitly add a const in the conversion. This
// means that it is not possible to bind array arguments to functions that take
// a non-const pointer. Trying to specialize the template based on a "const
// T[n]" does not seem to match correctly, so we are stuck with this
// restriction.
template <typename T, size_t n>
struct CallbackParamTraits<T[n], false> {
  typedef const T* ForwardType;
  typedef const T* StorageType;
};

// See comment for CallbackParamTraits<T[n]>.
template <typename T>
struct CallbackParamTraits<T[], false> {
  typedef const T* ForwardType;
  typedef const T* StorageType;
};

// Parameter traits for movable-but-not-copyable scopers.
//
// Callback<>/Bind() understands movable-but-not-copyable semantics where
// the type cannot be copied but can still have its state destructively
// transferred (aka. moved) to another instance of the same type by calling a
// helper function.  When used with Bind(), this signifies transferal of the
// object's state to the target function.
//
// For these types, the ForwardType must not be a const reference, or a
// reference.  A const reference is inappropriate, and would break const
// correctness, because we are implementing a destructive move.  A non-const
// reference cannot be used with temporaries which means the result of a
// function or a cast would not be usable with Callback<> or Bind().
template <typename T>
struct CallbackParamTraits<T, true> {
  typedef T ForwardType;
  typedef T StorageType;
};

// CallbackForward() is a very limited simulation of C++11's std::forward()
// used by the Callback/Bind system for a set of movable-but-not-copyable
// types.  It is needed because forwarding a movable-but-not-copyable
// argument to another function requires us to invoke the proper move
// operator to create a rvalue version of the type.  The supported types are
// whitelisted below as overloads of the CallbackForward() function. The
// default template compiles out to be a no-op.
//
// In C++11, std::forward would replace all uses of this function.  However, it
// is impossible to implement a general std::forward with C++11 due to a lack
// of rvalue references.
//
// In addition to Callback/Bind, this is used by PostTaskAndReplyWithResult to
// simulate std::forward() and forward the result of one Callback as a
// parameter to another callback. This is to support Callbacks that return
// the movable-but-not-copyable types whitelisted above.
// Copyable types pass through unchanged.
template <typename T>
typename enable_if<!IsMoveOnlyType<T>::value, T>::type& CallbackForward(T& t) {
  return t;
}

// Move-only types are destructively transferred via Pass().
template <typename T>
typename enable_if<IsMoveOnlyType<T>::value, T>::type CallbackForward(T& t) {
  return t.Pass();
}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_CALLBACK_INTERNAL_H_

+ 406 - 0
base/callback_list.h

@@ -0,0 +1,406 @@
+// This file was GENERATED by command:
+//     pump.py callback_list.h.pump
+// DO NOT EDIT BY HAND!!!
+
+
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_LIST_H_
+#define BASE_CALLBACK_LIST_H_
+
+#include <list>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/callback_internal.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+
+// OVERVIEW:
+//
+// A container for a list of callbacks.  Unlike a normal STL vector or list,
+// this container can be modified during iteration without invalidating the
+// iterator. It safely handles the case of a callback removing itself
+// or another callback from the list while callbacks are being run.
+//
+// TYPICAL USAGE:
+//
+// class MyWidget {
+//  public:
+//   ...
+//
+//   typedef base::Callback<void(const Foo&)> OnFooCallback;
+//
+//   scoped_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//   RegisterCallback(const OnFooCallback& cb) {
+//     return callback_list_.Add(cb);
+//   }
+//
+//  private:
+//   void NotifyFoo(const Foo& foo) {
+//      callback_list_.Notify(foo);
+//   }
+//
+//   base::CallbackList<void(const Foo&)> callback_list_;
+//
+//   DISALLOW_COPY_AND_ASSIGN(MyWidget);
+// };
+//
+//
+// class MyWidgetListener {
+//  public:
+//   MyWidgetListener::MyWidgetListener() {
+//     foo_subscription_ = MyWidget::GetCurrent()->RegisterCallback(
+//             base::Bind(&MyWidgetListener::OnFoo, this)));
+//   }
+//
+//   MyWidgetListener::~MyWidgetListener() {
+//      // Subscription gets deleted automatically and will deregister
+//      // the callback in the process.
+//   }
+//
+//  private:
+//   void OnFoo(const Foo& foo) {
+//     // Do something.
+//   }
+//
+//   scoped_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//       foo_subscription_;
+//
+//   DISALLOW_COPY_AND_ASSIGN(MyWidgetListener);
+// };
+
+namespace base {
+
+namespace internal {
+
+template <typename CallbackType>
+class CallbackListBase {
+ public:
+  class Subscription {
+   public:
+    Subscription(CallbackListBase<CallbackType>* list,
+                 typename std::list<CallbackType>::iterator iter)
+        : list_(list),
+          iter_(iter) {
+    }
+
+    ~Subscription() {
+      if (list_->active_iterator_count_) {
+        iter_->Reset();
+      } else {
+        list_->callbacks_.erase(iter_);
+        if (!list_->removal_callback_.is_null())
+          list_->removal_callback_.Run();
+      }
+    }
+
+   private:
+    CallbackListBase<CallbackType>* list_;
+    typename std::list<CallbackType>::iterator iter_;
+
+    DISALLOW_COPY_AND_ASSIGN(Subscription);
+  };
+
+  // Add a callback to the list. The callback will remain registered until the
+  // returned Subscription is destroyed, which must occur before the
+  // CallbackList is destroyed.
+  scoped_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
+    DCHECK(!cb.is_null());
+    return scoped_ptr<Subscription>(
+        new Subscription(this, callbacks_.insert(callbacks_.end(), cb)));
+  }
+
+  // Sets a callback which will be run when a subscription list is changed.
+  void set_removal_callback(const Closure& callback) {
+    removal_callback_ = callback;
+  }
+
+  // Returns true if there are no subscriptions. This is only valid to call when
+  // not looping through the list.
+  bool empty() {
+    DCHECK_EQ(0, active_iterator_count_);
+    return callbacks_.empty();
+  }
+
+ protected:
+  // An iterator class that can be used to access the list of callbacks.
+  class Iterator {
+   public:
+    explicit Iterator(CallbackListBase<CallbackType>* list)
+        : list_(list),
+          list_iter_(list_->callbacks_.begin()) {
+      ++list_->active_iterator_count_;
+    }
+
+    Iterator(const Iterator& iter)
+        : list_(iter.list_),
+          list_iter_(iter.list_iter_) {
+      ++list_->active_iterator_count_;
+    }
+
+    ~Iterator() {
+      if (list_ && --list_->active_iterator_count_ == 0) {
+        list_->Compact();
+      }
+    }
+
+    CallbackType* GetNext() {
+      while ((list_iter_ != list_->callbacks_.end()) && list_iter_->is_null())
+        ++list_iter_;
+
+      CallbackType* cb = NULL;
+      if (list_iter_ != list_->callbacks_.end()) {
+        cb = &(*list_iter_);
+        ++list_iter_;
+      }
+      return cb;
+    }
+
+   private:
+    CallbackListBase<CallbackType>* list_;
+    typename std::list<CallbackType>::iterator list_iter_;
+  };
+
+  CallbackListBase() : active_iterator_count_(0) {}
+
+  ~CallbackListBase() {
+    DCHECK_EQ(0, active_iterator_count_);
+    DCHECK_EQ(0U, callbacks_.size());
+  }
+
+  // Returns an instance of a CallbackListBase::Iterator which can be used
+  // to run callbacks.
+  Iterator GetIterator() {
+    return Iterator(this);
+  }
+
+  // Compact the list: remove any entries which were NULLed out during
+  // iteration.
+  void Compact() {
+    typename std::list<CallbackType>::iterator it = callbacks_.begin();
+    bool updated = false;
+    while (it != callbacks_.end()) {
+      if ((*it).is_null()) {
+        updated = true;
+        it = callbacks_.erase(it);
+      } else {
+        ++it;
+      }
+
+      if (updated && !removal_callback_.is_null())
+        removal_callback_.Run();
+    }
+  }
+
+ private:
+  std::list<CallbackType> callbacks_;
+  int active_iterator_count_;
+  Closure removal_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallbackListBase);
+};
+
+}  // namespace internal
+
// Pump-generated arity specializations: one CallbackList<void(A1..An)> per
// argument count, each forwarding Notify() arguments to every registered
// callback via CallbackListBase's cancellation-safe iterator.
template <typename Sig> class CallbackList;

// Zero-argument specialization.
template <>
class CallbackList<void(void)>
    : public internal::CallbackListBase<Callback<void(void)> > {
 public:
  typedef Callback<void(void)> CallbackType;

  CallbackList() {}

  // Runs every registered, non-cancelled callback in registration order.
  void Notify() {
    internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run();
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// One-argument specialization.
template <typename A1>
class CallbackList<void(A1)>
    : public internal::CallbackListBase<Callback<void(A1)> > {
 public:
  typedef Callback<void(A1)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// Two-argument specialization.
template <typename A1, typename A2>
class CallbackList<void(A1, A2)>
    : public internal::CallbackListBase<Callback<void(A1, A2)> > {
 public:
  typedef Callback<void(A1, A2)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1,
              typename internal::CallbackParamTraits<A2>::ForwardType a2) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1, a2);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// Three-argument specialization.
template <typename A1, typename A2, typename A3>
class CallbackList<void(A1, A2, A3)>
    : public internal::CallbackListBase<Callback<void(A1, A2, A3)> > {
 public:
  typedef Callback<void(A1, A2, A3)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1,
              typename internal::CallbackParamTraits<A2>::ForwardType a2,
              typename internal::CallbackParamTraits<A3>::ForwardType a3) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1, a2, a3);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// Four-argument specialization.
template <typename A1, typename A2, typename A3, typename A4>
class CallbackList<void(A1, A2, A3, A4)>
    : public internal::CallbackListBase<Callback<void(A1, A2, A3, A4)> > {
 public:
  typedef Callback<void(A1, A2, A3, A4)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1,
              typename internal::CallbackParamTraits<A2>::ForwardType a2,
              typename internal::CallbackParamTraits<A3>::ForwardType a3,
              typename internal::CallbackParamTraits<A4>::ForwardType a4) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1, a2, a3, a4);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// Five-argument specialization.
template <typename A1, typename A2, typename A3, typename A4, typename A5>
class CallbackList<void(A1, A2, A3, A4, A5)>
    : public internal::CallbackListBase<Callback<void(A1, A2, A3, A4, A5)> > {
 public:
  typedef Callback<void(A1, A2, A3, A4, A5)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1,
              typename internal::CallbackParamTraits<A2>::ForwardType a2,
              typename internal::CallbackParamTraits<A3>::ForwardType a3,
              typename internal::CallbackParamTraits<A4>::ForwardType a4,
              typename internal::CallbackParamTraits<A5>::ForwardType a5) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1, a2, a3, a4, a5);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// Six-argument specialization.
template <typename A1, typename A2, typename A3, typename A4, typename A5,
    typename A6>
class CallbackList<void(A1, A2, A3, A4, A5, A6)>
    : public internal::CallbackListBase<Callback<void(A1, A2, A3, A4, A5,
        A6)> > {
 public:
  typedef Callback<void(A1, A2, A3, A4, A5, A6)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1,
              typename internal::CallbackParamTraits<A2>::ForwardType a2,
              typename internal::CallbackParamTraits<A3>::ForwardType a3,
              typename internal::CallbackParamTraits<A4>::ForwardType a4,
              typename internal::CallbackParamTraits<A5>::ForwardType a5,
              typename internal::CallbackParamTraits<A6>::ForwardType a6) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1, a2, a3, a4, a5, a6);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};

// Seven-argument specialization.
template <typename A1, typename A2, typename A3, typename A4, typename A5,
    typename A6, typename A7>
class CallbackList<void(A1, A2, A3, A4, A5, A6, A7)>
    : public internal::CallbackListBase<Callback<void(A1, A2, A3, A4, A5, A6,
        A7)> > {
 public:
  typedef Callback<void(A1, A2, A3, A4, A5, A6, A7)> CallbackType;

  CallbackList() {}

  void Notify(typename internal::CallbackParamTraits<A1>::ForwardType a1,
              typename internal::CallbackParamTraits<A2>::ForwardType a2,
              typename internal::CallbackParamTraits<A3>::ForwardType a3,
              typename internal::CallbackParamTraits<A4>::ForwardType a4,
              typename internal::CallbackParamTraits<A5>::ForwardType a5,
              typename internal::CallbackParamTraits<A6>::ForwardType a6,
              typename internal::CallbackParamTraits<A7>::ForwardType a7) {
    typename internal::CallbackListBase<CallbackType>::Iterator it =
        this->GetIterator();
    CallbackType* cb;
    while ((cb = it.GetNext()) != NULL) {
      cb->Run(a1, a2, a3, a4, a5, a6, a7);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallbackList);
};
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_LIST_H_

+ 272 - 0
base/cancelable_callback.h

@@ -0,0 +1,272 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// CancelableCallback is a wrapper around base::Callback that allows
+// cancellation of a callback. CancelableCallback takes a reference on the
+// wrapped callback until this object is destroyed or Reset()/Cancel() are
+// called.
+//
+// NOTE:
+//
+// Calling CancelableCallback::Cancel() brings the object back to its natural,
+// default-constructed state, i.e., CancelableCallback::callback() will return
+// a null callback.
+//
+// THREAD-SAFETY:
+//
+// CancelableCallback objects must be created on, posted to, cancelled on, and
+// destroyed on the same thread.
+//
+//
+// EXAMPLE USAGE:
+//
+// In the following example, the test is verifying that RunIntensiveTest()
+// Quit()s the message loop within 4 seconds. The cancelable callback is posted
+// to the message loop, the intensive test runs, the message loop is run,
+// then the callback is cancelled.
+//
+// void TimeoutCallback(const std::string& timeout_message) {
+//   FAIL() << timeout_message;
+//   MessageLoop::current()->QuitWhenIdle();
+// }
+//
+// CancelableClosure timeout(base::Bind(&TimeoutCallback, "Test timed out."));
+// MessageLoop::current()->PostDelayedTask(FROM_HERE, timeout.callback(),
+//                                         4000)  // 4 seconds to run.
+// RunIntensiveTest();
+// MessageLoop::current()->Run();
+// timeout.Cancel();  // Hopefully this is hit before the timeout callback runs.
+//
+
+#ifndef BASE_CANCELABLE_CALLBACK_H_
+#define BASE_CANCELABLE_CALLBACK_H_
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_internal.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+template <typename Sig>
+class CancelableCallback;
+
+template <>
+class CancelableCallback<void(void)> {
+ public:
+  CancelableCallback() : weak_factory_(this) {}
+
+  // |callback| must not be null.
+  explicit CancelableCallback(const base::Callback<void(void)>& callback)
+      : weak_factory_(this),
+        callback_(callback) {
+    DCHECK(!callback.is_null());
+    InitializeForwarder();
+  }
+
+  ~CancelableCallback() {}
+
+  // Cancels and drops the reference to the wrapped callback.
+  void Cancel() {
+    weak_factory_.InvalidateWeakPtrs();
+    forwarder_.Reset();
+    callback_.Reset();
+  }
+
+  // Returns true if the wrapped callback has been cancelled.
+  bool IsCancelled() const {
+    return callback_.is_null();
+  }
+
+  // Sets |callback| as the closure that may be cancelled. |callback| may not
+  // be null. Outstanding and any previously wrapped callbacks are cancelled.
+  void Reset(const base::Callback<void(void)>& callback) {
+    DCHECK(!callback.is_null());
+
+    // Outstanding tasks (e.g., posted to a message loop) must not be called.
+    Cancel();
+
+    // |forwarder_| is no longer valid after Cancel(), so re-bind.
+    InitializeForwarder();
+
+    callback_ = callback;
+  }
+
+  // Returns a callback that can be disabled by calling Cancel().
+  const base::Callback<void(void)>& callback() const {
+    return forwarder_;
+  }
+
+ private:
+  void Forward() {
+    callback_.Run();
+  }
+
+  // Helper method to bind |forwarder_| using a weak pointer from
+  // |weak_factory_|.
+  void InitializeForwarder() {
+    forwarder_ = base::Bind(&CancelableCallback<void(void)>::Forward,
+                            weak_factory_.GetWeakPtr());
+  }
+
+  // Used to ensure Forward() is not run when this object is destroyed.
+  base::WeakPtrFactory<CancelableCallback<void(void)> > weak_factory_;
+
+  // The wrapper closure.
+  base::Callback<void(void)> forwarder_;
+
+  // The stored closure that may be cancelled.
+  base::Callback<void(void)> callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
+};
+
// One-argument specialization.
template <typename A1>
class CancelableCallback<void(A1)> {
 public:
  CancelableCallback() : weak_factory_(this) {}

  // |callback| must not be null.
  explicit CancelableCallback(const base::Callback<void(A1)>& callback)
      : weak_factory_(this),
        callback_(callback) {
    DCHECK(!callback.is_null());
    InitializeForwarder();
  }

  ~CancelableCallback() {}

  // Cancels and drops the reference to the wrapped callback.
  void Cancel() {
    weak_factory_.InvalidateWeakPtrs();
    forwarder_.Reset();
    callback_.Reset();
  }

  // Returns true if the wrapped callback has been cancelled.
  bool IsCancelled() const {
    return callback_.is_null();
  }

  // Sets |callback| as the closure that may be cancelled. |callback| may not
  // be null. Outstanding and any previously wrapped callbacks are cancelled.
  void Reset(const base::Callback<void(A1)>& callback) {
    DCHECK(!callback.is_null());

    // Outstanding tasks (e.g., posted to a message loop) must not be called.
    Cancel();

    // |forwarder_| is no longer valid after Cancel(), so re-bind.
    InitializeForwarder();

    callback_ = callback;
  }

  // Returns a callback that can be disabled by calling Cancel().
  const base::Callback<void(A1)>& callback() const {
    return forwarder_;
  }

 private:
  // Relays the argument to the wrapped callback; reached only while the
  // weak pointer bound in InitializeForwarder() is still valid.
  void Forward(A1 a1) const {
    callback_.Run(a1);
  }

  // Helper method to bind |forwarder_| using a weak pointer from
  // |weak_factory_|.
  void InitializeForwarder() {
    forwarder_ = base::Bind(&CancelableCallback<void(A1)>::Forward,
                            weak_factory_.GetWeakPtr());
  }

  // Used to ensure Forward() is not run when this object is destroyed.
  base::WeakPtrFactory<CancelableCallback<void(A1)> > weak_factory_;

  // The wrapper closure.
  base::Callback<void(A1)> forwarder_;

  // The stored closure that may be cancelled.
  base::Callback<void(A1)> callback_;

  DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
};

// Two-argument specialization.
template <typename A1, typename A2>
class CancelableCallback<void(A1, A2)> {
 public:
  CancelableCallback() : weak_factory_(this) {}

  // |callback| must not be null.
  explicit CancelableCallback(const base::Callback<void(A1, A2)>& callback)
      : weak_factory_(this),
        callback_(callback) {
    DCHECK(!callback.is_null());
    InitializeForwarder();
  }

  ~CancelableCallback() {}

  // Cancels and drops the reference to the wrapped callback.
  void Cancel() {
    weak_factory_.InvalidateWeakPtrs();
    forwarder_.Reset();
    callback_.Reset();
  }

  // Returns true if the wrapped callback has been cancelled.
  bool IsCancelled() const {
    return callback_.is_null();
  }

  // Sets |callback| as the closure that may be cancelled. |callback| may not
  // be null. Outstanding and any previously wrapped callbacks are cancelled.
  void Reset(const base::Callback<void(A1, A2)>& callback) {
    DCHECK(!callback.is_null());

    // Outstanding tasks (e.g., posted to a message loop) must not be called.
    Cancel();

    // |forwarder_| is no longer valid after Cancel(), so re-bind.
    InitializeForwarder();

    callback_ = callback;
  }

  // Returns a callback that can be disabled by calling Cancel().
  const base::Callback<void(A1, A2)>& callback() const {
    return forwarder_;
  }

 private:
  // Relays the arguments to the wrapped callback; reached only while the
  // weak pointer bound in InitializeForwarder() is still valid.
  void Forward(A1 a1, A2 a2) const {
    callback_.Run(a1, a2);
  }

  // Helper method to bind |forwarder_| using a weak pointer from
  // |weak_factory_|.
  void InitializeForwarder() {
    forwarder_ = base::Bind(&CancelableCallback<void(A1, A2)>::Forward,
                            weak_factory_.GetWeakPtr());
  }

  // Used to ensure Forward() is not run when this object is destroyed.
  base::WeakPtrFactory<CancelableCallback<void(A1, A2)> > weak_factory_;

  // The wrapper closure.
  base::Callback<void(A1, A2)> forwarder_;

  // The stored closure that may be cancelled.
  base::Callback<void(A1, A2)> callback_;

  DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
};

// Convenience alias for the common nullary case.
typedef CancelableCallback<void(void)> CancelableClosure;
+
+}  // namespace base
+
+#endif  // BASE_CANCELABLE_CALLBACK_H_

+ 50 - 0
base/class_name.cpp

@@ -0,0 +1,50 @@
+// Copyright (c) 2011 Baidu.com, Inc. All Rights Reserved
+//
+// Implement class_name.h
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Mon. Nov 7 14:47:36 CST 2011
+
+#include <cxxabi.h>                              // __cxa_demangle
+#include <string>                                // std::string
+#include <stdlib.h>                              // free()
+
+namespace base {
+
// Try to convert mangled |name| to human-readable name.
// Returns:
//   |name|    -  Fail to demangle |name|
//   otherwise -  demangled name
std::string demangle(const char* name) {
    // abi::__cxa_demangle(mangled, output_buffer, length, status):
    // with output_buffer == NULL the demangled name is returned in a
    // freshly malloc()-ed buffer that the caller must free().
    // *status is set to:
    //    0: success
    //   -1: memory allocation failure
    //   -2: |name| is not valid under the C++ ABI mangling rules
    //   -3: one of the arguments is invalid
    int status = 0;
    char* const demangled = abi::__cxa_demangle(name, NULL, NULL, &status);
    if (status != 0 || demangled == NULL) {
        // Demangling failed: hand back the mangled name unchanged.
        return std::string(name);
    }
    std::string readable(demangled);
    free(demangled);
    return readable;
}
+
+}  // namespace base
+
+

+ 45 - 0
base/class_name.h

@@ -0,0 +1,45 @@
+// Copyright (c) 2011 Baidu.com, Inc. All Rights Reserved
+//
+// Get name of a class. For example, class_name<T>() returns the name of T
+// (with namespace prefixes). This is useful in template classes.
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Mon. Nov 7 14:47:36 CST 2011
+
+#ifndef BRPC_BASE_CLASS_NAME_H
+#define BRPC_BASE_CLASS_NAME_H
+
+#include <typeinfo>
+#include <string>                                // std::string
+
+namespace base {
+
+std::string demangle(const char* name);
+
namespace detail {
// Caches the demangled name of |T| in a class-static std::string so the
// (relatively expensive) demangling runs once per type during static
// initialization rather than on every query.
template <typename T> struct ClassNameHelper { static std::string name; };
template <typename T> std::string ClassNameHelper<T>::name = demangle(typeid(T).name());
}
+
// Get name of class |T|, in std::string.
// The returned reference points at the class-static cached string and
// remains valid for the lifetime of the program.
template <typename T> const std::string& class_name_str() {
    // We don't use static-variable-inside-function because before C++11
    // local static variable is not guaranteed to be thread-safe.
    return detail::ClassNameHelper<T>::name;
}
+
// Get name of class |T|, in const char*.
// Address of returned name never changes: it points into the std::string
// cached by class_name_str<T>(), which lives until program exit.
template <typename T> const char* class_name() {
    return class_name_str<T>().c_str();
}
+
+// Get typename of |obj|, in std::string
+template <typename T> std::string class_name_str(T const& obj) {
+    extern std::string demangle(const char* name);
+    return demangle(typeid(obj).name());
+}
+
+}  // namespace base
+
+#endif  // BRPC_BASE_CLASS_NAME_H

+ 380 - 0
base/comlog_sink.cc

@@ -0,0 +1,380 @@
+// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
+//
+// Redirect LOG() into comlog.
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Mon Jul 20 12:39:39 CST 2015
+
+#include <com_log.h>
+#include "base/memory/singleton.h"
+#include "base/comlog_sink.h"
+#include "base/files/file_path.h"
+#include "base/fd_guard.h"
+#include "base/file_util.h"
+#include "base/endpoint.h"
+
+namespace logging {
+DECLARE_bool(log_year);
+DECLARE_bool(log_hostname);
+
// Options controlling how ComlogLayout renders the per-line prefix.
struct ComlogLayoutOptions {
    ComlogLayoutOptions() : shorter_log_level(true) {}

    // Use a single letter (e.g. 'W' instead of "WARNING") as the level tag.
    bool shorter_log_level;
};

// A comlog Layout rendering a compact prefix:
//   <level> <date> <time>.<usec> <thread-id> <message>
// Patterns configured in comlog are ignored by this layout.
class ComlogLayout : public comspace::Layout {
public:
    explicit ComlogLayout(const ComlogLayoutOptions* options);
    ~ComlogLayout();
    // Renders |evt| into evt->_render_msgbuf; see the definition below.
    int format(comspace::Event *evt);
private:
    ComlogLayoutOptions _options;
};
+
// Copies |options| when provided; otherwise keeps the defaults set by
// ComlogLayoutOptions's constructor.
ComlogLayout::ComlogLayout(const ComlogLayoutOptions* options) {
    if (options) {
        _options = *options;
    }
}

ComlogLayout::~ComlogLayout() {
}
+
// Override Layout::format to have shorter prefixes. Patterns are just ignored.
// Writes "<level> <datetime>.<usec> <thread-id> <message>\n" into
// evt->_render_msgbuf, truncating to the buffer size. Returns 0 on success,
// -1 when the buffer cannot even hold the trailing "\n\0".
int ComlogLayout::format(comspace::Event *evt) {
    const int bufsize = evt->_render_msgbuf_size;
    char* const buf = evt->_render_msgbuf;
    if (bufsize < 2){
        return -1;
    }

    // Break the event timestamp down into local time.
    time_t t = evt->_print_time.tv_sec;
    struct tm local_tm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL};
#if _MSC_VER >= 1400
    localtime_s(&local_tm, &t);
#else
    localtime_r(&t, &local_tm);
#endif
    int len = 0;
    if (_options.shorter_log_level) {
        // Only the first letter of the comlog level name.
        buf[len++] = *comspace::getLogName(evt->_log_level);
    } else {
        // Full level name followed by a space, truncated to the buffer.
        const char* const name = comspace::getLogName(evt->_log_level);
        int cp_len = std::min(bufsize - len, (int)strlen(name));
        memcpy(buf + len, name, cp_len);
        len += cp_len;
        if (len < bufsize - 1) {
            buf[len++] = ' ';
        }
    }
    if (len < bufsize - 1) {
        // Datetime (year included only when FLAGS_log_year is on),
        // microseconds and the logging thread id.
        int ret = 0;
        if (FLAGS_log_year) {
            ret = snprintf(buf + len, bufsize - len,
                           "%04d%02d%02d %02d:%02d:%02d.%06d %5u ",
                           local_tm.tm_year + 1900,
                           local_tm.tm_mon + 1,
                           local_tm.tm_mday,
                           local_tm.tm_hour,
                           local_tm.tm_min,
                           local_tm.tm_sec,
                           (int)evt->_print_time.tv_usec,
                           (unsigned int)evt->_thread_id);
        } else {
            ret = snprintf(buf + len, bufsize - len,
                           "%02d%02d %02d:%02d:%02d.%06d %5u ",
                           local_tm.tm_mon + 1,
                           local_tm.tm_mday,
                           local_tm.tm_hour,
                           local_tm.tm_min,
                           local_tm.tm_sec,
                           (int)evt->_print_time.tv_usec,
                           (unsigned int)evt->_thread_id);
        }
        if (ret >= 0) {
            len += ret;
        } else {
            // older glibc may return negative which means the buffer is full.
            len = bufsize;
        }
    }
    if (len > 0 && len < bufsize - 1) {  // not truncated.
        // Although it's very stupid, we have to copy the message again due
        // to the design of comlog.
        int cp_len = std::min(bufsize - len, evt->_msgbuf_len);
        memcpy(buf + len, evt->_msgbuf, cp_len);
        len += cp_len;
    }
    // Reserve the last two bytes for '\n' and the terminating '\0'.
    if (len >= bufsize - 1) {
        len = bufsize - 2;
    }
    buf[len++] = '\n';
    buf[len] = 0;
    evt->_render_msgbuf_len = len;
    return 0;
}
+
// Leaky singleton: the instance is never destroyed, so logging stays
// usable even during static destruction.
ComlogSink* ComlogSink::GetInstance() {
    return Singleton<ComlogSink, LeakySingletonTraits<ComlogSink> >::get();
}

// Defaults are documented field-by-field in comlog_sink.h.
ComlogSinkOptions::ComlogSinkOptions()
    : async(false)
    , shorter_log_level(true)
    , log_dir("log")
    , max_log_length(2048)
    , print_vlog_as_warning(true)
    , split_type(COMLOG_SPLIT_TRUNCT)
    , cut_size_megabytes(2048)
    , quota_size(0)
    , cut_interval_minutes(60)
    , quota_day(0)
    , quota_hour(0)
    , quota_min(0)
    , enable_wf_device(false) {
}

// Starts un-initialized; comlog is only opened by Setup()/SetupFromConfig().
ComlogSink::ComlogSink() 
    : _init(false), _dev(NULL) {
}
+
+int ComlogSink::SetupFromConfig(const std::string& conf_path_str) {
+    Unload();
+    base::FilePath path(conf_path_str);
+    if (com_loadlog(path.DirName().value().c_str(),
+                    path.BaseName().value().c_str()) != 0) {
+        LOG(ERROR) << "Fail to create ComlogSink from `" << conf_path_str << "'";
+        return -1;
+    }
+    _init = true;
+    return 0;
+}
+
+// This is definitely linux specific.
+static std::string GetProcessName() {
+    base::fd_guard fd(open("/proc/self/cmdline", O_RDONLY));
+    if (fd < 0) {
+        return "unknown";
+    }
+    char buf[512];
+    const ssize_t len = read(fd, buf, sizeof(buf) - 1);
+    if (len <= 0) {
+        return "unknown";
+    }
+    buf[len] = '\0';
+    // Not string(buf, len) because we needs to buf to be truncated at first \0.
+    // Under gdb, the first part of cmdline may include path.
+    return base::FilePath(std::string(buf)).BaseName().value();
+}
+
// Fills |*dev| to describe one comlog appender.
// |type| is "FILE" or "AFILE"; |file| is the full path of the log file;
// when |is_wf| is true the device is the ".wf" appender that only receives
// WARNING/FATAL logs. Returns 0 on success, -1 on invalid options or OOM.
int ComlogSink::SetupDevice(com_device_t* dev, const char* type, const char* file, bool is_wf) {
    base::FilePath path(file);
    // dev->host holds the directory, dev->file the basename of the log.
    snprintf(dev->host, sizeof(dev->host), "%s", path.DirName().value().c_str());
    if (!is_wf) {
        snprintf(dev->name, sizeof(dev->name), "%s_0", type);
        COMLOG_SETSYSLOG(*dev);

        //snprintf(dev->file, COM_MAXFILENAME, "%s", file);
        snprintf(dev->file, sizeof(dev->file), "%s", path.BaseName().value().c_str());
    } else {
        // Restrict the wf device to WARNING and FATAL.
        snprintf(dev->name, sizeof(dev->name), "%s_1", type);
        dev->log_mask = 0;
        COMLOG_ADDMASK(*dev, COMLOG_WARNING);
        COMLOG_ADDMASK(*dev, COMLOG_FATAL);

        //snprintf(dev->file, COM_MAXFILENAME, "%s.wf", file);
        snprintf(dev->file, sizeof(dev->file), "%s.wf", path.BaseName().value().c_str());
    }
    
    snprintf(dev->type, COM_MAXAPPENDERNAME, "%s", type);
    dev->splite_type = static_cast<int>(_options.split_type);
    dev->log_size = _options.cut_size_megabytes; // SIZECUT precision in MB
    dev->compress = 0;
    dev->cuttime = _options.cut_interval_minutes; // DATECUT time precision in min

    // set quota conf
    // Quotas are handed to comlog through the reserved "<NAME>_QUOTA_*"
    // key/value slots; validate the options before writing them.
    int index = dev->reserved_num;
    if (dev->splite_type == COMLOG_SPLIT_SIZECUT) {
        if (_options.cut_size_megabytes <= 0) {
            LOG(ERROR) << "Invalid ComlogSinkOptions.cut_size_megabytes="
                       << _options.cut_size_megabytes;
            return -1;
        }
        if (_options.quota_size < 0) {
            LOG(ERROR) << "Invalid ComlogSinkOptions.quota_size="
                       << _options.quota_size;
            return -1;
        }
        snprintf(dev->reservedext[index].name, sizeof(dev->reservedext[index].name),
                 "%s_QUOTA_SIZE", dev->name);
        snprintf(dev->reservedext[index].value, sizeof(dev->reservedext[index].value),
                 "%d", _options.quota_size);
        index++;
    } else if (dev->splite_type == COMLOG_SPLIT_DATECUT) {
        if (_options.quota_day < 0) {
            LOG(ERROR) << "Invalid ComlogSinkOptions.quota_day=" << _options.quota_day;
            return -1;
        }
        if (_options.quota_hour < 0) {
            LOG(ERROR) << "Invalid ComlogSinkOptions.quota_hour=" << _options.quota_hour;
            return -1;
        }
        if (_options.quota_min < 0) {
            LOG(ERROR) << "Invalid ComlogSinkOptions.quota_min=" << _options.quota_min;
            return -1;
        }
        if (_options.quota_day > 0) {
            snprintf(dev->reservedext[index].name, sizeof(dev->reservedext[index].name),
                     "%s_QUOTA_DAY", (char*)dev->name);
            snprintf(dev->reservedext[index].value, sizeof(dev->reservedext[index].value),
                     "%d", _options.quota_day);
            index++;
        }
        if (_options.quota_hour > 0) {
            snprintf(dev->reservedext[index].name, sizeof(dev->reservedext[index].name),
                     "%s_QUOTA_HOUR", (char*)dev->name);
            snprintf(dev->reservedext[index].value, sizeof(dev->reservedext[index].value),
                     "%d", _options.quota_hour);
            index++;
        }
        if (_options.quota_min > 0) {
            snprintf(dev->reservedext[index].name, sizeof(dev->reservedext[index].name),
                     "%s_QUOTA_MIN", (char*)dev->name);
            snprintf(dev->reservedext[index].value, sizeof(dev->reservedext[index].value),
                     "%d", _options.quota_min);
            index++;
        }
    }
    dev->reserved_num = index;
    dev->reservedconf.item = &dev->reservedext[0];
    dev->reservedconf.num = dev->reserved_num;
    dev->reservedconf.size = dev->reserved_num;

    // Attach the compact layout. NOTE: it is effectively leaked — see the
    // FIXME in Unload() about why it can't be deleted safely.
    ComlogLayoutOptions layout_options;
    layout_options.shorter_log_level = _options.shorter_log_level;
    ComlogLayout* layout = new (std::nothrow) ComlogLayout(&layout_options);
    if (layout == NULL) {
        LOG(FATAL) << "Fail to new layout";
        return -1;
    }
    dev->layout = layout;

    return 0;
}
+
// Opens comlog with devices built from |options| (or the currently stored
// options when |options| is NULL). Safe to call repeatedly; previous state
// is unloaded first. Returns 0 on success, -1 otherwise. On failure, any
// allocated devices are reclaimed by the next Unload()/Setup().
int ComlogSink::Setup(const ComlogSinkOptions* options) {
    Unload();
    if (options) {
        _options = *options;
    }
    if (_options.max_log_length > 0) {
        comspace::Event::setMaxLogLength(_options.max_log_length);
    }
    if (_options.process_name.empty()) {
        // Default the log file basename to the executable's name.
        _options.process_name = GetProcessName();
    }

    // "AFILE" = asynchronous file appender, "FILE" = synchronous.
    char type[COM_MAXAPPENDERNAME];
    if (_options.async) {
        snprintf(type, COM_MAXAPPENDERNAME, "AFILE");
    } else {
        snprintf(type, COM_MAXAPPENDERNAME, "FILE");
    }
    // Resolve log_dir against the current working directory when relative.
    base::FilePath cwd;
    if (!_options.log_dir.empty()) {
        base::FilePath log_dir(_options.log_dir);
        if (log_dir.IsAbsolute()) {
            cwd = log_dir;
        } else {
            if (!base::GetCurrentDirectory(&cwd)) {
                LOG(ERROR) << "Fail to get cwd";
                return -1;
            }
            cwd = cwd.Append(log_dir);
        }
    } else {
        if (!base::GetCurrentDirectory(&cwd)) {
            LOG(ERROR) << "Fail to get cwd";
            return -1;
        }
    }
    base::File::Error err;
    if (!base::CreateDirectoryAndGetError(cwd, &err)) {
        LOG(ERROR) << "Fail to create directory, " << err;
        return -1;
    }
    char file[COM_MAXFILENAME];
    snprintf(file, COM_MAXFILENAME, "%s",
             cwd.Append(_options.process_name + ".log").value().c_str());

    // One regular device, plus an optional ".wf" device for WARNING/FATAL.
    int dev_num = (_options.enable_wf_device ? 2 : 1);
    _dev = new (std::nothrow) com_device_t[dev_num];
    if (NULL == _dev) {
        LOG(FATAL) << "Fail to new com_device_t";
        return -1;
    }
    if (0 != SetupDevice(&_dev[0], type, file, false)) {
        LOG(ERROR) << "Fail to setup first com_device_t";
        return -1;
    }
    if (dev_num == 2) {
        if (0 != SetupDevice(&_dev[1], type, file, true)) {
            LOG(ERROR) << "Fail to setup second com_device_t";
            return -1;
        }
    }
    if (com_openlog(_options.process_name.c_str(), _dev, dev_num, NULL) != 0) {
        LOG(ERROR) << "Fail to com_openlog";
        return -1;
    }
    _init = true;
    return 0;
}
+        
// Closes comlog (when opened through this sink) and releases the device
// array. Called automatically before every Setup*() and from the destructor.
void ComlogSink::Unload() {
    if (_init) {
        com_closelog(0);
        _init = false;
    }
    if (_dev) {
        // FIXME(gejun): Can't delete layout, somewhere in comlog may still
        // reference the layout after com_closelog.
        //delete _dev->layout;
        delete [] _dev;
        _dev = NULL;
    }
}

ComlogSink::~ComlogSink() {
    Unload();
}

// Maps logging severities to comlog levels, indexed by severity.
int const comlog_levels[LOG_NUM_SEVERITIES] = {
    COMLOG_TRACE, COMLOG_NOTICE, COMLOG_WARNING, COMLOG_FATAL, COMLOG_FATAL };
+
// @LogSink Writes one log line into comlog with a "file:line] message"
// suffix (optionally prefixed by the hostname). Returns true on success.
bool ComlogSink::OnLogMessage(int severity, const char* file, int line,
                              const base::StringPiece& content) {
    // Print warning for VLOG since many online servers do not enable COMLOG_TRACE.
    int comlog_level = 0;
    if (severity < 0) {
        // Negative severity marks a VLOG(n) message.
        comlog_level = _options.print_vlog_as_warning ? COMLOG_WARNING : COMLOG_TRACE;
    } else {
        comlog_level = comlog_levels[severity];
    }
    if (FLAGS_log_hostname) {
        base::StringPiece hostname(base::my_hostname());
        if (hostname.ends_with(".baidu.com")) { // make it shorter
            hostname.remove_suffix(10);
        }
        return com_writelog(comlog_level, "%.*s %s:%d] %.*s",
                            (int)hostname.size(), hostname.data(),
                            file, line,
                            (int)content.size(), content.data()) == 0;
    }
    // Using %.*s is faster than %s.
    return com_writelog(comlog_level, "%s:%d] %.*s", file, line,
                        (int)content.size(), content.data()) == 0;
}
+
+}  // namespace logging

+ 152 - 0
base/comlog_sink.h

@@ -0,0 +1,152 @@
+// Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
+//
+// Redirect LOG() into comlog.
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Mon Jul 20 12:39:39 CST 2015
+
+#ifndef BASE_COMLOG_SINK_H
+#define BASE_COMLOG_SINK_H
+
+#include "base/logging.h"
+
+struct com_device_t;
+template <typename T> struct DefaultSingletonTraits;
+
+namespace comspace {
+class Event;
+}
+
+namespace logging {
+
// How comlog rotates the log file; see ComlogSinkOptions::split_type for
// the meaning of each mode.
enum ComlogSplitType {
    COMLOG_SPLIT_TRUNCT = 0,
    COMLOG_SPLIT_SIZECUT = 1,
    COMLOG_SPLIT_DATECUT = 2,
};
+
// Options to setup ComlogSink. Pass to ComlogSink::Setup(); the defaults
// below are established by the constructor (see comlog_sink.cc).
struct ComlogSinkOptions {
    ComlogSinkOptions();

    // true - "AFILE" (asynchronous appender), false - "FILE"
    // default: false.
    bool async;

    // Use F W N T instead of FATAL WARNING NOTICE TRACE for shorter prefixes
    // and better alignment.
    // default: true
    bool shorter_log_level;

    // The directory to put logs. Could be absolute or relative path.
    // default: "log"
    std::string log_dir;

    // Name of the process. Use argv[0] when it's empty.
    // default: ""
    std::string process_name;

    // Logs longer than this value are truncated.
    // default: 2048
    int max_log_length;

    // Print VLOG(n) as WARNING instead of TRACE since many online servers
    // disable TRACE logs.
    // default: true
    bool print_vlog_as_warning;

    // Split Comlog type:
    //  COMLOG_SPLIT_TRUNCT: rotate the log file every 2G written.
    //  COMLOG_SPLIT_SIZECUT: move existing logs into a separate file every xxx MB written.
    //  COMLOG_SPLIT_DATECUT: move existing logs into a separate file periodically.
    // default: COMLOG_SPLIT_TRUNCT
    ComlogSplitType split_type;

    // [ Effective when split_type is COMLOG_SPLIT_SIZECUT ]
    // Move existing logs into a separate file suffixed with datetime every so many MB written.
    // Default: 2048
    int cut_size_megabytes;
    // Remove oldest cutoff log files when they exceed so many megabytes(roughly)
    // Default: 0 (unlimited)
    int quota_size;

    // [ Effective when split_type is COMLOG_SPLIT_DATECUT ]
    // Move existing logs into a separate file suffixed with datetime every so many minutes.
    // Example: my_app.log is moved to my_app.log.20160905113104
    // Default: 60
    int cut_interval_minutes;
    // Remove cutoff log files older than so many minutes:
    //   quota_day * 24 * 60 + quota_hour * 60 + quota_min
    // Default: 0 (unlimited)
    int quota_day;
    int quota_hour;
    int quota_min;

    // Open wf appender device for WARNING/ERROR/FATAL
    // default: false
    bool enable_wf_device;
};
+
+// The LogSink to flush logs into comlog. Notice that this is a singleton class.
+// [ Setup from a Configure file ]
+//   if (logging::ComlogSink::GetInstance()->SetupFromConfig("log/log.conf") != 0) {
+//       LOG(ERROR) << "Fail to setup comlog";
+//       return -1;
+//   }
+//   logging::SetLogSink(ComlogSink::GetInstance());
+//
+// [ Setup from ComlogSinkOptions ]
+//   if (logging::ComlogSink::GetInstance()->Setup(NULL/*default options*/) != 0) {
+//       LOG(ERROR) << "Fail to setup comlog";
+//       return -1;
+//   }
+//   logging::SetLogSink(ComlogSink::GetInstance());
+
class ComlogSink : public LogSink {
public:
    // comlog can have only one instance due to its global open/close.
    // Returns the leaky process-wide singleton.
    static ComlogSink* GetInstance();

    // Setup comlog in different ways: from a Configure file or
    // ComlogSinkOptions. Notice that setup can be done multiple times.
    // Both return 0 on success and -1 on failure.
    int SetupFromConfig(const std::string& conf_path);
    int Setup(const ComlogSinkOptions* options);

    // Close comlog and release resources. This method is automatically
    // called before Setup and destruction.
    void Unload();

    // @LogSink
    // Returns true when the message was written to comlog successfully.
    bool OnLogMessage(int severity, const char* file, int line,
                      const base::StringPiece& content);
private:
    // Construction/destruction restricted to the singleton machinery.
    ComlogSink();
    ~ComlogSink();
friend struct DefaultSingletonTraits<ComlogSink>;
    // Configures one com_device_t; see comlog_sink.cc for details.
    int SetupDevice(com_device_t* dev, const char* type, const char* file, bool is_wf);

    bool _init;                   // true once comlog has been opened
    ComlogSinkOptions _options;
    com_device_t* _dev;           // owned array of 1 or 2 devices (new[])
};
+
// Scoped helper pairing com_openlog_r() with com_closelog_r() when comlog
// has been configured. NOTE(review): the _r variants presumably manage
// per-thread comlog state — confirm against comlog's documentation.
class ComlogInitializer {
public:
    ComlogInitializer() {
        if (com_logstatus() != LOG_NOT_DEFINED) {
            com_openlog_r();
        }
    }
    ~ComlogInitializer() {
        if (com_logstatus() != LOG_NOT_DEFINED) {
            com_closelog_r();
        }
    }
    
private:
    DISALLOW_COPY_AND_ASSIGN(ComlogInitializer);
};
+
+}  // namespace logging
+
+#endif  // BASE_COMLOG_SINK_H

+ 442 - 0
base/command_line.cc

@@ -0,0 +1,442 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/basictypes.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#endif
+
+namespace base {
+
+CommandLine* CommandLine::current_process_commandline_ = NULL;
+
+namespace {
+
// "--" stops switch parsing; "=" splits a switch from its value.
const CommandLine::CharType kSwitchTerminator[] = FILE_PATH_LITERAL("--");
const CommandLine::CharType kSwitchValueSeparator[] = FILE_PATH_LITERAL("=");

// Since we use a lazy match, make sure that longer versions (like "--") are
// listed before shorter versions (like "-") of similar prefixes.
#if defined(OS_WIN)
// By putting slash last, we can control whether it is treated as a switch
// value by changing the value of switch_prefix_count to be one less than
// the array size.
const CommandLine::CharType* const kSwitchPrefixes[] = {L"--", L"-", L"/"};
#elif defined(OS_POSIX)
// Unixes don't use slash as a switch.
const CommandLine::CharType* const kSwitchPrefixes[] = {"--", "-"};
#endif
// Number of entries of kSwitchPrefixes honored while parsing; shrunk by
// CommandLine::set_slash_is_not_a_switch() on Windows.
size_t switch_prefix_count = arraysize(kSwitchPrefixes);
+
+size_t GetSwitchPrefixLength(const CommandLine::StringType& string) {
+  for (size_t i = 0; i < switch_prefix_count; ++i) {
+    CommandLine::StringType prefix(kSwitchPrefixes[i]);
+    if (string.compare(0, prefix.length(), prefix) == 0)
+      return prefix.length();
+  }
+  return 0;
+}
+
// Fills in |switch_string| and |switch_value| if |string| is a switch.
// This will preserve the input switch prefix in the output |switch_string|.
bool IsSwitch(const CommandLine::StringType& string,
              CommandLine::StringType* switch_string,
              CommandLine::StringType* switch_value) {
  switch_string->clear();
  switch_value->clear();
  size_t prefix_length = GetSwitchPrefixLength(string);
  // A bare prefix (e.g. just "--" or "-") is not a switch.
  if (prefix_length == 0 || prefix_length == string.length())
    return false;

  // Split at the first '='; everything after it is the value.
  const size_t equals_position = string.find(kSwitchValueSeparator);
  *switch_string = string.substr(0, equals_position);
  if (equals_position != CommandLine::StringType::npos)
    *switch_value = string.substr(equals_position + 1);
  return true;
}
+
// Append switches and arguments, keeping switches before arguments.
// Entries following a "--" terminator are always treated as plain
// arguments, regardless of prefix. argv[0] is skipped (the program).
void AppendSwitchesAndArguments(CommandLine& command_line,
                                const CommandLine::StringVector& argv) {
  bool parse_switches = true;
  for (size_t i = 1; i < argv.size(); ++i) {
    CommandLine::StringType arg = argv[i];
    TrimWhitespace(arg, TRIM_ALL, &arg);

    CommandLine::StringType switch_string;
    CommandLine::StringType switch_value;
    // Once "--" is seen, switch parsing stays off for the rest of argv.
    parse_switches &= (arg != kSwitchTerminator);
    if (parse_switches && IsSwitch(arg, &switch_string, &switch_value)) {
#if defined(OS_WIN)
      command_line.AppendSwitchNative(UTF16ToASCII(switch_string),
                                      switch_value);
#elif defined(OS_POSIX)
      command_line.AppendSwitchNative(switch_string, switch_value);
#endif
    } else {
      command_line.AppendArgNative(arg);
    }
  }
}
+
// Lowercase switches for backwards compatiblity *on Windows*.
// On POSIX switch names are case-sensitive and returned untouched.
std::string LowerASCIIOnWindows(const std::string& string) {
#if defined(OS_WIN)
  return StringToLowerASCII(string);
#elif defined(OS_POSIX)
  return string;
#endif
}
+
+
#if defined(OS_WIN)
// Quote a string as necessary for CommandLineToArgvW compatiblity *on Windows*.
// Returns |arg| unchanged when it contains no spaces, backslashes or quotes;
// otherwise returns it wrapped in double quotes with embedded quotes and
// quote-adjacent backslash runs escaped.
std::wstring QuoteForCommandLineToArgvW(const std::wstring& arg) {
  // We follow the quoting rules of CommandLineToArgvW.
  // http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
  if (arg.find_first_of(L" \\\"") == std::wstring::npos) {
    // No quoting necessary.
    return arg;
  }

  std::wstring out;
  out.push_back(L'"');
  for (size_t i = 0; i < arg.size(); ++i) {
    if (arg[i] == '\\') {
      // Find the extent of this run of backslashes.
      size_t start = i, end = start + 1;
      for (; end < arg.size() && arg[end] == '\\'; ++end)
        /* empty */;
      size_t backslash_count = end - start;

      // Backslashes are escapes only if the run is followed by a double quote.
      // Since we also will end the string with a double quote, we escape for
      // either a double quote or the end of the string.
      if (end == arg.size() || arg[end] == '"') {
        // To quote, we need to output 2x as many backslashes.
        backslash_count *= 2;
      }
      for (size_t j = 0; j < backslash_count; ++j)
        out.push_back('\\');

      // Advance i to one before the end to balance i++ in loop.
      i = end - 1;
    } else if (arg[i] == '"') {
      out.push_back('\\');
      out.push_back('"');
    } else {
      out.push_back(arg[i]);
    }
  }
  out.push_back('"');

  return out;
}
#endif
+
+}  // namespace
+
// An empty command line: argv_ holds a single empty element and every
// later insertion is a switch or argument.
CommandLine::CommandLine(NoProgram no_program)
    : argv_(1),
      begin_args_(1) {
}

// A command line whose argv[0] is |program|.
CommandLine::CommandLine(const FilePath& program)
    : argv_(1),
      begin_args_(1) {
  SetProgram(program);
}

// Builds from a C-style argc/argv pair.
CommandLine::CommandLine(int argc, const CommandLine::CharType* const* argv)
    : argv_(1),
      begin_args_(1) {
  InitFromArgv(argc, argv);
}

// Builds from a vector of native-encoded arguments.
CommandLine::CommandLine(const StringVector& argv)
    : argv_(1),
      begin_args_(1) {
  InitFromArgv(argv);
}

CommandLine::~CommandLine() {
}
+
#if defined(OS_WIN)
// static
// After this call, '/' is no longer parsed as a switch prefix.
void CommandLine::set_slash_is_not_a_switch() {
  // The last switch prefix should be slash, so adjust the size to skip it.
  DCHECK(wcscmp(kSwitchPrefixes[arraysize(kSwitchPrefixes) - 1], L"/") == 0);
  switch_prefix_count = arraysize(kSwitchPrefixes) - 1;
}
#endif
+
// static
// Initializes the process-wide singleton from argc/argv (POSIX) or from
// ::GetCommandLineW() (Windows). Returns false — and leaves the existing
// singleton untouched — when it was already initialized.
bool CommandLine::Init(int argc, const char* const* argv) {
  if (current_process_commandline_) {
    // If this is intentional, Reset() must be called first. If we are using
    // the shared build mode, we have to share a single object across multiple
    // shared libraries.
    return false;
  }

  current_process_commandline_ = new CommandLine(NO_PROGRAM);
#if defined(OS_WIN)
  current_process_commandline_->ParseFromString(::GetCommandLineW());
#elif defined(OS_POSIX)
  current_process_commandline_->InitFromArgv(argc, argv);
#endif

  return true;
}
+
// static
// Destroys the singleton; Init() may then be called again.
void CommandLine::Reset() {
  DCHECK(current_process_commandline_);
  delete current_process_commandline_;
  current_process_commandline_ = NULL;
}

// static
// Returns the singleton; Init() must have been called first.
CommandLine* CommandLine::ForCurrentProcess() {
  DCHECK(current_process_commandline_);
  return current_process_commandline_;
}

// static
bool CommandLine::InitializedForCurrentProcess() {
  return !!current_process_commandline_;
}
+
#if defined(OS_WIN)
// static
// Builds a CommandLine by parsing a full Windows command line string.
CommandLine CommandLine::FromString(const std::wstring& command_line) {
  CommandLine cmd(NO_PROGRAM);
  cmd.ParseFromString(command_line);
  return cmd;
}
#endif
+
+void CommandLine::InitFromArgv(int argc,
+                               const CommandLine::CharType* const* argv) {
+  StringVector new_argv;
+  for (int i = 0; i < argc; ++i)
+    new_argv.push_back(argv[i]);
+  InitFromArgv(new_argv);
+}
+
// Resets this command line to |argv|: argv[0] becomes the program and the
// remaining elements are parsed into switches and arguments.
void CommandLine::InitFromArgv(const StringVector& argv) {
  argv_ = StringVector(1);
  switches_.clear();
  begin_args_ = 1;
  SetProgram(argv.empty() ? FilePath() : FilePath(argv[0]));
  AppendSwitchesAndArguments(*this, argv);
}
+
// Returns the full command line: the (Windows-quoted) program followed by
// the arguments string, separated by a single space.
CommandLine::StringType CommandLine::GetCommandLineString() const {
  StringType string(argv_[0]);
#if defined(OS_WIN)
  string = QuoteForCommandLineToArgvW(string);
#endif
  StringType params(GetArgumentsString());
  if (!params.empty()) {
    string.append(StringType(FILE_PATH_LITERAL(" ")));
    string.append(params);
  }
  return string;
}

// Returns every switch and argument (without the program), separated by
// single spaces; switch values and arguments are re-quoted on Windows.
CommandLine::StringType CommandLine::GetArgumentsString() const {
  StringType params;
  // Append switches and arguments.
  bool parse_switches = true;
  for (size_t i = 1; i < argv_.size(); ++i) {
    StringType arg = argv_[i];
    StringType switch_string;
    StringType switch_value;
    // After "--" everything is emitted as a plain argument.
    parse_switches &= arg != kSwitchTerminator;
    if (i > 1)
      params.append(StringType(FILE_PATH_LITERAL(" ")));
    if (parse_switches && IsSwitch(arg, &switch_string, &switch_value)) {
      params.append(switch_string);
      if (!switch_value.empty()) {
#if defined(OS_WIN)
        switch_value = QuoteForCommandLineToArgvW(switch_value);
#endif
        params.append(kSwitchValueSeparator + switch_value);
      }
    }
    else {
#if defined(OS_WIN)
      arg = QuoteForCommandLineToArgvW(arg);
#endif
      params.append(arg);
    }
  }
  return params;
}
+
// Returns argv[0] as a FilePath.
FilePath CommandLine::GetProgram() const {
  return FilePath(argv_[0]);
}

// Sets argv[0]; surrounding whitespace is trimmed.
void CommandLine::SetProgram(const FilePath& program) {
  TrimWhitespace(program.value(), TRIM_ALL, &argv_[0]);
}
+
// Returns true if this command line contains the given switch
// (lowercased first on Windows).
bool CommandLine::HasSwitch(const std::string& switch_string) const {
  return switches_.find(LowerASCIIOnWindows(switch_string)) != switches_.end();
}

// Returns the switch's value as ASCII, or an empty string when the switch
// is absent or its value contains non-ASCII characters.
std::string CommandLine::GetSwitchValueASCII(
    const std::string& switch_string) const {
  StringType value = GetSwitchValueNative(switch_string);
  if (!IsStringASCII(value)) {
    DLOG(WARNING) << "Value of switch (" << switch_string << ") must be ASCII.";
    return std::string();
  }
#if defined(OS_WIN)
  return UTF16ToASCII(value);
#else
  return value;
#endif
}

// Returns the switch's value interpreted as a file path (may be empty).
FilePath CommandLine::GetSwitchValuePath(
    const std::string& switch_string) const {
  return FilePath(GetSwitchValueNative(switch_string));
}

// Returns the raw native-encoded value, or an empty StringType when the
// switch is absent.
CommandLine::StringType CommandLine::GetSwitchValueNative(
    const std::string& switch_string) const {
  SwitchMap::const_iterator result =
    switches_.find(LowerASCIIOnWindows(switch_string));
  return result == switches_.end() ? StringType() : result->second;
}
+
// Appends a value-less switch.
void CommandLine::AppendSwitch(const std::string& switch_string) {
  AppendSwitchNative(switch_string, StringType());
}

// Appends a switch whose value is a file path.
void CommandLine::AppendSwitchPath(const std::string& switch_string,
                                   const FilePath& path) {
  AppendSwitchNative(switch_string, path.value());
}

// Appends a switch with a native-encoded value. The switch is recorded in
// |switches_| with any prefix stripped and inserted into |argv_| before
// the first non-switch argument.
void CommandLine::AppendSwitchNative(const std::string& switch_string,
                                     const CommandLine::StringType& value) {
  std::string switch_key(LowerASCIIOnWindows(switch_string));
#if defined(OS_WIN)
  StringType combined_switch_string(ASCIIToWide(switch_key));
#elif defined(OS_POSIX)
  StringType combined_switch_string(switch_string);
#endif
  size_t prefix_length = GetSwitchPrefixLength(combined_switch_string);
  switches_[switch_key.substr(prefix_length)] = value;
  // Preserve existing switch prefixes in |argv_|; only append one if necessary.
  if (prefix_length == 0)
    combined_switch_string = kSwitchPrefixes[0] + combined_switch_string;
  if (!value.empty())
    combined_switch_string += kSwitchValueSeparator + value;
  // Append the switch and update the switches/arguments divider |begin_args_|.
  argv_.insert(argv_.begin() + begin_args_++, combined_switch_string);
}

// Appends a switch with an ASCII value (converted to wide on Windows).
void CommandLine::AppendSwitchASCII(const std::string& switch_string,
                                    const std::string& value_string) {
#if defined(OS_WIN)
  AppendSwitchNative(switch_string, ASCIIToWide(value_string));
#elif defined(OS_POSIX)
  AppendSwitchNative(switch_string, value_string);
#endif
}
+
// Copies the listed switches (and their values) from |source|, skipping
// any that |source| does not have.
void CommandLine::CopySwitchesFrom(const CommandLine& source,
                                   const char* const switches[],
                                   size_t count) {
  for (size_t i = 0; i < count; ++i) {
    if (source.HasSwitch(switches[i]))
      AppendSwitchNative(switches[i], source.GetSwitchValueNative(switches[i]));
  }
}

// Returns the non-switch arguments.
CommandLine::StringVector CommandLine::GetArgs() const {
  // Gather all arguments after the last switch (may include kSwitchTerminator).
  StringVector args(argv_.begin() + begin_args_, argv_.end());
  // Erase only the first kSwitchTerminator (maybe "--" is a legitimate page?)
  StringVector::iterator switch_terminator =
      std::find(args.begin(), args.end(), kSwitchTerminator);
  if (switch_terminator != args.end())
    args.erase(switch_terminator);
  return args;
}
+
// Appends a non-switch argument given as UTF8 (must be valid UTF8 on
// Windows, where it is converted to wide).
void CommandLine::AppendArg(const std::string& value) {
#if defined(OS_WIN)
  DCHECK(IsStringUTF8(value));
  AppendArgNative(UTF8ToWide(value));
#elif defined(OS_POSIX)
  AppendArgNative(value);
#endif
}

// Appends a file path as a non-switch argument.
void CommandLine::AppendArgPath(const FilePath& path) {
  AppendArgNative(path.value());
}

// Appends a native-encoded non-switch argument verbatim.
void CommandLine::AppendArgNative(const CommandLine::StringType& value) {
  argv_.push_back(value);
}
+
// Appends all switches and arguments of |other|; when |include_program|
// is true, also takes over its program (argv[0]).
void CommandLine::AppendArguments(const CommandLine& other,
                                  bool include_program) {
  if (include_program)
    SetProgram(other.GetProgram());
  AppendSwitchesAndArguments(*this, other.argv());
}

// Inserts |wrapper| (e.g. "gdb --args") in front of the program.
void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
  if (wrapper.empty())
    return;
  // The wrapper may have embedded arguments (like "gdb --args"). In this case,
  // we don't pretend to do anything fancy, we just split on spaces.
  StringVector wrapper_argv;
  SplitString(wrapper, FILE_PATH_LITERAL(' '), &wrapper_argv);
  // Prepend the wrapper and update the switches/arguments |begin_args_|.
  argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
  begin_args_ += wrapper_argv.size();
}
+
#if defined(OS_WIN)
// Re-initializes this object by splitting a full Windows command line
// string with ::CommandLineToArgvW.
void CommandLine::ParseFromString(const std::wstring& command_line) {
  std::wstring command_line_string;
  TrimWhitespace(command_line, TRIM_ALL, &command_line_string);
  if (command_line_string.empty())
    return;

  int num_args = 0;
  wchar_t** args = NULL;
  args = ::CommandLineToArgvW(command_line_string.c_str(), &num_args);

  DPLOG_IF(FATAL, !args) << "CommandLineToArgvW failed on command line: "
                         << UTF16ToUTF8(command_line);
  InitFromArgv(num_args, args);
  LocalFree(args);
}
#endif
+
+}  // namespace base

+ 194 - 0
base/command_line.h

@@ -0,0 +1,194 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class works with command lines: building and parsing.
+// Arguments with prefixes ('--', '-', and on Windows, '/') are switches.
+// Switches will precede all other arguments without switch prefixes.
+// Switches can optionally have values, delimited by '=', e.g., "-switch=value".
+// An argument of "--" will terminate switch parsing during initialization,
+// interpreting subsequent tokens as non-switch arguments, regardless of prefix.
+
+// There is a singleton read-only CommandLine that represents the command line
+// that the current process was started with.  It must be initialized in main().
+
+#ifndef BASE_COMMAND_LINE_H_
+#define BASE_COMMAND_LINE_H_
+
+#include <stddef.h>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/build_config.h"
+
+namespace base {
+
+class FilePath;
+
class BASE_EXPORT CommandLine {
 public:
#if defined(OS_WIN)
  // The native command line string type.
  typedef std::wstring StringType;
#elif defined(OS_POSIX)
  // POSIX command lines are byte strings in the native encoding.
  typedef std::string StringType;
#endif

  // Helper types derived from the native string type.
  typedef StringType::value_type CharType;
  typedef std::vector<StringType> StringVector;
  typedef std::map<std::string, StringType> SwitchMap;

  // A constructor for CommandLines that only carry switches and arguments.
  enum NoProgram { NO_PROGRAM };
  explicit CommandLine(NoProgram no_program);

  // Construct a new command line with |program| as argv[0].
  explicit CommandLine(const FilePath& program);

  // Construct a new command line from an argument list.
  CommandLine(int argc, const CharType* const* argv);
  explicit CommandLine(const StringVector& argv);

  ~CommandLine();

#if defined(OS_WIN)
  // By default this class will treat command-line arguments beginning with
  // slashes as switches on Windows, but not other platforms.
  //
  // If this behavior is inappropriate for your application, you can call this
  // function BEFORE initializing the current process' global command line
  // object and the behavior will be the same as Posix systems (only hyphens
  // begin switches, everything else will be an arg).
  static void set_slash_is_not_a_switch();
#endif

  // Initialize the current process CommandLine singleton. On Windows, ignores
  // its arguments (we instead parse GetCommandLineW() directly) because we
  // don't trust the CRT's parsing of the command line, but it still must be
  // called to set up the command line. Returns false if initialization has
  // already occurred, and true otherwise. Only the caller receiving a 'true'
  // return value should take responsibility for calling Reset.
  static bool Init(int argc, const char* const* argv);

  // Destroys the current process CommandLine singleton. This is necessary if
  // you want to reset the base library to its initial state (for example, in an
  // outer library that needs to be able to terminate, and be re-initialized).
  // If Init is called only once, as in main(), Reset() is not necessary.
  static void Reset();

  // Get the singleton CommandLine representing the current process's
  // command line. Note: returned value is mutable, but not thread safe;
  // only mutate if you know what you're doing!
  static CommandLine* ForCurrentProcess();

  // Returns true if the CommandLine has been initialized for the given process.
  static bool InitializedForCurrentProcess();

#if defined(OS_WIN)
  static CommandLine FromString(const std::wstring& command_line);
#endif

  // Initialize from an argv vector.
  void InitFromArgv(int argc, const CharType* const* argv);
  void InitFromArgv(const StringVector& argv);

  // Constructs and returns the represented command line string.
  // CAUTION! This should be avoided on POSIX because quoting behavior is
  // unclear.
  StringType GetCommandLineString() const;

  // Constructs and returns the represented arguments string.
  // CAUTION! This should be avoided on POSIX because quoting behavior is
  // unclear.
  StringType GetArgumentsString() const;

  // Returns the original command line string as a vector of strings.
  const StringVector& argv() const { return argv_; }

  // Get and Set the program part of the command line string (the first item).
  FilePath GetProgram() const;
  void SetProgram(const FilePath& program);

  // Returns true if this command line contains the given switch.
  // (Switch names are case-insensitive).
  bool HasSwitch(const std::string& switch_string) const;

  // Returns the value associated with the given switch. If the switch has no
  // value or isn't present, this method returns the empty string.
  std::string GetSwitchValueASCII(const std::string& switch_string) const;
  FilePath GetSwitchValuePath(const std::string& switch_string) const;
  StringType GetSwitchValueNative(const std::string& switch_string) const;

  // Get a copy of all switches, along with their values.
  const SwitchMap& GetSwitches() const { return switches_; }

  // Append a switch [with optional value] to the command line.
  // Note: Switches will precede arguments regardless of appending order.
  void AppendSwitch(const std::string& switch_string);
  void AppendSwitchPath(const std::string& switch_string,
                        const FilePath& path);
  void AppendSwitchNative(const std::string& switch_string,
                          const StringType& value);
  void AppendSwitchASCII(const std::string& switch_string,
                         const std::string& value);

  // Copy a set of switches (and any values) from another command line.
  // Commonly used when launching a subprocess.
  void CopySwitchesFrom(const CommandLine& source,
                        const char* const switches[],
                        size_t count);

  // Get the remaining arguments to the command.
  StringVector GetArgs() const;

  // Append an argument to the command line. Note that the argument is quoted
  // properly such that it is interpreted as one argument to the target command.
  // AppendArg is primarily for ASCII; non-ASCII input is interpreted as UTF-8.
  // Note: Switches will precede arguments regardless of appending order.
  void AppendArg(const std::string& value);
  void AppendArgPath(const FilePath& value);
  void AppendArgNative(const StringType& value);

  // Append the switches and arguments from another command line to this one.
  // If |include_program| is true, include |other|'s program as well.
  void AppendArguments(const CommandLine& other, bool include_program);

  // Insert a command before the current command.
  // Common for debuggers, like "valgrind" or "gdb --args".
  void PrependWrapper(const StringType& wrapper);

#if defined(OS_WIN)
  // Initialize by parsing the given command line string.
  // The program name is assumed to be the first item in the string.
  void ParseFromString(const std::wstring& command_line);
#endif

 private:
  // Disallow default constructor; a program name must be explicitly specified.
  CommandLine();
  // Allow the (implicitly generated) copy constructor. A common pattern is to
  // copy the current process's command line and then add some flags to it.
  // For example:
  //   CommandLine cl(*CommandLine::ForCurrentProcess());
  //   cl.AppendSwitch(...);

  // The singleton CommandLine representing the current process's command line.
  static CommandLine* current_process_commandline_;

  // The argv array: { program, [(--|-|/)switch[=value]]*, [--], [argument]* }
  StringVector argv_;

  // Parsed-out switch keys and values.
  SwitchMap switches_;

  // The index after the program and switches, any arguments start here.
  size_t begin_args_;
};
+
+}  // namespace base
+
+// TODO(brettw) remove once all callers specify the namespace properly.
+using base::CommandLine;
+
+#endif  // BASE_COMMAND_LINE_H_

+ 282 - 0
base/compiler_specific.h

@@ -0,0 +1,282 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_COMPILER_SPECIFIC_H_
#define BASE_COMPILER_SPECIFIC_H_

#include "base/build_config.h"

#if defined(COMPILER_MSVC)

// Macros for suppressing and disabling warnings on MSVC.
//
// Warning numbers are enumerated at:
// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
//
// The warning pragma:
// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
//
// Using __pragma instead of #pragma inside macros:
// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx

// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
// for the next line of the source file.
#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))

// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
// The warning remains disabled until popped by MSVC_POP_WARNING.
#define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
                                     __pragma(warning(disable:n))

// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level.  The level
// remains in effect until popped by MSVC_POP_WARNING().  Use 0 to disable all
// warnings.
#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))

// Pop effects of innermost MSVC_PUSH_* macro.
#define MSVC_POP_WARNING() __pragma(warning(pop))

#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))

// Allows exporting a class that inherits from a non-exported base class.
// This uses suppress instead of push/pop because the delimiter after the
// declaration (either "," or "{") has to be placed before the pop macro.
//
// Example usage:
// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
//
// MSVC Compiler warning C4275:
// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
// Note that this is intended to be used only when no access to the base class'
// static data is done through derived classes or inline methods. For more info,
// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
#define NON_EXPORTED_BASE(code) MSVC_SUPPRESS_WARNING(4275) \
                                code

#else  // Not MSVC

// On non-MSVC compilers all MSVC_* macros are no-ops that preserve the code.
#define MSVC_SUPPRESS_WARNING(n)
#define MSVC_PUSH_DISABLE_WARNING(n)
#define MSVC_PUSH_WARNING_LEVEL(n)
#define MSVC_POP_WARNING()
#define MSVC_DISABLE_OPTIMIZE()
#define MSVC_ENABLE_OPTIMIZE()
#define NON_EXPORTED_BASE(code) code

#endif  // COMPILER_MSVC


// The C++ standard requires that static const members have an out-of-class
// definition (in a single compilation unit), but MSVC chokes on this (when
// language extensions, which are required, are enabled). (You're only likely to
// notice the need for a definition if you take the address of the member or,
// more commonly, pass it to a function that takes it as a reference argument --
// probably an STL function.) This macro makes MSVC do the right thing. See
// http://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx for more
// information. Use like:
//
// In .h file:
//   struct Foo {
//     static const int kBar = 5;
//   };
//
// In .cc file:
//   STATIC_CONST_MEMBER_DEFINITION const int Foo::kBar;
#if defined(COMPILER_MSVC)
#define STATIC_CONST_MEMBER_DEFINITION __declspec(selectany)
#else
#define STATIC_CONST_MEMBER_DEFINITION
#endif

// Annotate a variable indicating it's ok if the variable is not used.
// (Typically used to silence a compiler warning when the assignment
// is important for some other reason.)
// Use like:
//   int x ALLOW_UNUSED = ...;
#if defined(COMPILER_GCC)
#define ALLOW_UNUSED __attribute__((unused))
#else
#define ALLOW_UNUSED
#endif

// Annotate a function indicating it should not be inlined.
// Use like:
//   NOINLINE void DoStuff() { ... }
#if defined(COMPILER_GCC)
#define NOINLINE __attribute__((noinline))
#elif defined(COMPILER_MSVC)
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE
#endif

// Force inlining regardless of the compiler's own inlining heuristics.
#ifndef BASE_FORCE_INLINE
#if defined(COMPILER_MSVC)
#define BASE_FORCE_INLINE    __forceinline
#else
#define BASE_FORCE_INLINE inline __attribute__((always_inline))
#endif
#endif  // BASE_FORCE_INLINE

// Specify memory alignment for structs, classes, etc.
// Use like:
//   class ALIGNAS(16) MyClass { ... }
//   ALIGNAS(16) int array[4];
#if defined(COMPILER_MSVC)
#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
#elif defined(COMPILER_GCC)
#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
#endif

// Return the byte alignment of the given type (available at compile time).  Use
// sizeof(type) prior to checking __alignof to workaround Visual C++ bug:
// http://goo.gl/isH0C
// Use like:
//   ALIGNOF(int32_t)  // this would be 4
#if defined(COMPILER_MSVC)
#define ALIGNOF(type) (sizeof(type) - sizeof(type) + __alignof(type))
#elif defined(COMPILER_GCC)
#define ALIGNOF(type) __alignof__(type)
#endif

// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
//   virtual void foo() OVERRIDE;
#if defined(__clang__) || defined(COMPILER_MSVC)
#define OVERRIDE override
#elif defined(COMPILER_GCC) && __cplusplus >= 201103 && \
      (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40700
// GCC 4.7 supports explicit virtual overrides when C++11 support is enabled.
#define OVERRIDE override
#else
#define OVERRIDE
#endif

// Annotate a virtual method indicating that subclasses must not override it,
// or annotate a class to indicate that it cannot be subclassed.
// Use like:
//   virtual void foo() FINAL;
//   class B FINAL : public A {};
#if defined(__clang__) || defined(COMPILER_MSVC)
#define FINAL final
#elif defined(COMPILER_GCC) && __cplusplus >= 201103 && \
      (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40700
// GCC 4.7 supports explicit virtual overrides when C++11 support is enabled.
#define FINAL final
#else
#define FINAL
#endif

// Annotate a function indicating the caller must examine the return value.
// Use like:
//   int foo() WARN_UNUSED_RESULT;
// To explicitly ignore a result, see |ignore_result()| in "base/basictypes.h".
// FIXME(gejun): GCC 3.4 report "unused" variable incorrectly (actually used).
#if defined(COMPILER_GCC) && __cplusplus >= 201103 && \
      (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40700
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif

// Tell the compiler a function is using a printf-style format string.
// |format_param| is the one-based index of the format string parameter;
// |dots_param| is the one-based index of the "..." parameter.
// For v*printf functions (which take a va_list), pass 0 for dots_param.
// (This is undocumented but matches what the system C headers do.)
#if defined(COMPILER_GCC)
#define PRINTF_FORMAT(format_param, dots_param) \
    __attribute__((format(printf, format_param, dots_param)))
#else
#define PRINTF_FORMAT(format_param, dots_param)
#endif

// WPRINTF_FORMAT is the same, but for wide format strings.
// This doesn't appear to yet be implemented in any compiler.
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
#define WPRINTF_FORMAT(format_param, dots_param)
// If available, it would look like:
//   __attribute__((format(wprintf, format_param, dots_param)))

// MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
#include <sanitizer/msan_interface.h>

// Mark a memory region fully initialized.
// Use this to annotate code that deliberately reads uninitialized data, for
// example a GC scavenging root set pointers from the stack.
#define MSAN_UNPOISON(p, s)  __msan_unpoison(p, s)
#else  // MEMORY_SANITIZER
#define MSAN_UNPOISON(p, s)
#endif  // MEMORY_SANITIZER

// Macro useful for writing cross-platform function pointers.
#if !defined(CDECL)
#if defined(OS_WIN)
#define CDECL __cdecl
#else  // defined(OS_WIN)
#define CDECL
#endif  // defined(OS_WIN)
#endif  // !defined(CDECL)

// Mark a branch likely or unlikely to be true.
// We can't remove the BAIDU_ prefix because the name is likely to conflict,
// namely kylin already has the macro.
#if defined(COMPILER_GCC)
#  if defined(__cplusplus)
#    define BAIDU_LIKELY(expr) (__builtin_expect((bool)(expr), true))
#    define BAIDU_UNLIKELY(expr) (__builtin_expect((bool)(expr), false))
#  else
#    define BAIDU_LIKELY(expr) (__builtin_expect(!!(expr), 1))
#    define BAIDU_UNLIKELY(expr) (__builtin_expect(!!(expr), 0))
#  endif
#else
#  define BAIDU_LIKELY(expr) (expr)
#  define BAIDU_UNLIKELY(expr) (expr)
#endif

// Mark a function as deprecated; calling it emits a compiler warning.
// BAIDU_DEPRECATED void dont_call_me_anymore(int arg);
// ...
// warning: 'void dont_call_me_anymore(int)' is deprecated
#if defined(COMPILER_GCC)
# define BAIDU_DEPRECATED __attribute__((deprecated))
#elif defined(COMPILER_MSVC)
# define BAIDU_DEPRECATED __declspec(deprecated)
#else
# define BAIDU_DEPRECATED
#endif

// Mark function as weak. This is GCC only feature.
#if defined(COMPILER_GCC)
# define BAIDU_WEAK __attribute__((weak))
#else
# define BAIDU_WEAK
#endif

// Cacheline related --------------------------------------
// NOTE(review): hard-coded 64 bytes, typical for x86 — confirm for other
// targets; this is not queried from the hardware.
#define BAIDU_CACHELINE_SIZE 64

#ifdef _MSC_VER
# define BAIDU_CACHELINE_ALIGNMENT __declspec(align(BAIDU_CACHELINE_SIZE))
#endif /* _MSC_VER */

#ifdef __GNUC__
# define BAIDU_CACHELINE_ALIGNMENT __attribute__((aligned(BAIDU_CACHELINE_SIZE)))
#endif /* __GNUC__ */

// Fallback: no known alignment syntax, expand to nothing.
#ifndef BAIDU_CACHELINE_ALIGNMENT
# define BAIDU_CACHELINE_ALIGNMENT /*BAIDU_CACHELINE_ALIGNMENT*/
#endif

// Expands to `noexcept` only when C++11 is enabled, nothing otherwise.
#ifndef BAIDU_NOEXCEPT
# if defined(BASE_CXX11_ENABLED)
#  define BAIDU_NOEXCEPT noexcept
# else
#  define BAIDU_NOEXCEPT
# endif
#endif

#endif  // BASE_COMPILER_SPECIFIC_H_

+ 267 - 0
base/containers/bounded_queue.h

@@ -0,0 +1,267 @@
+// Copyright (c) 2012 Baidu.com, Inc. All Rights Reserved
+//
+// A thread-unsafe bounded queue(ring buffer). It can push/pop from both
+// sides and is more handy than thread-safe queues in single thread. Use
+// boost::lockfree::spsc_queue or boost::lockfree::queue in multi-threaded
+// scenarios.
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Sat Aug 18 12:42:16 CST 2012
+
+#ifndef BASE_BOUNDED_QUEUE_H
+#define BASE_BOUNDED_QUEUE_H
+
+#include "base/macros.h"
+#include "base/logging.h"
+
+namespace base {
+
+// NOTE: This version requires storage in ctor rather than allocating it, 
+// which is different from DP version.
+// Example:
+//   char storage[64];
+//   base::BoundedQueue<int> q(storage, sizeof(storage), base::NOT_OWN_STORAGE);
+//   q.push(1);
+//   q.push(2);
+//   ...
+
+enum StorageOwnership { OWNS_STORAGE, NOT_OWN_STORAGE };
+
+template <typename T>
+class BoundedQueue {
+public:
+    // You have to pass the memory for storing items at creation.
+    // The queue contains at most size / sizeof(T) items.
+    BoundedQueue(void* spaces, size_t size, StorageOwnership ownership)
+        : _count(0)
+        , _cap(size / sizeof(T))
+        , _start(0)
+        , _ownership(ownership)
+        , _items(spaces) {
+        DCHECK(_items);
+    };
+
+    BoundedQueue()
+        : _count(0)
+        , _cap(0)
+        , _start(0)
+        , _ownership(NOT_OWN_STORAGE)
+        , _items(NULL) {
+    };
+
+    ~BoundedQueue() {
+        clear();
+        if (_ownership == OWNS_STORAGE) {
+            free(_items);
+            _items = NULL;
+        }
+    }
+
+    // Push |item| into bottom side of this queue.
+    // Returns true on success, false if queue is full.
+    bool push(const T& item) {
+        if (_count < _cap) {
+            new ((T*)_items + _mod(_start + _count, _cap)) T(item);
+            ++_count;
+            return true;
+        }
+        return false;
+    }
+
+    // Push |item| into bottom side of this queue. If the queue is full,
+    // pop topmost item first.
+    void elim_push(const T& item) {
+        if (_count < _cap) {
+            new ((T*)_items + _mod(_start + _count, _cap)) T(item);
+            ++_count;
+        } else {
+            ((T*)_items)[_start] = item;
+            _start = _mod(_start + 1, _cap);
+        }
+    }
+    
+    // Push a default-constructed item into bottom side of this queue
+    // Returns address of the item inside this queue
+    T* push() {
+        if (_count < _cap) {
+            return new ((T*)_items + _mod(_start + _count++, _cap)) T();
+        }
+        return NULL;
+    }
+
+    // Push |item| into top side of this queue
+    // Returns true on success, false if queue is full.
+    bool push_top(const T& item) {
+        if (_count < _cap) {
+            _start = _start ? (_start - 1) : (_cap - 1);
+            ++_count;
+            new ((T*)_items + _start) T(item);
+            return true;
+        }
+        return false;
+    }    
+    
+    // Push a default-constructed item into top side of this queue
+    // Returns address of the item inside this queue
+    T* push_top() {
+        if (_count < _cap) {
+            _start = _start ? (_start - 1) : (_cap - 1);
+            ++_count;
+            return new ((T*)_items + _start) T();
+        }
+        return NULL;
+    }
+    
+    // Pop top-most item from this queue
+    // Returns true on success, false if queue is empty
+    bool pop() {
+        if (_count) {
+            --_count;
+            ((T*)_items + _start)->~T();
+            _start = _mod(_start + 1, _cap);
+            return true;
+        }
+        return false;
+    }
+
+    // Pop top-most item from this queue and copy into |item|.
+    // Returns true on success, false if queue is empty
+    bool pop(T* item) {
+        if (_count) {
+            --_count;
+            *item = ((T*)_items)[_start];
+            ((T*)_items)[_start].~T();
+            _start = _mod(_start + 1, _cap);
+            return true;
+        }
+        return false;
+    }
+
+    // Pop bottom-most item from this queue
+    // Returns true on success, false if queue is empty
+    bool pop_bottom() {
+        if (_count) {
+            --_count;
+            ((T*)_items + _start + _count)->~T();
+            return true;
+        }
+        return false;
+    }
+
+    // Pop bottom-most item from this queue and copy into |item|.
+    // Returns true on success, false if queue is empty
+    bool pop_bottom(T* item) {
+        if (_count) {
+            --_count;
+            *item = ((T*)_items)[_start + _count];
+            ((T*)_items)[_start + _count].~T();
+            return true;
+        }
+        return false;
+    }
+
+    // Pop all items
+    void clear() {
+        for (uint32_t i = 0; i < _count; ++i) {
+            ((T*)_items + _mod(_start + i, _cap))->~T();
+        }
+        _count = 0;
+        _start = 0;
+    }
+
+    // Get address of top-most item, NULL if queue is empty
+    T* top() { 
+        return _count ? ((T*)_items + _start) : NULL; 
+    }
+    const T* top() const { 
+        return _count ? ((const T*)_items + _start) : NULL; 
+    }
+
+    // Randomly access item from top side.
+    // top(0) == top(), top(size()-1) == bottom()
+    // Returns NULL if |index| is out of range.
+    T* top(size_t index) {
+        if (index < _count) {
+            return (T*)_items + _mod(_start + index, _cap);
+        }
+        return NULL;   // including _count == 0
+    }
+    const T* top(size_t index) const {
+        if (index < _count) {
+            return (const T*)_items + _mod(_start + index, _cap);
+        }
+        return NULL;   // including _count == 0
+    }
+
+    // Get address of bottom-most item, NULL if queue is empty
+    T* bottom() { 
+        return _count ? ((T*)_items + _mod(_start + _count - 1, _cap)) : NULL; 
+    }
+    const T* bottom() const {
+        return _count ? ((const T*)_items + _mod(_start + _count - 1, _cap)) : NULL; 
+    }
+    
+    // Randomly access item from bottom side.
+    // bottom(0) == bottom(), bottom(size()-1) == top()
+    // Returns NULL if |index| is out of range.
+    T* bottom(size_t index) {
+        if (index < _count) {
+            return (T*)_items + _mod(_start + _count - index - 1, _cap);
+        }
+        return NULL;  // including _count == 0
+    }
+    const T* bottom(size_t index) const {
+        if (index < _count) {
+            return (const T*)_items + _mod(_start + _count - index - 1, _cap);
+        }
+        return NULL;  // including _count == 0
+    }
+
+    bool empty() const { return !_count; }
+    bool full() const { return _cap == _count; }
+
+    // Number of items
+    size_t size() const { return _count; }
+
+    // Maximum number of items that can be in this queue
+    size_t capacity() const { return _cap; }
+
+    // Maximum value of capacity()
+    size_t max_capacity() const { return (1UL << (sizeof(_cap) * 8)) - 1; }
+
+    // True if the queue was constructed successfully.
+    bool initialized() const { return _items != NULL; }
+
+    // Swap internal fields with another queue.
+    void swap(BoundedQueue& rhs) {
+        std::swap(_count, rhs._count);
+        std::swap(_cap, rhs._cap);
+        std::swap(_start, rhs._start);
+        std::swap(_ownership, rhs._ownership);
+        std::swap(_items, rhs._items);
+    }
+
+private:
+    // Since the space is possibly not owned, we disable copying.
+    DISALLOW_COPY_AND_ASSIGN(BoundedQueue);
+    
+    // This is faster than % in this queue because most |off| are smaller
+    // than |cap|. This is probably not true in other place, be careful
+    // before you use this trick.
+    static uint32_t _mod(uint32_t off, uint32_t cap) {
+        while (off >= cap) {
+            off -= cap;
+        }
+        return off;
+    }
+    
+    uint32_t _count;
+    uint32_t _cap;
+    uint32_t _start;
+    StorageOwnership _ownership;
+    void* _items;
+};
+
+}  // namespace base
+
+#endif  // BASE_BOUNDED_QUEUE_H

+ 40 - 0
base/containers/case_ignored_flat_map.cpp

@@ -0,0 +1,40 @@
// Baidu RPC - A framework to host and access services throughout Baidu.
// Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved

// Author: The baidu-rpc authors (pbrpc@baidu.com)
// Date: Sun Dec  4 14:57:27 CST 2016

namespace base {

// Lookup table backing ascii_tolower() (declared in
// case_ignored_flat_map.h). Entries cover every value of a possibly-signed
// char, i.e. -128..127 in order; each entry is the identity of its index
// except 'A'..'Z' (65..90), which map to 'a'..'z'.
static const char g_tolower_map_base[] = {
    -128, -127, -126, -125, -124, -123, -122, -121, -120,
    -119, -118, -117, -116, -115, -114, -113, -112, -111, -110,
    -109, -108, -107, -106, -105, -104, -103, -102, -101, -100,
    -99, -98, -97, -96, -95, -94, -93, -92, -91, -90,
    -89, -88, -87, -86, -85, -84, -83, -82, -81, -80,
    -79, -78, -77, -76, -75, -74, -73, -72, -71, -70,
    -69, -68, -67, -66, -65, -64, -63, -62, -61, -60,
    -59, -58, -57, -56, -55, -54, -53, -52, -51, -50,
    -49, -48, -47, -46, -45, -44, -43, -42, -41, -40,
    -39, -38, -37, -36, -35, -34, -33, -32, -31, -30,
    -29, -28, -27, -26, -25, -24, -23, -22, -21, -20,
    -19, -18, -17, -16, -15, -14, -13, -12, -11, -10,
    -9, -8, -7, -6, -5, -4, -3, -2, -1,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
    10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
    20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
    30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
    40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
    50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
    60, 61, 62, 63, 64, 'a', 'b', 'c', 'd', 'e',
    'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
    'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y',
    'z', 91, 92, 93, 94, 95, 96, 97, 98, 99,
    100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
    110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
    120, 121, 122, 123, 124, 125, 126, 127
};

// The exported pointer is offset by 128 so callers can index it directly
// with a (possibly negative) signed char value. The explicit `extern` with
// an initializer gives this const pointer external linkage.
extern const char* const g_tolower_map = g_tolower_map_base + 128;

} // namespace base

+ 59 - 0
base/containers/case_ignored_flat_map.h

@@ -0,0 +1,59 @@
+// Baidu RPC - A framework to host and access services throughout Baidu.
+// Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved
+
+// Author: The baidu-rpc authors (pbrpc@baidu.com)
+// Date: Sun Dec  4 14:57:27 CST 2016
+
+#ifndef BASE_CASE_IGNORED_FLAT_MAP_H
+#define BASE_CASE_IGNORED_FLAT_MAP_H
+
+#include "base/containers/flat_map.h"
+
+namespace base {
+
+// NOTE: Using ascii_tolower instead of ::tolower shortens 150ns in
+// FlatMapTest.perf_small_string_map (with -O2 added, -O0 by default)
+inline char ascii_tolower(char c) {
+    extern const char* const g_tolower_map;
+    return g_tolower_map[(int)c];
+}
+
+struct CaseIgnoredHasher {
+    size_t operator()(const std::string& s) const {
+        std::size_t result = 0;                                               
+        for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) {
+            result = result * 101 + ascii_tolower(*i);
+        }
+        return result;
+    }
+    size_t operator()(const char* s) const {
+        std::size_t result = 0;                                               
+        for (; *s; ++s) {
+            result = result * 101 + ascii_tolower(*s);
+        }
+        return result;
+    }
+};
+
+struct CaseIgnoredEqual {
+    // NOTE: No overload for base::StringPiece. It needs strncasecmp
+    // which is much slower than strcasecmp in micro-benchmarking. As a
+    // result, methods in HttpHeader does not accept StringPiece as well.
+    bool operator()(const std::string& s1, const std::string& s2) const {
+        return s1.size() == s2.size() &&
+            strcasecmp(s1.c_str(), s2.c_str()) == 0;
+    }
+    bool operator()(const std::string& s1, const char* s2) const
+    { return strcasecmp(s1.c_str(), s2) == 0; }
+};
+
+template <typename T>
+class CaseIgnoredFlatMap : public base::FlatMap<
+    std::string, T, CaseIgnoredHasher, CaseIgnoredEqual> {};
+
+class CaseIgnoredFlatSet : public base::FlatMap<
+    std::string, CaseIgnoredHasher, CaseIgnoredEqual> {};
+
+} // namespace base
+
+#endif  // BASE_CASE_IGNORED_FLAT_MAP_H

+ 399 - 0
base/containers/doubly_buffered_data.h

@@ -0,0 +1,399 @@
+// Baidu RPC - A framework to host and access services throughout Baidu.
+// Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
+
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Mon Sep 22 22:23:13 CST 2014
+
+#ifndef BASE_DOUBLY_BUFFERED_DATA_H
+#define BASE_DOUBLY_BUFFERED_DATA_H
+
+#include <vector>                                       // std::vector
+#include <pthread.h>
+#include "base/scoped_lock.h"
+#include "base/thread_local.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/type_traits.h"
+#include "base/errno.h"
+
+namespace base {
+
+// This data structure makes Read() almost lock-free by making Modify()
+// *much* slower. It's very suitable for implementing LoadBalancers which
+// have a lot of concurrent read-only ops from many threads and occasional
+// modifications of data. As a side effect, this data structure can store
+// a thread-local data for user.
+//
+// Read(): begin with a thread-local mutex locked then read the foreground
+// instance which will not be changed before the mutex is unlocked. Since the
+// mutex is only locked by Modify() with an empty critical section, the
+// function is almost lock-free.
+//
+// Modify(): Modify background instance which is not used by any Read(), flip
+// foreground and background, lock thread-local mutexes one by one to make
+// sure all existing Read() finish and later Read() see new foreground,
+// then modify background(foreground before flip) again.
+
// Tag type used as the default TLS parameter of DoublyBufferedData,
// meaning "no user thread-local data".
class Void { };
+
// T: type of the doubly-buffered data. TLS: optional per-thread user
// data attached to each reader's wrapper, accessible through
// ScopedPtr::tls(); defaults to Void meaning "none".
template <typename T, typename TLS = Void>
class DoublyBufferedData {
    class Wrapper;
public:
    // RAII reader handle filled by Read(). While it lives, the calling
    // thread's wrapper mutex stays locked, so Modify() cannot complete
    // a flip and the pointed-to foreground instance stays unchanged.
    class ScopedPtr {
    friend class DoublyBufferedData;
    public:
        ScopedPtr() : _data(NULL), _w(NULL) {}
        ~ScopedPtr() {
            if (_w) {
                _w->EndRead();
            }
        }
        const T* get() const { return _data; }
        const T& operator*() const { return *_data; }
        const T* operator->() const { return _data; }
        // User's thread-local data (only meaningful when TLS != Void).
        TLS& tls() { return _w->user_tls(); }

    private:
        DISALLOW_COPY_AND_ASSIGN(ScopedPtr);
        const T* _data;
        Wrapper* _w;
    };

    DoublyBufferedData();
    ~DoublyBufferedData();

    // Put foreground instance into ptr. The instance will not be changed until
    // ptr is destructed.
    // This function is not blocked by Read() and Modify() in other threads.
    // Returns 0 on success, -1 otherwise.
    int Read(ScopedPtr* ptr);

    // Modify background and foreground instances. fn(T&, ...) will be called
    // twice. Modify() from different threads are exclusive from each other.
    // NOTE: Call same series of fn to different equivalent instances should
    // result in equivalent instances, otherwise foreground and background
    // instance will be inconsistent.
    template <typename Fn> size_t Modify(Fn& fn);
    template <typename Fn, typename Arg1> size_t Modify(Fn& fn, const Arg1&);
    template <typename Fn, typename Arg1, typename Arg2>
    size_t Modify(Fn& fn, const Arg1&, const Arg2&);

    // fn(T& background, const T& foreground, ...) will be called to background
    // and foreground instances respectively.
    template <typename Fn> size_t ModifyWithForeground(Fn& fn);
    template <typename Fn, typename Arg1>
    size_t ModifyWithForeground(Fn& fn, const Arg1&);
    template <typename Fn, typename Arg1, typename Arg2>
    size_t ModifyWithForeground(Fn& fn, const Arg1&, const Arg2&);

private:
    // Adapters that pass the foreground instance as fn's 2nd argument.
    // `_data[&bg == _data]' selects the *other* slot than bg: the index
    // expression evaluates to 1 iff bg is _data[0].
    template <typename Fn>
    struct WithFG0 {
        WithFG0(Fn& fn, T* data) : _fn(fn), _data(data) { }
        size_t operator()(T& bg) {
            return _fn(bg, (const T&)_data[&bg == _data]);
        }
    private:
        Fn& _fn;
        T* _data;
    };

    template <typename Fn, typename Arg1>
    struct WithFG1 {
        WithFG1(Fn& fn, T* data, const Arg1& arg1)
            : _fn(fn), _data(data), _arg1(arg1) {}
        size_t operator()(T& bg) {
            return _fn(bg, (const T&)_data[&bg == _data], _arg1);
        }
    private:
        Fn& _fn;
        T* _data;
        const Arg1& _arg1;
    };

    template <typename Fn, typename Arg1, typename Arg2>
    struct WithFG2 {
        WithFG2(Fn& fn, T* data, const Arg1& arg1, const Arg2& arg2)
            : _fn(fn), _data(data), _arg1(arg1), _arg2(arg2) {}
        size_t operator()(T& bg) {
            return _fn(bg, (const T&)_data[&bg == _data], _arg1, _arg2);
        }
    private:
        Fn& _fn;
        T* _data;
        const Arg1& _arg1;
        const Arg2& _arg2;
    };

    // Adapters binding extra arguments ahead of calling fn(T&, ...).
    template <typename Fn, typename Arg1>
    struct Closure1 {
        Closure1(Fn& fn, const Arg1& arg1) : _fn(fn), _arg1(arg1) {}
        size_t operator()(T& bg) { return _fn(bg, _arg1); }
    private:
        Fn& _fn;
        const Arg1& _arg1;
    };

    template <typename Fn, typename Arg1, typename Arg2>
    struct Closure2 {
        Closure2(Fn& fn, const Arg1& arg1, const Arg2& arg2)
            : _fn(fn), _arg1(arg1), _arg2(arg2) {}
        size_t operator()(T& bg) { return _fn(bg, _arg1, _arg2); }
    private:
        Fn& _fn;
        const Arg1& _arg1;
        const Arg2& _arg2;
    };

    // Pointer to the current foreground; "unsafe" because the caller
    // must hold its wrapper mutex (as Read() does) for the result to
    // remain the foreground.
    const T* UnsafeRead() const { return _data + _index; }
    Wrapper* AddWrapper();
    void RemoveWrapper(Wrapper*);

    // Foreground and background instances.
    T _data[2];

    // Index of foreground instance.
    short _index;

    // Key to access thread-local wrappers.
    bool _created_key;
    pthread_key_t _wrapper_key;

    // All thread-local instances.
    std::vector<Wrapper*> _wrappers;

    // Sequence access to _wrappers.
    pthread_mutex_t _wrappers_mutex;

    // Sequence modifications.
    pthread_mutex_t _modify_mutex;
};
+
// Sentinel for a not-yet-created pthread key (pthread_key_t is an
// unsigned integral type on supported platforms).
// NOTE(review): not referenced in this header — confirm usage elsewhere.
static const pthread_key_t INVALID_PTHREAD_KEY = (pthread_key_t)-1;
+
// Base of thread-local wrappers: carries the user's TLS object when a
// real TLS type is given...
template <typename T, typename TLS>
class DoublyBufferedDataWrapperBase {
public:
    TLS& user_tls() { return _user_tls; }
protected:
    TLS _user_tls;
};

// ...and is empty when TLS is Void (no per-thread user data, no
// storage overhead).
template <typename T>
class DoublyBufferedDataWrapperBase<T, Void> {
};
+
+
// Per-thread wrapper registered for each reading thread. Owns the
// thread's mutex that serializes its reads against Modify()'s buffer
// flip, plus optional user TLS via the base class.
template <typename T, typename TLS>
class DoublyBufferedData<T, TLS>::Wrapper
    : public DoublyBufferedDataWrapperBase<T, TLS> {
friend class DoublyBufferedData;
public:
    explicit Wrapper(DoublyBufferedData* c) : _control(c) {
        pthread_mutex_init(&_mutex, NULL);
    }

    // Unregisters self from _control unless the control already
    // detached this wrapper (see ~DoublyBufferedData which NULLs
    // _control before deleting).
    ~Wrapper() {
        if (_control != NULL) {
            _control->RemoveWrapper(this);
        }
        pthread_mutex_destroy(&_mutex);
    }

    // _mutex will be locked by the calling pthread and DoublyBufferedData.
    // Most of the time, no modifications are done, so the mutex is
    // uncontended and fast.
    inline void BeginRead() {
        pthread_mutex_lock(&_mutex);
    }

    inline void EndRead() {
        pthread_mutex_unlock(&_mutex);
    }

    // Lock and immediately unlock _mutex: returns only after any
    // in-flight Read() of the owning thread has ended.
    inline void WaitReadDone() {
        BAIDU_SCOPED_LOCK(_mutex);
    }

private:
    DoublyBufferedData* _control;
    pthread_mutex_t _mutex;
};
+
+// Called when thread initializes thread-local wrapper.
+template <typename T, typename TLS>
+typename DoublyBufferedData<T, TLS>::Wrapper*
+DoublyBufferedData<T, TLS>::AddWrapper() {
+    Wrapper* w = new (std::nothrow) Wrapper(this);
+    if (NULL == w) {
+        return NULL;
+    }
+    try {
+        BAIDU_SCOPED_LOCK(_wrappers_mutex);
+        _wrappers.push_back(w);
+    } catch (std::exception& e) {
+        return NULL;
+    }
+    return w;
+}
+
+// Called when thread quits.
+template <typename T, typename TLS>
+void DoublyBufferedData<T, TLS>::RemoveWrapper(
+    typename DoublyBufferedData<T, TLS>::Wrapper* w) {
+    if (NULL == w) {
+        return;
+    }
+    BAIDU_SCOPED_LOCK(_wrappers_mutex);
+    for (size_t i = 0; i < _wrappers.size(); ++i) {
+        if (_wrappers[i] == w) {
+            _wrappers[i] = _wrappers.back();
+            _wrappers.pop_back();
+            return;
+        }
+    }
+}
+
// Initializes mutexes and the pthread key whose destructor deletes a
// thread's Wrapper automatically when that thread exits.
template <typename T, typename TLS>
DoublyBufferedData<T, TLS>::DoublyBufferedData()
    : _index(0)
    , _created_key(false)
    , _wrapper_key(0) {
    _wrappers.reserve(64);  // avoid early reallocations under the lock
    pthread_mutex_init(&_modify_mutex, NULL);
    pthread_mutex_init(&_wrappers_mutex, NULL);
    const int rc = pthread_key_create(&_wrapper_key,
                                      base::delete_object<Wrapper>);
    if (rc != 0) {
        LOG(FATAL) << "Fail to pthread_key_create: " << berror(rc);
    } else {
        // Read() fails fast (-1) when this remains false.
        _created_key = true;
    }
    // Initialize _data for some POD types. This is essential for pointer
    // types because they should be Read() as NULL before any Modify().
    if (is_integral<T>::value || is_floating_point<T>::value ||
        is_pointer<T>::value || is_member_function_pointer<T>::value) {
        _data[0] = T();
        _data[1] = T();
    }
}
+
// Deletes all registered thread-local wrappers and the synchronization
// primitives.
template <typename T, typename TLS>
DoublyBufferedData<T, TLS>::~DoublyBufferedData() {
    // User is responsible for synchronizations between Read()/Modify() and
    // this function.
    if (_created_key) {
        pthread_key_delete(_wrapper_key);
    }

    {
        BAIDU_SCOPED_LOCK(_wrappers_mutex);
        for (size_t i = 0; i < _wrappers.size(); ++i) {
            // hack: disable removal so ~Wrapper does not call back into
            // RemoveWrapper while we hold _wrappers_mutex.
            _wrappers[i]->_control = NULL;
            delete _wrappers[i];
        }
        _wrappers.clear();
    }
    pthread_mutex_destroy(&_modify_mutex);
    pthread_mutex_destroy(&_wrappers_mutex);
}
+
+template <typename T, typename TLS>
+int DoublyBufferedData<T, TLS>::Read(
+    typename DoublyBufferedData<T, TLS>::ScopedPtr* ptr) {
+    if (BAIDU_UNLIKELY(!_created_key)) {
+        return -1;
+    }
+    Wrapper* w = static_cast<Wrapper*>(pthread_getspecific(_wrapper_key));
+    if (BAIDU_LIKELY(w != NULL)) {
+        w->BeginRead();
+        ptr->_data = UnsafeRead();
+        ptr->_w = w;
+        return 0;
+    }
+    w = AddWrapper();
+    if (BAIDU_LIKELY(w != NULL)) {
+        const int rc = pthread_setspecific(_wrapper_key, w);
+        if (rc == 0) {
+            w->BeginRead();
+            ptr->_data = UnsafeRead();
+            ptr->_w = w;
+            return 0;
+        }
+    }
+    return -1;
+}
+
// Runs `fn' on the background instance, flips foreground/background,
// waits for every reader to leave the old foreground, then runs `fn'
// on it too so both copies converge. Returns fn's result; 0 means "no
// change" and nothing is flipped.
template <typename T, typename TLS>
template <typename Fn>
size_t DoublyBufferedData<T, TLS>::Modify(Fn& fn) {
    // _modify_mutex sequences modifications. Using a separate mutex rather
    // than _wrappers_mutex is to avoid blocking threads calling
    // AddWrapper() or RemoveWrapper() too long. Most of the time, modifications
    // are done by one thread, contention should be negligible.
    BAIDU_SCOPED_LOCK(_modify_mutex);
    // background instance is not accessed by other threads, being safe to
    // modify.
    const size_t ret = fn(_data[!_index]);
    if (!ret) {
        return 0;
    }

    // Publish, flip background and foreground.
    _index = !_index;

    // Wait until all threads finishes current reading. When they begin next
    // read, they should see updated _index.
    {
        BAIDU_SCOPED_LOCK(_wrappers_mutex);
        for (size_t i = 0; i < _wrappers.size(); ++i) {
            _wrappers[i]->WaitReadDone();
        }
    }

    // Apply the same modification to the old foreground. The CHECK
    // enforces the determinism required by the NOTE on Modify()'s
    // declaration.
    const size_t ret2 = fn(_data[!_index]);
    CHECK_EQ(ret2, ret) << "index=" << _index;
    return ret2;
}
+
+template <typename T, typename TLS>
+template <typename Fn, typename Arg1>
+size_t DoublyBufferedData<T, TLS>::Modify(Fn& fn, const Arg1& arg1) {
+    Closure1<Fn, Arg1> c(fn, arg1);
+    return Modify(c);
+}
+
+template <typename T, typename TLS>
+template <typename Fn, typename Arg1, typename Arg2>
+size_t DoublyBufferedData<T, TLS>::Modify(
+    Fn& fn, const Arg1& arg1, const Arg2& arg2) {
+    Closure2<Fn, Arg1, Arg2> c(fn, arg1, arg2);
+    return Modify(c);
+}
+
+template <typename T, typename TLS>
+template <typename Fn>
+size_t DoublyBufferedData<T, TLS>::ModifyWithForeground(Fn& fn) {
+    WithFG0<Fn> c(fn, _data);
+    return Modify(c);
+}
+
+template <typename T, typename TLS>
+template <typename Fn, typename Arg1>
+size_t DoublyBufferedData<T, TLS>::ModifyWithForeground(Fn& fn, const Arg1& arg1) {
+    WithFG1<Fn, Arg1> c(fn, _data, arg1);
+    return Modify(c);
+}
+
+template <typename T, typename TLS>
+template <typename Fn, typename Arg1, typename Arg2>
+size_t DoublyBufferedData<T, TLS>::ModifyWithForeground(
+    Fn& fn, const Arg1& arg1, const Arg2& arg2) {
+    WithFG2<Fn, Arg1, Arg2> c(fn, _data, arg1, arg2);
+    return Modify(c);
+}
+
+}  // namespace base
+
+#endif  // BASE_DOUBLY_BUFFERED_DATA_H

+ 471 - 0
base/containers/flat_map.h

@@ -0,0 +1,471 @@
+// Copyright (c) 2013 Baidu.com, Inc. All Rights Reserved
+//
+// This closed addressing hash-map puts first linked node in bucket array
+// directly to save an extra memory indirection. As a result, this map yields
+// close performance to raw array on nearly all operations, probably being the
+// fastest hashmap for small-sized key/value ever.
+//
+// Performance comparisons between several maps:
+//  [ value = 8 bytes ]
+//  Sequentially inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 11/108/55/58
+//  Sequentially erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 7/123/55/37
+//  Sequentially inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 10/92/55/54
+//  Sequentially erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/67/51/35
+//  Sequentially inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 10/100/66/54
+//  Sequentially erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/72/55/35
+//  [ value = 32 bytes ]
+//  Sequentially inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 14/108/56/56
+//  Sequentially erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/77/53/38
+//  Sequentially inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 14/94/54/53
+//  Sequentially erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/66/49/36
+//  Sequentially inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 13/106/62/54
+//  Sequentially erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/69/53/36
+//  [ value = 128 bytes ]
+//  Sequentially inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 31/182/96/96
+//  Sequentially erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 8/117/51/44
+//  Sequentially inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 29/191/100/97
+//  Sequentially erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/100/49/44
+//  Sequentially inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 30/184/113/114
+//  Sequentially erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/99/52/43
+//  [ value = 8 bytes ]
+//  Randomly inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 11/171/108/60
+//  Randomly erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 8/158/126/37
+//  Randomly inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 10/159/117/54
+//  Randomly erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/153/135/36
+//  Randomly inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 12/223/180/55
+//  Randomly erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 7/237/210/48
+//  [ value = 32 bytes ]
+//  Randomly inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 16/179/108/57
+//  Randomly erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/157/120/38
+//  Randomly inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 15/168/127/54
+//  Randomly erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/164/135/39
+//  Randomly inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 19/241/201/56
+//  Randomly erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/235/218/54
+//  [ value = 128 bytes ]
+//  Randomly inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 35/242/154/97
+//  Randomly erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 7/185/119/56
+//  Randomly inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 35/262/182/99
+//  Randomly erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/215/157/66
+//  Randomly inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 44/330/278/114
+//  Randomly erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/307/242/90
+//  [ value = 8 bytes ]
+//  Seeking 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/51/52/13
+//  Seeking 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/98/82/14
+//  Seeking 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/175/170/14
+//  [ value = 32 bytes ]
+//  Seeking 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/52/52/14
+//  Seeking 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/84/82/13
+//  Seeking 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/164/156/14
+//  [ value = 128 bytes ]
+//  Seeking 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/54/53/14
+//  Seeking 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/88/90/13
+//  Seeking 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/178/185/14
+//  [ value = 8 bytes ]
+//  Seeking 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/51/49/14
+//  Seeking 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/86/94/14
+//  Seeking 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/177/171/14
+//  [ value = 32 bytes ]
+//  Seeking 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/51/53/14
+//  Seeking 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/98/82/13
+//  Seeking 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/163/156/14
+//  [ value = 128 bytes ]
+//  Seeking 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/55/53/14
+//  Seeking 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/88/89/13
+//  Seeking 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/177/185/14
+//
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Wed Nov 27 12:59:20 CST 2013
+
+#ifndef BASE_FLAT_MAP_H
+#define BASE_FLAT_MAP_H
+
+#include <stdint.h>
+#include <functional>
+#include <iostream>                               // std::ostream
+#include "base/type_traits.h"
+#include "base/logging.h"
+#include "base/find_cstr.h"
+#include "base/single_threaded_pool.h"            // SingleThreadedPool
+#include "base/containers/hash_tables.h"          // hash<>
+#include "base/bit_array.h"                       // bit_array_*
+#include "base/strings/string_piece.h"            // StringPiece
+
+namespace base {
+
template <typename _Map, typename _Element> class FlatMapIterator;
template <typename _Map, typename _Element> class SparseFlatMapIterator;
template <typename K, typename T> struct FlatMapElement;
struct FlatMapVoid {}; // Replace void which is not constructible.
template <typename K> struct DefaultHasher;
template <typename K> struct DefaultEqualTo;

// Statistics of bucket-chain lengths, returned by FlatMap::bucket_info().
struct BucketInfo {
    size_t longest_length;   // #nodes of the longest chain
    double average_length;   // average chain length — see bucket_info()
                             // in flat_map_inl.h for the exact definition
};
+
+// NOTE: Objects stored in FlatMap MUST be copyable.
+template <typename _K, typename _T,
+          // Compute hash code from key.
+          // Use public/murmurhash3 to make better distributions.
+          typename _Hash = DefaultHasher<_K>,
+          // Test equivalence between stored-key and passed-key.
+          // stored-key is always on LHS, passed-key is always on RHS.
+          typename _Equal = DefaultEqualTo<_K>,
+          bool _Sparse = false>
+class FlatMap {
+public:
+    typedef _K key_type;
+    typedef _T mapped_type;
+    typedef FlatMapElement<_K, _T> Element;
+    typedef typename Element::value_type value_type;
+    typedef typename conditional<
+        _Sparse, SparseFlatMapIterator<FlatMap, value_type>,
+        FlatMapIterator<FlatMap, value_type> >::type iterator;
+    typedef typename conditional<
+        _Sparse, SparseFlatMapIterator<FlatMap, const value_type>, 
+        FlatMapIterator<FlatMap, const value_type> >::type const_iterator;
+    typedef _Hash hasher;
+    typedef _Equal key_equal;
+    struct PositionHint {
+        size_t nbucket;
+        size_t offset;
+        bool at_entry;
+        key_type key;
+    };
+    
+    FlatMap(const hasher& hashfn = hasher(), const key_equal& eql = key_equal());
+    ~FlatMap();
+    FlatMap(const FlatMap& rhs);    
+    void operator=(const FlatMap& rhs);
+    void swap(FlatMap & rhs);
+
+    // Must be called to initialize this map, otherwise insert/operator[]
+    // crashes, and seek/erase fails.
+    // `nbucket' is the initial number of buckets. `load_factor' is the 
+    // maximum value of size()*100/nbucket, if the value is reached, nbucket
+    // will be doubled and all items stored will be rehashed which is costly.
+    // Choosing proper values for these 2 parameters reduces costs.
+    int init(size_t nbucket, u_int load_factor = 80);
+    
+    // Insert a pair of |key| and |value|. If size()*100/bucket_count() is 
+    // more than load_factor(), a resize() will be done.
+    // Returns address of the inserted value, NULL on error.
+    mapped_type* insert(const key_type& key, const mapped_type& value);
+
+    // Remove |key| and the associated value
+    // Returns: 1 on erased, 0 otherwise.
+    template <typename K2> size_t erase(const K2& key);
+    
+    // Remove all items. Allocated spaces are NOT returned by system.
+    void clear();
+
+    // Remove all items and return all allocated spaces to system.
+    void clear_and_reset_pool();
+        
+    // Search for the value associated with |key|
+    // Returns: address of the value
+    template <typename K2> mapped_type* seek(const K2& key) const;
+
+    // Get the value associated with |key|. If |key| does not exist,
+    // insert with a default-constructed value. If size()*100/bucket_count()
+    // is more than load_factor, a resize will be done.
+    // Returns reference of the value
+    mapped_type& operator[](const key_type& key);
+
+    // Resize this map. This is optional because resizing will be triggered by
+    // insert() or operator[] if there're too many items.
+    // Returns successful or not.
+    bool resize(size_t nbucket);
+    
+    // Iterators
+    iterator begin();
+    iterator end();
+    const_iterator begin() const;
+    const_iterator end() const;
+
+    // Iterate FlatMap inconsistently in more-than-one passes. This is used
+    // in multi-threaded environment to divide the critical sections of
+    // iterating big maps into smaller ones. "inconsistently" means that:
+    //  * elements added during iteration may be missed.
+    //  * some elements may be iterated more than once.
+    //  * iteration is restarted at beginning when the map is resized.
+    // Example: (copying all keys in multi-threaded environment)
+    //   LOCK;
+    //   size_t n = 0;
+    //   for (Map::const_iterator it = map.begin(); it != map.end(); ++it) {
+    //     if (++n >= 256/*max iterated one pass*/) {
+    //       Map::PositionHint hint;
+    //       map.save_iterator(it, &hint);
+    //       n = 0;
+    //       UNLOCK;
+    //       LOCK;
+    //       it = map.restore_iterator(hint);
+    //       if (it == map.begin()) { // resized
+    //         keys->clear();
+    //       }
+    //       if (it == map.end()) {
+    //         break;
+    //       }
+    //     }
+    //     keys->push_back(it->first);
+    //   }
+    //   UNLOCK;
+    void save_iterator(const const_iterator&, PositionHint*) const;
+    const_iterator restore_iterator(const PositionHint&) const;
+
+    // True if init() was successfully called.
+    bool initialized() const { return _buckets != NULL; }
+
+    bool empty() const { return _size == 0; }
+    size_t size() const { return _size; }
+    size_t bucket_count() const { return _nbucket; }
+    u_int load_factor () const { return _load_factor; }
+
+    // Returns #nodes of longest bucket in this map. This scans all buckets.
+    BucketInfo bucket_info() const;
+
+    struct Bucket {
+        explicit Bucket(const _K& k) : next(NULL)
+        { new (element_spaces) Element(k); }
+        Bucket(const Bucket& other) : next(NULL)
+        { new (element_spaces) Element(other.element()); }
+        bool is_valid() const { return next != (const Bucket*)-1UL; }
+        void set_invalid() { next = (Bucket*)-1UL; }
+        // NOTE: Only be called when in_valid() is true.
+        Element& element() {
+            void* spaces = element_spaces; // Suppress strict-aliasing
+            return *reinterpret_cast<Element*>(spaces);
+        }
+        const Element& element() const {
+            const void* spaces = element_spaces;
+            return *reinterpret_cast<const Element*>(spaces);
+        }
+        Bucket* next;
+        char element_spaces[sizeof(Element)];
+    };
+
+private:
+template <typename _Map, typename _Element> friend class FlatMapIterator;
+template <typename _Map, typename _Element> friend class FlatMapSparseIterator;
+    // True if buckets need to be resized before holding `size' elements.
+    inline bool is_too_crowded(size_t size) const
+    { return size * 100 >= _nbucket * _load_factor; }
+        
+    size_t _size;
+    size_t _nbucket;
+    Bucket* _buckets;
+    uint64_t* _thumbnail;
+    u_int _load_factor;
+    hasher _hashfn;
+    key_equal _eql;
+    SingleThreadedPool<sizeof(Bucket), 1024, 16> _pool;
+};
+
// Set adaptor over FlatMap: stores keys only, implemented by a FlatMap
// whose mapped type is the empty FlatMapVoid.
template <typename _K,
          typename _Hash = DefaultHasher<_K>,
          typename _Equal = DefaultEqualTo<_K>,
          bool _Sparse = false>
class FlatSet {
public:
    typedef FlatMap<_K, FlatMapVoid, _Hash, _Equal, _Sparse> Map;
    typedef typename Map::key_type key_type;
    typedef typename Map::value_type value_type;
    typedef typename Map::Bucket Bucket;
    typedef typename Map::iterator iterator;
    typedef typename Map::const_iterator const_iterator;
    typedef typename Map::hasher hasher;
    typedef typename Map::key_equal key_equal;

    FlatSet(const hasher& hashfn = hasher(), const key_equal& eql = key_equal())
        : _map(hashfn, eql) {}
    void swap(FlatSet & rhs) { _map.swap(rhs._map); }

    // See FlatMap::init(); must be called before any other operation.
    int init(size_t nbucket, u_int load_factor = 80)
    { return _map.init(nbucket, load_factor); }

    // Non-NULL on success, NULL on error (see FlatMap::insert).
    const void* insert(const key_type& key)
    { return _map.insert(key, FlatMapVoid()); }

    // Returns: 1 on erased, 0 otherwise.
    template <typename K2>
    size_t erase(const K2& key) { return _map.erase(key); }

    void clear() { return _map.clear(); }
    void clear_and_reset_pool() { return _map.clear_and_reset_pool(); }

    // Non-NULL iff |key| is present.
    template <typename K2>
    const void* seek(const K2& key) const { return _map.seek(key); }

    bool resize(size_t nbucket) { return _map.resize(nbucket); }

    iterator begin() { return _map.begin(); }
    iterator end() { return _map.end(); }
    const_iterator begin() const { return _map.begin(); }
    const_iterator end() const { return _map.end(); }

    bool initialized() const { return _map.initialized(); }
    bool empty() const { return _map.empty(); }
    size_t size() const { return _map.size(); }
    size_t bucket_count() const { return _map.bucket_count(); }
    u_int load_factor () const { return _map.load_factor(); }
    BucketInfo bucket_info() const { return _map.bucket_info(); }

private:
    Map _map;
};
+
// Convenience aliases selecting _Sparse = true, which switches the
// iterator typedefs to SparseFlatMapIterator (presumably skipping empty
// buckets via the _thumbnail bit array — confirm in flat_map_inl.h).
template <typename _K, typename _T,
          typename _Hash = DefaultHasher<_K>,
          typename _Equal = DefaultEqualTo<_K> >
class SparseFlatMap : public FlatMap<_K, _T, _Hash, _Equal, true> {
};

template <typename _K,
          typename _Hash = DefaultHasher<_K>,
          typename _Equal = DefaultEqualTo<_K> >
class SparseFlatSet : public FlatSet<_K, _Hash, _Equal, true> {
};
+
// Implement FlatMapElement
// Internal storage of one (key, value) item.
template <typename K, typename T>
class FlatMapElement {
public:
    typedef std::pair<const K, T> value_type;
    // NOTE: Have to initialize _value in this way which is treated by GCC
    // specially that _value is zeroized(POD) or constructed(non-POD). Other
    // methods do not work. For example, if we put _value into the std::pair
    // and do initialization by calling _pair(k, T()), _value will be copy
    // constructed from the defaultly constructed instance(not zeroized for
    // POD) which is wrong generally.
    explicit FlatMapElement(const K& k) : _key(k), _value(T()) {}
    //                                             ^^^^^^^^^^^
    const K& first_ref() const { return _key; }
    T& second_ref() { return _value; }
    // NOTE(review): assumes the (const K, T) members are laid out like
    // std::pair<const K, T> — not guaranteed by the standard; verify on
    // new compilers.
    value_type& value_ref() { return *reinterpret_cast<value_type*>(this); }
    inline static const K& first_ref_from_value(const value_type& v)
    { return v.first; }
    inline static const T& second_ref_from_value(const value_type& v)
    { return v.second; }
private:
    const K _key;
    T _value;
};
+
// Key-only element used by FlatSet (mapped type is FlatMapVoid); the
// value_type is simply the const key.
template <typename K>
class FlatMapElement<K, FlatMapVoid> {
public:
    typedef const K value_type;
    explicit FlatMapElement(const K& k) : _key(k) {}
    const K& first_ref() const { return _key; }
    FlatMapVoid& second_ref() { return second_ref_from_value(_key); }
    value_type& value_ref() { return _key; }
    inline static const K& first_ref_from_value(value_type& v) { return v; }
    // All key-only elements share one static dummy "value".
    inline static FlatMapVoid& second_ref_from_value(value_type&) {
        static FlatMapVoid dummy;
        return dummy;
    }
private:
    K _key;
};
+
+// Implement DefaultHasher and DefaultEqualTo
+template <typename K>
+struct DefaultHasher : public BASE_HASH_NAMESPACE::hash<K> {
+};
+
+template <>
+struct DefaultHasher<std::string> {
+    std::size_t operator()(const base::StringPiece& s) const {
+        std::size_t result = 0;
+        for (base::StringPiece::const_iterator
+                 i = s.begin(); i != s.end(); ++i) {
+            result = result * 101 + *i;
+        }
+        return result;
+    }
+    std::size_t operator()(const char* s) const {
+        std::size_t result = 0;
+        for (; *s; ++s) {
+            result = result * 101 + *s;
+        }
+        return result;
+    }
+    std::size_t operator()(const std::string& s) const {
+        std::size_t result = 0;
+        for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) {
+            result = result * 101 + *i;
+        }
+        return result;        
+    }
+};
+
// The generic comparator forwards to std::equal_to.
template <typename K>
struct DefaultEqualTo : public std::equal_to<K> {
};

// Specialization enabling heterogeneous comparison of std::string keys
// against StringPiece and const char* without building temporaries.
template <>
struct DefaultEqualTo<std::string> {
    bool operator()(const std::string& s1, const std::string& s2) const
    { return s1 == s2; }
    bool operator()(const std::string& s1, const base::StringPiece& s2) const
    { return s1 == s2; }
    bool operator()(const std::string& s1, const char* s2) const
    { return s1 == s2; }
};
+
// find_cstr and find_lowered_cstr
// Seek string-keyed FlatMaps directly with C strings (optionally with
// an explicit length), avoiding construction of a temporary
// std::string at call sites.
template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
const _T* find_cstr(const FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
                    const char* key) {
    return m.seek(key);
}

template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
_T* find_cstr(FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
              const char* key) {
    return m.seek(key);
}

template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
const _T* find_cstr(const FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
                    const char* key, size_t length) {
    return m.seek(base::StringPiece(key, length));
}

template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
_T* find_cstr(FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
              const char* key, size_t length) {
    return m.seek(base::StringPiece(key, length));
}
+
// Same as find_cstr but lowers `key' first through a thread-local
// scratch string (tls_stringmap_temp, from base/find_cstr.h included
// above), so maps keyed by already-lowered strings can be sought with
// mixed-case input.
// NOTE(review): assumes get_lowered_string() never returns NULL —
// confirm in base/find_cstr.h.
template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
const _T* find_lowered_cstr(
    const FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
    const char* key) {
    return m.seek(*tls_stringmap_temp.get_lowered_string(key));
}

template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
_T* find_lowered_cstr(FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
                      const char* key) {
    return m.seek(*tls_stringmap_temp.get_lowered_string(key));
}

template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
const _T* find_lowered_cstr(
    const FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
    const char* key, size_t length) {
    return m.seek(*tls_stringmap_temp.get_lowered_string(key, length));
}

template <typename _T, typename _Hash, typename _Equal, bool _Sparse>
_T* find_lowered_cstr(FlatMap<std::string, _T, _Hash, _Equal, _Sparse>& m,
                      const char* key, size_t length) {
    return m.seek(*tls_stringmap_temp.get_lowered_string(key, length));
}
+
}  // namespace base
+
+#include "base/containers/flat_map_inl.h"
+
+#endif  //BASE_FLAT_MAP_H

+ 639 - 0
base/containers/flat_map_inl.h

@@ -0,0 +1,639 @@
+// Copyright (c) 2013 Baidu.com, Inc. All Rights Reserved
+//
+// Implement flat_map.h
+// 
+// Author: Ge,Jun (gejun@baidu.com)
+// Date: Wed Nov 27 12:59:20 CST 2013
+//
+#ifndef BASE_FLAT_MAP_INL_H
+#define BASE_FLAT_MAP_INL_H
+
+namespace base {
+
// Return the smallest tabulated prime >= |nbucket|; when |nbucket| exceeds
// the largest entry (4294967291, the biggest prime fitting in uint32_t),
// |nbucket| itself is returned and primeness is given up.
inline uint32_t find_next_prime(uint32_t nbucket) {
    // Primes roughly doubling at each step, starting from 29.
    static const unsigned long prime_list[] = {
        29ul, 
        53ul,         97ul,         193ul,       389ul,       769ul,
        1543ul,       3079ul,       6151ul,      12289ul,     24593ul,
        49157ul,      98317ul,      196613ul,    393241ul,    786433ul,
        1572869ul,    3145739ul,    6291469ul,   12582917ul,  25165843ul,
        50331653ul,   100663319ul,  201326611ul, 402653189ul, 805306457ul,
        1610612741ul, 3221225473ul, 4294967291ul
    };
    const unsigned long* const table_end =
        prime_list + sizeof(prime_list) / sizeof(prime_list[0]);
    for (const unsigned long* p = prime_list; p != table_end; ++p) {
        if (nbucket <= *p) {
            return *p;
        }
    }
    return nbucket;
}
+
// Round |b| up to the nearest power of 2 (b itself when already a power
// of 2) by smearing the highest set bit of b-1 into all lower positions
// and adding one. NOTE: values above 2^63 wrap around to 0.
inline uint64_t find_power2(uint64_t b) {
    --b;
    for (unsigned shift = 1; shift <= 32; shift <<= 1) {
        b |= (b >> shift);
    }
    return b + 1;
}
+
// Using next prime is slower for 10ns on average (due to %). If quality of
// the hash code is good enough, primeness of nbucket is not important. We
// choose to trust the hash code (or user should use a better hash algorithm
// when the collisions are significant) and still stick to round-to-power-2
// solution right now.
//
// Round a requested bucket count up to the count actually used by FlatMap.
// Must stay consistent with flatmap_mod() below: both switch on the same
// FLAT_MAP_ROUND_BUCKET_BY_USE_NEXT_PRIME macro.
inline size_t flatmap_round(size_t nbucket) {
#ifdef FLAT_MAP_ROUND_BUCKET_BY_USE_NEXT_PRIME    
    return find_next_prime(nbucket);
#else
    return find_power2(nbucket);
#endif
}
+
// Map |hash_code| into [0, nbucket). The bitmask form is only correct when
// nbucket is a power of 2, which flatmap_round() guarantees in the default
// (non-prime) build.
inline size_t flatmap_mod(size_t hash_code, size_t nbucket) {
#ifdef FLAT_MAP_ROUND_BUCKET_BY_USE_NEXT_PRIME
    return hash_code % nbucket;
#else
    return hash_code & (nbucket - 1);
#endif
}
+
// Iterate FlatMap
// Forward iterator over a dense FlatMap. _entry points at the current slot
// of the flat bucket array; _node points at the current node, which is
// either the slot itself or a chained overflow node.
template <typename Map, typename Value> class FlatMapIterator {
public:
    typedef Value value_type;
    typedef Value& reference;
    typedef Value* pointer;
    typedef typename add_const<Value>::type ConstValue;
    typedef ConstValue& const_reference;
    typedef ConstValue* const_pointer;
    typedef std::forward_iterator_tag iterator_category;
    typedef ptrdiff_t difference_type;
    typedef typename remove_const<Value>::type NonConstValue;
    
    FlatMapIterator() : _node(NULL), _entry(NULL) {}    
    // Position at the first valid slot at or after |pos|. An uninitialized
    // map yields a NULL iterator, equal to any other default/end iterator.
    FlatMapIterator(const Map* map, size_t pos) {
        if (map->initialized()) {
            _entry = map->_buckets + pos;
            find_and_set_valid_node();
        } else {
            _node = NULL;
            _entry = NULL;
        }
    }
    // Implicit conversion from the non-const iterator to the const one.
    FlatMapIterator(const FlatMapIterator<Map, NonConstValue>& rhs)
        : _node(rhs._node), _entry(rhs._entry) {}
    ~FlatMapIterator() {}  // required by style-checker
    
    // *this == rhs
    bool operator==(const FlatMapIterator& rhs) const
    { return _node == rhs._node; }

    // *this != rhs
    bool operator!=(const FlatMapIterator& rhs) const
    { return _node != rhs._node; }
        
    // ++ it
    // Exhausts the collision chain of the current slot first, then scans
    // forward for the next valid slot.
    FlatMapIterator& operator++() {
        if (NULL == _node->next) {
            ++_entry;
            find_and_set_valid_node();
        } else {
            _node = _node->next;
        }
        return *this;
    }

    // it ++
    FlatMapIterator operator++(int) {
        FlatMapIterator tmp = *this;
        this->operator++();
        return tmp;
    }

    reference operator*() { return _node->element().value_ref(); }
    pointer operator->() { return &_node->element().value_ref(); }
    const_reference operator*() const { return _node->element().value_ref(); }
    const_pointer operator->() const { return &_node->element().value_ref(); }

private:
friend class FlatMapIterator<Map, ConstValue>;
friend class FlatMap<typename Map::key_type, typename Map::mapped_type,
                     typename Map::hasher, typename Map::key_equal>;

    // Scan forward until a valid slot is found. NOTE(review): the loop has
    // no bound check; it relies on the extra tail bucket appended by
    // FlatMap::init() stopping the scan -- confirm against
    // Bucket::is_valid() declared in flat_map.h.
    void find_and_set_valid_node() {
        for (; !_entry->is_valid(); ++_entry);
        _node = _entry;
    }
  
    typename Map::Bucket* _node;
    typename Map::Bucket* _entry;
};
+
// Iterate SparseFlatMap
// Forward iterator over a sparse FlatMap. Unlike FlatMapIterator it keeps
// the owning map and a bucket index, and skips empty slots via the map's
// _thumbnail bitmap instead of scanning slot by slot.
template <typename Map, typename Value> class SparseFlatMapIterator {
public:
    typedef Value value_type;
    typedef Value& reference;
    typedef Value* pointer;
    typedef typename add_const<Value>::type ConstValue;
    typedef ConstValue& const_reference;
    typedef ConstValue* const_pointer;
    typedef std::forward_iterator_tag iterator_category;
    typedef ptrdiff_t difference_type;
    typedef typename remove_const<Value>::type NonConstValue;
    
    SparseFlatMapIterator() : _node(NULL), _pos(0), _map(NULL) {}
    // Position at the first valid slot at or after |pos|. An uninitialized
    // map yields a NULL iterator, equal to any other default/end iterator.
    SparseFlatMapIterator(const Map* map, size_t pos) {
        if (map->initialized()) {
            _map = map;
            _pos = pos;
            find_and_set_valid_node();
        } else {
            _node = NULL;
            _map = NULL;
            _pos = 0;
        }
    }
    // Implicit conversion from the non-const iterator to the const one.
    SparseFlatMapIterator(const SparseFlatMapIterator<Map, NonConstValue>& rhs)
        : _node(rhs._node), _pos(rhs._pos), _map(rhs._map)
    {}
    ~SparseFlatMapIterator() {}  // required by style-checker
    
    // *this == rhs
    bool operator==(const SparseFlatMapIterator& rhs) const
    { return _node == rhs._node; }

    // *this != rhs
    bool operator!=(const SparseFlatMapIterator& rhs) const
    { return _node != rhs._node; }
        
    // ++ it
    // Exhausts the collision chain of the current slot first, then jumps
    // to the next occupied slot via the thumbnail bitmap.
    SparseFlatMapIterator& operator++() {
        if (NULL == _node->next) {
            ++_pos;
            find_and_set_valid_node();
        } else {
            _node = _node->next;
        }
        return *this;
    }

    // it ++
    SparseFlatMapIterator operator++(int) {
        SparseFlatMapIterator tmp = *this;
        this->operator++();
        return tmp;
    }

    reference operator*() { return _node->element().value_ref(); }
    pointer operator->() { return &_node->element().value_ref(); }
    const_reference operator*() const { return _node->element().value_ref(); }
    const_pointer operator->() const { return &_node->element().value_ref(); }

private:
friend class SparseFlatMapIterator<Map, ConstValue>;
    
    // If the slot at _pos is empty, advance _pos to the next set bit of
    // the thumbnail (bit_array_first1 presumably returns _nbucket when no
    // further bit is set, landing on the tail bucket -- confirm against
    // bit_array.h).
    void find_and_set_valid_node() {
        if (!_map->_buckets[_pos].is_valid()) {
            _pos = bit_array_first1(_map->_thumbnail, _pos + 1, _map->_nbucket);
        }
        _node = _map->_buckets + _pos;
    }

    typename Map::Bucket* _node;
    size_t _pos;
    const Map* _map;
};
+ 
+
// Construct an empty, uninitialized map: no memory is allocated and all
// bookkeeping members are zeroed. init() must be called before use.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
FlatMap<_K, _T, _H, _E, _S>::FlatMap(const hasher& hashfn, const key_equal& eql)
    : _size(0)
    , _nbucket(0)
    , _buckets(NULL)
    , _thumbnail(NULL)
    , _load_factor(0)
    , _hashfn(hashfn)
    , _eql(eql)
{}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+FlatMap<_K, _T, _H, _E, _S>::~FlatMap() {
+    clear();
+    free(_buckets);
+    _buckets = NULL;
+    free(_thumbnail);
+    _thumbnail = NULL;
+    _nbucket = 0;
+    _load_factor = 0;
+}
+
// Copy-construct by zero-initializing the bookkeeping members (but taking
// rhs's load factor and functors) and delegating to operator=, which does
// the actual allocation and element copy.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
FlatMap<_K, _T, _H, _E, _S>::FlatMap(const FlatMap& rhs)
    : _size(0)
    , _nbucket(0)
    , _buckets(NULL)
    , _thumbnail(NULL)
    , _load_factor(rhs._load_factor)
    , _hashfn(rhs._hashfn)
    , _eql(rhs._eql) {
    operator=(rhs);
}
+
// Copy-assign from |rhs|. Existing elements are destroyed first; the
// bucket array is reused when it is big enough, otherwise reallocated to
// rhs's size. When bucket counts match, buckets are cloned slot by slot
// (faster than rehashing); otherwise every element is re-inserted.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
void
FlatMap<_K, _T, _H, _E, _S>::operator=(const FlatMap<_K, _T, _H, _E, _S>& rhs) {
    if (this == &rhs) {
        return;
    }
    // NOTE: assignment does not change _load_factor/_hashfn/_eql if |this| is
    // initialized
    clear();
    if (rhs.empty()) {
        return;
    }
    if (!initialized()) {
        _load_factor = rhs._load_factor;
    }
    if (_buckets == NULL || is_too_crowded(rhs._size)) {
        free(_buckets);
        // NOTE(review): _nbucket is updated before malloc; if malloc fails
        // below, _nbucket no longer matches the freed _buckets -- verify
        // callers never use the map after a failed assignment.
        _nbucket = rhs._nbucket;
        // note: need an extra bucket to let iterator know where buckets end
        _buckets = (Bucket*)malloc(sizeof(Bucket) * (_nbucket + 1/*note*/));
        if (NULL == _buckets) {
            LOG(ERROR) << "Fail to new _buckets";
            return;
        }
        if (_S) {
            free(_thumbnail);
            _thumbnail = bit_array_malloc(_nbucket);
            if (NULL == _thumbnail) {
                LOG(ERROR) << "Fail to new _thumbnail";
                return;
            }
            bit_array_clear(_thumbnail, _nbucket);
        }
    }
    if (_nbucket == rhs._nbucket) {
        // For equivalent _nbucket, walking through _buckets instead of using
        // iterators is more efficient.
        for (size_t i = 0; i < rhs._nbucket; ++i) {
            if (!rhs._buckets[i].is_valid()) {
                _buckets[i].set_invalid();
            } else {
                if (_S) {
                    bit_array_set(_thumbnail, i);
                }
                // Clone the in-place head node, then deep-copy the chain
                // with nodes from the local pool.
                new (&_buckets[i]) Bucket(rhs._buckets[i]);
                Bucket* p1 = &_buckets[i];
                Bucket* p2 = rhs._buckets[i].next;
                while (p2) {
                    p1->next = new (_pool.get()) Bucket(*p2);
                    p1 = p1->next;
                    p2 = p2->next;
                }
            }
        }
        // Terminate the extra tail bucket used by iterators.
        _buckets[rhs._nbucket].next = NULL;
        _size = rhs._size;
    } else {
        // Different bucket counts: rehash every element of rhs into *this.
        for (const_iterator it = rhs.begin(); it != rhs.end(); ++it) {
            operator[](Element::first_ref_from_value(*it)) = 
                Element::second_ref_from_value(*it);
        }
    }
}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+int FlatMap<_K, _T, _H, _E, _S>::init(size_t nbucket, u_int load_factor) {
+    if (initialized()) {
+        LOG(ERROR) << "Already initialized";
+        return -1;
+    }
+    if (load_factor < 10 || load_factor > 100) {
+        LOG(ERROR) << "Invalid load_factor=" << load_factor;
+        return -1;
+    }
+    _size = 0;
+    _nbucket = flatmap_round(nbucket);
+    _load_factor = load_factor;
+                                
+    _buckets = (Bucket*)malloc(sizeof(Bucket) * (_nbucket + 1));
+    if (NULL == _buckets) {
+        LOG(ERROR) << "Fail to new _buckets";
+        return -1;
+    }
+    for (size_t i = 0; i < _nbucket; ++i) {
+        _buckets[i].set_invalid();
+    }
+    _buckets[_nbucket].next = NULL;
+
+    if (_S) {
+        _thumbnail = bit_array_malloc(_nbucket);
+        if (NULL == _thumbnail) {
+            LOG(ERROR) << "Fail to new _thumbnail";
+            return -1;
+        }
+        bit_array_clear(_thumbnail, _nbucket);
+    }
+    return 0;
+}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+void FlatMap<_K, _T, _H, _E, _S>::swap(FlatMap<_K, _T, _H, _E, _S> & rhs) {
+    std::swap(rhs._size, _size);
+    std::swap(rhs._nbucket, _nbucket);
+    std::swap(rhs._buckets, _buckets);
+    std::swap(rhs._thumbnail, _thumbnail);
+    std::swap(rhs._load_factor, _load_factor);
+    std::swap(rhs._hashfn, _hashfn);
+    std::swap(rhs._eql, _eql);
+    rhs._pool.swap(_pool);
+}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+_T* FlatMap<_K, _T, _H, _E, _S>::insert(const key_type& key,
+                                        const mapped_type& value) {
+    mapped_type *p = &operator[](key);
+    *p = value;
+    return p;
+}
+
// Remove the element keyed by |key|. Returns the number of elements
// erased (0 or 1; keys are unique). The head node of a bucket lives
// in-place in the flat array, so erasing it when a chain exists pulls the
// second node's contents into the head instead of unlinking.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
template <typename K2>
size_t FlatMap<_K, _T, _H, _E, _S>::erase(const K2& key) {
    if (!initialized()) {
        return 0;
    }
    // TODO: Do we need auto collapsing here?
    const size_t index = flatmap_mod(_hashfn(key), _nbucket);
    Bucket& first_node = _buckets[index];
    if (!first_node.is_valid()) {
        return 0;
    }
    if (_eql(first_node.element().first_ref(), key)) {
        if (first_node.next == NULL) {
            // Sole element of the bucket: destroy it and mark the slot
            // empty (and clear the thumbnail bit for sparse maps).
            first_node.element().~Element();
            first_node.set_invalid();
            if (_S) {
                bit_array_unset(_thumbnail, index);
            }
        } else {
            // A seemingly correct solution is to copy the memory of *p to
            // first_node directly like this:
            //   first_node.element().~Element();
            //   first_node = *p;
            // It works at most of the time, but is wrong generally.
            // If _T references self inside like this:
            //   Value {
            //     Value() : num(0), num_ptr(&num) {}
            //     int num;
            //     int* num_ptr;
            //   };
            // After copying, num_ptr will be invalid.
            // Calling operator= is the price that we have to pay.
            Bucket* p = first_node.next;
            first_node.next = p->next;
            // first_ref() is declared const; cast away constness to
            // overwrite the key in place.
            const_cast<_K&>(first_node.element().first_ref()) =
                p->element().first_ref();
            first_node.element().second_ref() = p->element().second_ref();
            p->element().~Element();
            _pool.back(p);
        }
        --_size;
        return 1UL;
    }
    // Not the head: walk the chain keeping the previous node so the match
    // can be unlinked, destroyed and returned to the pool.
    Bucket *p = first_node.next;
    Bucket *last_p = &first_node;
    while (p) {
        if (_eql(p->element().first_ref(), key)) {
            last_p->next = p->next;
            p->element().~Element();
            _pool.back(p);
            --_size;
            return 1UL;
        }
        last_p = p;
        p = p->next;
    }
    return 0;
}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+void FlatMap<_K, _T, _H, _E, _S>::clear() {
+    if (0 == _size) {
+        return;
+    }
+    _size = 0;
+    if (NULL != _buckets) {
+        for (size_t i = 0; i < _nbucket; ++i) {
+            Bucket& first_node = _buckets[i];
+            if (first_node.is_valid()) {
+                first_node.element().~Element();
+                Bucket* p = first_node.next;
+                while (p) {
+                    Bucket* next_p = p->next;
+                    p->element().~Element();
+                    _pool.back(p);
+                    p = next_p;
+                }
+                first_node.set_invalid();
+            }
+        }
+    }
+    if (NULL != _thumbnail) {
+        bit_array_clear(_thumbnail, _nbucket);
+    }
+}
+
// Destroy all elements, then additionally reset the chain-node pool
// (plain clear() keeps pooled nodes cached for reuse).
template <typename _K, typename _T, typename _H, typename _E, bool _S>
void FlatMap<_K, _T, _H, _E, _S>::clear_and_reset_pool() {
    clear();
    _pool.reset();
}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+template <typename K2>
+_T* FlatMap<_K, _T, _H, _E, _S>::seek(const K2& key) const {
+    if (!initialized()) {
+        return NULL;
+    }
+    Bucket& first_node = _buckets[flatmap_mod(_hashfn(key), _nbucket)];
+    if (!first_node.is_valid()) {
+        return NULL;
+    }
+    if (_eql(first_node.element().first_ref(), key)) {
+        return &first_node.element().second_ref();
+    }
+    Bucket *p = first_node.next;
+    while (p) {
+        if (_eql(p->element().first_ref(), key)) {
+            return &p->element().second_ref();
+        }
+        p = p->next;
+    }
+    return NULL;
+}
+
// Return a reference to the value mapped to |key|, default-constructing
// it (and growing the map when too crowded) if absent.
// NOTE(review): there is no initialized() guard here, unlike seek()/erase()
// -- confirm callers always init() first, otherwise flatmap_mod with
// _nbucket == 0 indexes out of bounds.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
_T& FlatMap<_K, _T, _H, _E, _S>::operator[](const key_type& key) {
    const size_t index = flatmap_mod(_hashfn(key), _nbucket);
    Bucket& first_node = _buckets[index];
    if (!first_node.is_valid()) {
        // Empty slot: construct the element in place in the flat array.
        ++_size;
        if (_S) {
            bit_array_set(_thumbnail, index);
        }
        new (&first_node) Bucket(key);
        return first_node.element().second_ref();
    }
    if (_eql(first_node.element().first_ref(), key)) {
        return first_node.element().second_ref();
    }
    Bucket *p = first_node.next;
    if (NULL == p) {
        // Head occupied by another key and no chain yet: maybe grow, then
        // append a pooled node. After a successful resize the slot layout
        // changed, so the lookup restarts recursively.
        if (is_too_crowded(_size)) {
            if (resize(_nbucket + 1)) {
                return operator[](key);
            }
            // fail to resize is OK
        }
        ++_size;
        Bucket* newp = new (_pool.get()) Bucket(key);
        first_node.next = newp;
        return newp->element().second_ref();
    }
    // Walk the chain; append at the tail when the key is not found.
    while (1) {
        if (_eql(p->element().first_ref(), key)) {
            return p->element().second_ref();
        }
        if (NULL == p->next) {
            if (is_too_crowded(_size)) {
                if (resize(_nbucket + 1)) {
                    return operator[](key);
                }
                // fail to resize is OK
            }
            ++_size;
            Bucket* newp = new (_pool.get()) Bucket(key);
            p->next = newp;
            return newp->element().second_ref();
        }
        p = p->next;
    }
}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+void FlatMap<_K, _T, _H, _E, _S>::save_iterator(
+    const const_iterator& it, PositionHint* hint) const {
+    hint->nbucket = _nbucket;
+    hint->offset = it._entry - _buckets;
+    if (it != end()) {
+        hint->at_entry = (it._entry == it._node);
+        hint->key = it->first;
+    } else {
+        hint->at_entry = false;
+        hint->key = key_type();
+    }
+}
+
// Rebuild a const_iterator from a PositionHint saved by save_iterator().
// Falls back to begin() when the map was resized or the hint is invalid;
// otherwise resumes at (or near) the saved element, possibly revisiting a
// few elements of the same bucket when the saved one was erased.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
typename FlatMap<_K, _T, _H, _E, _S>::const_iterator
FlatMap<_K, _T, _H, _E, _S>::restore_iterator(const PositionHint& hint) const {
    if (hint.nbucket != _nbucket/*resized*/ ||
        hint.offset >= _nbucket/*invalid hint*/) {
        return begin();  // restart
    }
    Bucket& first_node = _buckets[hint.offset];
    if (hint.at_entry) {
        return const_iterator(this, hint.offset);
    }
    if (!first_node.is_valid()) {
        // All elements hashed to the entry were removed, try next entry.
        return const_iterator(this, hint.offset + 1);
    }
    // Search the chain for the saved key and point the iterator at it.
    Bucket *p = &first_node;
    do {
        if (_eql(p->element().first_ref(), hint.key)) {
            const_iterator it;
            it._node = p;
            it._entry = &first_node;
            return it;
        }
        p = p->next;
    } while (p);
    // Last element that we iterated (and saved in PositionHint) was removed,
    // don't know which element to start, just restart at the beginning of
    // the entry. Some elements in the entry may be revisited, which
    // shouldn't be often.
    return const_iterator(this, hint.offset);
}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+bool FlatMap<_K, _T, _H, _E, _S>::resize(size_t nbucket2) {
+    nbucket2 = flatmap_round(nbucket2);
+    if (_nbucket == nbucket2) {
+        return false;
+    }
+
+    FlatMap new_map;
+    if (new_map.init(nbucket2, _load_factor) != 0) {
+        LOG(ERROR) << "Fail to init new_map, nbucket=" << nbucket2;
+        return false;
+    }
+    for (iterator it = begin(); it != end(); ++it) {
+        new_map[Element::first_ref_from_value(*it)] = 
+            Element::second_ref_from_value(*it);
+    }
+    new_map.swap(*this);
+    return true;
+}
+
+template <typename _K, typename _T, typename _H, typename _E, bool _S>
+BucketInfo FlatMap<_K, _T, _H, _E, _S>::bucket_info() const {
+    size_t max_n = 0;
+    size_t nentry = 0;
+    for (size_t i = 0; i < _nbucket; ++i) {
+        if (_buckets[i].is_valid()) {
+            size_t n = 1;
+            for (Bucket* p = _buckets[i].next; p; p = p->next, ++n);
+            max_n = std::max(max_n, n);
+            ++nentry;
+        }
+    }
+    const BucketInfo info = { max_n, size() / (double)nentry };
+    return info;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const BucketInfo& info) {
+    return os << "{maxb=" << info.longest_length
+              << " avgb=" << info.average_length << '}';
+}
+
// Mutable iterator to the first element; equals end() when the map is
// empty or uninitialized (the iterator ctor handles both).
template <typename _K, typename _T, typename _H, typename _E, bool _S>
typename FlatMap<_K, _T, _H, _E, _S>::iterator FlatMap<_K, _T, _H, _E, _S>::begin() {
    return iterator(this, 0);
}

// Mutable past-the-end iterator, anchored at the extra tail bucket
// (index _nbucket) allocated by init().
template <typename _K, typename _T, typename _H, typename _E, bool _S>
typename FlatMap<_K, _T, _H, _E, _S>::iterator FlatMap<_K, _T, _H, _E, _S>::end() {
    return iterator(this, _nbucket);
}
+
// Const iterator to the first element; equals end() when the map is
// empty or uninitialized.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
typename FlatMap<_K, _T, _H, _E, _S>::const_iterator FlatMap<_K, _T, _H, _E, _S>::begin() const {
    return const_iterator(this, 0);
}

// Const past-the-end iterator, anchored at the extra tail bucket.
template <typename _K, typename _T, typename _H, typename _E, bool _S>
typename FlatMap<_K, _T, _H, _E, _S>::const_iterator FlatMap<_K, _T, _H, _E, _S>::end() const {
    return const_iterator(this, _nbucket);
}
+
+}  // namespace base
+
+#endif  //BASE_FLAT_MAP_INL_H

+ 277 - 0
base/containers/hash_tables.h

@@ -0,0 +1,277 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+//
+// Deal with the differences between Microsoft and GNU implemenations
+// of hash_map. Allows all platforms to use |base::hash_map| and
+// |base::hash_set|.
+//  eg:
+//   base::hash_map<int> my_map;
+//   base::hash_set<int> my_set;
+//
+// NOTE: It is an explicit non-goal of this class to provide a generic hash
+// function for pointers.  If you want to hash a pointers to a particular class,
+// please define the template specialization elsewhere (for example, in its
+// header file) and keep it specific to just pointers to that class.  This is
+// because identity hashes are not desirable for all types that might show up
+// in containers as pointers.
+
+#ifndef BASE_CONTAINERS_HASH_TABLES_H_
+#define BASE_CONTAINERS_HASH_TABLES_H_
+
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/strings/string16.h"
+#include "base/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <hash_map>
+#include <hash_set>
+
+#define BASE_HASH_NAMESPACE stdext
+
+#elif defined(COMPILER_GCC)
+#if defined(OS_ANDROID)
+#define BASE_HASH_NAMESPACE std
+#else
+#define BASE_HASH_NAMESPACE __gnu_cxx
+#endif
+
+// This is a hack to disable the gcc 4.4 warning about hash_map and hash_set
+// being deprecated.  We can get rid of this when we upgrade to VS2008 and we
+// can use <tr1/unordered_map> and <tr1/unordered_set>.
+#ifdef __DEPRECATED
+#define CHROME_OLD__DEPRECATED __DEPRECATED
+#undef __DEPRECATED
+#endif
+
+#if defined(OS_ANDROID)
+#include <hash_map>
+#include <hash_set>
+#else
+#include <ext/hash_map>
+#include <ext/hash_set>
+#endif
+
+#include <string>
+
+#ifdef CHROME_OLD__DEPRECATED
+#define __DEPRECATED CHROME_OLD__DEPRECATED
+#undef CHROME_OLD__DEPRECATED
+#endif
+
namespace BASE_HASH_NAMESPACE {

#if !defined(OS_ANDROID)
// The GNU C++ library provides identity hash functions for many integral types,
// but not for |long long|.  This hash function will truncate if |size_t| is
// narrower than |long long|.  This is probably good enough for what we will
// use it for.

// Expands to a hash<> specialization that casts the value to size_t.
#define DEFINE_TRIVIAL_HASH(integral_type) \
    template<> \
    struct hash<integral_type> { \
      std::size_t operator()(integral_type value) const { \
        return static_cast<std::size_t>(value); \
      } \
    }

DEFINE_TRIVIAL_HASH(long long);
DEFINE_TRIVIAL_HASH(unsigned long long);

#undef DEFINE_TRIVIAL_HASH
#endif  // !defined(OS_ANDROID)

// Implement string hash functions so that strings of various flavors can
// be used as keys in STL maps and sets.  The hash algorithm comes from the
// GNU C++ library, in <tr1/functional>.  It is duplicated here because GCC
// versions prior to 4.3.2 are unable to compile <tr1/functional> when RTTI
// is disabled, as it is in our build.

// Expands to a hash<> specialization computing a polynomial rolling hash
// with multiplier 131 over the string's characters.
#define DEFINE_STRING_HASH(string_type) \
    template<> \
    struct hash<string_type> { \
      std::size_t operator()(const string_type& s) const { \
        std::size_t result = 0; \
        for (string_type::const_iterator i = s.begin(); i != s.end(); ++i) \
          result = (result * 131) + *i; \
        return result; \
      } \
    }

DEFINE_STRING_HASH(std::string);
DEFINE_STRING_HASH(base::string16);

#undef DEFINE_STRING_HASH

}  // namespace BASE_HASH_NAMESPACE
+
+#else  // COMPILER
+#error define BASE_HASH_NAMESPACE for your compiler
+#endif  // COMPILER
+
+namespace base {
+using BASE_HASH_NAMESPACE::hash_map;
+using BASE_HASH_NAMESPACE::hash_multimap;
+using BASE_HASH_NAMESPACE::hash_multiset;
+using BASE_HASH_NAMESPACE::hash_set;
+
// Implement hashing for pairs of at-most 32 bit integer values.
// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
// multiply-add hashing. This algorithm, as described in
// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
//
//   h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
//
// Contact danakj@chromium.org for any questions.
inline std::size_t HashInts32(uint32_t value1, uint32_t value2) {
  // Pack the two inputs into one 64-bit word, value1 in the high half.
  const uint64_t hash64 = (static_cast<uint64_t>(value1) << 32) | value2;

  // A 64-bit size_t can hold the packed word directly.
  if (sizeof(std::size_t) >= sizeof(uint64_t))
    return static_cast<std::size_t>(hash64);

  // 32-bit size_t: multiply-add hashing, then keep the high bits.
  const uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
  const uint32_t shift_random = 10121U << 16;
  const uint64_t mixed = hash64 * odd_random + shift_random;
  return static_cast<std::size_t>(
      mixed >> (8 * (sizeof(uint64_t) - sizeof(std::size_t))));
}
+
// Implement hashing for pairs of up-to 64-bit integer values.
// We use the compound integer hash method to produce a 64-bit hash code, by
// breaking the two 64-bit inputs into 4 32-bit values:
// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
// Then we reduce our result to 32 bits if required, similar to above.
inline std::size_t HashInts64(uint64_t value1, uint64_t value2) {
  const uint32_t short_random1 = 842304669U;
  const uint32_t short_random2 = 619063811U;
  const uint32_t short_random3 = 937041849U;
  const uint32_t short_random4 = 3309708029U;

  // Split each input into its low and high 32-bit halves.
  const uint32_t lo1 = static_cast<uint32_t>(value1 & 0xffffffff);
  const uint32_t hi1 = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
  const uint32_t lo2 = static_cast<uint32_t>(value2 & 0xffffffff);
  const uint32_t hi2 = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);

  // Combine the four halves, each scaled by its own multiplier; unsigned
  // overflow wraps, matching the compound-hash construction.
  uint64_t hash64 = static_cast<uint64_t>(lo1) * short_random1;
  hash64 += static_cast<uint64_t>(hi1) * short_random2;
  hash64 += static_cast<uint64_t>(lo2) * short_random3;
  hash64 += static_cast<uint64_t>(hi2) * short_random4;

  if (sizeof(std::size_t) >= sizeof(uint64_t))
    return static_cast<std::size_t>(hash64);

  // 32-bit size_t: reduce with multiply-add hashing, keep the high bits.
  const uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
  const uint32_t shift_random = 20591U << 16;
  hash64 = hash64 * odd_random + shift_random;
  return static_cast<std::size_t>(
      hash64 >> (8 * (sizeof(uint64_t) - sizeof(std::size_t))));
}
+
// Expands to a HashPair overload delegating to HashInts32; instantiated
// below for every combination of (at most 32-bit) integer argument types
// so overload resolution never widens through HashInts64 unnecessarily.
#define DEFINE_32BIT_PAIR_HASH(Type1, Type2) \
inline std::size_t HashPair(Type1 value1, Type2 value2) { \
  return HashInts32(value1, value2); \
}

DEFINE_32BIT_PAIR_HASH(int16_t, int16_t);
DEFINE_32BIT_PAIR_HASH(int16_t, uint16_t);
DEFINE_32BIT_PAIR_HASH(int16_t, int32_t);
DEFINE_32BIT_PAIR_HASH(int16_t, uint32_t);
DEFINE_32BIT_PAIR_HASH(uint16_t, int16_t);
DEFINE_32BIT_PAIR_HASH(uint16_t, uint16_t);
DEFINE_32BIT_PAIR_HASH(uint16_t, int32_t);
DEFINE_32BIT_PAIR_HASH(uint16_t, uint32_t);
DEFINE_32BIT_PAIR_HASH(int32_t, int16_t);
DEFINE_32BIT_PAIR_HASH(int32_t, uint16_t);
DEFINE_32BIT_PAIR_HASH(int32_t, int32_t);
DEFINE_32BIT_PAIR_HASH(int32_t, uint32_t);
DEFINE_32BIT_PAIR_HASH(uint32_t, int16_t);
DEFINE_32BIT_PAIR_HASH(uint32_t, uint16_t);
DEFINE_32BIT_PAIR_HASH(uint32_t, int32_t);
DEFINE_32BIT_PAIR_HASH(uint32_t, uint32_t);

#undef DEFINE_32BIT_PAIR_HASH
+
// Expands to a HashPair overload delegating to HashInts64; instantiated
// below for every combination where at least one argument is 64-bit.
#define DEFINE_64BIT_PAIR_HASH(Type1, Type2) \
inline std::size_t HashPair(Type1 value1, Type2 value2) { \
  return HashInts64(value1, value2); \
}

DEFINE_64BIT_PAIR_HASH(int16_t, int64_t);
DEFINE_64BIT_PAIR_HASH(int16_t, uint64_t);
DEFINE_64BIT_PAIR_HASH(uint16_t, int64_t);
DEFINE_64BIT_PAIR_HASH(uint16_t, uint64_t);
DEFINE_64BIT_PAIR_HASH(int32_t, int64_t);
DEFINE_64BIT_PAIR_HASH(int32_t, uint64_t);
DEFINE_64BIT_PAIR_HASH(uint32_t, int64_t);
DEFINE_64BIT_PAIR_HASH(uint32_t, uint64_t);
DEFINE_64BIT_PAIR_HASH(int64_t, int16_t);
DEFINE_64BIT_PAIR_HASH(int64_t, uint16_t);
DEFINE_64BIT_PAIR_HASH(int64_t, int32_t);
DEFINE_64BIT_PAIR_HASH(int64_t, uint32_t);
DEFINE_64BIT_PAIR_HASH(int64_t, int64_t);
DEFINE_64BIT_PAIR_HASH(int64_t, uint64_t);
DEFINE_64BIT_PAIR_HASH(uint64_t, int16_t);
DEFINE_64BIT_PAIR_HASH(uint64_t, uint16_t);
DEFINE_64BIT_PAIR_HASH(uint64_t, int32_t);
DEFINE_64BIT_PAIR_HASH(uint64_t, uint32_t);
DEFINE_64BIT_PAIR_HASH(uint64_t, int64_t);
DEFINE_64BIT_PAIR_HASH(uint64_t, uint64_t);

#undef DEFINE_64BIT_PAIR_HASH
+}  // namespace base
+
namespace BASE_HASH_NAMESPACE {

// Implement methods for hashing a pair of integers, so they can be used as
// keys in STL containers.

// NOTE(gejun): Specialize ptr as well which is supposed to work with 
// containers by default

#if defined(COMPILER_MSVC)

// MSVC's stdext containers look up hash_value() by ADL rather than a
// hash<> specialization.
template<typename Type1, typename Type2>
inline std::size_t hash_value(const std::pair<Type1, Type2>& value) {
  return base::HashPair(value.first, value.second);
}
// Pointers hash to their address value.
template<typename Type>
inline std::size_t hash_value(Type* ptr) {
  return (uintptr_t)ptr;
}

#elif defined(COMPILER_GCC)
// GCC's __gnu_cxx containers use partial specializations of hash<>.
template<typename Type1, typename Type2>
struct hash<std::pair<Type1, Type2> > {
  std::size_t operator()(std::pair<Type1, Type2> value) const {
    return base::HashPair(value.first, value.second);
  }
};
// Pointers hash to their address value.
template<typename Type>
struct hash<Type*> {
  std::size_t operator()(Type* ptr) const {
    return (uintptr_t)ptr;
  }
};

#else
#error define hash<std::pair<Type1, Type2> > for your compiler
#endif  // COMPILER

}  // namespace BASE_HASH_NAMESPACE
+
+#undef DEFINE_PAIR_HASH_FUNCTION_START
+#undef DEFINE_PAIR_HASH_FUNCTION_END
+
+#endif  // BASE_CONTAINERS_HASH_TABLES_H_

+ 196 - 0
base/containers/linked_list.h

@@ -0,0 +1,196 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_LINKED_LIST_H_
+#define BASE_CONTAINERS_LINKED_LIST_H_
+
+#include "base/macros.h"
+
+// Simple LinkedList type. (See the Q&A section to understand how this
+// differs from std::list).
+//
+// To use, start by declaring the class which will be contained in the linked
+// list, as extending LinkNode (this gives it next/previous pointers).
+//
+//   class MyNodeType : public LinkNode<MyNodeType> {
+//     ...
+//   };
+//
+// Next, to keep track of the list's head/tail, use a LinkedList instance:
+//
+//   LinkedList<MyNodeType> list;
+//
+// To add elements to the list, use any of LinkedList::Append,
+// LinkNode::InsertBefore, or LinkNode::InsertAfter:
+//
+//   LinkNode<MyNodeType>* n1 = ...;
+//   LinkNode<MyNodeType>* n2 = ...;
+//   LinkNode<MyNodeType>* n3 = ...;
+//
+//   list.Append(n1);
+//   list.Append(n3);
+//   n2->InsertBefore(n3);
+//
+// Lastly, to iterate through the linked list forwards:
+//
+//   for (LinkNode<MyNodeType>* node = list.head();
+//        node != list.end();
+//        node = node->next()) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+// Or to iterate the linked list backwards:
+//
+//   for (LinkNode<MyNodeType>* node = list.tail();
+//        node != list.end();
+//        node = node->previous()) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+// Questions and Answers:
+//
+// Q. Should I use std::list or base::LinkedList?
+//
+// A. The main reason to use base::LinkedList over std::list is
+//    performance. If you don't care about the performance differences
+//    then use an STL container, as it makes for better code readability.
+//
+//    Comparing the performance of base::LinkedList<T> to std::list<T*>:
+//
+//    * Erasing an element of type T* from base::LinkedList<T> is
+//      an O(1) operation. Whereas for std::list<T*> it is O(n).
+//      That is because with std::list<T*> you must obtain an
+//      iterator to the T* element before you can call erase(iterator).
+//
+//    * Insertion operations with base::LinkedList<T> never require
+//      heap allocations.
+//
+// Q. How does base::LinkedList implementation differ from std::list?
+//
+// A. Doubly-linked lists are made up of nodes that contain "next" and
+//    "previous" pointers that reference other nodes in the list.
+//
+//    With base::LinkedList<T>, the type being inserted already reserves
+//    space for the "next" and "previous" pointers (base::LinkNode<T>*).
+//    Whereas with std::list<T> the type can be anything, so the implementation
+//    needs to glue on the "next" and "previous" pointers using
+//    some internal node type.
+
+namespace base {
+
template <typename T>
class LinkNode {
 public:
  // A default-constructed node is "detached": both links point back at
  // |this|. This is the same representation RemoveFromList() restores.
  LinkNode() : previous_(this), next_(this) {}

  LinkNode(LinkNode<T>* previous, LinkNode<T>* next)
      : previous_(previous), next_(next) {}

  // Insert |this| into the linked list, before |e|.
  void InsertBefore(LinkNode<T>* e) {
    this->next_ = e;
    this->previous_ = e->previous_;
    e->previous_->next_ = this;
    e->previous_ = this;
  }

  // Insert |this| as a circular linked list into the linked list, before |e|.
  // |this| is the head of its own circular list; the whole list is spliced in
  // so that its tail ends up immediately before |e|.
  void InsertBeforeAsList(LinkNode<T>* e) {
    LinkNode<T>* prev = this->previous_;  // tail of the list being spliced in
    prev->next_ = e;
    this->previous_ = e->previous_;
    e->previous_->next_ = this;
    e->previous_ = prev;
  }

  // Insert |this| into the linked list, after |e|.
  void InsertAfter(LinkNode<T>* e) {
    this->next_ = e->next_;
    this->previous_ = e;
    e->next_->previous_ = this;
    e->next_ = this;
  }

  // Insert |this| as a circular list into the linked list, after |e|.
  // Symmetric to InsertBeforeAsList(): the spliced list starts right after |e|.
  void InsertAfterAsList(LinkNode<T>* e) {
    LinkNode<T>* prev = this->previous_;  // tail of the list being spliced in
    prev->next_ = e->next_;
    this->previous_ = e;
    e->next_->previous_ = prev;
    e->next_ = this;
  }

  // Remove |this| from the linked list.
  void RemoveFromList() {
    this->previous_->next_ = this->next_;
    this->next_->previous_ = this->previous_;
    // Restore the detached (self-referential) state so a node that is not in
    // any list has next() == previous() == this. Note next()/previous() are
    // never NULL.
    this->next_ = this;
    this->previous_ = this;
  }

  LinkNode<T>* previous() const {
    return previous_;
  }

  LinkNode<T>* next() const {
    return next_;
  }

  // Cast from the node-type to the value type. Valid because T derives from
  // LinkNode<T> (CRTP); see the usage comment at the top of this file.
  const T* value() const {
    return static_cast<const T*>(this);
  }

  T* value() {
    return static_cast<T*>(this);
  }

 private:
  LinkNode<T>* previous_;
  LinkNode<T>* next_;

  DISALLOW_COPY_AND_ASSIGN(LinkNode);
};
+
+template <typename T>
+class LinkedList {
+ public:
+  // The "root" node is self-referential, and forms the basis of a circular
+  // list (root_.next() will point back to the start of the list,
+  // and root_->previous() wraps around to the end of the list).
+  LinkedList() {}
+
+  // Appends |e| to the end of the linked list.
+  void Append(LinkNode<T>* e) {
+    e->InsertBefore(&root_);
+  }
+
+  LinkNode<T>* head() const {
+    return root_.next();
+  }
+
+  LinkNode<T>* tail() const {
+    return root_.previous();
+  }
+
+  const LinkNode<T>* end() const {
+    return &root_;
+  }
+
+  bool empty() const { return head() == end(); }
+
+ private:
+  LinkNode<T> root_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinkedList);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_LINKED_LIST_H_

+ 310 - 0
base/containers/mru_cache.h

@@ -0,0 +1,310 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains a template for a Most Recently Used cache that allows
+// constant-time access to items using a key, but easy identification of the
+// least-recently-used items for removal.  Each key can only be associated with
+// one payload item at a time.
+//
+// The key object will be stored twice, so it should support efficient copying.
+//
+// NOTE: While all operations are O(1), this code is written for
+// legibility rather than optimality. If future profiling identifies this as
+// a bottleneck, there is room for smaller values of 1 in the O(1). :]
+
+#ifndef BASE_CONTAINERS_MRU_CACHE_H_
+#define BASE_CONTAINERS_MRU_CACHE_H_
+
+#include <list>
+#include <map>
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+
+namespace base {
+
+// MRUCacheBase ----------------------------------------------------------------
+
// This template is used to standardize map type containers that can be used
// by MRUCacheBase. This level of indirection is necessary because of the way
// that template template params and default template params interact.
// It exposes std::map<KeyType, ValueType> as the nested ::Type.
template <class KeyType, class ValueType>
struct MRUCacheStandardMap {
  typedef std::map<KeyType, ValueType> Type;
};
+
// Base class for the MRU cache specializations defined below.
// The deletor will get called on all payloads that are being removed or
// replaced.
//
// Invariant: |ordering_| holds the (key, payload) pairs in recency order
// (front = most recently used) and |index_| maps each key to its node in
// |ordering_| for O(1) lookup. Both always contain the same set of keys.
template <class KeyType, class PayloadType, class DeletorType,
          template <typename, typename> class MapType = MRUCacheStandardMap>
class MRUCacheBase {
 public:
  // The payload of the list. This maintains a copy of the key so we can
  // efficiently delete things given an element of the list.
  typedef std::pair<KeyType, PayloadType> value_type;

 private:
  // Recency-ordered storage: front = most recently used, back = oldest.
  typedef std::list<value_type> PayloadList;
  // Key -> position of that key's element in |ordering_|.
  typedef typename MapType<KeyType,
                           typename PayloadList::iterator>::Type KeyIndex;

 public:
  typedef typename PayloadList::size_type size_type;

  typedef typename PayloadList::iterator iterator;
  typedef typename PayloadList::const_iterator const_iterator;
  typedef typename PayloadList::reverse_iterator reverse_iterator;
  typedef typename PayloadList::const_reverse_iterator const_reverse_iterator;

  enum { NO_AUTO_EVICT = 0 };

  // The max_size is the size at which the cache will prune its members to when
  // a new item is inserted. If the caller wants to manage this itself (for
  // example, maybe it has special work to do when something is evicted), it
  // can pass NO_AUTO_EVICT to not restrict the cache size.
  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {
  }

  MRUCacheBase(size_type max_size, const DeletorType& deletor)
      : max_size_(max_size), deletor_(deletor) {
  }

  // Runs the deletor on every remaining payload before teardown.
  virtual ~MRUCacheBase() {
    iterator i = begin();
    while (i != end())
      i = Erase(i);
  }

  size_type max_size() const { return max_size_; }

  // Inserts a payload item with the given key. If an existing item has
  // the same key, it is removed prior to insertion. An iterator indicating the
  // inserted item will be returned (this will always be the front of the list).
  //
  // The payload will be copied. In the case of an OwningMRUCache, this function
  // will take ownership of the pointer.
  iterator Put(const KeyType& key, const PayloadType& payload) {
    // Remove any existing payload with that key.
    typename KeyIndex::iterator index_iter = index_.find(key);
    if (index_iter != index_.end()) {
      // Erase the reference to it. This will call the deletor on the removed
      // element. The index reference will be replaced in the code below.
      Erase(index_iter->second);
    } else if (max_size_ != NO_AUTO_EVICT) {
      // New item is being inserted which might make it larger than the maximum
      // size: kick the oldest thing out if necessary.
      ShrinkToSize(max_size_ - 1);
    }

    ordering_.push_front(value_type(key, payload));
    index_.insert(std::make_pair(key, ordering_.begin()));
    return ordering_.begin();
  }

  // Retrieves the contents of the given key, or end() if not found. This method
  // has the side effect of moving the requested item to the front of the
  // recency list.
  //
  // TODO(brettw) We may want a const version of this function in the future.
  iterator Get(const KeyType& key) {
    typename KeyIndex::iterator index_iter = index_.find(key);
    if (index_iter == index_.end())
      return end();
    typename PayloadList::iterator iter = index_iter->second;

    // Move the touched item to the front of the recency ordering.
    ordering_.splice(ordering_.begin(), ordering_, iter);
    return ordering_.begin();
  }

  // Retrieves the payload associated with a given key and returns it via
  // result without affecting the ordering (unlike Get).
  iterator Peek(const KeyType& key) {
    typename KeyIndex::const_iterator index_iter = index_.find(key);
    if (index_iter == index_.end())
      return end();
    return index_iter->second;
  }

  const_iterator Peek(const KeyType& key) const {
    typename KeyIndex::const_iterator index_iter = index_.find(key);
    if (index_iter == index_.end())
      return end();
    return index_iter->second;
  }

  // Erases the item referenced by the given iterator. An iterator to the item
  // following it will be returned. The iterator must be valid.
  // The deletor runs on the payload before it is removed.
  iterator Erase(iterator pos) {
    deletor_(pos->second);
    index_.erase(pos->first);
    return ordering_.erase(pos);
  }

  // MRUCache entries are often processed in reverse order, so we add this
  // convenience function (not typically defined by STL containers).
  reverse_iterator Erase(reverse_iterator pos) {
    // We have to actually give it the incremented iterator to delete, since
    // the forward iterator that base() returns is actually one past the item
    // being iterated over.
    return reverse_iterator(Erase((++pos).base()));
  }

  // Shrinks the cache so it only holds |new_size| items. If |new_size| is
  // bigger or equal to the current number of items, this will do nothing.
  // Evicts from the back, i.e. the least recently used items go first.
  void ShrinkToSize(size_type new_size) {
    for (size_type i = size(); i > new_size; i--)
      Erase(rbegin());
  }

  // Deletes everything from the cache. The deletor runs once per payload.
  void Clear() {
    for (typename PayloadList::iterator i(ordering_.begin());
         i != ordering_.end(); ++i)
      deletor_(i->second);
    index_.clear();
    ordering_.clear();
  }

  // Returns the number of elements in the cache.
  size_type size() const {
    // We don't use ordering_.size() for the return value because
    // (as a linked list) it can be O(n).
    DCHECK(index_.size() == ordering_.size());
    return index_.size();
  }

  // Allows iteration over the list. Forward iteration starts with the most
  // recent item and works backwards.
  //
  // Note that since these iterators are actually iterators over a list, you
  // can keep them as you insert or delete things (as long as you don't delete
  // the one you are pointing to) and they will still be valid.
  iterator begin() { return ordering_.begin(); }
  const_iterator begin() const { return ordering_.begin(); }
  iterator end() { return ordering_.end(); }
  const_iterator end() const { return ordering_.end(); }

  reverse_iterator rbegin() { return ordering_.rbegin(); }
  const_reverse_iterator rbegin() const { return ordering_.rbegin(); }
  reverse_iterator rend() { return ordering_.rend(); }
  const_reverse_iterator rend() const { return ordering_.rend(); }

  bool empty() const { return ordering_.empty(); }

 private:
  PayloadList ordering_;
  KeyIndex index_;

  size_type max_size_;

  DeletorType deletor_;

  DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
};
+
+// MRUCache --------------------------------------------------------------------
+
// Deletor that leaves the payload untouched. Used by MRUCache, which stores
// value types and therefore owns nothing that needs freeing.
template<class PayloadType>
class MRUCacheNullDeletor {
 public:
  void operator()(PayloadType& /*payload*/) {}
};
+
+// A container that does not do anything to free its data. Use this when storing
+// value types (as opposed to pointers) in the list.
+template <class KeyType, class PayloadType>
+class MRUCache : public MRUCacheBase<KeyType,
+                                     PayloadType,
+                                     MRUCacheNullDeletor<PayloadType> > {
+ private:
+  typedef MRUCacheBase<KeyType, PayloadType,
+      MRUCacheNullDeletor<PayloadType> > ParentType;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit MRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {
+  }
+  virtual ~MRUCache() {
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MRUCache);
+};
+
+// OwningMRUCache --------------------------------------------------------------
+
// Deletor for pointer payloads: disposes of the pointee with `delete`.
// Used by OwningMRUCache.
template<class PayloadType>
class MRUCachePointerDeletor {
 public:
  void operator()(PayloadType& payload) { delete payload; }
};
+
+// A cache that owns the payload type, which must be a non-const pointer type.
+// The pointers will be deleted when they are removed, replaced, or when the
+// cache is destroyed.
+template <class KeyType, class PayloadType>
+class OwningMRUCache
+    : public MRUCacheBase<KeyType,
+                          PayloadType,
+                          MRUCachePointerDeletor<PayloadType> > {
+ private:
+  typedef MRUCacheBase<KeyType, PayloadType,
+      MRUCachePointerDeletor<PayloadType> > ParentType;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit OwningMRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {
+  }
+  virtual ~OwningMRUCache() {
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(OwningMRUCache);
+};
+
+// HashingMRUCache ------------------------------------------------------------
+
// Adapts base::hash_map to the MapType template-template parameter expected
// by MRUCacheBase (see MRUCacheStandardMap above).
template <class KeyType, class ValueType>
struct MRUCacheHashMap {
  typedef base::hash_map<KeyType, ValueType> Type;
};
+
+// This class is similar to MRUCache, except that it uses base::hash_map as
+// the map type instead of std::map. Note that your KeyType must be hashable
+// to use this cache.
+template <class KeyType, class PayloadType>
+class HashingMRUCache : public MRUCacheBase<KeyType,
+                                            PayloadType,
+                                            MRUCacheNullDeletor<PayloadType>,
+                                            MRUCacheHashMap> {
+ private:
+  typedef MRUCacheBase<KeyType, PayloadType,
+                       MRUCacheNullDeletor<PayloadType>,
+                       MRUCacheHashMap> ParentType;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit HashingMRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {
+  }
+  virtual ~HashingMRUCache() {
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_MRU_CACHE_H_

+ 182 - 0
base/containers/pooled_map.h

@@ -0,0 +1,182 @@
+// Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved
+//
+// A drop-in replacement for std::map to improve insert/erase performance slightly
+// 
+// Author: The baidu-rpc authors (pbrpc@baidu.com)
+// Date: Sat Dec  3 13:11:32 CST 2016
+
+#ifndef BASE_POOLED_MAP_H
+#define BASE_POOLED_MAP_H
+
+#include "base/single_threaded_pool.h"
+#include <new>
+#include <map>
+
+namespace base {
+namespace details {
+template <class T1, size_t BLOCK_SIZE> class PooledAllocator;
+}
+
+// When do use PooledMap?
+//   A std::map with 10~100 elements. insert/erase performance will be slightly
+//   improved. Performance of find() is unaffected.
+// When do NOT use PooledMap?
+//   When the std::map has less than 10 elements, PooledMap is probably slower
+//   because it allocates BLOCK_SIZE memory at least. When the std::map has more than
+//   100 elements, you should use base::FlatMap instead.
+
+// insert/erase comparisons between several maps:
+// [ value = 8 bytes ]
+// Sequentially inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 15/114/54/60
+// Sequentially erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/123/56/37
+// Sequentially inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 9/92/56/54
+// Sequentially erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 3/68/51/35
+// Sequentially inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 10/99/63/54
+// Sequentially erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/73/54/35
+// [ value = 32 bytes ]
+// Sequentially inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 14/107/57/57
+// Sequentially erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/75/53/37
+// Sequentially inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 13/94/55/53
+// Sequentially erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/67/50/37
+// Sequentially inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 13/102/63/54
+// Sequentially erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/69/53/36
+// [ value = 128 bytes ]
+// Sequentially inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 35/160/96/98
+// Sequentially erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 7/96/53/42
+// Sequentially inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 30/159/98/98
+// Sequentially erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/82/49/43
+// Sequentially inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 29/155/114/116
+// Sequentially erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/81/53/43
+
+// [ value = 8 bytes ]
+// Randomly inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 13/168/103/59
+// Randomly erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/159/125/37
+// Randomly inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 10/157/115/54
+// Randomly erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/175/138/36
+// Randomly inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 11/219/177/56
+// Randomly erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 4/229/207/47
+// [ value = 32 bytes ]
+// Randomly inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 17/178/112/57
+// Randomly erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/149/117/38
+// Randomly inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 15/169/135/54
+// Randomly erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/157/129/39
+// Randomly inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 19/242/203/55
+// Randomly erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 5/233/218/54
+// [ value = 128 bytes ]
+// Randomly inserting 100 into FlatMap/std::map/base::PooledMap/base::hash_map takes 36/214/145/96
+// Randomly erasing 100 from FlatMap/std::map/base::PooledMap/base::hash_map takes 7/166/122/53
+// Randomly inserting 1000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 36/230/174/100
+// Randomly erasing 1000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 6/193/153/65
+// Randomly inserting 10000 into FlatMap/std::map/base::PooledMap/base::hash_map takes 45/304/270/115
+// Randomly erasing 10000 from FlatMap/std::map/base::PooledMap/base::hash_map takes 7/299/246/88
+
+template <typename K, typename V, size_t BLOCK_SIZE = 512,
+          typename C = std::less<K> >
+class PooledMap
+    : public std::map<K, V, C, details::PooledAllocator<int, BLOCK_SIZE> > {
+    
+};
+
+namespace details {
// Specialization for void: provides only the pointer typedefs and rebind,
// since void values cannot be allocated or constructed. Needed so
// PooledAllocator<void, N>::const_pointer can be used as an allocation hint
// type (see PooledAllocator::allocate below).
template <size_t BLOCK_SIZE>
class PooledAllocator<void, BLOCK_SIZE> {
public:
    typedef void * pointer;
    typedef const void* const_pointer;
    typedef void value_type;
    template <class U1> struct rebind {
        typedef PooledAllocator<U1, BLOCK_SIZE> other;
    };
};
+
// STL-compatible allocator that serves single-object requests (the common
// case for node-based containers such as std::map) from a free-list pool and
// falls back to malloc/free for multi-object requests.
//
// Instances are NOT exchangeable: memory must be deallocated through the same
// instance that allocated it (operator== below always returns false).
template <class T1, size_t BLOCK_SIZE>
class PooledAllocator {
public:
    typedef T1 value_type;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T1* pointer;
    typedef const T1* const_pointer;
    typedef T1& reference;
    typedef const T1& const_reference;
    template <class U1> struct rebind {
        typedef PooledAllocator<U1, BLOCK_SIZE> other;
    };

public:
    // Copy construction/assignment intentionally do NOT copy the pool: each
    // instance keeps its own private pool (the bodies are deliberately empty).
    PooledAllocator() {}
    PooledAllocator(const PooledAllocator&) {}
    template <typename U1, size_t BS2>
    PooledAllocator(const PooledAllocator<U1, BS2>&) {}
    void operator=(const PooledAllocator&) {}
    template <typename U1, size_t BS2>
    void operator=(const PooledAllocator<U1, BS2>&) {}

    // Exchanges the underlying pools; used by the std::swap specialization at
    // the bottom of this file to make map.swap() work.
    void swap(PooledAllocator& other) { _pool.swap(other._pool); }

    // Convert references to pointers.
    pointer address(reference r) const { return &r; };
    const_pointer address(const_reference r) const { return &r; };

    // Allocate storage for n values of T1. n == 1 is served from the pool;
    // larger requests go straight to malloc. The hint parameter is ignored.
    pointer allocate(size_type n, PooledAllocator<void, 0>::const_pointer = 0) {
        if (n == 1) {
            return (pointer)_pool.get();
        } else {
            return (pointer)malloc(n * sizeof(T1));
        }
    };

    // Deallocate storage obtained by a call to allocate. |n| must match the
    // count passed to the matching allocate() so the pool/malloc paths pair up.
    void deallocate(pointer p, size_type n) {
        if (n == 1) {
            return _pool.back(p);
        } else {
            free(p);
        }
    };

    // Return the largest possible storage available through a call to allocate.
    size_type max_size() const { return 0xFFFFFFFF / sizeof(T1); };

    // Placement-construct / destroy a value inside already-allocated storage.
    void construct(pointer ptr) { ::new (ptr) T1; };
    void construct(pointer ptr, const T1& val) { ::new (ptr) T1(val); };
    template <class U1> void construct(pointer ptr, const U1& val)
    { ::new (ptr) T1(val); }

    void destroy(pointer p) { p->T1::~T1(); };

private:
    // Free list of sizeof(T1)-byte chunks, BLOCK_SIZE bytes per block.
    // NOTE(review): presumably single-threaded only, per the type's name --
    // confirm against base/single_threaded_pool.h.
    base::SingleThreadedPool<sizeof(T1), BLOCK_SIZE, 1> _pool;
};
+
+// Return true if b could be used to deallocate storage obtained through a
+// and vice versa. It's clear that our allocator can't be exchanged.
+template <typename T1, size_t S1, typename T2, size_t S2>
+bool operator==(const PooledAllocator<T1, S1>&, const PooledAllocator<T2, S2>&)
+{ return false; };
+template <typename T1, size_t S1, typename T2, size_t S2>
+bool operator!=(const PooledAllocator<T1, S1>& a, const PooledAllocator<T2, S2>& b)
+{ return !(a == b); };
+
+} // namespace details
+} // namespace base
+
+// Since this allocator can't be exchanged(check impl. of operator==) nor
+// copied, specializing swap() is a must to make map.swap() work.
+#if !defined(BASE_CXX11_ENABLED)
+#include <algorithm>  // std::swap until C++11
+#else
+#include <utility>    // std::swap since C++11
+#endif
+
namespace std {
// std::map::swap() may exchange allocators; the generic std::swap would need
// copy/assignment, which for PooledAllocator do not transfer the pool. This
// specialization exchanges the pools directly (see the comment above).
template <class T1, size_t BLOCK_SIZE>
inline void swap(::base::details::PooledAllocator<T1, BLOCK_SIZE> &lhs,
                 ::base::details::PooledAllocator<T1, BLOCK_SIZE> &rhs){
    lhs.swap(rhs);
}
}  // namespace std
+#endif  // BASE_POOLED_MAP_H

+ 157 - 0
base/containers/scoped_ptr_hash_map.h

@@ -0,0 +1,157 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
+#define BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
+
+#include <algorithm>
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/stl_util.h"
+
+namespace base {
+
// This type acts like a hash_map<K, scoped_ptr<V> >, based on top of
// base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
// structure: they are deleted on erase/clear/destruction unless ownership is
// transferred out via take()/take_and_erase().
template <typename Key, typename Value>
class ScopedPtrHashMap {
  typedef base::hash_map<Key, Value*> Container;

 public:
  typedef typename Container::key_type key_type;
  typedef typename Container::mapped_type mapped_type;
  typedef typename Container::value_type value_type;
  typedef typename Container::iterator iterator;
  typedef typename Container::const_iterator const_iterator;

  ScopedPtrHashMap() {}

  // Deletes every owned value.
  ~ScopedPtrHashMap() { clear(); }

  void swap(ScopedPtrHashMap<Key, Value>& other) {
    data_.swap(other.data_);
  }

  // Replaces value but not key if key is already present; the previous value
  // for the key (if any) is deleted. Takes ownership of |data|.
  iterator set(const Key& key, scoped_ptr<Value> data) {
    iterator it = find(key);
    if (it != end()) {
      delete it->second;
      it->second = data.release();
      return it;
    }

    return data_.insert(std::make_pair(key, data.release())).first;
  }

  // Does nothing if key is already present; in that case |data| is NOT
  // released and is therefore destroyed when this call returns.
  std::pair<iterator, bool> add(const Key& key, scoped_ptr<Value> data) {
    std::pair<iterator, bool> result =
        data_.insert(std::make_pair(key, data.get()));
    if (result.second)
      ignore_result(data.release());
    return result;
  }

  // Deletes the value and removes its entry.
  void erase(iterator it) {
    delete it->second;
    data_.erase(it);
  }

  // Returns the number of entries removed (0 or 1).
  size_t erase(const Key& k) {
    iterator it = data_.find(k);
    if (it == data_.end())
      return 0;
    erase(it);
    return 1;
  }

  // Transfers ownership of the value to the caller. NOTE: the entry stays in
  // the map, now holding a NULL value.
  scoped_ptr<Value> take(iterator it) {
    DCHECK(it != data_.end());
    if (it == data_.end())
      return scoped_ptr<Value>();

    scoped_ptr<Value> ret(it->second);
    it->second = NULL;
    return ret.Pass();
  }

  // Key-based overload of take(); returns an empty scoped_ptr if absent.
  scoped_ptr<Value> take(const Key& k) {
    iterator it = find(k);
    if (it == data_.end())
      return scoped_ptr<Value>();

    return take(it);
  }

  // Transfers ownership of the value to the caller and removes the entry.
  scoped_ptr<Value> take_and_erase(iterator it) {
    DCHECK(it != data_.end());
    if (it == data_.end())
      return scoped_ptr<Value>();

    scoped_ptr<Value> ret(it->second);
    data_.erase(it);
    return ret.Pass();
  }

  // Key-based overload of take_and_erase(); empty scoped_ptr if absent.
  scoped_ptr<Value> take_and_erase(const Key& k) {
    iterator it = find(k);
    if (it == data_.end())
      return scoped_ptr<Value>();

    return take_and_erase(it);
  }

  // Returns the element in the hash_map that matches the given key.
  // If no such element exists it returns NULL. Ownership is retained.
  Value* get(const Key& k) const {
    const_iterator it = find(k);
    if (it == end())
      return NULL;
    return it->second;
  }

  inline bool contains(const Key& k) const { return data_.count(k) > 0; }

  // Deletes all values and removes all entries.
  inline void clear() { STLDeleteValues(&data_); }

  inline const_iterator find(const Key& k) const { return data_.find(k); }
  inline iterator find(const Key& k) { return data_.find(k); }

  inline size_t count(const Key& k) const { return data_.count(k); }
  inline std::pair<const_iterator, const_iterator> equal_range(
      const Key& k) const {
    return data_.equal_range(k);
  }
  inline std::pair<iterator, iterator> equal_range(const Key& k) {
    return data_.equal_range(k);
  }

  inline size_t size() const { return data_.size(); }
  inline size_t max_size() const { return data_.max_size(); }

  inline bool empty() const { return data_.empty(); }

  inline size_t bucket_count() const { return data_.bucket_count(); }
  inline void resize(size_t size) { return data_.resize(size); }

  inline iterator begin() { return data_.begin(); }
  inline const_iterator begin() const { return data_.begin(); }
  inline iterator end() { return data_.end(); }
  inline const_iterator end() const { return data_.end(); }

 private:
  Container data_;

  DISALLOW_COPY_AND_ASSIGN(ScopedPtrHashMap);
};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_

+ 652 - 0
base/containers/small_map.h

@@ -0,0 +1,652 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SMALL_MAP_H_
+#define BASE_CONTAINERS_SMALL_MAP_H_
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/manual_constructor.h"
+
+namespace base {
+
+// An STL-like associative container which starts out backed by a simple
+// array but switches to some other container type if it grows beyond a
+// fixed size.
+//
+// WHAT TYPE OF MAP SHOULD YOU USE?
+// --------------------------------
+//
+//  - std::map should be the default if you're not sure, since it's the most
+//    difficult to mess up. Generally this is backed by a red-black tree. It
+//    will generate a lot of code (if you use a common key type like int or
+//    string the linker will probably eliminate the duplicates). It will
+//    do heap allocations for each element.
+//
+//  - If you only ever keep a couple of items and have very simple usage,
+//    consider whether using a vector and brute-force searching it will be
+//    the most efficient. It's not a lot of generated code (less than a
+//    red-black tree if your key is "weird" and not eliminated as duplicate of
+//    something else) and will probably be faster and do fewer heap allocations
+//    than std::map if you have just a couple of items.
+//
+//  - base::hash_map should be used if you need O(1) lookups. It may waste
+//    space in the hash table, and it can be easy to write correct-looking
+//    code with the default hash function being wrong or poorly-behaving.
+//
+//  - SmallMap combines the performance benefits of the brute-force-searched
+//    vector for small cases (no extra heap allocations), but can efficiently
+//    fall back if you end up adding many items. It will generate more code
+//    than std::map (at least 160 bytes for operator[]) which is bad if you
+//    have a "weird" key where map functions can't be
+//    duplicate-code-eliminated. If you have a one-off key and aren't in
+//    performance-critical code, this bloat may negate some of the benefits and
+//    you should consider one of the other options.
+//
+// SmallMap will pick up the comparator from the underlying map type. In
+// std::map (and in MSVC additionally hash_map) only a "less" operator is
+// defined, which requires us to do two comparisons per element when doing the
+// brute-force search in the simple array.
+//
+// We define default overrides for the common map types to avoid this
+// double-compare, but you should be aware of this if you use your own
+// operator< for your map and supply your own version of == to the SmallMap.
+// You can use regular operator== by just doing:
+//
+//   base::SmallMap<std::map<MyKey, MyValue>, 4, std::equal_to<MyKey> >
+//
+//
+// USAGE
+// -----
+//
+// NormalMap:  The map type to fall back to.  This also defines the key
+//             and value types for the SmallMap.
+// kArraySize:  The size of the initial array of results. This will be
+//              allocated with the SmallMap object rather than separately on
+//              the heap. Once the map grows beyond this size, the map type
+//              will be used instead.
+// EqualKey:  A functor which tests two keys for equality.  If the wrapped
+//            map type has a "key_equal" member (hash_map does), then that will
+//            be used by default. If the wrapped map type has a strict weak
+//            ordering "key_compare" (std::map does), that will be used to
+//            implement equality by default.
+// MapInit: A functor that takes a ManualConstructor<NormalMap>* and uses it to
+//          initialize the map. This functor will be called at most once per
+//          SmallMap, when the map exceeds the threshold of kArraySize and we
+//          are about to copy values from the array to the map. The functor
+//          *must* call one of the Init() methods provided by
+//          ManualConstructor, since after it runs we assume that the NormalMap
+//          has been initialized.
+//
+// example:
+//   base::SmallMap< std::map<string, int> > days;
+//   days["sunday"   ] = 0;
+//   days["monday"   ] = 1;
+//   days["tuesday"  ] = 2;
+//   days["wednesday"] = 3;
+//   days["thursday" ] = 4;
+//   days["friday"   ] = 5;
+//   days["saturday" ] = 6;
+//
+// You should assume that SmallMap might invalidate all the iterators
+// on any call to erase(), insert() and operator[].
+
+namespace internal {
+
+// Default MapInit functor: default-constructs the fallback map in place.
+template <typename NormalMap>
+class SmallMapDefaultInit {
+ public:
+  void operator()(ManualConstructor<NormalMap>* map) const {
+    map->Init();
+  }
+};
+
+// has_key_equal<M>::value is true iff there exists a type M::key_equal. This is
+// used to dispatch to one of the select_equal_key<> metafunctions below.
+// (Classic sizeof-based SFINAE member detection.)
+template <typename M>
+struct has_key_equal {
+  typedef char sml;  // "small" is sometimes #defined so we use an abbreviation.
+  typedef struct { char dummy[2]; } big;
+  // Two functions, one accepts types that have a key_equal member, and one that
+  // accepts anything. They each return a value of a different size, so we can
+  // determine at compile-time which function would have been called.
+  template <typename U> static big test(typename U::key_equal*);
+  template <typename> static sml test(...);
+  // Determines if M::key_equal exists by looking at the size of the return
+  // type of the compiler-chosen test() function.
+  static const bool value = (sizeof(test<M>(0)) == sizeof(big));
+};
+// Out-of-class definition of the static member, required if it is ODR-used.
+template <typename M> const bool has_key_equal<M>::value;
+
+// Base template used for map types that do NOT have an M::key_equal member,
+// e.g., std::map<>. These maps have a strict weak ordering comparator rather
+// than an equality functor, so equality will be implemented in terms of that
+// comparator.
+//
+// There's a partial specialization of this template below for map types that do
+// have an M::key_equal member.
+template <typename M, bool has_key_equal_value>
+struct select_equal_key {
+  struct equal_key {
+    bool operator()(const typename M::key_type& left,
+                    const typename M::key_type& right) {
+      // Implements equality in terms of a strict weak ordering comparator.
+      // Note: this costs two comparator invocations per equality test.
+      typename M::key_compare comp;
+      return !comp(left, right) && !comp(right, left);
+    }
+  };
+};
+
+// Provide overrides to use operator== for key compare for the "normal" map and
+// hash map types. If you override the default comparator or allocator for a
+// map or hash_map, or use another type of map, this won't get used.
+//
+// If we switch to using std::unordered_map for base::hash_map, then the
+// hash_map specialization can be removed.
+template <typename KeyType, typename ValueType>
+struct select_equal_key< std::map<KeyType, ValueType>, false> {
+  struct equal_key {
+    // Single operator== call; avoids the double-compare of the base template.
+    bool operator()(const KeyType& left, const KeyType& right) {
+      return left == right;
+    }
+  };
+};
+template <typename KeyType, typename ValueType>
+struct select_equal_key< base::hash_map<KeyType, ValueType>, false> {
+  struct equal_key {
+    bool operator()(const KeyType& left, const KeyType& right) {
+      return left == right;
+    }
+  };
+};
+
+// Partial template specialization handles case where M::key_equal exists, e.g.,
+// hash_map<>. Simply reuses the map's own equality functor.
+template <typename M>
+struct select_equal_key<M, true> {
+  typedef typename M::key_equal equal_key;
+};
+
+}  // namespace internal
+
+template <typename NormalMap,
+          int kArraySize = 4,
+          typename EqualKey =
+              typename internal::select_equal_key<
+                  NormalMap,
+                  internal::has_key_equal<NormalMap>::value>::equal_key,
+          typename MapInit = internal::SmallMapDefaultInit<NormalMap> >
+class SmallMap {
+  // We cannot rely on the compiler to reject array of size 0.  In
+  // particular, gcc 2.95.3 does it but later versions allow 0-length
+  // arrays.  Therefore, we explicitly reject non-positive kArraySize
+  // here.
+  COMPILE_ASSERT(kArraySize > 0, default_initial_size_should_be_positive);
+
+ public:
+  typedef typename NormalMap::key_type key_type;
+  typedef typename NormalMap::mapped_type data_type;
+  typedef typename NormalMap::mapped_type mapped_type;
+  typedef typename NormalMap::value_type value_type;
+  typedef EqualKey key_equal;
+
+  // Starts in array mode (size_ == 0) with the default map initializer.
+  SmallMap() : size_(0), functor_(MapInit()) {}
+
+  explicit SmallMap(const MapInit& functor) : size_(0), functor_(functor) {}
+
+  // Allow copy-constructor and assignment, since STL allows them too.
+  SmallMap(const SmallMap& src) {
+    // size_ and functor_ are initted in InitFrom()
+    InitFrom(src);
+  }
+  void operator=(const SmallMap& src) {
+    // Self-assignment guard: Destroy() below would otherwise tear down src.
+    if (&src == this) return;
+
+    // This is not optimal. If src and dest are both using the small
+    // array, we could skip the teardown and reconstruct. One problem
+    // to be resolved is that the value_type itself is pair<const K,
+    // V>, and const K is not assignable.
+    Destroy();
+    InitFrom(src);
+  }
+  ~SmallMap() {
+    Destroy();
+  }
+
+  class const_iterator;
+
+  // Iterator over either the inline array or the fallback map. Exactly one
+  // representation is active: array_iter_ != NULL means "array mode";
+  // otherwise hash_iter_ is the live iterator.
+  class iterator {
+   public:
+    typedef typename NormalMap::iterator::iterator_category iterator_category;
+    typedef typename NormalMap::iterator::value_type value_type;
+    typedef typename NormalMap::iterator::difference_type difference_type;
+    typedef typename NormalMap::iterator::pointer pointer;
+    typedef typename NormalMap::iterator::reference reference;
+
+    inline iterator(): array_iter_(NULL) {}
+
+    inline iterator& operator++() {
+      if (array_iter_ != NULL) {
+        ++array_iter_;
+      } else {
+        ++hash_iter_;
+      }
+      return *this;
+    }
+    inline iterator operator++(int /*unused*/) {
+      iterator result(*this);
+      ++(*this);
+      return result;
+    }
+    inline iterator& operator--() {
+      if (array_iter_ != NULL) {
+        --array_iter_;
+      } else {
+        --hash_iter_;
+      }
+      return *this;
+    }
+    inline iterator operator--(int /*unused*/) {
+      iterator result(*this);
+      --(*this);
+      return result;
+    }
+    inline value_type* operator->() const {
+      if (array_iter_ != NULL) {
+        return array_iter_->get();
+      } else {
+        return hash_iter_.operator->();
+      }
+    }
+
+    inline value_type& operator*() const {
+      if (array_iter_ != NULL) {
+        return *array_iter_->get();
+      } else {
+        return *hash_iter_;
+      }
+    }
+
+    inline bool operator==(const iterator& other) const {
+      // Iterators of different modes never compare equal.
+      if (array_iter_ != NULL) {
+        return array_iter_ == other.array_iter_;
+      } else {
+        return other.array_iter_ == NULL && hash_iter_ == other.hash_iter_;
+      }
+    }
+
+    inline bool operator!=(const iterator& other) const {
+      return !(*this == other);
+    }
+
+    bool operator==(const const_iterator& other) const;
+    bool operator!=(const const_iterator& other) const;
+
+   private:
+    // Only SmallMap constructs non-default iterators, choosing the mode.
+    friend class SmallMap;
+    friend class const_iterator;
+    inline explicit iterator(ManualConstructor<value_type>* init)
+      : array_iter_(init) {}
+    inline explicit iterator(const typename NormalMap::iterator& init)
+      : array_iter_(NULL), hash_iter_(init) {}
+
+    ManualConstructor<value_type>* array_iter_;
+    typename NormalMap::iterator hash_iter_;
+  };
+
+  // Const counterpart of iterator; same two-mode representation
+  // (array_iter_ != NULL means "array mode").
+  class const_iterator {
+   public:
+    typedef typename NormalMap::const_iterator::iterator_category
+        iterator_category;
+    typedef typename NormalMap::const_iterator::value_type value_type;
+    typedef typename NormalMap::const_iterator::difference_type difference_type;
+    typedef typename NormalMap::const_iterator::pointer pointer;
+    typedef typename NormalMap::const_iterator::reference reference;
+
+    inline const_iterator(): array_iter_(NULL) {}
+    // Non-explicit ctor lets us convert regular iterators to const iterators
+    inline const_iterator(const iterator& other)
+      : array_iter_(other.array_iter_), hash_iter_(other.hash_iter_) {}
+
+    inline const_iterator& operator++() {
+      if (array_iter_ != NULL) {
+        ++array_iter_;
+      } else {
+        ++hash_iter_;
+      }
+      return *this;
+    }
+    inline const_iterator operator++(int /*unused*/) {
+      const_iterator result(*this);
+      ++(*this);
+      return result;
+    }
+
+    inline const_iterator& operator--() {
+      if (array_iter_ != NULL) {
+        --array_iter_;
+      } else {
+        --hash_iter_;
+      }
+      return *this;
+    }
+    inline const_iterator operator--(int /*unused*/) {
+      const_iterator result(*this);
+      --(*this);
+      return result;
+    }
+
+    inline const value_type* operator->() const {
+      if (array_iter_ != NULL) {
+        return array_iter_->get();
+      } else {
+        return hash_iter_.operator->();
+      }
+    }
+
+    inline const value_type& operator*() const {
+      if (array_iter_ != NULL) {
+        return *array_iter_->get();
+      } else {
+        return *hash_iter_;
+      }
+    }
+
+    inline bool operator==(const const_iterator& other) const {
+      if (array_iter_ != NULL) {
+        return array_iter_ == other.array_iter_;
+      } else {
+        return other.array_iter_ == NULL && hash_iter_ == other.hash_iter_;
+      }
+    }
+
+    inline bool operator!=(const const_iterator& other) const {
+      return !(*this == other);
+    }
+
+   private:
+    friend class SmallMap;
+    inline explicit const_iterator(
+        const ManualConstructor<value_type>* init)
+      : array_iter_(init) {}
+    inline explicit const_iterator(
+        const typename NormalMap::const_iterator& init)
+      : array_iter_(NULL), hash_iter_(init) {}
+
+    const ManualConstructor<value_type>* array_iter_;
+    typename NormalMap::const_iterator hash_iter_;
+  };
+
+  // Looks up |key|. In array mode (size_ >= 0) this is a linear scan using
+  // the EqualKey functor; otherwise it delegates to the fallback map.
+  // Returns end() when the key is absent.
+  iterator find(const key_type& key) {
+    key_equal compare;
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i]->first, key)) {
+          return iterator(array_ + i);
+        }
+      }
+      return iterator(array_ + size_);
+    } else {
+      return iterator(map()->find(key));
+    }
+  }
+
+  const_iterator find(const key_type& key) const {
+    key_equal compare;
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i]->first, key)) {
+          return const_iterator(array_ + i);
+        }
+      }
+      return const_iterator(array_ + size_);
+    } else {
+      return const_iterator(map()->find(key));
+    }
+  }
+
+  // Invalidates iterators.
+  // Like std::map::operator[]: default-constructs and inserts a value if
+  // |key| is absent. May trigger conversion to the fallback map when the
+  // inline array is full.
+  data_type& operator[](const key_type& key) {
+    key_equal compare;
+
+    if (size_ >= 0) {
+      // operator[] searches backwards, favoring recently-added
+      // elements.
+      for (int i = size_-1; i >= 0; --i) {
+        if (compare(array_[i]->first, key)) {
+          return array_[i]->second;
+        }
+      }
+      if (size_ == kArraySize) {
+        ConvertToRealMap();
+        return (*map_)[key];
+      } else {
+        array_[size_].Init(key, data_type());
+        return array_[size_++]->second;
+      }
+    } else {
+      return (*map_)[key];
+    }
+  }
+
+  // Invalidates iterators.
+  // Returns {iterator to element, inserted?}, matching std::map::insert:
+  // if the key already exists the existing element is left untouched.
+  std::pair<iterator, bool> insert(const value_type& x) {
+    key_equal compare;
+
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i]->first, x.first)) {
+          return std::make_pair(iterator(array_ + i), false);
+        }
+      }
+      if (size_ == kArraySize) {
+        ConvertToRealMap();  // Invalidates all iterators!
+        std::pair<typename NormalMap::iterator, bool> ret = map_->insert(x);
+        return std::make_pair(iterator(ret.first), ret.second);
+      } else {
+        array_[size_].Init(x);
+        return std::make_pair(iterator(array_ + size_++), true);
+      }
+    } else {
+      std::pair<typename NormalMap::iterator, bool> ret = map_->insert(x);
+      return std::make_pair(iterator(ret.first), ret.second);
+    }
+  }
+
+  // Invalidates iterators.
+  // Range insert: inserts each element in [f, l) one at a time.
+  template <class InputIterator>
+  void insert(InputIterator f, InputIterator l) {
+    while (f != l) {
+      insert(*f);
+      ++f;
+    }
+  }
+
+  // begin()/end() return array-mode iterators while size_ >= 0, map-mode
+  // iterators after conversion to the fallback map.
+  iterator begin() {
+    if (size_ >= 0) {
+      return iterator(array_);
+    } else {
+      return iterator(map_->begin());
+    }
+  }
+  const_iterator begin() const {
+    if (size_ >= 0) {
+      return const_iterator(array_);
+    } else {
+      return const_iterator(map_->begin());
+    }
+  }
+
+  iterator end() {
+    if (size_ >= 0) {
+      return iterator(array_ + size_);
+    } else {
+      return iterator(map_->end());
+    }
+  }
+  const_iterator end() const {
+    if (size_ >= 0) {
+      return const_iterator(array_ + size_);
+    } else {
+      return const_iterator(map_->end());
+    }
+  }
+
+  // Destroys all elements and returns the map to (empty) array mode.
+  void clear() {
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        array_[i].Destroy();
+      }
+    } else {
+      map_.Destroy();
+    }
+    size_ = 0;
+  }
+
+  // Invalidates iterators.
+  void erase(const iterator& position) {
+    if (size_ >= 0) {
+      int i = position.array_iter_ - array_;
+      array_[i].Destroy();
+      --size_;
+      // Fill the hole by moving the last array element into it; this keeps
+      // the array compact but does not preserve element order.
+      if (i != size_) {
+        array_[i].Init(*array_[size_]);
+        array_[size_].Destroy();
+      }
+    } else {
+      map_->erase(position.hash_iter_);
+    }
+  }
+
+  // Returns the number of elements removed (0 or 1).
+  size_t erase(const key_type& key) {
+    iterator iter = find(key);
+    if (iter == end()) return 0u;
+    erase(iter);
+    return 1u;
+  }
+
+  // Unique-key container: count is always 0 or 1.
+  size_t count(const key_type& key) const {
+    return (find(key) == end()) ? 0 : 1;
+  }
+
+  size_t size() const {
+    if (size_ >= 0) {
+      return static_cast<size_t>(size_);
+    } else {
+      return map_->size();
+    }
+  }
+
+  bool empty() const {
+    if (size_ >= 0) {
+      return (size_ == 0);
+    } else {
+      return map_->empty();
+    }
+  }
+
+  // Returns true if we have fallen back to using the underlying map
+  // representation.
+  bool UsingFullMap() const {
+    return size_ < 0;
+  }
+
+  // Direct access to the fallback map. CHECKs (crashes) unless the map has
+  // actually been converted; callers must test UsingFullMap() first.
+  inline NormalMap* map() {
+    CHECK(UsingFullMap());
+    return map_.get();
+  }
+  inline const NormalMap* map() const {
+    CHECK(UsingFullMap());
+    return map_.get();
+  }
+
+ private:
+  int size_;  // negative = using hash_map
+
+  MapInit functor_;
+
+  // We want to call constructors and destructors manually, but we don't
+  // want to allocate and deallocate the memory used for them separately.
+  // So, we use this crazy ManualConstructor class.
+  //
+  // Since array_ and map_ are mutually exclusive, we'll put them in a
+  // union, too.  We add in a dummy_ value which quiets MSVC from otherwise
+  // giving an erroneous "union member has copy constructor" error message
+  // (C2621). This dummy member has to come before array_ to quiet the
+  // compiler.
+  //
+  // TODO(brettw) remove this and use C++11 unions when we require C++11.
+  union {
+    ManualConstructor<value_type> dummy_;
+    ManualConstructor<value_type> array_[kArraySize];
+    ManualConstructor<NormalMap> map_;
+  };
+
+  // Switches from array mode to map mode: moves the kArraySize inline
+  // elements aside, constructs the fallback map via functor_, then
+  // re-inserts the elements. Called exactly when the array overflows.
+  void ConvertToRealMap() {
+    // Move the current elements into a temporary array.
+    ManualConstructor<value_type> temp_array[kArraySize];
+
+    for (int i = 0; i < kArraySize; i++) {
+      temp_array[i].Init(*array_[i]);
+      array_[i].Destroy();
+    }
+
+    // Initialize the map.
+    size_ = -1;
+    functor_(&map_);
+
+    // Insert elements into it.
+    for (int i = 0; i < kArraySize; i++) {
+      map_->insert(*temp_array[i]);
+      temp_array[i].Destroy();
+    }
+  }
+
+  // Helpers for constructors and destructors.
+  // Copies src's mode (array or map) and its elements into *this. Assumes
+  // *this currently holds no constructed elements.
+  void InitFrom(const SmallMap& src) {
+    functor_ = src.functor_;
+    size_ = src.size_;
+    if (src.size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        array_[i].Init(*src.array_[i]);
+      }
+    } else {
+      functor_(&map_);
+      (*map_.get()) = (*src.map_.get());
+    }
+  }
+  // Destroys whichever union member is active; does not reset size_.
+  void Destroy() {
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        array_[i].Destroy();
+      }
+    } else {
+      map_.Destroy();
+    }
+  }
+};
+
+// Cross-type comparisons: delegate to const_iterator's operators (the
+// iterator on the left is implicitly converted to a const_iterator).
+// Defined out of line because const_iterator is incomplete at the point
+// where iterator declares these members.
+template <typename NormalMap, int kArraySize, typename EqualKey,
+          typename Functor>
+inline bool SmallMap<NormalMap, kArraySize, EqualKey,
+                     Functor>::iterator::operator==(
+    const const_iterator& other) const {
+  return other == *this;
+}
+template <typename NormalMap, int kArraySize, typename EqualKey,
+          typename Functor>
+inline bool SmallMap<NormalMap, kArraySize, EqualKey,
+                     Functor>::iterator::operator!=(
+    const const_iterator& other) const {
+  return other != *this;
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_SMALL_MAP_H_

+ 265 - 0
base/containers/stack_container.h

@@ -0,0 +1,265 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_STACK_CONTAINER_H_
+#define BASE_CONTAINERS_STACK_CONTAINER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/aligned_memory.h"
+#include "base/strings/string16.h"
+#include "base/build_config.h"
+
+namespace base {
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory and overflows onto the heap. This stack buffer
+// would be allocated on the stack and allows us to avoid heap operations in
+// some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
+template<typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+ public:
+  typedef typename std::allocator<T>::pointer pointer;
+  typedef typename std::allocator<T>::size_type size_type;
+
+  // Backing store for the allocator. The container owner is responsible for
+  // maintaining this for as long as any containers using this allocator are
+  // live.
+  struct Source {
+    Source() : used_stack_buffer_(false) {
+    }
+
+    // Casts the buffer in its right type.
+    T* stack_buffer() { return stack_buffer_.template data_as<T>(); }
+    const T* stack_buffer() const {
+      return stack_buffer_.template data_as<T>();
+    }
+
+    // The buffer itself. It is not of type T because we don't want the
+    // constructors and destructors to be automatically called. Define a POD
+    // buffer of the right size instead.
+    base::AlignedMemory<sizeof(T[stack_capacity]), ALIGNOF(T)> stack_buffer_;
+#if defined(__GNUC__) && !defined(ARCH_CPU_X86_FAMILY)
+    COMPILE_ASSERT(ALIGNOF(T) <= 16, crbug_115612);
+#endif
+
+    // Set when the stack buffer is used for an allocation. We do not track
+    // how much of the buffer is used, only that somebody is using it.
+    bool used_stack_buffer_;
+  };
+
+  // Used by containers when they want to refer to an allocator of type U.
+  template<typename U>
+  struct rebind {
+    typedef StackAllocator<U, stack_capacity> other;
+  };
+
+  // For the straight up copy c-tor, we can share storage.
+  StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+      : std::allocator<T>(), source_(rhs.source_) {
+  }
+
+  // ISO C++ requires the following constructor to be defined,
+  // and std::vector in VC++2008SP1 Release fails with an error
+  // in the class _Container_base_aux_alloc_real (from <xutility>)
+  // if the constructor does not exist.
+  // For this constructor, we cannot share storage; there's
+  // no guarantee that the Source buffer of Ts is large enough
+  // for Us.
+  // TODO: If we were fancy pants, perhaps we could share storage
+  // iff sizeof(T) == sizeof(U).
+  template<typename U, size_t other_capacity>
+  StackAllocator(const StackAllocator<U, other_capacity>& other)
+      : source_(NULL) {
+  }
+
+  // This constructor must exist. It creates a default allocator that doesn't
+  // actually have a stack buffer. glibc's std::string() will compare the
+  // current allocator against the default-constructed allocator, so this
+  // should be fast.
+  StackAllocator() : source_(NULL) {
+  }
+
+  explicit StackAllocator(Source* source) : source_(source) {
+  }
+
+  // Actually do the allocation. Use the stack buffer if nobody has used it yet
+  // and the size requested fits. Otherwise, fall through to the standard
+  // allocator.
+  pointer allocate(size_type n, void* hint = 0) {
+    if (source_ != NULL && !source_->used_stack_buffer_
+        && n <= stack_capacity) {
+      source_->used_stack_buffer_ = true;
+      return source_->stack_buffer();
+    } else {
+      return std::allocator<T>::allocate(n, hint);
+    }
+  }
+
+  // Free: when trying to free the stack buffer, just mark it as free. For
+  // non-stack-buffer pointers, just fall though to the standard allocator.
+  void deallocate(pointer p, size_type n) {
+    if (source_ != NULL && p == source_->stack_buffer())
+      source_->used_stack_buffer_ = false;
+    else
+      std::allocator<T>::deallocate(p, n);
+  }
+
+ private:
+  // Non-owning; NULL when this allocator has no stack buffer (default- or
+  // rebind-constructed instances).
+  Source* source_;
+};
+
+// A wrapper around STL containers that maintains a stack-sized buffer that the
+// initial capacity of the vector is based on. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template<typename TContainerType, int stack_capacity>
+class StackContainer {
+ public:
+  typedef TContainerType ContainerType;
+  typedef typename ContainerType::value_type ContainedType;
+  typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+  // Allocator must be constructed before the container!
+  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+    // Make the container use the stack allocation by reserving our buffer size
+    // before doing anything else.
+    container_.reserve(stack_capacity);
+  }
+
+  // Getters for the actual container.
+  //
+  // Danger: any copies of this made using the copy constructor must have
+  // shorter lifetimes than the source. The copy will share the same allocator
+  // and therefore the same stack buffer as the original. Use std::copy to
+  // copy into a "real" container for longer-lived objects.
+  ContainerType& container() { return container_; }
+  const ContainerType& container() const { return container_; }
+
+  // Support operator-> to get to the container. This allows nicer syntax like:
+  //   StackContainer<...> foo;
+  //   std::sort(foo->begin(), foo->end());
+  ContainerType* operator->() { return &container_; }
+  const ContainerType* operator->() const { return &container_; }
+
+#ifdef UNIT_TEST
+  // Retrieves the stack source so that unit tests can verify that the
+  // buffer is being used properly.
+  const typename Allocator::Source& stack_data() const {
+    return stack_data_;
+  }
+#endif
+
+ protected:
+  // Declaration order matters: stack_data_ must outlive (be constructed
+  // before) allocator_, which must be constructed before container_.
+  typename Allocator::Source stack_data_;
+  Allocator allocator_;
+  ContainerType container_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackContainer);
+};
+
+// StackString -----------------------------------------------------------------
+
+// A std::string-like type whose first stack_capacity chars of storage come
+// from an inline stack buffer.
+template<size_t stack_capacity>
+class StackString : public StackContainer<
+    std::basic_string<char,
+                      std::char_traits<char>,
+                      StackAllocator<char, stack_capacity> >,
+    stack_capacity> {
+ public:
+  StackString() : StackContainer<
+      std::basic_string<char,
+                        std::char_traits<char>,
+                        StackAllocator<char, stack_capacity> >,
+      stack_capacity>() {
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StackString);
+};
+
+// StackString16 ---------------------------------------------------------------
+
+// string16 counterpart of StackString, using an inline stack buffer for the
+// first stack_capacity char16 units.
+template<size_t stack_capacity>
+class StackString16 : public StackContainer<
+    std::basic_string<char16,
+                      base::string16_char_traits,
+                      StackAllocator<char16, stack_capacity> >,
+    stack_capacity> {
+ public:
+  StackString16() : StackContainer<
+      std::basic_string<char16,
+                        base::string16_char_traits,
+                        StackAllocator<char16, stack_capacity> >,
+      stack_capacity>() {
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StackString16);
+};
+
+// StackVector -----------------------------------------------------------------
+
+// Example:
+//   StackVector<int, 16> foo;
+//   foo->push_back(22);  // we have overloaded operator->
+//   foo[0] = 10;         // as well as operator[]
+template<typename T, size_t stack_capacity>
+class StackVector : public StackContainer<
+    std::vector<T, StackAllocator<T, stack_capacity> >,
+    stack_capacity> {
+ public:
+  StackVector() : StackContainer<
+      std::vector<T, StackAllocator<T, stack_capacity> >,
+      stack_capacity>() {
+  }
+
+  // We need to put this in STL containers sometimes, which requires a copy
+  // constructor. We can't call the regular copy constructor because that will
+  // take the stack buffer from the original. Here, we create an empty object
+  // and make a stack buffer of its own.
+  StackVector(const StackVector<T, stack_capacity>& other)
+      : StackContainer<
+            std::vector<T, StackAllocator<T, stack_capacity> >,
+            stack_capacity>() {
+    this->container().assign(other->begin(), other->end());
+  }
+
+  // Element-wise copy into this object's own (stack or heap) storage.
+  StackVector<T, stack_capacity>& operator=(
+      const StackVector<T, stack_capacity>& other) {
+    this->container().assign(other->begin(), other->end());
+    return *this;
+  }
+
+  // Vectors are commonly indexed, which isn't very convenient even with
+  // operator-> (using "->at()" does exception stuff we don't want).
+  T& operator[](size_t i) { return this->container().operator[](i); }
+  const T& operator[](size_t i) const {
+    return this->container().operator[](i);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_STACK_CONTAINER_H_

+ 244 - 0
base/cpu.cc

@@ -0,0 +1,244 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+
+#include <string.h>
+
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "base/build_config.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+#include "base/file_util.h"
+#include "base/lazy_instance.h"
+#endif
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(_MSC_VER)
+#include <intrin.h>
+#include <immintrin.h>  // For _xgetbv()
+#endif
+#endif
+
+namespace base {
+
+// Default-initializes every field (feature flags false, numeric fields 0,
+// vendor "unknown"), then probes the actual hardware via Initialize().
+CPU::CPU()
+  : signature_(0),
+    type_(0),
+    family_(0),
+    model_(0),
+    stepping_(0),
+    ext_model_(0),
+    ext_family_(0),
+    has_mmx_(false),
+    has_sse_(false),
+    has_sse2_(false),
+    has_sse3_(false),
+    has_ssse3_(false),
+    has_sse41_(false),
+    has_sse42_(false),
+    has_avx_(false),
+    has_avx_hardware_(false),
+    has_aesni_(false),
+    has_non_stop_time_stamp_counter_(false),
+    cpu_vendor_("unknown") {
+  Initialize();
+}
+
+namespace {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#ifndef _MSC_VER
+
+#if defined(__pic__) && defined(__i386__)
+
+// CPUID for 32-bit PIC builds: under -fPIC on i386 %ebx holds the GOT
+// pointer, so it is saved in %edi around "cpuid" instead of being listed as
+// a clobbered/output register directly.
+void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile (
+    "mov %%ebx, %%edi\n"
+    "cpuid\n"
+    "xchg %%edi, %%ebx\n"
+    : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
+}
+
+#else
+
+// CPUID for builds where %ebx may be used as a normal output register.
+// Results: cpu_info[0..3] = EAX, EBX, ECX, EDX for leaf |info_type|.
+void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile (
+    "cpuid \n\t"
+    : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
+}
+
+#endif
+
+// _xgetbv returns the value of an Intel Extended Control Register (XCR).
+// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
+uint64_t _xgetbv(uint32_t xcr) {
+  uint32_t eax, edx;
+
+// NOTE(gejun): xgetbv does not exist in gcc before 4.4, disable the use of
+// AVX instruction set.
+#if defined(COMPILER_GCC) &&                                    \
+    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
+  __asm__ volatile ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (xcr));
+#else
+  // Raw opcode bytes for "xgetbv", for assemblers that predate the mnemonic.
+  __asm__ volatile (".byte 0x0f, 0x01, 0xd0" : "=a"(eax),"=d"(edx) : "c"(xcr) : );
+#endif
+  return (static_cast<uint64_t>(edx) << 32) | eax;
+}
+
+#endif  // !_MSC_VER
+#endif  // ARCH_CPU_X86_FAMILY
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+
+// Returns the string found in /proc/cpuinfo under the key "model name" or
+// "Processor". "model name" is used in Linux 3.8 and later (3.7 and later for
+// arm64) and is shown once per CPU. "Processor" is used in earlier versions
+// and is shown only once at the top of /proc/cpuinfo regardless of the number
+// of CPUs. Returns an empty string if neither key is found.
+// NOTE(review): uses std::istringstream but no <sstream>/<string> include is
+// visible in this file -- presumably pulled in transitively; confirm.
+std::string ParseCpuInfo() {
+  const char kModelNamePrefix[] = "model name\t: ";
+  const char kProcessorPrefix[] = "Processor\t: ";
+  std::string contents;
+  ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
+  DCHECK(!contents.empty());
+  std::string cpu_brand;
+  if (!contents.empty()) {
+    std::istringstream iss(contents);
+    std::string line;
+    // Scan line by line; stop at the first line matching either key.
+    while (std::getline(iss, line)) {
+      if (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0) {
+        cpu_brand.assign(line.substr(strlen(kModelNamePrefix)));
+        break;
+      }
+      if (line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0) {
+        cpu_brand.assign(line.substr(strlen(kProcessorPrefix)));
+        break;
+      }
+    }
+  }
+  return cpu_brand;
+}
+
+// Caches the result of ParseCpuInfo() so /proc/cpuinfo is parsed at most
+// once per process.
+class LazyCpuInfoValue {
+ public:
+  LazyCpuInfoValue() : value_(ParseCpuInfo()) {}
+  const std::string& value() { return value_; }
+
+ private:
+  const std::string value_;
+  DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
+};
+
+base::LazyInstance<LazyCpuInfoValue> g_lazy_cpu_brand =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Returns the lazily-parsed, cached CPU brand string.
+const std::string& CpuBrandInfo() {
+  return g_lazy_cpu_brand.Get().value();
+}
+
+#endif  // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
+        // defined(OS_LINUX))
+
+}  // anonymous namespace
+
+// Fills in all member fields: via CPUID on x86, via /proc/cpuinfo on ARM
+// Linux/Android. On other platforms the constructor's defaults are kept.
+void CPU::Initialize() {
+#if defined(ARCH_CPU_X86_FAMILY)
+  int cpu_info[4] = {-1};
+  char cpu_string[48];  // 48 = 3 leaves x 16 bytes for the brand string below
+
+  // __cpuid with an InfoType argument of 0 returns the number of
+  // valid Ids in CPUInfo[0] and the CPU identification string in
+  // the other three array elements. The CPU identification string is
+  // not in linear order. The code below arranges the information
+  // in a human readable form. The human readable order is CPUInfo[1] |
+  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+  // before using memcpy to copy these three array elements to cpu_string.
+  __cpuid(cpu_info, 0);
+  int num_ids = cpu_info[0];
+  std::swap(cpu_info[2], cpu_info[3]);
+  memcpy(cpu_string, &cpu_info[1], 3 * sizeof(cpu_info[1]));
+  cpu_vendor_.assign(cpu_string, 3 * sizeof(cpu_info[1]));
+
+  // Interpret CPU feature information.
+  if (num_ids > 0) {
+    // Leaf 1: EAX = version info, ECX/EDX = feature bits.
+    __cpuid(cpu_info, 1);
+    signature_ = cpu_info[0];
+    stepping_ = cpu_info[0] & 0xf;
+    // Model combines the base model nibble with the extended-model nibble.
+    model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+    family_ = (cpu_info[0] >> 8) & 0xf;
+    type_ = (cpu_info[0] >> 12) & 0x3;
+    ext_model_ = (cpu_info[0] >> 16) & 0xf;
+    ext_family_ = (cpu_info[0] >> 20) & 0xff;
+    has_mmx_ =   (cpu_info[3] & 0x00800000) != 0;  // EDX bit 23
+    has_sse_ =   (cpu_info[3] & 0x02000000) != 0;  // EDX bit 25
+    has_sse2_ =  (cpu_info[3] & 0x04000000) != 0;  // EDX bit 26
+    has_sse3_ =  (cpu_info[2] & 0x00000001) != 0;  // ECX bit 0
+    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;  // ECX bit 9
+    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;  // ECX bit 19
+    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;  // ECX bit 20
+    has_avx_hardware_ =
+                 (cpu_info[2] & 0x10000000) != 0;  // ECX bit 28
+    // AVX instructions will generate an illegal instruction exception unless
+    //   a) they are supported by the CPU,
+    //   b) XSAVE is supported by the CPU and
+    //   c) XSAVE is enabled by the kernel.
+    // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
+    //
+    // In addition, we have observed some crashes with the xgetbv instruction
+    // even after following Intel's example code. (See crbug.com/375968.)
+    // Because of that, we also test the XSAVE bit because its description in
+    // the CPUID documentation suggests that it signals xgetbv support.
+    has_avx_ =
+        has_avx_hardware_ &&
+        (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
+        (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
+        (_xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
+    has_aesni_ = (cpu_info[2] & 0x02000000) != 0;  // ECX bit 25
+  }
+
+  // Get the brand string of the cpu.
+  __cpuid(cpu_info, 0x80000000);
+  const int parameter_end = 0x80000004;
+  int max_parameter = cpu_info[0];
+
+  if (cpu_info[0] >= parameter_end) {
+    char* cpu_string_ptr = cpu_string;
+
+    // Leaves 0x80000002..0x80000004 each yield 16 bytes of the brand string.
+    for (int parameter = 0x80000002; parameter <= parameter_end &&
+         cpu_string_ptr < &cpu_string[sizeof(cpu_string)]; parameter++) {
+      __cpuid(cpu_info, parameter);
+      memcpy(cpu_string_ptr, cpu_info, sizeof(cpu_info));
+      cpu_string_ptr += sizeof(cpu_info);
+    }
+    cpu_brand_.assign(cpu_string, cpu_string_ptr - cpu_string);
+  }
+
+  const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+  if (max_parameter >= parameter_containing_non_stop_time_stamp_counter) {
+    __cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
+    has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
+  }
+#elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+  cpu_brand_.assign(CpuBrandInfo());
+#endif
+}
+
+// Maps the detected feature flags onto the highest supported instruction-set
+// level; checks newest-first so the most capable level wins.
+CPU::IntelMicroArchitecture CPU::GetIntelMicroArchitecture() const {
+  if (has_avx()) return AVX;
+  if (has_sse42()) return SSE42;
+  if (has_sse41()) return SSE41;
+  if (has_ssse3()) return SSSE3;
+  if (has_sse3()) return SSE3;
+  if (has_sse2()) return SSE2;
+  if (has_sse()) return SSE;
+  return PENTIUM;
+}
+
+}  // namespace base

+ 90 - 0
base/cpu.h

@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CPU_H_
+#define BASE_CPU_H_
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Query information about the processor.
+class BASE_EXPORT CPU {
+ public:
+  // Constructor: probes the hardware immediately (see Initialize()).
+  CPU();
+
+  // Instruction-set levels, ordered oldest to newest.
+  enum IntelMicroArchitecture {
+    PENTIUM,
+    SSE,
+    SSE2,
+    SSE3,
+    SSSE3,
+    SSE41,
+    SSE42,
+    AVX,
+    MAX_INTEL_MICRO_ARCHITECTURE
+  };
+
+  // Accessors for CPU information.
+  const std::string& vendor_name() const { return cpu_vendor_; }
+  int signature() const { return signature_; }
+  int stepping() const { return stepping_; }
+  int model() const { return model_; }
+  int family() const { return family_; }
+  int type() const { return type_; }
+  int extended_model() const { return ext_model_; }
+  int extended_family() const { return ext_family_; }
+  bool has_mmx() const { return has_mmx_; }
+  bool has_sse() const { return has_sse_; }
+  bool has_sse2() const { return has_sse2_; }
+  bool has_sse3() const { return has_sse3_; }
+  bool has_ssse3() const { return has_ssse3_; }
+  bool has_sse41() const { return has_sse41_; }
+  bool has_sse42() const { return has_sse42_; }
+  bool has_avx() const { return has_avx_; }
+  // has_avx_hardware returns true when AVX is present in the CPU. This might
+  // differ from the value of |has_avx()| because |has_avx()| also tests for
+  // operating system support needed to actually call AVX instructions.
+  // Note: you should never need to call this function. It was added in order
+  // to workaround a bug in NSS but |has_avx()| is what you want.
+  bool has_avx_hardware() const { return has_avx_hardware_; }
+  bool has_aesni() const { return has_aesni_; }
+  bool has_non_stop_time_stamp_counter() const {
+    return has_non_stop_time_stamp_counter_;
+  }
+  // Returns the newest instruction-set level this CPU supports.
+  IntelMicroArchitecture GetIntelMicroArchitecture() const;
+  const std::string& cpu_brand() const { return cpu_brand_; }
+
+ private:
+  // Query the processor for CPUID information.
+  void Initialize();
+
+  int signature_;  // raw form of type, family, model, and stepping
+  int type_;  // processor type
+  int family_;  // family of the processor
+  int model_;  // model of processor
+  int stepping_;  // processor revision number
+  int ext_model_;
+  int ext_family_;
+  bool has_mmx_;
+  bool has_sse_;
+  bool has_sse2_;
+  bool has_sse3_;
+  bool has_ssse3_;
+  bool has_sse41_;
+  bool has_sse42_;
+  bool has_avx_;
+  bool has_avx_hardware_;
+  bool has_aesni_;
+  bool has_non_stop_time_stamp_counter_;
+  std::string cpu_vendor_;
+  std::string cpu_brand_;
+};
+
+}  // namespace base
+
+#endif  // BASE_CPU_H_

+ 457 - 0
base/crc32c.cc

@@ -0,0 +1,457 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under the BSD-style license found in the
+//  LICENSE file in the root directory of this source tree. An additional grant
+//  of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// A portable implementation of crc32c, optimized to handle
+// four bytes at a time.
+
+#include "base/crc32c.h"
+
+#include <string.h>
+#include <stdint.h>
+#ifdef __SSE4_2__
+#include <nmmintrin.h>
+#endif
+#include "base/build_config.h"
+
+namespace base {
+namespace crc32c {
+
+static const uint32_t table0_[256] = {
+  0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
+  0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
+  0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
+  0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
+  0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
+  0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
+  0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
+  0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
+  0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
+  0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
+  0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
+  0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
+  0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
+  0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
+  0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
+  0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
+  0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
+  0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
+  0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
+  0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
+  0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
+  0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
+  0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
+  0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
+  0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
+  0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
+  0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
+  0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
+  0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
+  0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
+  0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
+  0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
+  0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
+  0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
+  0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
+  0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
+  0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
+  0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
+  0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
+  0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
+  0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
+  0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
+  0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
+  0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
+  0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
+  0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
+  0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
+  0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
+  0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
+  0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
+  0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
+  0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
+  0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
+  0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
+  0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
+  0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
+  0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
+  0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
+  0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
+  0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
+  0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
+  0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
+  0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
+  0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
+};
+static const uint32_t table1_[256] = {
+  0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899,
+  0x4e8a61dc, 0x5d28f9ab, 0x69cf5132, 0x7a6dc945,
+  0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21,
+  0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd,
+  0x3fc5f181, 0x2c6769f6, 0x1880c16f, 0x0b225918,
+  0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4,
+  0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0,
+  0xec5b53e5, 0xfff9cb92, 0xcb1e630b, 0xd8bcfb7c,
+  0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b,
+  0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47,
+  0xe29f20ba, 0xf13db8cd, 0xc5da1054, 0xd6788823,
+  0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff,
+  0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a,
+  0x0ec4735f, 0x1d66eb28, 0x298143b1, 0x3a23dbc6,
+  0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2,
+  0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e,
+  0xff17c604, 0xecb55e73, 0xd852f6ea, 0xcbf06e9d,
+  0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41,
+  0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25,
+  0x2c896460, 0x3f2bfc17, 0x0bcc548e, 0x186eccf9,
+  0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c,
+  0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0,
+  0x5dc6f43d, 0x4e646c4a, 0x7a83c4d3, 0x69215ca4,
+  0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78,
+  0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f,
+  0xce1644da, 0xddb4dcad, 0xe9537434, 0xfaf1ec43,
+  0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27,
+  0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb,
+  0xbf59d487, 0xacfb4cf0, 0x981ce469, 0x8bbe7c1e,
+  0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2,
+  0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6,
+  0x6cc776e3, 0x7f65ee94, 0x4b82460d, 0x5820de7a,
+  0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260,
+  0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc,
+  0x66d73941, 0x7575a136, 0x419209af, 0x523091d8,
+  0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004,
+  0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1,
+  0x8a8c6aa4, 0x992ef2d3, 0xadc95a4a, 0xbe6bc23d,
+  0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059,
+  0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185,
+  0x844819fb, 0x97ea818c, 0xa30d2915, 0xb0afb162,
+  0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be,
+  0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da,
+  0x57d6bb9f, 0x447423e8, 0x70938b71, 0x63311306,
+  0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3,
+  0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f,
+  0x26992bc2, 0x353bb3b5, 0x01dc1b2c, 0x127e835b,
+  0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287,
+  0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464,
+  0x4a5e5d21, 0x59fcc556, 0x6d1b6dcf, 0x7eb9f5b8,
+  0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc,
+  0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600,
+  0x3b11cd7c, 0x28b3550b, 0x1c54fd92, 0x0ff665e5,
+  0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439,
+  0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d,
+  0xe88f6f18, 0xfb2df76f, 0xcfca5ff6, 0xdc68c781,
+  0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766,
+  0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba,
+  0xe64b1c47, 0xf5e98430, 0xc10e2ca9, 0xd2acb4de,
+  0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502,
+  0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7,
+  0x0a104fa2, 0x19b2d7d5, 0x2d557f4c, 0x3ef7e73b,
+  0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f,
+  0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483
+};
+static const uint32_t table2_[256] = {
+  0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073,
+  0x9edea41a, 0x3b9f3664, 0xd1b1f617, 0x74f06469,
+  0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6,
+  0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac,
+  0x70a27d8a, 0xd5e3eff4, 0x3fcd2f87, 0x9a8cbdf9,
+  0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3,
+  0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c,
+  0xd62de755, 0x736c752b, 0x9942b558, 0x3c032726,
+  0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67,
+  0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d,
+  0xd915c5d1, 0x7c5457af, 0x967a97dc, 0x333b05a2,
+  0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8,
+  0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed,
+  0x0f382284, 0xaa79b0fa, 0x40577089, 0xe516e2f7,
+  0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828,
+  0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32,
+  0xc76580d9, 0x622412a7, 0x880ad2d4, 0x2d4b40aa,
+  0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0,
+  0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f,
+  0x61ea1a06, 0xc4ab8878, 0x2e85480b, 0x8bc4da75,
+  0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20,
+  0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a,
+  0x8f96c396, 0x2ad751e8, 0xc0f9919b, 0x65b803e5,
+  0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff,
+  0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe,
+  0xb8ffdfd7, 0x1dbe4da9, 0xf7908dda, 0x52d11fa4,
+  0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b,
+  0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161,
+  0x56830647, 0xf3c29439, 0x19ec544a, 0xbcadc634,
+  0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e,
+  0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1,
+  0xf00c9c98, 0x554d0ee6, 0xbf63ce95, 0x1a225ceb,
+  0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730,
+  0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a,
+  0xb3764986, 0x1637dbf8, 0xfc191b8b, 0x595889f5,
+  0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def,
+  0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba,
+  0x655baed3, 0xc01a3cad, 0x2a34fcde, 0x8f756ea0,
+  0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f,
+  0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065,
+  0x6a638c57, 0xcf221e29, 0x250cde5a, 0x804d4c24,
+  0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e,
+  0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1,
+  0xccec1688, 0x69ad84f6, 0x83834485, 0x26c2d6fb,
+  0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae,
+  0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4,
+  0x2290cf18, 0x87d15d66, 0x6dff9d15, 0xc8be0f6b,
+  0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71,
+  0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9,
+  0xd29c5380, 0x77ddc1fe, 0x9df3018d, 0x38b293f3,
+  0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c,
+  0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36,
+  0x3ce08a10, 0x99a1186e, 0x738fd81d, 0xd6ce4a63,
+  0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79,
+  0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6,
+  0x9a6f10cf, 0x3f2e82b1, 0xd50042c2, 0x7041d0bc,
+  0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd,
+  0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7,
+  0x9557324b, 0x3016a035, 0xda386046, 0x7f79f238,
+  0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622,
+  0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177,
+  0x437ad51e, 0xe63b4760, 0x0c158713, 0xa954156d,
+  0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2,
+  0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8
+};
+static const uint32_t table3_[256] = {
+  0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939,
+  0x7b2231f3, 0xa6679b4b, 0xc4451272, 0x1900b8ca,
+  0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf,
+  0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c,
+  0xe964b13d, 0x34211b85, 0x560392bc, 0x8b463804,
+  0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7,
+  0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2,
+  0x6402e328, 0xb9474990, 0xdb65c0a9, 0x06206a11,
+  0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2,
+  0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41,
+  0x2161776d, 0xfc24ddd5, 0x9e0654ec, 0x4343fe54,
+  0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7,
+  0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f,
+  0x45639445, 0x98263efd, 0xfa04b7c4, 0x27411d7c,
+  0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69,
+  0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a,
+  0xaba65fe7, 0x76e3f55f, 0x14c17c66, 0xc984d6de,
+  0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d,
+  0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538,
+  0x26c00df2, 0xfb85a74a, 0x99a72e73, 0x44e284cb,
+  0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3,
+  0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610,
+  0xb4868d3c, 0x69c32784, 0x0be1aebd, 0xd6a40405,
+  0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6,
+  0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255,
+  0x07a17a9f, 0xdae4d027, 0xb8c6591e, 0x6583f3a6,
+  0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3,
+  0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040,
+  0x95e7fa51, 0x48a250e9, 0x2a80d9d0, 0xf7c57368,
+  0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b,
+  0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e,
+  0x1881a844, 0xc5c402fc, 0xa7e68bc5, 0x7aa3217d,
+  0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006,
+  0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5,
+  0xa4e4aad9, 0x79a10061, 0x1b838958, 0xc6c623e0,
+  0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213,
+  0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b,
+  0xc0e649f1, 0x1da3e349, 0x7f816a70, 0xa2c4c0c8,
+  0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd,
+  0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e,
+  0x8585ddb4, 0x58c0770c, 0x3ae2fe35, 0xe7a7548d,
+  0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e,
+  0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b,
+  0x08e38fa1, 0xd5a62519, 0xb784ac20, 0x6ac10698,
+  0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0,
+  0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443,
+  0x9aa50f6f, 0x47e0a5d7, 0x25c22cee, 0xf8878656,
+  0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5,
+  0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1,
+  0x8224a72b, 0x5f610d93, 0x3d4384aa, 0xe0062e12,
+  0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07,
+  0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4,
+  0x106227e5, 0xcd278d5d, 0xaf050464, 0x7240aedc,
+  0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f,
+  0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a,
+  0x9d0475f0, 0x4041df48, 0x22635671, 0xff26fcc9,
+  0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a,
+  0x5501b3a0, 0x88441918, 0xea669021, 0x37233a99,
+  0xd867e1b5, 0x05224b0d, 0x6700c234, 0xba45688c,
+  0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f,
+  0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57,
+  0xbc65029d, 0x6120a825, 0x0302211c, 0xde478ba4,
+  0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1,
+  0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842
+};
+
+// Lower-level versions of Get... that read directly from a character buffer
+// without any bounds checking.
+
+// Reads a 32-bit little-endian value. On little-endian targets this is a
+// single (compiler-elided) memcpy load; otherwise bytes are assembled
+// manually.
+static inline uint32_t DecodeFixed32(const char* ptr) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN) && ARCH_CPU_LITTLE_ENDIAN
+    // Load the raw bytes
+    uint32_t result;
+    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+    return result;
+#else
+    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
+        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
+        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
+        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
+#endif
+}
+
+// 64-bit little-endian counterpart of DecodeFixed32.
+inline uint64_t DecodeFixed64(const char* ptr) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN) && ARCH_CPU_LITTLE_ENDIAN
+    // Load the raw bytes
+    uint64_t result;
+    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+    return result;
+#else
+    uint64_t lo = DecodeFixed32(ptr);
+    uint64_t hi = DecodeFixed32(ptr + 4);
+    return (hi << 32) | lo;
+#endif
+}
+
+// Used to fetch a naturally-aligned 32-bit word in little endian byte-order
+static inline uint32_t LE_LOAD32(const uint8_t *p) {
+  return DecodeFixed32(reinterpret_cast<const char*>(p));
+}
+
+// 64-bit load; only needed by the hardware (SSE4.2 + 64-bit) path below.
+#ifdef __SSE4_2__
+#ifdef __LP64__
+static inline uint64_t LE_LOAD64(const uint8_t *p) {
+  return DecodeFixed64(reinterpret_cast<const char*>(p));
+}
+#endif
+#endif
+
+// One table-driven step: folds 8 bytes (two 4-byte words) of *p into the
+// running CRC held in *l and advances *p by 8.
+static inline void Slow_CRC32(uint64_t* l, uint8_t const **p) {
+  uint32_t c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
+  *p += 4;
+  *l = table3_[c & 0xff] ^
+  table2_[(c >> 8) & 0xff] ^
+  table1_[(c >> 16) & 0xff] ^
+  table0_[c >> 24];
+  // Fold the second 4-byte word, for 8 bytes total per call.
+  c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
+  *p += 4;
+  *l = table3_[c & 0xff] ^
+  table2_[(c >> 8) & 0xff] ^
+  table1_[(c >> 16) & 0xff] ^
+  table0_[c >> 24];
+}
+
+// One hardware step: folds 8 bytes via the SSE4.2 crc32 intrinsics when the
+// file is compiled with __SSE4_2__ (one u64 op on LP64, two u32 ops
+// otherwise); without __SSE4_2__ it compiles to the table-driven step above.
+static inline void Fast_CRC32(uint64_t* l, uint8_t const **p) {
+#ifdef __SSE4_2__
+#ifdef __LP64__
+  *l = _mm_crc32_u64(*l, LE_LOAD64(*p));
+  *p += 8;
+#else
+  *l = _mm_crc32_u32(static_cast<unsigned int>(*l), LE_LOAD32(*p));
+  *p += 4;
+  *l = _mm_crc32_u32(static_cast<unsigned int>(*l), LE_LOAD32(*p));
+  *p += 4;
+#endif
+#else
+  Slow_CRC32(l, p);
+#endif
+}
+
+// Stateless functor wrappers so ExtendImpl can be instantiated once per
+// stepping strategy (hardware vs. table-driven).
+class FastCRC32Functor {
+public:
+  inline void operator()(uint64_t* l, uint8_t const **p) const {
+    return Fast_CRC32(l , p);
+  }
+};
+
+class SlowCRC32Functor {
+public:
+  inline void operator()(uint64_t* l, uint8_t const **p) const {
+    return Slow_CRC32(l , p);
+  }
+};
+
+// Extends |crc| over buf[0, size). The running CRC is kept bit-inverted
+// (l = crc ^ 0xffffffff) per crc32c convention and inverted again on return.
+// Bytes before the first 16-byte boundary and trailing bytes are folded one
+// at a time (STEP1); the aligned middle is folded 8 bytes per CRC32() call.
+template<typename CRC32Functor>
+uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) {
+  CRC32Functor CRC32;
+
+  const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
+  const uint8_t *e = p + size;
+  uint64_t l = crc ^ 0xffffffffu;
+
+// Align n to (1 << m) byte boundary
+#define ALIGN(n, m)     ((n + ((1 << m) - 1)) & ~((1 << m) - 1))
+
+// Fold a single byte into the running CRC via table0_.
+#define STEP1 do {                              \
+    int c = (l & 0xff) ^ *p++;                  \
+    l = table0_[c] ^ (l >> 8);                  \
+} while (0)
+
+
+  // Point x at first 16-byte aligned byte in string.  This might be
+  // just past the end of the string.
+  const uintptr_t pval = reinterpret_cast<uintptr_t>(p);
+  const uint8_t* x = reinterpret_cast<const uint8_t*>(ALIGN(pval, 4));
+  if (x <= e) {
+    // Process bytes until finished or p is 16-byte aligned
+    while (p != x) {
+      STEP1;
+    }
+  }
+  // Process bytes 16 at a time
+  while ((e-p) >= 16) {
+    CRC32(&l, &p);
+    CRC32(&l, &p);
+  }
+  // Process bytes 8 at a time
+  while ((e-p) >= 8) {
+    CRC32(&l, &p);
+  }
+  // Process the last few bytes
+  while (p != e) {
+    STEP1;
+  }
+#undef STEP1
+#undef ALIGN
+  return static_cast<uint32_t>(l ^ 0xffffffffu);
+}
+
+// Runtime detection of SSE4.2 support (CPUID leaf 1, ECX bit 20).
+// Only implemented for gcc-compatible x86-64 builds; elsewhere returns false.
+static bool isSSE42() {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(IOS_CROSS_COMPILE)
+  uint32_t c_;
+  uint32_t d_;
+  __asm__("cpuid" : "=c"(c_), "=d"(d_) : "a"(1) : "ebx");
+  return c_ & (1U << 20);  // copied from CpuId.h in Folly.
+#else
+  return false;
+#endif
+}
+
+typedef uint32_t (*Function)(uint32_t, const char*, size_t);
+
+// Picks the implementation once at startup: the "fast" instantiation when the
+// running CPU reports SSE4.2, the table-driven one otherwise. Note that
+// unless this file was compiled with __SSE4_2__, FastCRC32Functor itself
+// still falls back to the table-driven step (see Fast_CRC32).
+static inline Function Choose_Extend() {
+  return isSSE42() ? (Function)ExtendImpl<FastCRC32Functor> : 
+                    (Function)ExtendImpl<SlowCRC32Functor>;
+}
+
+// True only when hardware CRC32 is both compiled in (__SSE4_2__) and
+// supported by the running CPU.
+bool IsFastCrc32Supported() {
+#ifdef __SSE4_2__
+  return isSSE42();
+#else
+  return false;
+#endif
+}
+
+// Dispatches to the implementation selected on first call. The function
+// pointer is a function-local static; its initialization is thread-safe only
+// with C++11 "magic statics" -- NOTE(review): confirm the build dialect.
+uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+  static Function ChosenExtend = Choose_Extend();
+  return ChosenExtend(crc, buf, size);
+}
+
+}  // namespace crc32c
+}  // namespace base

+ 52 - 0
base/crc32c.h

@@ -0,0 +1,52 @@
+//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under the BSD-style license found in the
+//  LICENSE file in the root directory of this source tree. An additional grant
+//  of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef BASE_CRC32C_H
+#define BASE_CRC32C_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace base {
+namespace crc32c {
+
+// Returns true when a hardware-assisted (SSE4.2) crc32c implementation is
+// both compiled in and supported by the running CPU.
+extern bool IsFastCrc32Supported();
+
+// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
+// crc32c of some string A.  Extend() is often used to maintain the
+// crc32c of a stream of data.
+extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
+
+// Return the crc32c of data[0,n-1]
+inline uint32_t Value(const char* data, size_t n) {
+  return Extend(0, data, n);
+}
+
+static const uint32_t kMaskDelta = 0xa282ead8ul;
+
+// Return a masked representation of crc.
+//
+// Motivation: it is problematic to compute the CRC of a string that
+// contains embedded CRCs.  Therefore we recommend that CRCs stored
+// somewhere (e.g., in files) should be masked before being stored.
+inline uint32_t Mask(uint32_t crc) {
+  // Rotate right by 15 bits and add a constant.
+  return ((crc >> 15) | (crc << 17)) + kMaskDelta;
+}
+
+// Return the crc whose masked representation is masked_crc.
+// Exact inverse of Mask(): subtract the constant, then rotate left 15 bits.
+inline uint32_t Unmask(uint32_t masked_crc) {
+  uint32_t rot = masked_crc - kMaskDelta;
+  return ((rot >> 17) | (rot << 15));
+}
+
+}  // namespace crc32c
+}  // namespace base
+
+#endif  // BASE_CRC32C_H

+ 23 - 0
base/debug/alias.cc

@@ -0,0 +1,23 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/alias.h"
+#include "base/build_config.h"
+
+namespace base {
+namespace debug {
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", off)
+#endif
+
+// Deliberately empty: calling it makes |var| appear used so the optimizer
+// keeps the variable (and its value) alive for crash dumps. The surrounding
+// MSVC pragmas disable optimization here, presumably so the no-op call is
+// not itself eliminated -- see alias.h for intended usage.
+void Alias(const void* var) {
+}
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", on)
+#endif
+
+}  // namespace debug
+}  // namespace base

+ 21 - 0
base/debug/alias.h

@@ -0,0 +1,21 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_ALIAS_H_
#define BASE_DEBUG_ALIAS_H_

#include "base/base_export.h"

namespace base {
namespace debug {

// Make the optimizer think that var is aliased. This is to prevent it from
// optimizing out variables that would not otherwise be live at the point
// of a potential crash.  Typical use: keep a value visible in a minidump.
void BASE_EXPORT Alias(const void* var);

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_ALIAS_H_

+ 94 - 0
base/debug/asan_invalid_access.cc

@@ -0,0 +1,94 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Every function in this file performs a DELIBERATE invalid memory access so
// that ASan/SyzyASan error reporting can be exercised end-to-end.  Nothing
// here is a bug to "fix".

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "base/debug/alias.h"
#include "base/debug/asan_invalid_access.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"

namespace base {
namespace debug {

namespace {

#if defined(SYZYASAN)
// Corrupt a memory block and make sure that the corruption gets detected either
// when we free it or when another crash happens (if |induce_crash| is set to
// true).
NOINLINE void CorruptMemoryBlock(bool induce_crash) {
  // NOTE(sebmarchand): We intentionally corrupt a memory block here in order to
  //     trigger an Address Sanitizer (ASAN) error report.
  static const int kArraySize = 5;
  int* array = new int[kArraySize];
  // Encapsulate the invalid memory access into a try-catch statement to prevent
  // this function from being instrumented. This way the underflow won't be
  // detected but the corruption will (as the allocator will still be hooked).
  try {
    // Declares the dummy value as volatile to make sure it doesn't get
    // optimized away.
    int volatile dummy = array[-1]--;  // intentional underflow write
    base::debug::Alias(const_cast<int*>(&dummy));
  } catch (...) {
  }
  // When |induce_crash| is true, CHECK(false) aborts before delete[], so the
  // corruption is found by the crash handler rather than by the deallocation.
  if (induce_crash)
    CHECK(false);
  delete[] array;
}
#endif

}  // namespace

#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
// NOTE(sebmarchand): We intentionally perform some invalid heap access here in
//     order to trigger an AddressSanitizer (ASan) error report.

static const int kArraySize = 5;

void AsanHeapOverflow() {
  scoped_ptr<int[]> array(new int[kArraySize]);
  // Declares the dummy value as volatile to make sure it doesn't get optimized
  // away.
  int volatile dummy = 0;
  dummy = array[kArraySize];  // intentional read one past the end
  base::debug::Alias(const_cast<int*>(&dummy));
}

void AsanHeapUnderflow() {
  scoped_ptr<int[]> array(new int[kArraySize]);
  // Declares the dummy value as volatile to make sure it doesn't get optimized
  // away.
  int volatile dummy = 0;
  dummy = array[-1];  // intentional read one before the start
  base::debug::Alias(const_cast<int*>(&dummy));
}

void AsanHeapUseAfterFree() {
  scoped_ptr<int[]> array(new int[kArraySize]);
  // Declares the dummy value as volatile to make sure it doesn't get optimized
  // away.
  int volatile dummy = 0;
  // Keep a raw pointer alive across the reset() so the read below touches
  // freed memory.
  int* dangling = array.get();
  array.reset();
  dummy = dangling[kArraySize / 2];  // intentional use-after-free
  base::debug::Alias(const_cast<int*>(&dummy));
}

#endif  // ADDRESS_SANITIZER || SYZYASAN

#if defined(SYZYASAN)
void AsanCorruptHeapBlock() {
  CorruptMemoryBlock(false);
}

void AsanCorruptHeap() {
  CorruptMemoryBlock(true);
}
#endif  // SYZYASAN

}  // namespace debug
}  // namespace base

+ 47 - 0
base/debug/asan_invalid_access.h

@@ -0,0 +1,47 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Defines some functions that intentionally do an invalid memory access in
// order to trigger an AddressSanitizer (ASan) error report.

#ifndef BASE_DEBUG_ASAN_INVALID_ACCESS_H_
#define BASE_DEBUG_ASAN_INVALID_ACCESS_H_

#include "base/base_export.h"
#include "base/compiler_specific.h"

namespace base {
namespace debug {

#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)

// Generates a heap buffer overflow (read one element past the end).
BASE_EXPORT NOINLINE void AsanHeapOverflow();

// Generates a heap buffer underflow (read one element before the start).
BASE_EXPORT NOINLINE void AsanHeapUnderflow();

// Generates a use-after-free (read through a dangling pointer).
BASE_EXPORT NOINLINE void AsanHeapUseAfterFree();

#endif  // ADDRESS_SANITIZER || SYZYASAN

// The "corrupt-block" and "corrupt-heap" classes of bugs is specific to
// SyzyASan.
#if defined(SYZYASAN)

// Corrupts a memory block and makes sure that the corruption gets detected when
// we try to free this block.
BASE_EXPORT NOINLINE void AsanCorruptHeapBlock();

// Corrupts the heap and makes sure that the corruption gets detected when a
// crash occur.
BASE_EXPORT NOINLINE void AsanCorruptHeap();

#endif  // SYZYASAN

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_ASAN_INVALID_ACCESS_H_

+ 202 - 0
base/debug/crash_logging.cc

@@ -0,0 +1,202 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/crash_logging.h"

#include <cmath>
#include <map>

#include "base/debug/stack_trace.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"

namespace base {
namespace debug {

namespace {

// Global map of crash key names to registration entries.
// NOTE: the StringPiece keys point at CrashKey::key_name, so the registered
// CrashKey array must outlive this map (keys are expected to be literals).
typedef std::map<base::StringPiece, CrashKey> CrashKeyMap;
CrashKeyMap* g_crash_keys_ = NULL;

// The maximum length of a single chunk.  Zero until InitCrashKeys() runs.
size_t g_chunk_max_length_ = 0;

// String used to format chunked key names, e.g. "mykey-1", "mykey-2", ...
const char kChunkFormatString[] = "%s-%" PRIuS;

// The functions that are called to actually set the key-value pairs in the
// crash reporting system.  NULL until SetCrashKeyReportingFunctions() runs.
SetCrashKeyValueFuncT g_set_key_func_ = NULL;
ClearCrashKeyValueFuncT g_clear_key_func_ = NULL;

// For a given |length|, computes the number of chunks a value of that size
// will occupy.  (Divides by g_chunk_max_length_ and rounds up; callers must
// ensure InitCrashKeys() has set a non-zero chunk length first.)
size_t NumChunksForLength(size_t length) {
    return (size_t)std::ceil(length / static_cast<double>(g_chunk_max_length_));
}

// The longest max_length allowed by the system.
const size_t kLargestValueAllowed = 1024;

}  // namespace
+
// Stores |value| under |key| in the crash reporting backend.  Values longer
// than the chunk limit are split across numbered sub-keys ("key-1", "key-2");
// any previously-set trailing chunks are cleared first.
void SetCrashKeyValue(const base::StringPiece& key,
                      const base::StringPiece& value) {
  // Silently no-op until both the reporting functions and the key registry
  // have been installed.
  if (!g_set_key_func_ || !g_crash_keys_)
    return;

  const CrashKey* crash_key = LookupCrashKey(key);

  DCHECK(crash_key) << "All crash keys must be registered before use "
                    << "(key = " << key << ")";

  // Handle the un-chunked case.
  if (!crash_key || crash_key->max_length <= g_chunk_max_length_) {
    g_set_key_func_(key, value);
    return;
  }

  // Unset the unused chunks.
  std::vector<std::string> chunks =
      ChunkCrashKeyValue(*crash_key, value, g_chunk_max_length_);
  for (size_t i = chunks.size();
       i < NumChunksForLength(crash_key->max_length);
       ++i) {
    g_clear_key_func_(base::StringPrintf(kChunkFormatString, key.data(), i+1));
  }

  // Set the chunked keys.  Chunk names are 1-based per kChunkFormatString.
  for (size_t i = 0; i < chunks.size(); ++i) {
    g_set_key_func_(base::StringPrintf(kChunkFormatString, key.data(), i+1),
                    chunks[i]);
  }
}

// Removes |key| (and, for chunked keys, every numbered chunk) from the crash
// reporting backend.
void ClearCrashKey(const base::StringPiece& key) {
  if (!g_clear_key_func_ || !g_crash_keys_)
    return;

  const CrashKey* crash_key = LookupCrashKey(key);

  // Handle the un-chunked case.
  if (!crash_key || crash_key->max_length <= g_chunk_max_length_) {
    g_clear_key_func_(key);
    return;
  }

  for (size_t i = 0; i < NumChunksForLength(crash_key->max_length); ++i) {
    g_clear_key_func_(base::StringPrintf(kChunkFormatString, key.data(), i+1));
  }
}
+
// Records the instruction pointers of |trace| under crash key |key|.
void SetCrashKeyToStackTrace(const base::StringPiece& key,
                             const StackTrace& trace) {
  size_t count = 0;
  const void* const* addresses = trace.Addresses(&count);
  SetCrashKeyFromAddresses(key, addresses, count);
}

// Formats up to |count| pointers from |addresses| as space-separated "%p"
// strings and stores the result under |key|.  Stops early so the encoded
// value stays within breakpad's 255-byte value limit; stores "<null>" when
// there are no addresses.
void SetCrashKeyFromAddresses(const base::StringPiece& key,
                              const void* const* addresses,
                              size_t count) {
  std::string value = "<null>";
  if (addresses && count) {
    const size_t kBreakpadValueMax = 255;

    std::vector<std::string> hex_backtrace;
    size_t length = 0;

    for (size_t i = 0; i < count; ++i) {
      std::string s = base::StringPrintf("%p", addresses[i]);
      // +1 accounts for the joining space added below.
      length += s.length() + 1;
      if (length > kBreakpadValueMax)
        break;
      hex_backtrace.push_back(s);
    }

    value = JoinString(hex_backtrace, ' ');

    // Warn if this exceeds the breakpad limits.
    DCHECK_LE(value.length(), kBreakpadValueMax);
  }

  SetCrashKeyValue(key, value);
}

// Sets |key| to |value| for the scoper's lifetime; cleared in the destructor.
ScopedCrashKey::ScopedCrashKey(const base::StringPiece& key,
                               const base::StringPiece& value)
    : key_(key.as_string()) {
  SetCrashKeyValue(key, value);
}

ScopedCrashKey::~ScopedCrashKey() {
  ClearCrashKey(key_);
}
+
// Registers |keys| and records the chunk size.  Returns the total number of
// backend key slots needed (chunked keys consume several).  Passing NULL
// tears the registry down.  May only be initialized once (DCHECK-enforced);
// each max_length must be strictly below kLargestValueAllowed.
size_t InitCrashKeys(const CrashKey* const keys, size_t count,
                     size_t chunk_max_length) {
  DCHECK(!g_crash_keys_) << "Crash logging may only be initialized once";
  if (!keys) {
    delete g_crash_keys_;
    g_crash_keys_ = NULL;
    return 0;
  }

  g_crash_keys_ = new CrashKeyMap;
  g_chunk_max_length_ = chunk_max_length;

  size_t total_keys = 0;
  for (size_t i = 0; i < count; ++i) {
    g_crash_keys_->insert(std::make_pair(keys[i].key_name, keys[i]));
    total_keys += NumChunksForLength(keys[i].max_length);
    DCHECK_LT(keys[i].max_length, kLargestValueAllowed);
  }
  // insert() drops duplicates, so a size mismatch means two entries shared a
  // key_name.
  DCHECK_EQ(count, g_crash_keys_->size())
      << "Duplicate crash keys were registered";

  return total_keys;
}

// Returns the registered CrashKey for |key|, or NULL if unregistered (or if
// the registry was never initialized).
const CrashKey* LookupCrashKey(const base::StringPiece& key) {
  if (!g_crash_keys_)
    return NULL;
  CrashKeyMap::const_iterator it = g_crash_keys_->find(key.as_string());
  if (it == g_crash_keys_->end())
    return NULL;
  return &(it->second);
}

// Installs the platform-specific callbacks that actually write/erase
// key-value pairs in the crash reporter.
void SetCrashKeyReportingFunctions(
    SetCrashKeyValueFuncT set_key_func,
    ClearCrashKeyValueFuncT clear_key_func) {
  g_set_key_func_ = set_key_func;
  g_clear_key_func_ = clear_key_func;
}

// Truncates |value| to the key's max_length, then slices it into pieces of
// at most |chunk_max_length| bytes, in order.
std::vector<std::string> ChunkCrashKeyValue(const CrashKey& crash_key,
                                            const base::StringPiece& value,
                                            size_t chunk_max_length) {
  std::string value_string = value.substr(0, crash_key.max_length).as_string();
  std::vector<std::string> chunks;
  for (size_t offset = 0; offset < value_string.length(); ) {
    std::string chunk = value_string.substr(offset, chunk_max_length);
    chunks.push_back(chunk);
    offset += chunk.length();
  }
  return chunks;
}

// Resets every piece of global state so tests can call InitCrashKeys() again.
void ResetCrashLoggingForTesting() {
  delete g_crash_keys_;
  g_crash_keys_ = NULL;
  g_chunk_max_length_ = 0;
  g_set_key_func_ = NULL;
  g_clear_key_func_ = NULL;
}

}  // namespace debug
}  // namespace base

+ 104 - 0
base/debug/crash_logging.h

@@ -0,0 +1,104 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_CRASH_LOGGING_H_
#define BASE_DEBUG_CRASH_LOGGING_H_

#include <string>
#include <vector>

#include "base/base_export.h"
#include "base/basictypes.h"
#include "base/strings/string_piece.h"

// These functions add metadata to the upload payload when sending crash reports
// to the crash server.
//
// IMPORTANT: On OS X and Linux, the key/value pairs are only sent as part of
// the upload and are not included in the minidump!

namespace base {
namespace debug {

class StackTrace;

// Set or clear a specific key-value pair from the crash metadata. Keys and
// values are terminated at the null byte.
BASE_EXPORT void SetCrashKeyValue(const base::StringPiece& key,
                                  const base::StringPiece& value);
BASE_EXPORT void ClearCrashKey(const base::StringPiece& key);

// Records the given StackTrace into a crash key.
BASE_EXPORT void SetCrashKeyToStackTrace(const base::StringPiece& key,
                                         const StackTrace& trace);

// Formats |count| instruction pointers from |addresses| using %p and
// sets the resulting string as a value for crash key |key|. A maximum of 23
// items will be encoded, since breakpad limits values to 255 bytes.
BASE_EXPORT void SetCrashKeyFromAddresses(const base::StringPiece& key,
                                          const void* const* addresses,
                                          size_t count);

// A scoper that sets the specified key to value for the lifetime of the
// object, and clears it on destruction.
class BASE_EXPORT ScopedCrashKey {
 public:
  ScopedCrashKey(const base::StringPiece& key, const base::StringPiece& value);
  ~ScopedCrashKey();

 private:
  // Copy of the key name, so clearing works even if the caller's backing
  // storage is gone by destruction time.
  std::string key_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCrashKey);
};

// Before setting values for a key, all the keys must be registered.
struct BASE_EXPORT CrashKey {
  // The name of the crash key, used in the above functions.
  const char* key_name;

  // The maximum length for a value. If the value is longer than this, it will
  // be truncated. If the value is larger than the |chunk_max_length| passed to
  // InitCrashKeys() but less than this value, it will be split into multiple
  // numbered chunks.
  size_t max_length;
};

// Before the crash key logging mechanism can be used, all crash keys must be
// registered with this function. The function returns the amount of space
// the crash reporting implementation should allocate space for the registered
// crash keys. |chunk_max_length| is the maximum size that a value in a single
// chunk can be.
BASE_EXPORT size_t InitCrashKeys(const CrashKey* const keys, size_t count,
                                 size_t chunk_max_length);

// Returns the corresponding crash key object or NULL for a given key.
BASE_EXPORT const CrashKey* LookupCrashKey(const base::StringPiece& key);

// In the platform crash reporting implementation, these functions set and
// clear the NUL-terminated key-value pairs.
typedef void (*SetCrashKeyValueFuncT)(const base::StringPiece&,
                                      const base::StringPiece&);
typedef void (*ClearCrashKeyValueFuncT)(const base::StringPiece&);

// Sets the function pointers that are used to integrate with the platform-
// specific crash reporting libraries.
BASE_EXPORT void SetCrashKeyReportingFunctions(
    SetCrashKeyValueFuncT set_key_func,
    ClearCrashKeyValueFuncT clear_key_func);

// Helper function that breaks up a value according to the parameters
// specified by the crash key object.
BASE_EXPORT std::vector<std::string> ChunkCrashKeyValue(
    const CrashKey& crash_key,
    const base::StringPiece& value,
    size_t chunk_max_length);

// Resets the crash key system so it can be reinitialized. For testing only.
BASE_EXPORT void ResetCrashLoggingForTesting();

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_CRASH_LOGGING_H_

+ 41 - 0
base/debug/debugger.cc

@@ -0,0 +1,41 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/debugger.h"
#include "base/logging.h"
#include "base/threading/platform_thread.h"

namespace base {
namespace debug {

// Process-wide flag read by IsDebugUISuppressed().  Not synchronized;
// callers are expected to set it once during start-up.
static bool is_debug_ui_suppressed = false;

// Polls BeingDebugged() every 100ms for up to |wait_seconds| seconds.
// Returns true as soon as a debugger is seen (breaking into it unless
// |silent|); returns false on timeout.
bool WaitForDebugger(int wait_seconds, bool silent) {
#if defined(OS_ANDROID)
  // The pid from which we know which process to attach to are not output by
  // android ddms, so we have to print it out explicitly.
  DLOG(INFO) << "DebugUtil::WaitForDebugger(pid=" << static_cast<int>(getpid())
             << ")";
#endif
  for (int i = 0; i < wait_seconds * 10; ++i) {
    if (BeingDebugged()) {
      if (!silent)
        BreakDebugger();
      return true;
    }
    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
  }
  return false;
}

void SetSuppressDebugUI(bool suppress) {
  is_debug_ui_suppressed = suppress;
}

bool IsDebugUISuppressed() {
  return is_debug_ui_suppressed;
}

}  // namespace debug
}  // namespace base

+ 44 - 0
base/debug/debugger.h

@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a cross platform interface for helper functions related to
+// debuggers.  You should use this to test if you're running under a debugger,
+// and if you would like to yield (breakpoint) into the debugger.
+
+#ifndef BASE_DEBUG_DEBUGGER_H
+#define BASE_DEBUG_DEBUGGER_H
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Waits wait_seconds seconds for a debugger to attach to the current process.
+// When silent is false, an exception is thrown when a debugger is detected.
+BASE_EXPORT bool WaitForDebugger(int wait_seconds, bool silent);
+
+// Returns true if the given process is being run under a debugger.
+//
+// On OS X, the underlying mechanism doesn't work when the sandbox is enabled.
+// To get around this, this function caches its value.
+//
+// WARNING: Because of this, on OS X, a call MUST be made to this function
+// BEFORE the sandbox is enabled.
+BASE_EXPORT bool BeingDebugged();
+
+// Break into the debugger, assumes a debugger is present.
+BASE_EXPORT void BreakDebugger();
+
+// Used in test code, this controls whether showing dialogs and breaking into
+// the debugger is suppressed for debug errors, even in debug mode (normally
+// release mode doesn't do this stuff --  this is controlled separately).
+// Normally UI is not suppressed.  This is normally used when running automated
+// tests where we want a crash rather than a dialog or a debugger.
+BASE_EXPORT void SetSuppressDebugUI(bool suppress);
+BASE_EXPORT bool IsDebugUISuppressed();
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_DEBUGGER_H

+ 255 - 0
base/debug/debugger_posix.cc

@@ -0,0 +1,255 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+#include "base/build_config.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <vector>
+
+#if defined(__GLIBCXX__)
+#include <cxxabi.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_BSD)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <sys/user.h>
+#endif
+
+#include <ostream>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/safe_strerror_posix.h"
+#include "base/strings/string_piece.h"
+
+#if defined(USE_SYMBOLIZE)
+#include "base/third_party/symbolize/symbolize.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/threading/platform_thread.h"
+#endif
+
namespace base {
namespace debug {

#if defined(OS_MACOSX) || defined(OS_BSD)

// Based on Apple's recommended method as described in
// http://developer.apple.com/qa/qa2004/qa1361.html
bool BeingDebugged() {
  // NOTE: This code MUST be async-signal safe (it's used by in-process
  // stack dumping signal handler). NO malloc or stdio is allowed here.
  //
  // While some code used below may be async-signal unsafe, note how
  // the result is cached (see |is_set| and |being_debugged| static variables
  // right below). If this code is properly warmed-up early
  // in the start-up process, it should be safe to use later.

  // If the process is sandboxed then we can't use the sysctl, so cache the
  // value.
  static bool is_set = false;
  static bool being_debugged = false;

  if (is_set)
    return being_debugged;

  // Initialize mib, which tells sysctl what info we want.  In this case,
  // we're looking for information about a specific process ID.
  int mib[] = {
    CTL_KERN,
    KERN_PROC,
    KERN_PROC_PID,
    getpid()
#if defined(OS_OPENBSD)
    , sizeof(struct kinfo_proc),
    0
#endif
  };

  // Caution: struct kinfo_proc is marked __APPLE_API_UNSTABLE.  The source and
  // binary interfaces may change.
  struct kinfo_proc info;
  size_t info_size = sizeof(info);

#if defined(OS_OPENBSD)
  // First query just the required size (OpenBSD wants the element count in
  // mib[5]).
  // NOTE(review): returning -1 from a bool function yields |true| here;
  // a pre-flight sysctl failure arguably should return false — confirm
  // upstream intent before changing.
  if (sysctl(mib, arraysize(mib), NULL, &info_size, NULL, 0) < 0)
    return -1;

  mib[5] = (info_size / sizeof(struct kinfo_proc));
#endif

  int sysctl_result = sysctl(mib, arraysize(mib), &info, &info_size, NULL, 0);
  DCHECK_EQ(sysctl_result, 0);
  if (sysctl_result != 0) {
    // Cache the failure as "not debugged" so we never retry under a sandbox.
    is_set = true;
    being_debugged = false;
    return being_debugged;
  }

  // This process is being debugged if the P_TRACED flag is set.
  is_set = true;
#if defined(OS_FREEBSD)
  being_debugged = (info.ki_flag & P_TRACED) != 0;
#elif defined(OS_BSD)
  being_debugged = (info.p_flag & P_TRACED) != 0;
#else
  being_debugged = (info.kp_proc.p_flag & P_TRACED) != 0;
#endif
  return being_debugged;
}

#elif defined(OS_LINUX) || defined(OS_ANDROID)

// We can look in /proc/self/status for TracerPid.  We are likely used in crash
// handling, so we are careful not to use the heap or have side effects.
// Another option that is common is to try to ptrace yourself, but then we
// can't detach without forking(), and that's not so great.
// static
bool BeingDebugged() {
  // NOTE: This code MUST be async-signal safe (it's used by in-process
  // stack dumping signal handler). NO malloc or stdio is allowed here.

  int status_fd = open("/proc/self/status", O_RDONLY);
  if (status_fd == -1)
    return false;

  // We assume our line will be in the first 1024 characters and that we can
  // read this much all at once.  In practice this will generally be true.
  // This simplifies and speeds up things considerably.
  char buf[1024];

  ssize_t num_read = HANDLE_EINTR(read(status_fd, buf, sizeof(buf)));
  if (IGNORE_EINTR(close(status_fd)) < 0)
    return false;

  if (num_read <= 0)
    return false;

  StringPiece status(buf, num_read);
  StringPiece tracer("TracerPid:\t");

  StringPiece::size_type pid_index = status.find(tracer);
  if (pid_index == StringPiece::npos)
    return false;

  // Our pid is 0 without a debugger, assume this for any pid starting with 0.
  pid_index += tracer.size();
  return pid_index < status.size() && status[pid_index] != '0';
}

#else

// Unsupported platform: conservatively report "not debugged".
bool BeingDebugged() {
  NOTIMPLEMENTED();
  return false;
}

#endif
+
// We want to break into the debugger in Debug mode, and cause a crash dump in
// Release mode. Breakpad behaves as follows:
//
// +-------+-----------------+-----------------+
// | OS    | Dump on SIGTRAP | Dump on SIGABRT |
// +-------+-----------------+-----------------+
// | Linux |       N         |        Y        |
// | Mac   |       Y         |        N        |
// +-------+-----------------+-----------------+
//
// Thus we do the following:
// Linux: Debug mode if a debugger is attached, send SIGTRAP; otherwise send
//        SIGABRT
// Mac: Always send SIGTRAP.

// Per-architecture breakpoint instruction; undefined on architectures where
// we have no asm sequence (handled by the fallbacks below).
#if defined(ARCH_CPU_ARMEL)
#define DEBUG_BREAK_ASM() asm("bkpt 0")
#elif defined(ARCH_CPU_ARM64)
#define DEBUG_BREAK_ASM() asm("brk 0")
#elif defined(ARCH_CPU_MIPS_FAMILY)
#define DEBUG_BREAK_ASM() asm("break 2")
#elif defined(ARCH_CPU_X86_FAMILY)
#define DEBUG_BREAK_ASM() asm("int3")
#endif

#if defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
// Release builds on non-Mac/non-Android: abort() so Breakpad gets SIGABRT.
#define DEBUG_BREAK() abort()
#elif defined(OS_NACL)
// The NaCl verifier doesn't let us use int3.  For now, we call abort().  We
// should ask for advice from some NaCl experts about the optimum thing here.
// http://code.google.com/p/nativeclient/issues/detail?id=645
#define DEBUG_BREAK() abort()
#elif !defined(OS_MACOSX)
// Though Android has a "helpful" process called debuggerd to catch native
// signals on the general assumption that they are fatal errors. If no debugger
// is attached, we call abort since Breakpad needs SIGABRT to create a dump.
// When debugger is attached, for ARM platform the bkpt instruction appears
// to cause SIGBUS which is trapped by debuggerd, and we've had great
// difficulty continuing in a debugger once we stop from SIG triggered by native
// code, use GDB to set |go| to 1 to resume execution; for X86 platform, use
// "int3" to setup breakpoint and raise SIGTRAP.
//
// On other POSIX architectures, except Mac OS X, we use the same logic to
// ensure that breakpad creates a dump on crashes while it is still possible to
// use a debugger.
namespace {
void DebugBreak() {
  if (!BeingDebugged()) {
    abort();
  } else {
#if defined(DEBUG_BREAK_ASM)
    DEBUG_BREAK_ASM();
#else
    // No breakpoint instruction available: spin until a human flips |go|
    // from an attached debugger.
    volatile int go = 0;
    while (!go) {
      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
    }
#endif
  }
}
}  // namespace
#define DEBUG_BREAK() DebugBreak()
#elif defined(DEBUG_BREAK_ASM)
#define DEBUG_BREAK() DEBUG_BREAK_ASM()
#else
#error "Don't know how to debug break on this architecture/OS"
#endif
+
// Breaks into an attached debugger, or crashes (producing a dump) when none
// is attached.  See the DEBUG_BREAK() selection logic above.
void BreakDebugger() {
  // NOTE: This code MUST be async-signal safe (it's used by in-process
  // stack dumping signal handler). NO malloc or stdio is allowed here.

  DEBUG_BREAK();
#if defined(OS_ANDROID) && !defined(OFFICIAL_BUILD)
  // For Android development we always build release (debug builds are
  // unmanageably large), so the unofficial build is used for debugging. It is
  // helpful to be able to insert BreakDebugger() statements in the source,
  // attach the debugger, inspect the state of the program and then resume it by
  // setting the 'go' variable above.
#elif defined(NDEBUG)
  // Terminate the program after signaling the debug break.
  _exit(1);
#endif
}

}  // namespace debug
}  // namespace base

+ 32 - 0
base/debug/dump_without_crashing.cc

@@ -0,0 +1,32 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/dump_without_crashing.h"

#include "base/logging.h"

namespace {

// Pointer to the function that's called by DumpWithoutCrashing() to dump the
// process's memory.  NULL until SetDumpWithoutCrashingFunction() installs one.
// (CDECL is presumably a calling-convention macro from build_config /
// compiler_specific — confirm its definition.)
void (CDECL *dump_without_crashing_function_)() = NULL;

}  // namespace

namespace base {

namespace debug {

// Invokes the registered dump callback, if any; silently does nothing when
// no callback has been installed.
void DumpWithoutCrashing() {
  if (dump_without_crashing_function_)
    (*dump_without_crashing_function_)();
}

void SetDumpWithoutCrashingFunction(void (CDECL *function)()) {
  dump_without_crashing_function_ = function;
}

}  // namespace debug

}  // namespace base

+ 27 - 0
base/debug/dump_without_crashing.h

@@ -0,0 +1,27 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
#define BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_

#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/build_config.h"

namespace base {

namespace debug {

// Handler to silently dump the current process without crashing.
// No-op until SetDumpWithoutCrashingFunction() has installed a callback.
BASE_EXPORT void DumpWithoutCrashing();

// Sets a function that'll be invoked to dump the current process when
// DumpWithoutCrashing() is called.
BASE_EXPORT void SetDumpWithoutCrashingFunction(void (CDECL *function)());

}  // namespace debug

}  // namespace base

#endif  // BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_

+ 55 - 0
base/debug/leak_annotations.h

@@ -0,0 +1,55 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_LEAK_ANNOTATIONS_H_
#define BASE_DEBUG_LEAK_ANNOTATIONS_H_

#include "base/basictypes.h"
#include "base/build_config.h"

// This file defines macros which can be used to annotate intentional memory
// leaks. Support for annotations is implemented in LeakSanitizer. Annotated
// objects will be treated as a source of live pointers, i.e. any heap objects
// reachable by following pointers from an annotated object will not be
// reported as leaks.
//
// ANNOTATE_SCOPED_MEMORY_LEAK: all allocations made in the current scope
// will be annotated as leaks.
// ANNOTATE_LEAKING_OBJECT_PTR(X): the heap object referenced by pointer X will
// be annotated as a leak.

#if defined(LEAK_SANITIZER) && !defined(OS_NACL)

// Public LSan API from <sanitizer/lsan_interface.h>.
extern "C" {
void __lsan_disable();
void __lsan_enable();
void __lsan_ignore_object(const void *p);

// Invoke leak detection immediately. If leaks are found, the process will exit.
void __lsan_do_leak_check();
}  // extern "C"

// RAII guard: LSan leak tracking is off from construction to destruction.
class ScopedLeakSanitizerDisabler {
 public:
  ScopedLeakSanitizerDisabler() { __lsan_disable(); }
  ~ScopedLeakSanitizerDisabler() { __lsan_enable(); }
 private:
  DISALLOW_COPY_AND_ASSIGN(ScopedLeakSanitizerDisabler);
};

// The trailing static_cast<void>(0) forces callers to write a semicolon.
#define ANNOTATE_SCOPED_MEMORY_LEAK \
    ScopedLeakSanitizerDisabler leak_sanitizer_disabler; static_cast<void>(0)

#define ANNOTATE_LEAKING_OBJECT_PTR(X) __lsan_ignore_object(X);

#else

// If neither HeapChecker nor LSan are used, the annotations should be no-ops.
#define ANNOTATE_SCOPED_MEMORY_LEAK ((void)0)
#define ANNOTATE_LEAKING_OBJECT_PTR(X) ((void)0)

#endif

#endif  // BASE_DEBUG_LEAK_ANNOTATIONS_H_

+ 138 - 0
base/debug/leak_tracker.h

@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_TRACKER_H_
+#define BASE_DEBUG_LEAK_TRACKER_H_
+
+#include "base/build_config.h"
+
+// Only enable leak tracking in non-uClibc debug builds.
+#if !defined(NDEBUG) && !defined(__UCLIBC__)
+#define ENABLE_LEAK_TRACKER
+#endif
+
+#ifdef ENABLE_LEAK_TRACKER
+#include "base/containers/linked_list.h"
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#endif  // ENABLE_LEAK_TRACKER
+
+// LeakTracker is a helper to verify that all instances of a class
+// have been destroyed.
+//
+// It is particularly useful for classes that are bound to a single thread --
+// before destroying that thread, one can check that there are no remaining
+// instances of that class.
+//
+// For example, to enable leak tracking for class net::URLRequest, start by
+// adding a member variable of type LeakTracker<net::URLRequest>.
+//
+//   class URLRequest {
+//     ...
+//    private:
+//     base::LeakTracker<URLRequest> leak_tracker_;
+//   };
+//
+//
+// Next, when we believe all instances of net::URLRequest have been deleted:
+//
+//   LeakTracker<net::URLRequest>::CheckForLeaks();
+//
+// Should the check fail (because there are live instances of net::URLRequest),
+// then the allocation callstack for each leaked instance is dumped to
+// the error log.
+//
+// If ENABLE_LEAK_TRACKER is not defined, then the check has no effect.
+
+namespace base {
+namespace debug {
+
+#ifndef ENABLE_LEAK_TRACKER
+
+// If leak tracking is disabled, do nothing.
+template<typename T>
+class LeakTracker {
+ public:
+  ~LeakTracker() {}
+  // No-op when leak tracking is compiled out.
+  static void CheckForLeaks() {}
+  // -1 signals that live-instance counting is unavailable in this build.
+  static int NumLiveInstances() { return -1; }
+};
+
+#else
+
+// If leak tracking is enabled we track where the object was allocated from.
+
+template<typename T>
+class LeakTracker : public LinkNode<LeakTracker<T> > {
+ public:
+  // Registers this instance on the per-type global list; the stack trace
+  // member captures the construction (i.e. allocation) site.
+  LeakTracker() {
+    instances()->Append(this);
+  }
+
+  ~LeakTracker() {
+    this->RemoveFromList();
+  }
+
+  // Logs the allocation stack of every live instance, then CHECK-fails if
+  // any instance of T is still alive.
+  static void CheckForLeaks() {
+    // Walk the allocation list and print each entry it contains.
+    size_t count = 0;
+
+    // Copy the first 3 leak allocation callstacks onto the stack.
+    // This way if we hit the CHECK() in a release build, the leak
+    // information will be available in mini-dump.
+    const size_t kMaxStackTracesToCopyOntoStack = 3;
+    StackTrace stacktraces[kMaxStackTracesToCopyOntoStack];
+
+    for (LinkNode<LeakTracker<T> >* node = instances()->head();
+         node != instances()->end();
+         node = node->next()) {
+      StackTrace& allocation_stack = node->value()->allocation_stack_;
+
+      if (count < kMaxStackTracesToCopyOntoStack)
+        stacktraces[count] = allocation_stack;
+
+      ++count;
+      if (LOG_IS_ON(ERROR)) {
+        LOG_STREAM(ERROR) << "Leaked " << node << " which was allocated by:";
+        allocation_stack.OutputToStream(&LOG_STREAM(ERROR));
+      }
+    }
+
+    CHECK_EQ(0u, count);
+
+    // Hack to keep |stacktraces| and |count| alive (so compiler
+    // doesn't optimize it out, and it will appear in mini-dumps).
+    // The condition is never true at runtime: CHECK_EQ above guarantees
+    // count == 0 whenever execution reaches this point.
+    if (count == 0x1234) {
+      for (size_t i = 0; i < kMaxStackTracesToCopyOntoStack; ++i)
+        stacktraces[i].Print();
+    }
+  }
+
+  // Returns the number of currently-live instances of T.
+  static int NumLiveInstances() {
+    // Walk the allocation list and count how many entries it has.
+    int count = 0;
+    for (LinkNode<LeakTracker<T> >* node = instances()->head();
+         node != instances()->end();
+         node = node->next()) {
+      ++count;
+    }
+    return count;
+  }
+
+ private:
+  // Each specialization of LeakTracker gets its own static storage.
+  static LinkedList<LeakTracker<T> >* instances() {
+    static LinkedList<LeakTracker<T> > list;
+    return &list;
+  }
+
+  // Captured at construction time; identifies where the instance was created.
+  StackTrace allocation_stack_;
+};
+
+#endif  // ENABLE_LEAK_TRACKER
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_LEAK_TRACKER_H_

+ 167 - 0
base/debug/proc_maps_linux.cc

@@ -0,0 +1,167 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/proc_maps_linux.h"
+
+#include <fcntl.h>
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <inttypes.h>
+#endif
+
+#include "base/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/strings/string_split.h"
+
+#if defined(OS_ANDROID) && !defined(__LP64__)
+// In 32-bit mode, Bionic's inttypes.h defines PRI/SCNxPTR as an
+// unsigned long int, which is incompatible with Bionic's stdint.h
+// defining uintptr_t as an unsigned int:
+// https://code.google.com/p/android/issues/detail?id=57218
+#undef SCNxPTR
+#define SCNxPTR "x"
+#endif
+
+namespace base {
+namespace debug {
+
+// Scans |proc_maps| starting from |pos| returning true if the gate VMA was
+// found, otherwise returns false. The gate VMA is the kernel-provided
+// mapping that seq_file emits last (see ReadProcMaps below).
+static bool ContainsGateVMA(std::string* proc_maps, size_t pos) {
+#if defined(ARCH_CPU_ARM_FAMILY)
+  // The gate VMA on ARM kernels is the interrupt vectors page.
+  return proc_maps->find(" [vectors]\n", pos) != std::string::npos;
+#elif defined(ARCH_CPU_X86_64)
+  // The gate VMA on x86 64-bit kernels is the virtual system call page.
+  return proc_maps->find(" [vsyscall]\n", pos) != std::string::npos;
+#else
+  // Otherwise assume there is no gate VMA in which case we shouldn't
+  // get duplicate entries.
+  return false;
+#endif
+}
+
+// Reads the full contents of /proc/self/maps into |proc_maps|, one page-sized
+// read() at a time, stopping early once the gate VMA has been seen (see the
+// long comment in proc_maps_linux.h). Returns false and clears |proc_maps|
+// on I/O failure.
+bool ReadProcMaps(std::string* proc_maps) {
+  // seq_file only writes out a page-sized amount on each call. Refer to header
+  // file for details.
+  const long page_size = sysconf(_SC_PAGESIZE);
+  // sysconf() returns -1 on failure; fall back to a conventional page size so
+  // the resize() below never receives a negative increment (which would
+  // shrink the buffer and make the loop misbehave).
+  const long kReadSize = page_size > 0 ? page_size : 4096;
+
+  base::ScopedFD fd(HANDLE_EINTR(open("/proc/self/maps", O_RDONLY)));
+  if (!fd.is_valid()) {
+    DPLOG(ERROR) << "Couldn't open /proc/self/maps";
+    return false;
+  }
+  proc_maps->clear();
+
+  while (true) {
+    // To avoid a copy, resize |proc_maps| so read() can write directly into it.
+    // Compute |buffer| afterwards since resize() may reallocate.
+    size_t pos = proc_maps->size();
+    proc_maps->resize(pos + kReadSize);
+    void* buffer = &(*proc_maps)[pos];
+
+    ssize_t bytes_read = HANDLE_EINTR(read(fd.get(), buffer, kReadSize));
+    if (bytes_read < 0) {
+      DPLOG(ERROR) << "Couldn't read /proc/self/maps";
+      proc_maps->clear();
+      return false;
+    }
+
+    // ... and don't forget to trim off excess bytes.
+    proc_maps->resize(pos + bytes_read);
+
+    if (bytes_read == 0)
+      break;
+
+    // The gate VMA is handled as a special case after seq_file has finished
+    // iterating through all entries in the virtual memory table.
+    //
+    // Unfortunately, if additional entries are added at this point in time
+    // seq_file gets confused and the next call to read() will return duplicate
+    // entries including the gate VMA again.
+    //
+    // Avoid this by searching for the gate VMA and breaking early.
+    if (ContainsGateVMA(proc_maps, pos))
+      break;
+  }
+
+  return true;
+}
+
+// Parses the text of /proc/<pid>/maps held in |input| into |regions_out|.
+// Returns true only if every line parses; on any malformed line it returns
+// false and leaves |regions_out| untouched (results are staged in a local
+// vector and swapped in at the end).
+bool ParseProcMaps(const std::string& input,
+                   std::vector<MappedMemoryRegion>* regions_out) {
+  CHECK(regions_out);
+  std::vector<MappedMemoryRegion> regions;
+
+  // This isn't async safe nor terribly efficient, but it doesn't need to be at
+  // this point in time.
+  std::vector<std::string> lines;
+  SplitString(input, '\n', &lines);
+
+  for (size_t i = 0; i < lines.size(); ++i) {
+    // Due to splitting on '\n' the last line should be empty.
+    if (i == lines.size() - 1) {
+      if (!lines[i].empty()) {
+        DLOG(WARNING) << "Last line not empty";
+        return false;
+      }
+      break;
+    }
+
+    MappedMemoryRegion region;
+    const char* line = lines[i].c_str();
+    // %4c below fills exactly 4 bytes without a terminator; zero-initializing
+    // the 5-byte array keeps it a valid NUL-terminated string.
+    char permissions[5] = {'\0'};  // Ensure NUL-terminated string.
+    uint8_t dev_major = 0;
+    uint8_t dev_minor = 0;
+    long inode = 0;
+    int path_index = 0;
+
+    // Sample format from man 5 proc:
+    //
+    // address           perms offset  dev   inode   pathname
+    // 08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
+    //
+    // The final %n term captures the offset in the input string, which is used
+    // to determine the path name. It *does not* increment the return value.
+    // Refer to man 3 sscanf for details.
+    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4c %llx %hhx:%hhx %ld %n",
+               &region.start, &region.end, permissions, &region.offset,
+               &dev_major, &dev_minor, &inode, &path_index) < 7) {
+      DPLOG(WARNING) << "sscanf failed for line: " << line;
+      return false;
+    }
+
+    // Translate the 4-character "rwxp" field into the permission bitmask,
+    // rejecting any character outside the documented alphabet.
+    region.permissions = 0;
+
+    if (permissions[0] == 'r')
+      region.permissions |= MappedMemoryRegion::READ;
+    else if (permissions[0] != '-')
+      return false;
+
+    if (permissions[1] == 'w')
+      region.permissions |= MappedMemoryRegion::WRITE;
+    else if (permissions[1] != '-')
+      return false;
+
+    if (permissions[2] == 'x')
+      region.permissions |= MappedMemoryRegion::EXECUTE;
+    else if (permissions[2] != '-')
+      return false;
+
+    if (permissions[3] == 'p')
+      region.permissions |= MappedMemoryRegion::PRIVATE;
+    else if (permissions[3] != 's' && permissions[3] != 'S')  // Shared memory.
+      return false;
+
+    // Pushing then assigning saves us a string copy.
+    regions.push_back(region);
+    regions.back().path.assign(line + path_index);
+  }
+
+  regions_out->swap(regions);
+  return true;
+}
+
+}  // namespace debug
+}  // namespace base

+ 90 - 0
base/debug/proc_maps_linux.h

@@ -0,0 +1,90 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROC_MAPS_LINUX_H_
+#define BASE_DEBUG_PROC_MAPS_LINUX_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+
+namespace base {
+namespace debug {
+
+// Describes a region of mapped memory and the path of the file mapped.
+struct MappedMemoryRegion {
+  enum Permission {
+    READ = 1 << 0,
+    WRITE = 1 << 1,
+    EXECUTE = 1 << 2,
+    PRIVATE = 1 << 3,  // If set, region is private, otherwise it is shared.
+  };
+
+  // The address range [start,end) of mapped memory.
+  uintptr_t start;
+  uintptr_t end;
+
+  // Byte offset into the file at |path| at which the mapped range begins
+  // (the third field of a /proc/<pid>/maps line).
+  unsigned long long offset;
+
+  // Bitmask of read/write/execute/private/shared permissions.
+  uint8_t permissions;
+
+  // Name of the file mapped into memory.
+  //
+  // NOTE: path names aren't guaranteed to point at valid files. For example,
+  // "[heap]" and "[stack]" are used to represent the location of the process'
+  // heap and stack, respectively.
+  std::string path;
+};
+
+// Reads the data from /proc/self/maps and stores the result in |proc_maps|.
+// Returns true if successful, false otherwise.
+//
+// There is *NO* guarantee that the resulting contents will be free of
+// duplicates or even contain valid entries by the time the method returns.
+//
+//
+// THE GORY DETAILS
+//
+// Did you know it's next-to-impossible to atomically read the whole contents
+// of /proc/<pid>/maps? You would think that if we passed in a large-enough
+// buffer to read() that It Should Just Work(tm), but sadly that's not the case.
+//
+// Linux's procfs uses seq_file [1] for handling iteration, text formatting,
+// and dealing with resulting data that is larger than the size of a page. That
+// last bit is especially important because it means that seq_file will never
+// return more than the size of a page in a single call to read().
+//
+// Unfortunately for a program like Chrome the size of /proc/self/maps is
+// larger than the size of page so we're forced to call read() multiple times.
+// If the virtual memory table changed in any way between calls to read() (e.g.,
+// a different thread calling mprotect()), it can make seq_file generate
+// duplicate entries or skip entries.
+//
+// Even if seq_file was changed to keep flushing the contents of its page-sized
+// buffer to the usermode buffer inside a single call to read(), it has to
+// release its lock on the virtual memory table to handle page faults while
+// copying data to usermode. This puts us in the same situation where the table
+// can change while we're copying data.
+//
+// Alternatives such as fork()-and-suspend-the-parent-while-child-reads were
+// attempted, but they present more subtle problems than it's worth. Depending
+// on your use case your best bet may be to read /proc/<pid>/maps prior to
+// starting other threads.
+//
+// [1] http://kernelnewbies.org/Documents/SeqFileHowTo
+BASE_EXPORT bool ReadProcMaps(std::string* proc_maps);
+
+// Parses /proc/<pid>/maps input data and stores in |regions|. Returns true
+// and updates |regions| if and only if all of |input| was successfully parsed.
+BASE_EXPORT bool ParseProcMaps(const std::string& input,
+                               std::vector<MappedMemoryRegion>* regions);
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_PROC_MAPS_LINUX_H_

+ 219 - 0
base/debug/profiler.cc

@@ -0,0 +1,219 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/profiler.h"
+
+#include <string>
+
+#include "base/process/process_handle.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+#if defined(OS_WIN)
+#include "base/win/pe_image.h"
+#endif  // defined(OS_WIN)
+
+// TODO(peria): Enable profiling on Windows.
+#if defined(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
+#include "third_party/tcmalloc/chromium/src/gperftools/profiler.h"
+#endif
+
+namespace base {
+namespace debug {
+
+// TODO(peria): Enable profiling on Windows.
+#if defined(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
+
+static int profile_count = 0;
+
+// Starts a tcmalloc CPU-profiling session whose output file name is |name|
+// with the {pid} and {count} placeholders expanded (see profiler.h).
+// |profile_count| numbers sessions within this process starting at 1.
+void StartProfiling(const std::string& name) {
+  ++profile_count;
+  std::string full_name(name);
+  std::string pid = StringPrintf("%d", GetCurrentProcId());
+  std::string count = StringPrintf("%d", profile_count);
+  ReplaceSubstringsAfterOffset(&full_name, 0, "{pid}", pid);
+  ReplaceSubstringsAfterOffset(&full_name, 0, "{count}", count);
+  ProfilerStart(full_name.c_str());
+}
+
+// Flushes pending samples to the output file, then stops the profiler.
+void StopProfiling() {
+  ProfilerFlush();
+  ProfilerStop();
+}
+
+// Forces buffered profile data to be written out without stopping.
+void FlushProfiling() {
+  ProfilerFlush();
+}
+
+// True when tcmalloc's profiler is currently enabled for all threads.
+bool BeingProfiled() {
+  return ProfilingIsEnabledForAllThreads();
+}
+
+// Re-registers the current thread with the profiler; per profiler.h, a
+// fork() disables the profiling timers in the child.
+void RestartProfilingAfterFork() {
+  ProfilerRegisterThread();
+}
+
+#else
+
+// Profiling support is compiled out (no ENABLE_PROFILING, NO_TCMALLOC, or
+// Windows); every entry point below is a deliberate no-op.
+void StartProfiling(const std::string& name) {
+}
+
+void StopProfiling() {
+}
+
+void FlushProfiling() {
+}
+
+// Never profiled when the profiler is compiled out.
+bool BeingProfiled() {
+  return false;
+}
+
+void RestartProfilingAfterFork() {
+}
+
+#endif
+
+#if !defined(OS_WIN)
+
+// Syzygy instrumentation only exists for Windows PE binaries, so on every
+// other platform the binary is never instrumented.
+bool IsBinaryInstrumented() {
+  return false;
+}
+
+// With no instrumentation there are no profiler hook functions to resolve;
+// all of the lookups below report "not available" (NULL).
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+  return NULL;
+}
+
+DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
+  return NULL;
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+  return NULL;
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+  return NULL;
+}
+
+#else  // defined(OS_WIN)
+
+// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
+extern "C" IMAGE_DOS_HEADER __ImageBase;
+
+// Returns true iff this image carries Syzygy instrumentation, detected by the
+// presence of both the ".thunks" and ".syzygy" PE sections. The answer is
+// computed once and cached in a function-local static.
+bool IsBinaryInstrumented() {
+  enum InstrumentationCheckState {
+    UNINITIALIZED,
+    INSTRUMENTED_IMAGE,
+    NON_INSTRUMENTED_IMAGE,
+  };
+
+  // NOTE(review): |state| is written without synchronization. Concurrent
+  // first calls race benignly since both compute the same value from the
+  // immutable PE headers -- confirm that is acceptable for all callers.
+  static InstrumentationCheckState state = UNINITIALIZED;
+
+  if (state == UNINITIALIZED) {
+    HMODULE this_module = reinterpret_cast<HMODULE>(&__ImageBase);
+    base::win::PEImage image(this_module);
+
+    // Check to be sure our image is structured as we'd expect.
+    DCHECK(image.VerifyMagic());
+
+    // Syzygy-instrumented binaries contain a PE image section named ".thunks",
+    // and all Syzygy-modified binaries contain the ".syzygy" image section.
+    // This is a very fast check, as it only looks at the image header.
+    if ((image.GetImageSectionHeaderByName(".thunks") != NULL) &&
+        (image.GetImageSectionHeaderByName(".syzygy") != NULL)) {
+      state = INSTRUMENTED_IMAGE;
+    } else {
+      state = NON_INSTRUMENTED_IMAGE;
+    }
+  }
+  DCHECK(state != UNINITIALIZED);
+
+  return state == INSTRUMENTED_IMAGE;
+}
+
+namespace {
+
+struct FunctionSearchContext {
+  const char* name;
+  FARPROC function;
+};
+
+// Callback function to PEImage::EnumImportChunks. For each import chunk it
+// resolves the module the chunk was bound to and asks it for the export named
+// in |cookie| (a FunctionSearchContext). Returning true continues the
+// enumeration; returning false stops it once the function has been found.
+bool FindResolutionFunctionInImports(
+    const base::win::PEImage &image, const char* module_name,
+    PIMAGE_THUNK_DATA unused_name_table, PIMAGE_THUNK_DATA import_address_table,
+    PVOID cookie) {
+  FunctionSearchContext* context =
+      reinterpret_cast<FunctionSearchContext*>(cookie);
+
+  DCHECK_NE(static_cast<FunctionSearchContext*>(NULL), context);
+  DCHECK_EQ(static_cast<FARPROC>(NULL), context->function);
+
+  // Our import address table contains pointers to the functions we import
+  // at this point. Let's retrieve the first such function and use it to
+  // find the module this import was resolved to by the loader.
+  const wchar_t* function_in_module =
+      reinterpret_cast<const wchar_t*>(import_address_table->u1.Function);
+
+  // Retrieve the module by a function in the module.
+  const DWORD kFlags = GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                       GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT;
+  HMODULE module = NULL;
+  if (!::GetModuleHandleEx(kFlags, function_in_module, &module)) {
+    // This can happen if someone IAT patches us to a thunk.
+    return true;
+  }
+
+  // See whether this module exports the function we're looking for.
+  FARPROC exported_func = ::GetProcAddress(module, context->name);
+  if (exported_func != NULL) {
+    // We found it, return the function and terminate the enumeration.
+    context->function = exported_func;
+    return false;
+  }
+
+  // Keep going.
+  return true;
+}
+
+// Looks up |function_name| among the exports of the modules this image
+// imports from, cast to |FunctionType|. Returns NULL when the binary is not
+// instrumented or no imported module exports that name.
+template <typename FunctionType>
+FunctionType FindFunctionInImports(const char* function_name) {
+  if (!IsBinaryInstrumented())
+    return NULL;
+
+  HMODULE this_module = reinterpret_cast<HMODULE>(&__ImageBase);
+  base::win::PEImage image(this_module);
+
+  FunctionSearchContext ctx = { function_name, NULL };
+  image.EnumImportChunks(FindResolutionFunctionInImports, &ctx);
+
+  return reinterpret_cast<FunctionType>(ctx.function);
+}
+
+}  // namespace
+
+// Each lookup below resolves one well-known export supplied by the Syzygy
+// instrumentation via FindFunctionInImports; each returns NULL when the
+// binary is not instrumented or the export is missing.
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+  return FindFunctionInImports<ReturnAddressLocationResolver>(
+      "ResolveReturnAddressLocation");
+}
+
+DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
+  return FindFunctionInImports<DynamicFunctionEntryHook>(
+      "OnDynamicFunctionEntry");
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+  return FindFunctionInImports<AddDynamicSymbol>(
+      "AddDynamicSymbol");
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+  return FindFunctionInImports<MoveDynamicSymbol>(
+      "MoveDynamicSymbol");
+}
+
+#endif  // defined(OS_WIN)
+
+}  // namespace debug
+}  // namespace base

+ 90 - 0
base/debug/profiler.h

@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROFILER_H
+#define BASE_DEBUG_PROFILER_H
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+
+// The Profiler functions allow usage of the underlying sampling based
+// profiler. If the application has not been built with the necessary
+// flags (-DENABLE_PROFILING and not -DNO_TCMALLOC) then these functions
+// are noops.
+namespace base {
+namespace debug {
+
+// Start profiling with the supplied name.
+// {pid} will be replaced by the process' pid and {count} will be replaced
+// by the count of the profile run (starts at 1 with each process).
+BASE_EXPORT void StartProfiling(const std::string& name);
+
+// Stop profiling and write out data.
+BASE_EXPORT void StopProfiling();
+
+// Force data to be written to file.
+BASE_EXPORT void FlushProfiling();
+
+// Returns true if process is being profiled.
+BASE_EXPORT bool BeingProfiled();
+
+// Reset profiling after a fork, which disables timers.
+BASE_EXPORT void RestartProfilingAfterFork();
+
+// Returns true iff this executable is instrumented with the Syzygy profiler.
+BASE_EXPORT bool IsBinaryInstrumented();
+
+// There's a class of profilers that use "return address swizzling" to get a
+// hook on function exits. This class of profilers uses some form of entry hook,
+// like e.g. binary instrumentation, or a compiler flag, that calls a hook each
+// time a function is invoked. The hook then switches the return address on the
+// stack for the address of an exit hook function, and pushes the original
+// return address to a shadow stack of some type. When in due course the CPU
+// executes a return to the exit hook, the exit hook will do whatever work it
+// does on function exit, then arrange to return to the original return address.
+// This class of profiler does not play well with programs that look at the
+// return address, as does e.g. V8. V8 uses the return address to certain
+// runtime functions to find the JIT code that called it, and from there finds
+// the V8 data structures associated to the JS function involved.
+// A return address resolution function is used to fix this. It allows such
+// programs to resolve a location on stack where a return address originally
+// resided, to the shadow stack location where the profiler stashed it.
+typedef uintptr_t (*ReturnAddressLocationResolver)(
+    uintptr_t return_addr_location);
+
+// The functions below here are to support profiling V8-generated code.
+// V8 has provisions for generating a call to an entry hook for newly generated
+// JIT code, and it can push symbol information on code generation and advise
+// when the garbage collector moves code. The function declarations below here
+// make glue between V8's facilities and a profiler.
+
+// This type declaration must match V8's FunctionEntryHook.
+// (Previously this typedef was accidentally declared twice; the duplicate
+// has been removed.)
+typedef void (*DynamicFunctionEntryHook)(uintptr_t function,
+                                         uintptr_t return_addr_location);
+
+typedef void (*AddDynamicSymbol)(const void* address,
+                                 size_t length,
+                                 const char* name,
+                                 size_t name_len);
+typedef void (*MoveDynamicSymbol)(const void* address, const void* new_address);
+
+
+// If this binary is instrumented and the instrumentation supplies a function
+// for each of those purposes, find and return the function in question.
+// Otherwise returns NULL.
+BASE_EXPORT ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc();
+BASE_EXPORT DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc();
+BASE_EXPORT AddDynamicSymbol GetProfilerAddDynamicSymbolFunc();
+BASE_EXPORT MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc();
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_PROFILER_H

+ 43 - 0
base/debug/stack_trace.cc

@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include "base/basictypes.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <sstream>
+
+namespace base {
+namespace debug {
+
+// Builds a trace from an existing array of frame pointers, clamping the
+// frame count to the fixed-size internal buffer.
+StackTrace::StackTrace(const void* const* trace, size_t count) {
+  count_ = std::min(count, arraysize(trace_));
+  if (count_ != 0)
+    memcpy(trace_, trace, count_ * sizeof(trace_[0]));
+}
+
+// Nothing to release: the frames are stored inline in |trace_|.
+StackTrace::~StackTrace() {
+}
+
+// Reports the number of captured frames through |count| and exposes the
+// internal buffer, or NULL when the trace is empty.
+const void *const *StackTrace::Addresses(size_t* count) const {
+  *count = count_;
+  return count_ ? trace_ : NULL;
+}
+
+// Renders the symbolized trace into a string. On uClibc, OutputToStream()
+// is unavailable, so the returned string is empty there.
+std::string StackTrace::ToString() const {
+  std::stringstream stream;
+#if !defined(__UCLIBC__)
+  OutputToStream(&stream);
+#endif
+  return stream.str();
+}
+
+}  // namespace debug
+}  // namespace base

+ 112 - 0
base/debug/stack_trace.h

@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_STACK_TRACE_H_
+#define BASE_DEBUG_STACK_TRACE_H_
+
+#include <iosfwd>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/build_config.h"
+
+#if defined(OS_POSIX)
+#include <unistd.h>
+#endif
+
+#if defined(OS_WIN)
+struct _EXCEPTION_POINTERS;
+#endif
+
+namespace base {
+namespace debug {
+
+// Enables stack dump to console output on exception and signals.
+// When enabled, the process will quit immediately. This is meant to be used in
+// unit_tests only! This is not thread-safe: only call from main thread.
+BASE_EXPORT bool EnableInProcessStackDumping();
+
+// A different version of EnableInProcessStackDumping that also works for
+// sandboxed processes.  For more details take a look at the description
+// of EnableInProcessStackDumping.
+// Calling this function on Linux opens /proc/self/maps and caches its
+// contents. In DEBUG builds, this function also opens the object files that
+// are loaded in memory and caches their file descriptors (this cannot be
+// done in official builds because it has security implications).
+BASE_EXPORT bool EnableInProcessStackDumpingForSandbox();
+
+// A stacktrace can be helpful in debugging. For example, you can include a
+// stacktrace member in a object (probably around #ifndef NDEBUG) so that you
+// can later see where the given object was created from.
+class BASE_EXPORT StackTrace {
+ public:
+  // Creates a stacktrace from the current location.
+  StackTrace();
+
+  // Creates a stacktrace from an existing array of instruction
+  // pointers (such as returned by Addresses()).  |count| will be
+  // trimmed to |kMaxTraces|.
+  StackTrace(const void* const* trace, size_t count);
+
+#if defined(OS_WIN)
+  // Creates a stacktrace for an exception.
+  // Note: this function will throw an import not found (StackWalk64) exception
+  // on system without dbghelp 5.1.
+  StackTrace(const _EXCEPTION_POINTERS* exception_pointers);
+#endif
+
+  // Copying and assignment are allowed with the default functions.
+
+  ~StackTrace();
+
+  // Gets an array of instruction pointer values. |*count| will be set to the
+  // number of elements in the returned array.
+  const void* const* Addresses(size_t* count) const;
+
+  // Prints the stack trace to stderr.
+  void Print() const;
+
+#if !defined(__UCLIBC__)
+  // Resolves backtrace to symbols and write to stream.
+  void OutputToStream(std::ostream* os) const;
+#endif
+
+  // Resolves backtrace to symbols and returns as string.
+  std::string ToString() const;
+
+ private:
+  // From http://msdn.microsoft.com/en-us/library/bb204633.aspx,
+  // the sum of FramesToSkip and FramesToCapture must be less than 63,
+  // so set it to 62. Even if on POSIX it could be a larger value, it usually
+  // doesn't give much more information.
+  static const int kMaxTraces = 62;
+
+  // Raw instruction pointers; only the first |count_| entries are valid.
+  void* trace_[kMaxTraces];
+
+  // The number of valid frames in |trace_|.
+  size_t count_;
+};
+
+namespace internal {
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
+// POSIX doesn't define any async-signal safe function for converting
+// an integer to ASCII. We'll have to define our own version.
+// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
+// conversion was successful or NULL otherwise. It never writes more than "sz"
+// bytes. Output will be truncated as needed, and a NUL character is always
+// appended.
+BASE_EXPORT char *itoa_r(intptr_t i,
+                         char *buf,
+                         size_t sz,
+                         int base,
+                         size_t padding);
+#endif  // defined(OS_POSIX) && !defined(OS_ANDROID)
+
+}  // namespace internal
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_STACK_TRACE_H_

+ 843 - 0
base/debug/stack_trace_posix.cc

@@ -0,0 +1,843 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <map>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#if defined(__GLIBCXX__)
+#include <cxxabi.h>
+#endif
+#if !defined(__UCLIBC__)
+#include <execinfo.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#endif
+
+#include "base/basictypes.h"
+#include "base/debug/debugger.h"
+#include "base/debug/proc_maps_linux.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/singleton.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/build_config.h"
+
+#if defined(USE_SYMBOLIZE)
+#include "base/third_party/symbolize/symbolize.h"
+#endif
+
+namespace base {
+namespace debug {
+
+namespace {
+
+volatile sig_atomic_t in_signal_handler = 0;
+
+#if !defined(USE_SYMBOLIZE) && defined(__GLIBCXX__)
+// The prefix used for mangled symbols, per the Itanium C++ ABI:
+// http://www.codesourcery.com/cxx-abi/abi.html#mangling
+const char kMangledSymbolPrefix[] = "_Z";
+
+// Characters that can be used for symbols, generated by Ruby:
+// (('a'..'z').to_a+('A'..'Z').to_a+('0'..'9').to_a + ['_']).join
+const char kSymbolCharacters[] =
+    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+#endif  // !defined(USE_SYMBOLIZE) && defined(__GLIBCXX__)
+
+#if !defined(USE_SYMBOLIZE)
+// Demangles C++ symbols in the given text. Example:
+//
+// "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
+// =>
+// "out/Debug/base_unittests(StackTrace::StackTrace()+0x20) [0x817778c]"
+void DemangleSymbols(std::string* text) {
+  // Note: code in this function is NOT async-signal safe (std::string uses
+  // malloc internally).
+
+#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
+
+  std::string::size_type search_from = 0;
+  while (search_from < text->size()) {
+    // Look for the start of a mangled symbol, from search_from.
+    std::string::size_type mangled_start =
+        text->find(kMangledSymbolPrefix, search_from);
+    if (mangled_start == std::string::npos) {
+      break;  // Mangled symbol not found.
+    }
+
+    // Look for the end of the mangled symbol.
+    std::string::size_type mangled_end =
+        text->find_first_not_of(kSymbolCharacters, mangled_start);
+    if (mangled_end == std::string::npos) {
+      mangled_end = text->size();
+    }
+    std::string mangled_symbol =
+        text->substr(mangled_start, mangled_end - mangled_start);
+
+    // Try to demangle the mangled symbol candidate.
+    int status = 0;
+    scoped_ptr<char, base::FreeDeleter> demangled_symbol(
+        abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
+    if (status == 0) {  // Demangling is successful.
+      // Remove the mangled symbol.
+      text->erase(mangled_start, mangled_end - mangled_start);
+      // Insert the demangled symbol.
+      text->insert(mangled_start, demangled_symbol.get());
+      // Next time, we'll start right after the demangled symbol we inserted.
+      search_from = mangled_start + strlen(demangled_symbol.get());
+    } else {
+      // Failed to demangle.  Retry after the "_Z" we just found.
+      search_from = mangled_start + 2;
+    }
+  }
+
+#endif  // defined(__GLIBCXX__) && !defined(__UCLIBC__)
+}
+#endif  // !defined(USE_SYMBOLIZE)
+
// Sink interface used by the backtrace printers below: each resolved frame
// (or raw address) is emitted as one or more C-string chunks through
// HandleOutput(). Implementations used from the signal handler must keep
// HandleOutput() async-signal safe (no malloc/stdio).
class BacktraceOutputHandler {
 public:
  virtual void HandleOutput(const char* output) = 0;

 protected:
  // Protected non-virtual-delete style: instances are never destroyed
  // through this interface.
  virtual ~BacktraceOutputHandler() {}
};
+
+void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
+  // This should be more than enough to store a 64-bit number in hex:
+  // 16 hex digits + 1 for null-terminator.
+  char buf[17] = { '\0' };
+  handler->HandleOutput("0x");
+  internal::itoa_r(reinterpret_cast<intptr_t>(pointer),
+                   buf, sizeof(buf), 16, 12);
+  handler->HandleOutput(buf);
+}
+
+#if defined(USE_SYMBOLIZE)
+void OutputFrameId(intptr_t frame_id, BacktraceOutputHandler* handler) {
+  // Max unsigned 64-bit number in decimal has 20 digits (18446744073709551615).
+  // Hence, 30 digits should be more than enough to represent it in decimal
+  // (including the null-terminator).
+  char buf[30] = { '\0' };
+  handler->HandleOutput("#");
+  internal::itoa_r(frame_id, buf, sizeof(buf), 10, 1);
+  handler->HandleOutput(buf);
+}
+#endif  // defined(USE_SYMBOLIZE)
+
// Walks |trace| (|size| frames) and emits each frame through |handler|,
// symbolized when the build allows it.
void ProcessBacktrace(void *const *trace,
                      size_t size,
                      BacktraceOutputHandler* handler) {
  // NOTE: This code MUST be async-signal safe (it's used by in-process
  // stack dumping signal handler). NO malloc or stdio is allowed here.

#if defined(USE_SYMBOLIZE)
  // google::Symbolize is usable here, so each frame is resolved inline.
  for (size_t i = 0; i < size; ++i) {
    OutputFrameId(i, handler);
    handler->HandleOutput(" ");
    OutputPointer(trace[i], handler);
    handler->HandleOutput(" ");

    char buf[1024] = { '\0' };

    // Subtract by one as return address of function may be in the next
    // function when a function is annotated as noreturn.
    void* address = static_cast<char*>(trace[i]) - 1;
    if (google::Symbolize(address, buf, sizeof(buf)))
      handler->HandleOutput(buf);
    else
      handler->HandleOutput("<unknown>");

    handler->HandleOutput("\n");
  }
#elif !defined(__UCLIBC__)
  bool printed = false;

  // Below part is async-signal unsafe (uses malloc), so execute it only
  // when we are not executing the signal handler.
  if (in_signal_handler == 0) {
    scoped_ptr<char*, FreeDeleter>
        trace_symbols(backtrace_symbols(trace, size));
    if (trace_symbols.get()) {
      for (size_t i = 0; i < size; ++i) {
        std::string trace_symbol = trace_symbols.get()[i];
        DemangleSymbols(&trace_symbol);
        handler->HandleOutput(trace_symbol.c_str());
        handler->HandleOutput("\n");
      }

      printed = true;
    }
  }

  // Fallback (and the in-signal path): dump raw addresses only.
  if (!printed) {
    for (size_t i = 0; i < size; ++i) {
      handler->HandleOutput(" [");
      OutputPointer(trace[i], handler);
      handler->HandleOutput("]\n");
    }
  }
#endif  // defined(USE_SYMBOLIZE)
}
+
+void PrintToStderr(const char* output) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+  ignore_result(HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))));
+}
+
// Fatal-signal handler installed by EnableInProcessStackDumping(): prints
// the signal number, a human-readable si_code tag, the faulting address, a
// stack trace, and (Linux/x86 only) the CPU registers from the signal's
// ucontext, then terminates the process with _exit(1).
void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
  // NOTE: This code MUST be async-signal safe.
  // NO malloc or stdio is allowed here.

  // Record the fact that we are in the signal handler now, so that the rest
  // of StackTrace can behave in an async-signal-safe manner.
  in_signal_handler = 1;

  // If a debugger is attached, break into it instead of dumping.
  if (BeingDebugged())
    BreakDebugger();

  PrintToStderr("Received signal ");
  char buf[1024] = { 0 };
  internal::itoa_r(signal, buf, sizeof(buf), 10, 0);
  PrintToStderr(buf);
  // Translate si_code into a symbolic tag for the faulting signals.
  if (signal == SIGBUS) {
    if (info->si_code == BUS_ADRALN)
      PrintToStderr(" BUS_ADRALN ");
    else if (info->si_code == BUS_ADRERR)
      PrintToStderr(" BUS_ADRERR ");
    else if (info->si_code == BUS_OBJERR)
      PrintToStderr(" BUS_OBJERR ");
    else
      PrintToStderr(" <unknown> ");
  } else if (signal == SIGFPE) {
    if (info->si_code == FPE_FLTDIV)
      PrintToStderr(" FPE_FLTDIV ");
    else if (info->si_code == FPE_FLTINV)
      PrintToStderr(" FPE_FLTINV ");
    else if (info->si_code == FPE_FLTOVF)
      PrintToStderr(" FPE_FLTOVF ");
    else if (info->si_code == FPE_FLTRES)
      PrintToStderr(" FPE_FLTRES ");
    else if (info->si_code == FPE_FLTSUB)
      PrintToStderr(" FPE_FLTSUB ");
    else if (info->si_code == FPE_FLTUND)
      PrintToStderr(" FPE_FLTUND ");
    else if (info->si_code == FPE_INTDIV)
      PrintToStderr(" FPE_INTDIV ");
    else if (info->si_code == FPE_INTOVF)
      PrintToStderr(" FPE_INTOVF ");
    else
      PrintToStderr(" <unknown> ");
  } else if (signal == SIGILL) {
    if (info->si_code == ILL_BADSTK)
      PrintToStderr(" ILL_BADSTK ");
    else if (info->si_code == ILL_COPROC)
      PrintToStderr(" ILL_COPROC ");
    else if (info->si_code == ILL_ILLOPN)
      PrintToStderr(" ILL_ILLOPN ");
    else if (info->si_code == ILL_ILLADR)
      PrintToStderr(" ILL_ILLADR ");
    else if (info->si_code == ILL_ILLTRP)
      PrintToStderr(" ILL_ILLTRP ");
    else if (info->si_code == ILL_PRVOPC)
      PrintToStderr(" ILL_PRVOPC ");
    else if (info->si_code == ILL_PRVREG)
      PrintToStderr(" ILL_PRVREG ");
    else
      PrintToStderr(" <unknown> ");
  } else if (signal == SIGSEGV) {
    if (info->si_code == SEGV_MAPERR)
      PrintToStderr(" SEGV_MAPERR ");
    else if (info->si_code == SEGV_ACCERR)
      PrintToStderr(" SEGV_ACCERR ");
    else
      PrintToStderr(" <unknown> ");
  }
  // For the faulting signals, also print the fault address (si_addr) in hex.
  if (signal == SIGBUS || signal == SIGFPE ||
      signal == SIGILL || signal == SIGSEGV) {
    internal::itoa_r(reinterpret_cast<intptr_t>(info->si_addr),
                     buf, sizeof(buf), 16, 12);
    PrintToStderr(buf);
  }
  PrintToStderr("\n");

  debug::StackTrace().Print();

#if defined(OS_LINUX)
#if ARCH_CPU_X86_FAMILY
  // Dump the CPU registers captured in the signal's ucontext.
  ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context);
  const struct {
    const char* label;
    greg_t value;
  } registers[] = {
#if ARCH_CPU_32_BITS
    { "  gs: ", context->uc_mcontext.gregs[REG_GS] },
    { "  fs: ", context->uc_mcontext.gregs[REG_FS] },
    { "  es: ", context->uc_mcontext.gregs[REG_ES] },
    { "  ds: ", context->uc_mcontext.gregs[REG_DS] },
    { " edi: ", context->uc_mcontext.gregs[REG_EDI] },
    { " esi: ", context->uc_mcontext.gregs[REG_ESI] },
    { " ebp: ", context->uc_mcontext.gregs[REG_EBP] },
    { " esp: ", context->uc_mcontext.gregs[REG_ESP] },
    { " ebx: ", context->uc_mcontext.gregs[REG_EBX] },
    { " edx: ", context->uc_mcontext.gregs[REG_EDX] },
    { " ecx: ", context->uc_mcontext.gregs[REG_ECX] },
    { " eax: ", context->uc_mcontext.gregs[REG_EAX] },
    { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] },
    { " err: ", context->uc_mcontext.gregs[REG_ERR] },
    { "  ip: ", context->uc_mcontext.gregs[REG_EIP] },
    { "  cs: ", context->uc_mcontext.gregs[REG_CS] },
    { " efl: ", context->uc_mcontext.gregs[REG_EFL] },
    { " usp: ", context->uc_mcontext.gregs[REG_UESP] },
    { "  ss: ", context->uc_mcontext.gregs[REG_SS] },
#elif ARCH_CPU_64_BITS
    { "  r8: ", context->uc_mcontext.gregs[REG_R8] },
    { "  r9: ", context->uc_mcontext.gregs[REG_R9] },
    { " r10: ", context->uc_mcontext.gregs[REG_R10] },
    { " r11: ", context->uc_mcontext.gregs[REG_R11] },
    { " r12: ", context->uc_mcontext.gregs[REG_R12] },
    { " r13: ", context->uc_mcontext.gregs[REG_R13] },
    { " r14: ", context->uc_mcontext.gregs[REG_R14] },
    { " r15: ", context->uc_mcontext.gregs[REG_R15] },
    { "  di: ", context->uc_mcontext.gregs[REG_RDI] },
    { "  si: ", context->uc_mcontext.gregs[REG_RSI] },
    { "  bp: ", context->uc_mcontext.gregs[REG_RBP] },
    { "  bx: ", context->uc_mcontext.gregs[REG_RBX] },
    { "  dx: ", context->uc_mcontext.gregs[REG_RDX] },
    { "  ax: ", context->uc_mcontext.gregs[REG_RAX] },
    { "  cx: ", context->uc_mcontext.gregs[REG_RCX] },
    { "  sp: ", context->uc_mcontext.gregs[REG_RSP] },
    { "  ip: ", context->uc_mcontext.gregs[REG_RIP] },
    { " efl: ", context->uc_mcontext.gregs[REG_EFL] },
    { " cgf: ", context->uc_mcontext.gregs[REG_CSGSFS] },
    { " erf: ", context->uc_mcontext.gregs[REG_ERR] },
    { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] },
    { " msk: ", context->uc_mcontext.gregs[REG_OLDMASK] },
    { " cr2: ", context->uc_mcontext.gregs[REG_CR2] },
#endif
  };

  // Fixed-width hex so the columns line up: 8 digits on 32-bit, 16 on 64-bit.
#if ARCH_CPU_32_BITS
  const int kRegisterPadding = 8;
#elif ARCH_CPU_64_BITS
  const int kRegisterPadding = 16;
#endif

  // Print four registers per output line.
  for (size_t i = 0; i < ARRAYSIZE_UNSAFE(registers); i++) {
    PrintToStderr(registers[i].label);
    internal::itoa_r(registers[i].value, buf, sizeof(buf),
                     16, kRegisterPadding);
    PrintToStderr(buf);

    if ((i + 1) % 4 == 0)
      PrintToStderr("\n");
  }
  PrintToStderr("\n");
#endif
#elif defined(OS_MACOSX)
  // TODO(shess): Port to 64-bit, and ARM architecture (32 and 64-bit).
#if ARCH_CPU_X86_FAMILY && ARCH_CPU_32_BITS
  ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context);
  size_t len;

  // NOTE: Even |snprintf()| is not on the approved list for signal
  // handlers, but buffered I/O is definitely not on the list due to
  // potential for |malloc()|.
  len = static_cast<size_t>(
      snprintf(buf, sizeof(buf),
               "ax: %x, bx: %x, cx: %x, dx: %x\n",
               context->uc_mcontext->__ss.__eax,
               context->uc_mcontext->__ss.__ebx,
               context->uc_mcontext->__ss.__ecx,
               context->uc_mcontext->__ss.__edx));
  write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1));

  len = static_cast<size_t>(
      snprintf(buf, sizeof(buf),
               "di: %x, si: %x, bp: %x, sp: %x, ss: %x, flags: %x\n",
               context->uc_mcontext->__ss.__edi,
               context->uc_mcontext->__ss.__esi,
               context->uc_mcontext->__ss.__ebp,
               context->uc_mcontext->__ss.__esp,
               context->uc_mcontext->__ss.__ss,
               context->uc_mcontext->__ss.__eflags));
  write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1));

  len = static_cast<size_t>(
      snprintf(buf, sizeof(buf),
               "ip: %x, cs: %x, ds: %x, es: %x, fs: %x, gs: %x\n",
               context->uc_mcontext->__ss.__eip,
               context->uc_mcontext->__ss.__cs,
               context->uc_mcontext->__ss.__ds,
               context->uc_mcontext->__ss.__es,
               context->uc_mcontext->__ss.__fs,
               context->uc_mcontext->__ss.__gs));
  write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1));
#endif  // ARCH_CPU_32_BITS
#endif  // defined(OS_MACOSX)
  // The handler was installed with SA_RESETHAND, but terminate explicitly
  // (without running atexit handlers) so the process never resumes.
  _exit(1);
}
+
// BacktraceOutputHandler that forwards every chunk to stderr via
// PrintToStderr(); this is the async-signal-safe output path used by
// StackTrace::Print().
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
 public:
  PrintBacktraceOutputHandler() {}

  virtual void HandleOutput(const char* output) OVERRIDE {
    // NOTE: This code MUST be async-signal safe (it's used by in-process
    // stack dumping signal handler). NO malloc or stdio is allowed here.
    PrintToStderr(output);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
};
+
// BacktraceOutputHandler that appends every chunk to a caller-provided
// std::ostream. NOT async-signal safe (stream insertion may allocate);
// used by StackTrace::OutputToStream(). |os| must outlive this handler.
class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
 public:
  explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {
  }

  virtual void HandleOutput(const char* output) OVERRIDE {
    (*os_) << output;
  }

 private:
  // Not owned.
  std::ostream* os_;

  DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
};
+
+void WarmUpBacktrace() {
+  // Warm up stack trace infrastructure. It turns out that on the first
+  // call glibc initializes some internal data structures using pthread_once,
+  // and even backtrace() can call malloc(), leading to hangs.
+  //
+  // Example stack trace snippet (with tcmalloc):
+  //
+  // #8  0x0000000000a173b5 in tc_malloc
+  //             at ./third_party/tcmalloc/chromium/src/debugallocation.cc:1161
+  // #9  0x00007ffff7de7900 in _dl_map_object_deps at dl-deps.c:517
+  // #10 0x00007ffff7ded8a9 in dl_open_worker at dl-open.c:262
+  // #11 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+  // #12 0x00007ffff7ded31a in _dl_open (file=0x7ffff625e298 "libgcc_s.so.1")
+  //             at dl-open.c:639
+  // #13 0x00007ffff6215602 in do_dlopen at dl-libc.c:89
+  // #14 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+  // #15 0x00007ffff62156c4 in dlerror_run at dl-libc.c:48
+  // #16 __GI___libc_dlopen_mode at dl-libc.c:165
+  // #17 0x00007ffff61ef8f5 in init
+  //             at ../sysdeps/x86_64/../ia64/backtrace.c:53
+  // #18 0x00007ffff6aad400 in pthread_once
+  //             at ../nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S:104
+  // #19 0x00007ffff61efa14 in __GI___backtrace
+  //             at ../sysdeps/x86_64/../ia64/backtrace.c:104
+  // #20 0x0000000000752a54 in base::debug::StackTrace::StackTrace
+  //             at base/debug/stack_trace_posix.cc:175
+  // #21 0x00000000007a4ae5 in
+  //             base::(anonymous namespace)::StackDumpSignalHandler
+  //             at base/process_util_posix.cc:172
+  // #22 <signal handler called>
+  StackTrace stack_trace;
+}
+
+}  // namespace
+
+#if defined(USE_SYMBOLIZE)
+
+// class SandboxSymbolizeHelper.
+//
+// The purpose of this class is to prepare and install a "file open" callback
+// needed by the stack trace symbolization code
+// (base/third_party/symbolize/symbolize.h) so that it can function properly
+// in a sandboxed process.  The caveat is that this class must be instantiated
+// before the sandboxing is enabled so that it can get the chance to open all
+// the object files that are loaded in the virtual address space of the current
+// process.
class SandboxSymbolizeHelper {
 public:
  // Returns the singleton instance.
  static SandboxSymbolizeHelper* GetInstance() {
    return Singleton<SandboxSymbolizeHelper>::get();
  }

 private:
  friend struct DefaultSingletonTraits<SandboxSymbolizeHelper>;

  // Caches /proc/self/maps, pre-opens the mapped object files (debug builds
  // only) and registers the symbolizer's file-open callback.
  SandboxSymbolizeHelper()
      : is_initialized_(false) {
    Init();
  }

  ~SandboxSymbolizeHelper() {
    UnregisterCallback();
    CloseObjectFiles();
  }

  // Returns a O_RDONLY file descriptor for |file_path| if it was opened
  // successfully during the initialization.  The file is repositioned at
  // offset 0.
  // IMPORTANT: This function must be async-signal-safe because it can be
  // called from a signal handler (symbolizing stack frames for a crash).
  int GetFileDescriptor(const char* file_path) {
    int fd = -1;

#if !defined(NDEBUG)
    if (file_path) {
      // The assumption here is that iterating over std::map<std::string, int>
      // using a const_iterator does not allocate dynamic memory, hence it is
      // async-signal-safe.
      std::map<std::string, int>::const_iterator it;
      for (it = modules_.begin(); it != modules_.end(); ++it) {
        if (strcmp((it->first).c_str(), file_path) == 0) {
          // POSIX.1-2004 requires an implementation to guarantee that dup()
          // is async-signal-safe.
          fd = dup(it->second);
          break;
        }
      }
      // POSIX.1-2004 requires an implementation to guarantee that lseek()
      // is async-signal-safe.
      if (fd >= 0 && lseek(fd, 0, SEEK_SET) < 0) {
        // Failed to seek.
        fd = -1;
      }
    }
#endif  // !defined(NDEBUG)

    return fd;
  }

  // Searches for the object file (from /proc/self/maps) that contains
  // the specified pc.  If found, sets |start_address| to the start address
  // of where this object file is mapped in memory, sets the module base
  // address into |base_address|, copies the object file name into
  // |out_file_name|, and attempts to open the object file.  If the object
  // file is opened successfully, returns the file descriptor.  Otherwise,
  // returns -1.  |out_file_name_size| is the size of the file name buffer
  // (including the null terminator).
  // IMPORTANT: This function must be async-signal-safe because it can be
  // called from a signal handler (symbolizing stack frames for a crash).
  static int OpenObjectFileContainingPc(uint64_t pc, uint64_t& start_address,
                                        uint64_t& base_address, char* file_path,
                                        int file_path_size) {
    // This method can only be called after the singleton is instantiated.
    // This is ensured by the following facts:
    // * This is the only static method in this class, it is private, and
    //   the class has no friends (except for the DefaultSingletonTraits).
    //   The compiler guarantees that it can only be called after the
    //   singleton is instantiated.
    // * This method is used as a callback for the stack tracing code and
    //   the callback registration is done in the constructor, so logically
    //   it cannot be called before the singleton is created.
    SandboxSymbolizeHelper* instance = GetInstance();

    // The assumption here is that iterating over
    // std::vector<MappedMemoryRegion> using a const_iterator does not allocate
    // dynamic memory, hence it is async-signal-safe.
    std::vector<MappedMemoryRegion>::const_iterator it;
    bool is_first = true;
    for (it = instance->regions_.begin(); it != instance->regions_.end();
         ++it, is_first = false) {
      const MappedMemoryRegion& region = *it;
      if (region.start <= pc && pc < region.end) {
        start_address = region.start;
        // Don't subtract 'start_address' from the first entry:
        // * If a binary is compiled w/o -pie, then the first entry in
        //   process maps is likely the binary itself (all dynamic libs
        //   are mapped higher in address space). For such a binary,
        //   instruction offset in binary coincides with the actual
        //   instruction address in virtual memory (as code section
        //   is mapped to a fixed memory range).
        // * If a binary is compiled with -pie, all the modules are
        //   mapped high at address space (in particular, higher than
        //   shadow memory of the tool), so the module can't be the
        //   first entry.
        base_address = (is_first ? 0U : start_address) - region.offset;
        if (file_path && file_path_size > 0) {
          strncpy(file_path, region.path.c_str(), file_path_size);
          // Ensure null termination.
          file_path[file_path_size - 1] = '\0';
        }
        return instance->GetFileDescriptor(region.path.c_str());
      }
    }
    return -1;
  }

  // Parses /proc/self/maps in order to compile a list of all object file names
  // for the modules that are loaded in the current process.
  // Returns true on success.
  bool CacheMemoryRegions() {
    // Reads /proc/self/maps.
    std::string contents;
    if (!ReadProcMaps(&contents)) {
      LOG(ERROR) << "Failed to read /proc/self/maps";
      return false;
    }

    // Parses /proc/self/maps.
    if (!ParseProcMaps(contents, &regions_)) {
      LOG(ERROR) << "Failed to parse the contents of /proc/self/maps";
      return false;
    }

    is_initialized_ = true;
    return true;
  }

    // FIXME(gejun): Missing O_CLOEXEC from our linux headers. The flag should
    // work on majority machines which installed 2.6.23 or newer kernels. But
    // if the kernel is older, I'm not sure that open() will fail or ignore
    // the flag.
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000
#endif

  // Opens all object files and caches their file descriptors.
  void OpenSymbolFiles() {
    // Pre-opening and caching the file descriptors of all loaded modules is
    // not considered safe for retail builds.  Hence it is only done in debug
    // builds.  For more details, take a look at: http://crbug.com/341966
    // Enabling this to release mode would require approval from the security
    // team.
#if !defined(NDEBUG)
    // Open the object files for all read-only executable regions and cache
    // their file descriptors.
    std::vector<MappedMemoryRegion>::const_iterator it;
    for (it = regions_.begin(); it != regions_.end(); ++it) {
      const MappedMemoryRegion& region = *it;
      // Only interested in read-only executable regions.
      if ((region.permissions & MappedMemoryRegion::READ) ==
              MappedMemoryRegion::READ &&
          (region.permissions & MappedMemoryRegion::WRITE) == 0 &&
          (region.permissions & MappedMemoryRegion::EXECUTE) ==
              MappedMemoryRegion::EXECUTE) {
        if (region.path.empty()) {
          // Skip regions with empty file names.
          continue;
        }
        if (region.path[0] == '[') {
          // Skip pseudo-paths, like [stack], [vdso], [heap], etc ...
          continue;
        }
        // Avoid duplicates.
        if (modules_.find(region.path) == modules_.end()) {
          int fd = open(region.path.c_str(), O_RDONLY | O_CLOEXEC);
          if (fd >= 0) {
            modules_.insert(std::make_pair(region.path, fd));
          } else {
            LOG(WARNING) << "Failed to open file: " << region.path
                         << "\n  Error: " << strerror(errno);
          }
        }
      }
    }
#endif  // !defined(NDEBUG)
  }

  // Initializes and installs the symbolization callback.
  void Init() {
    if (CacheMemoryRegions()) {
      OpenSymbolFiles();
      google::InstallSymbolizeOpenObjectFileCallback(
          &OpenObjectFileContainingPc);
    }
  }

  // Unregister symbolization callback.
  void UnregisterCallback() {
    if (is_initialized_) {
      google::InstallSymbolizeOpenObjectFileCallback(NULL);
      is_initialized_ = false;
    }
  }

  // Closes all file descriptors owned by this instance.
  void CloseObjectFiles() {
#if !defined(NDEBUG)
    std::map<std::string, int>::iterator it;
    for (it = modules_.begin(); it != modules_.end(); ++it) {
      int ret = IGNORE_EINTR(close(it->second));
      DCHECK(!ret);
      it->second = -1;
    }
    modules_.clear();
#endif  // !defined(NDEBUG)
  }

  // Set to true upon successful initialization.
  bool is_initialized_;

#if !defined(NDEBUG)
  // Mapping from file name to file descriptor.  Includes file descriptors
  // for all successfully opened object files and the file descriptor for
  // /proc/self/maps.  This code is not safe for release builds so
  // this is only done for DEBUG builds.
  std::map<std::string, int> modules_;
#endif  // !defined(NDEBUG)

  // Cache for the process memory regions.  Produced by parsing the contents
  // of /proc/self/maps cache.
  std::vector<MappedMemoryRegion> regions_;

  DISALLOW_COPY_AND_ASSIGN(SandboxSymbolizeHelper);
};
+#endif  // USE_SYMBOLIZE
+
+bool EnableInProcessStackDumpingForSandbox() {
+#if defined(USE_SYMBOLIZE)
+  SandboxSymbolizeHelper::GetInstance();
+#endif  // USE_SYMBOLIZE
+
+  return EnableInProcessStackDumping();
+}
+
+bool EnableInProcessStackDumping() {
+  // When running in an application, our code typically expects SIGPIPE
+  // to be ignored.  Therefore, when testing that same code, it should run
+  // with SIGPIPE ignored as well.
+  struct sigaction sigpipe_action;
+  memset(&sigpipe_action, 0, sizeof(sigpipe_action));
+  sigpipe_action.sa_handler = SIG_IGN;
+  sigemptyset(&sigpipe_action.sa_mask);
+  bool success = (sigaction(SIGPIPE, &sigpipe_action, NULL) == 0);
+
+  // Avoid hangs during backtrace initialization, see above.
+  WarmUpBacktrace();
+
+  struct sigaction action;
+  memset(&action, 0, sizeof(action));
+  action.sa_flags = SA_RESETHAND | SA_SIGINFO;
+  action.sa_sigaction = &StackDumpSignalHandler;
+  sigemptyset(&action.sa_mask);
+
+  success &= (sigaction(SIGILL, &action, NULL) == 0);
+  success &= (sigaction(SIGABRT, &action, NULL) == 0);
+  success &= (sigaction(SIGFPE, &action, NULL) == 0);
+  success &= (sigaction(SIGBUS, &action, NULL) == 0);
+  success &= (sigaction(SIGSEGV, &action, NULL) == 0);
+// On Linux, SIGSYS is reserved by the kernel for seccomp-bpf sandboxing.
+#if !defined(OS_LINUX)
+  success &= (sigaction(SIGSYS, &action, NULL) == 0);
+#endif  // !defined(OS_LINUX)
+
+  return success;
+}
+
+StackTrace::StackTrace() {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if !defined(__UCLIBC__)
+  // Though the backtrace API man page does not list any possible negative
+  // return values, we take no chance.
+  count_ = base::saturated_cast<size_t>(backtrace(trace_, arraysize(trace_)));
+#else
+  count_ = 0;
+#endif
+}
+
+void StackTrace::Print() const {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if !defined(__UCLIBC__)
+  PrintBacktraceOutputHandler handler;
+  ProcessBacktrace(trace_, count_, &handler);
+#endif
+}
+
+#if !defined(__UCLIBC__)
+void StackTrace::OutputToStream(std::ostream* os) const {
+  StreamBacktraceOutputHandler handler(os);
+  ProcessBacktrace(trace_, count_, &handler);
+}
+#endif
+
+namespace internal {
+
+// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
// Async-signal-safe integer-to-ASCII conversion.
// Converts |i| to a NUL-terminated string in |buf| using |base| (2..16),
// left-padding the digits with '0' up to |padding| characters. Negative
// values are honored only for base 10 (other bases print the raw unsigned
// bit pattern). Returns |buf| on success, or NULL if the result (including
// sign and padding) does not fit into |sz| bytes; on failure buf[0] is set
// to NUL when sz permits.
char *itoa_r(intptr_t i, char *buf, size_t sz, int base, size_t padding) {
  // Make sure we can write at least one NUL byte.
  size_t n = 1;
  if (n > sz)
    return NULL;

  if (base < 2 || base > 16) {
    buf[0] = '\000';
    return NULL;
  }

  char *start = buf;

  uintptr_t j = i;

  // Handle negative numbers (only for base 10).
  if (i < 0 && base == 10) {
    // Compute the magnitude in unsigned arithmetic: the original "j = -i"
    // is undefined behavior when i == INTPTR_MIN (signed negation
    // overflows). Two's-complement negation of the converted value is
    // well-defined and yields the correct magnitude for every input.
    j = ~static_cast<uintptr_t>(i) + 1;

    // Make sure we can write the '-' character.
    if (++n > sz) {
      buf[0] = '\000';
      return NULL;
    }
    *start++ = '-';
  }

  // Loop until we have converted the entire number. Output at least one
  // character (i.e. '0').
  char *ptr = start;
  do {
    // Make sure there is still enough space left in our output buffer.
    if (++n > sz) {
      buf[0] = '\000';
      return NULL;
    }

    // Output the next digit.
    *ptr++ = "0123456789abcdef"[j % base];
    j /= base;

    if (padding > 0)
      padding--;
  } while (j > 0 || padding > 0);

  // Terminate the output with a NUL character.
  *ptr = '\000';

  // Conversion to ASCII actually resulted in the digits being in reverse
  // order. We can't easily generate them in forward order, as we can't tell
  // the number of characters needed until we are done converting.
  // So, now, we reverse the string (except for the possible "-" sign).
  while (--ptr > start) {
    char ch = *ptr;
    *ptr = *start;
    *start++ = ch;
  }
  return buf;
}
+
+}  // namespace internal
+
+}  // namespace debug
+}  // namespace base

Algúns arquivos non se mostraron porque demasiados arquivos cambiaron neste cambio