Explorar el Código

tarscpp support co

ruanshudong hace 2 años
padre
commit
e5e58abc78
Se han modificado 49 ficheros con 12111 adiciones y 6075 borrados
  1. 17 12
      util/include/util/tc_clientsocket.h
  2. 3 0
      util/include/util/tc_common.h
  3. 784 0
      util/include/util/tc_coroutine.h
  4. 363 0
      util/include/util/tc_coroutine_queue.h
  5. 2459 2398
      util/include/util/tc_epoll_server.h
  6. 513 301
      util/include/util/tc_epoller.h
  7. 7 0
      util/include/util/tc_ex.h
  8. 29 2
      util/include/util/tc_http.h
  9. 501 523
      util/include/util/tc_http_async.h
  10. 3 3
      util/include/util/tc_json.h
  11. 32 11
      util/include/util/tc_logger.h
  12. 582 373
      util/include/util/tc_network_buffer.h
  13. 1 1
      util/include/util/tc_option.h
  14. 32 17
      util/include/util/tc_port.h
  15. 280 0
      util/include/util/tc_proxy_info.h
  16. 23 23
      util/include/util/tc_singleton.h
  17. 39 17
      util/include/util/tc_socket.h
  18. 70 3
      util/include/util/tc_thread.h
  19. 12 12
      util/include/util/tc_thread_pool.h
  20. 34 2
      util/include/util/tc_thread_queue.h
  21. 6 0
      util/include/util/tc_timeout_queue.h
  22. 4 4
      util/include/util/tc_timeout_queue_noid.h
  23. 8 4
      util/include/util/tc_timeprovider.h
  24. 178 102
      util/include/util/tc_timer.h
  25. 724 0
      util/include/util/tc_transceiver.h
  26. 130 0
      util/include/util/tc_uuid_generator.h
  27. 14 13
      util/src/epoll_windows/src/epoll.cpp
  28. 65 31
      util/src/tc_clientsocket.cpp
  29. 23 8
      util/src/tc_common.cpp
  30. 865 0
      util/src/tc_coroutine.cpp
  31. 6 6
      util/src/tc_encoder.cpp
  32. 564 846
      util/src/tc_epoll_server.cpp
  33. 645 319
      util/src/tc_epoller.cpp
  34. 13 5
      util/src/tc_ex.cpp
  35. 78 42
      util/src/tc_http.cpp
  36. 450 608
      util/src/tc_http_async.cpp
  37. 2 0
      util/src/tc_logger.cpp
  38. 399 127
      util/src/tc_network_buffer.cpp
  39. 19 6
      util/src/tc_openssl.cpp
  40. 2 2
      util/src/tc_option.cpp
  41. 106 40
      util/src/tc_port.cpp
  42. 311 0
      util/src/tc_proxy_info.cpp
  43. 52 1
      util/src/tc_socket.cpp
  44. 17 11
      util/src/tc_spin_lock.cpp
  45. 115 7
      util/src/tc_thread.cpp
  46. 8 1
      util/src/tc_thread_pool.cpp
  47. 19 18
      util/src/tc_timeprovider.cpp
  48. 232 176
      util/src/tc_timer.cpp
  49. 1272 0
      util/src/tc_transceiver.cpp

+ 17 - 12
util/include/util/tc_clientsocket.h

@@ -63,7 +63,12 @@ struct TC_EndpointParse_Exception : public TC_Exception
 class TC_Endpoint
 {
 public:
+    //监听类型
 	enum EType { UDP = 0, TCP = 1, SSL = 2 };
+
+    //鉴权类型
+    enum AUTH_TYPE { AUTH_TYPENONE = 0, AUTH_TYPELOCAL = 1};
+
     /**
      *
      */
@@ -79,7 +84,7 @@ public:
      * @param type, SOCK_STREAM或SOCK_DGRAM
      * @param type, SOCK_STREAM or SOCK_DGRAM
      */
-    TC_Endpoint(const string& host, int port, int timeout, EType type = TCP, int grid = 0, int qos = 0, int weight = -1, unsigned int weighttype = 0, int authType = 0)
+    TC_Endpoint(const string& host, int port, int timeout, EType type = TCP, int grid = 0, int qos = 0, int weight = -1, unsigned int weighttype = 0, AUTH_TYPE authType = AUTH_TYPENONE)
     {
         init(host, port, timeout, type, grid, qos, weight, weighttype, authType);
     }
@@ -198,12 +203,12 @@ public:
     int getTimeout() const              { return _timeout; }
 
     /**
-     * @brief  是否是TCP, 否则则为UDP
+     * @brief  是否是TCP/SSL, 否则则为UDP
      * @brief  Determine whether it uses TCP or UDP
      *
      * @return bool
      */
-    int  isTcp() const                  { return _type == TCP || _type == SSL; }
+    bool isTcp() const                  { return _type == TCP || _type == SSL; }
 
     /**
      * @brief  是否是SSL
@@ -211,15 +216,14 @@ public:
      *
      * @return int
      */
-    int isSSL() const                  { return _type == SSL; }
+    bool isSSL() const                  { return _type == SSL; }
 
     /**
      * @brief 设置为TCP或UDP
      * @brief Set to TCP or UDP
      * @param bTcp
      */
-	int isUdp() const                  { return _type == UDP; }
-//    void setTcp(bool bTcp)              { _type = bTcp; }
+	bool isUdp() const                  { return _type == UDP; }
 
     /**
      * @brief 设置为TCP/UDP/SSL
@@ -227,12 +231,12 @@ public:
      * @param type
      */
     void setType(EType type)             { _type = type; }
+    
     /**
      * @brief 获取协议类型
      * @brief Get the protocol type
      */
     EType getType() const                { return _type; }
-	
     /**
      * @brief 获取路由状态
      * @brief Get route status 
@@ -307,13 +311,13 @@ public:
      * @brief 获取认证类型
      * @brief Get authentication type
      */
-    int getAuthType() const             { return _authType; }
+    AUTH_TYPE getAuthType() const             { return _authType; }
 
 	/**
      * @brief 设置认证类型
      * @brief Set authentication type
      */
-    void setAuthType(int type)          { _authType = type; }
+    void setAuthType(AUTH_TYPE type)          { _authType = type; }
 
     /**
      * @brief 字符串描述
@@ -335,7 +339,7 @@ public:
         if(_timeout != 0) os << " -t " << _timeout;
         if (_grid != 0) os << " -g " << _grid;
         if (_qos != 0) os << " -q " << _qos;
-        if (_weight != -1) os << " -w " << _weight;
+        if (_weight > 0 ) os << " -w " << _weight;
         if (_weighttype != 0) os << " -v " << _weighttype;
 		if (_authType != 0) os << " -e " << _authType;
         return os.str();
@@ -366,7 +370,7 @@ public:
     void parse(const string &desc);
 
 private:
-    void init(const string& host, int port, int timeout, EType type, int grid, int qos, int weight, unsigned int weighttype, int authType);
+    void init(const string& host, int port, int timeout, EType type, int grid, int qos, int weight, unsigned int weighttype, AUTH_TYPE authType);
 
 protected:
     /**
@@ -415,11 +419,12 @@ protected:
      *  the weight usage of nodes
      */
     unsigned int  _weighttype = 0;
+
     /**
      *  鉴权类型
      *  Authentication Type
      */
-    int         _authType = 0;
+    AUTH_TYPE      _authType = AUTH_TYPENONE;
 
     /**
      * _host is ipv6 or not

+ 3 - 0
util/include/util/tc_common.h

@@ -28,12 +28,15 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <cassert>
+#include <list>
+#include <thread>
 #include <cstdio>
 #include <string>
 #include <iostream>
 #include <sstream>
 #include <stdexcept>
 #include <algorithm>
+#include <sstream>
 #include <map>
 #include <set>
 #include <unordered_map>

+ 784 - 0
util/include/util/tc_coroutine.h

@@ -0,0 +1,784 @@
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#ifndef _TC_COROUTINES_H_
+#define _TC_COROUTINES_H_
+
+#include <cstddef>
+#include <list>
+#include <set>
+#include <deque>
+#include <map>
+#include <functional>
+
+#include "util/tc_fcontext.h"
+#include "util/tc_thread_queue.h"
+#include "util/tc_monitor.h"
+#include "util/tc_thread.h"
+#include "util/tc_epoller.h"
+
+using namespace std;
+
+namespace tars
+{
+/////////////////////////////////////////////////
+/**
+ * @file  tc_coroutine.h
+ * @brief  协程操作封装类
+ * @brief  coroutine encapsulation class
+ *
+ * 设计说明:
+ * - 每个线程可以有多个协程, 协程底层使用的boost的几个主要的宏来实现协程间的跳转
+ * - 协程需要使用的栈, 提前就要分配好, 通常启动协程的时候需要设置
+ * - 协程调度运行是通过epoller来实现的, 这样可以完美的和网络IO关联到一起, 在网络IO过程中方便的完成协程的切换, 这个是当前携程调度的核心
+ *
+ * 主要类说明如下:
+ * - TC_CoroutineInfo, 协程信息类, 每个协程都对应一个TC_CoroutineInfo对象, 协程切换本质就是切换TC_CoroutineInfo对象, 正常情况下业务不需要感知该对象
+ * - TC_CoroutineScheduler, 协程调度器类, 负责管理和调度协程, 本质上就是管理和调度TC_CoroutineInfo
+ * - TC_Coroutine, 协程类, 继承于线程类(TC_Thread), 用来给业务快速使用协程
+ *
+ * TC_CoroutineScheduler详细说明:
+ * - 该类是协程调度的核心, 业务使用上, 框架需要和这个类打交道, 业务上除非自己来实现协程管理逻辑, 否则通常可以不深入了解该类的实现
+ * - 注意每个协程都是需要使用栈空间的, 因此TC_CoroutineScheduler有init来初始化: <总共内存大小,栈大小>, 栈大小通常使用128k, 两者相除就是该调度协程器, 最大调度的协程个数
+ * - 该类对象使用线程私有变量保存, 可以通过静态函数来创建/获取/设置
+ * - 每个线程都有自己的调度器对象, 调度器对象只能调度自身线程的协程, 调度过程(运行run)本质上就是阻塞在线程的过程(run不会退出, 直到有terminate调用)
+ * - 调度过程简单的理解就是: 检查是否有需要执行的协程, 有则执行之, 没有则等待在epoll对象上, 直到有唤醒或者超时
+ * - 调度器底层使用tc_epoller来完成协程的切换, 等待和阻塞等操作, 可以和网络IO无缝粘合, 因此可以通过TC_CoroutineScheduler对象拿到TC_Epoller指针, 并用于网络IO上
+ * - 由于网络IO也是用相同的epoller对象, 因此可以做到当有数据发送/接受时, 唤醒epoll对象, 从而完成协程的切换
+ * - 协程启动通过: createCoroutine 函数来完成
+ * - 协程在运行中, 主要使用三个函数来完成, 调度控制: yield/sleep/put
+ *
+ * TC_Coroutine详细说明:
+ * - 使用线程模拟协程组, 即多个协程同时被创建出来
+ * - 业务可以直接继承这个类, 使用时, 首先要调用setCoroInfo方法设置协程的基本信息
+ * - 实现这个类的: handle 方法, 然后类似启动线程一样(start)方法即可, 会同时有多个协程执行handle方法
+ * - 调用start函数, 启动线程, 同时会创建(iNum, iMaxNum)个协程
+ * - terminate结束
+ */
+/////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////////
+/**
+ * @brief  协程异常类
+ */
+struct TC_CoroutineException : public TC_Exception
+{
+	TC_CoroutineException(const string &buffer) : TC_Exception(buffer){};
+	TC_CoroutineException(const string &buffer, int err) : TC_Exception(buffer, err){};
+	~TC_CoroutineException() throw() {};
+};
+
+/////////////////////////////////////////////
+/**
+ * 协程使用的栈内容信息
+ */
+struct stack_context
+{
+	std::size_t  size;
+	void*        sp;
+
+	stack_context()
+		: size(0)
+		, sp(0)
+	{}
+};
+
+struct stack_traits
+{
+	static bool is_unbounded();
+
+	static std::size_t page_size();
+
+	static std::size_t default_size();
+
+	static std::size_t minimum_size();
+
+	static std::size_t maximum_size();
+
+	static stack_context allocate(std::size_t);
+
+	static void deallocate( stack_context &);
+};
+
+
+class TC_CoroutineScheduler;
+
+///////////////////////////////////////////
+/**
+ * 协程信息类
+ * 保存了协程的基本信息, 并将协程用链表的形式组织在一起
+ */
+class TC_CoroutineInfo
+{
+public:
+	/**
+ 	* 协程的状态信息
+ 	*/
+	enum CORO_STATUS
+	{
+		CORO_FREE       = 0,
+		CORO_ACTIVE     = 1,
+		CORO_AVAIL      = 2,
+		CORO_INACTIVE   = 3,
+		CORO_TIMEOUT    = 4
+	};
+
+	/////////////////////////////////////////////
+	/*
+	 * 协程内部使用的函数
+	 */
+	struct CoroutineFunc
+	{
+		std::function<void(void*, transfer_t)>  coroFunc;
+		void*                                   args;
+	};
+
+    /**
+     * 链表初始化
+     */
+    static inline void CoroutineHeadInit(TC_CoroutineInfo *coro)
+    {
+        coro->_next = coro;
+        coro->_prev = coro;
+    }
+
+    /**
+     * 链表是否为空
+     */
+    static inline bool CoroutineHeadEmpty(TC_CoroutineInfo *coro_head)
+    {
+        return coro_head->_next == coro_head;
+    }
+
+    /**
+     * 插入
+     */
+    static inline void __CoroutineAdd(TC_CoroutineInfo *coro, TC_CoroutineInfo *prev, TC_CoroutineInfo *next)
+    {
+        next->_prev = coro;
+        coro->_next = next;
+        coro->_prev = prev;
+        prev->_next = coro;
+    }
+
+    /**
+     * 插入头部
+     */
+    static inline void CoroutineAdd(TC_CoroutineInfo *new_coro, TC_CoroutineInfo *coro_head)
+    {
+        __CoroutineAdd(new_coro, coro_head, coro_head->_next);
+    }
+
+    /**
+     * 插入尾部
+     */
+    static inline void CoroutineAddTail(TC_CoroutineInfo *new_coro, TC_CoroutineInfo *coro_head)
+    {
+        __CoroutineAdd(new_coro, coro_head->_prev, coro_head);
+    }
+
+    /**
+     * 删除
+     */
+    static inline void __CoroutineDel(TC_CoroutineInfo * prev, TC_CoroutineInfo * next)
+    {
+        next->_prev = prev;
+        prev->_next = next;
+    }
+
+    /**
+     * 删除
+     */
+    static inline void CoroutineDel(TC_CoroutineInfo *coro)
+    {
+        __CoroutineDel(coro->_prev, coro->_next);
+        coro->_next = NULL;
+        coro->_prev = NULL;
+    }
+
+    /**
+     * 从一个链表移动到另外一个链表头部
+     */
+    static inline void CoroutineMove(TC_CoroutineInfo *coro, TC_CoroutineInfo *coro_head)
+    {
+        CoroutineDel(coro);
+        CoroutineAdd(coro, coro_head);
+    }
+
+    /**
+     * 从一个链表移动到另外一个链表尾部
+     */
+    static inline void CoroutineMoveTail(TC_CoroutineInfo *coro, TC_CoroutineInfo *coro_head)
+    {
+        CoroutineDel(coro);
+        CoroutineAddTail(coro, coro_head);
+    }
+
+protected:
+	//协程的入口函数
+	static void corotineEntry(transfer_t q);
+
+	//在协程里执行实际逻辑的入口函数
+	static void corotineProc(void * args, transfer_t t);
+
+public:
+    /**
+     * 构造函数
+     */
+    TC_CoroutineInfo();
+
+    /**
+     * 构造函数
+     */
+    TC_CoroutineInfo(TC_CoroutineScheduler* scheduler, uint32_t iUid, stack_context stack_ctx);
+
+    /**
+     * 析构函数
+     */
+    ~TC_CoroutineInfo();
+
+    /**
+     * 注册协程实际的处理函数
+     */
+    void registerFunc(const std::function<void ()> &callback);
+
+    /**
+     * 设置协程的内存空间
+     */
+    void setStackContext(stack_context stack_ctx);
+
+    /**
+     * 获取协程的内存空间
+     */
+    inline stack_context& getStackContext() { return _stack_ctx; }
+
+    /**
+     * 获取协程所处的调度器
+     */
+    inline TC_CoroutineScheduler* getScheduler() { return _scheduler; }
+
+    /**
+     * 获取协程的标志
+     */
+    inline uint32_t getUid() const { return _uid; }
+
+    /**
+     * 设置协程的标志
+     */
+    inline void setUid(uint32_t iUid) { _uid = iUid; }
+
+    /**
+     * 获取协程的状态
+     */
+    inline CORO_STATUS getStatus() const { return _eStatus; }
+
+    /**
+     * 设置协程的状态
+     */
+    inline void setStatus(CORO_STATUS status) { _eStatus = status; }
+
+    /**
+     * 获取协程所处的上下文
+     */
+	inline fcontext_t getCtx() const { return _ctx; }
+	inline void setCtx(fcontext_t ctx) { _ctx = ctx; }
+public:
+    /*
+     * 双向链表指针
+     */
+    TC_CoroutineInfo*                _prev;
+    TC_CoroutineInfo*                _next;
+
+private:
+
+    /*
+     * 协程所属的调度器
+     */
+    TC_CoroutineScheduler*        _scheduler;
+
+    /*I
+     * 协程的标识
+     */
+    uint32_t                      _uid;
+
+    /*
+     * 协程的状态
+     */
+    CORO_STATUS                   _eStatus;
+
+    /*
+     * 协程的内存空间
+     */
+    stack_context                _stack_ctx;
+
+    /*
+     * 创建协程后,协程所在的上下文
+     */
+	fcontext_t					 _ctx = NULL;
+
+    /*
+     * 协程初始化函数入口函数
+     */
+    CoroutineFunc                _init_func;
+
+    /*
+     * 协程具体执行函数
+     */
+    std::function<void ()> 		_callback;
+};
+
+///////////////////////////////////////////
+/**
+ * 协程调度类
+ */
+class TC_CoroutineScheduler
+{    
+protected:
+    static thread_local shared_ptr<TC_CoroutineScheduler> g_scheduler;
+
+public:
+
+    /**
+     * 如果没有, 则创建(线程私有变量, 每个线程有一个)
+     */ 
+    static const shared_ptr<TC_CoroutineScheduler> &create();
+
+    /**
+     * 获取scheduler, 没有则返回null, (线程私有变量, 每个线程有一个)
+     */ 
+    static const shared_ptr<TC_CoroutineScheduler> &scheduler();
+
+    /**
+     * 释放协程调度器
+     */
+    static void reset();
+
+    /**
+     * 构造函数(每个线程最多有一个)
+     */
+    TC_CoroutineScheduler();
+
+    /**
+     * 析构函数
+     */
+    ~TC_CoroutineScheduler();
+
+    /**
+     * 初始化协程池的大小、以及协程的堆栈大小
+     */
+    void setPoolStackSize(uint32_t iPoolSize, size_t iStackSize);
+
+    /**
+     * 创建协程
+     */
+    uint32_t createCoroutine(const std::function<void ()> &callback);
+
+    /**
+     * 通知循环醒过来
+     */
+    void notify();
+
+    /**
+     * 启动协程调度(没有活跃协程会阻塞, 阻塞在epoll上)
+     */
+    void run();
+
+	/**
+	 * 已经在运行中了
+	 * @return
+	 */
+	bool isReady() const { return _ready; }
+
+	/**
+	 * 开启epoll模式, 调度器使用run时, 会阻塞在epollwait上, 该epoll对象可以用于网络
+	 */
+	inline TC_Epoller* getEpoller() { return _epoller; }
+
+	/**
+	 * 获取epoller对象
+	 * @return
+	 */
+	TC_Epoller* epoller() { return _epoller; }
+
+    /**
+     * 当前协程放弃继续执行
+     * @param bFlag: true, 会自动唤醒(等到下次协程调度, 都会再激活当前线程), false: 不再自动唤醒, 除非自己调度该协程(比如put到调度器中)
+     */
+    void yield(bool bFlag = true);
+
+    /**
+     * 当前协程休眠iSleepTime时间(单位:毫秒),然后会被唤醒继续执行
+     */
+    void sleep(int millseconds);
+
+    /**
+     * 放入需要唤醒的协程, 将协程放入到调度器中, 马上会被调度器调度
+     */
+    void put(uint32_t iCoroId);
+
+    /**
+     * 协程切换
+     */
+	void switchCoro(TC_CoroutineInfo *to);
+
+    /**
+     * 停止
+     */
+    void terminate();
+
+    /**
+     * 资源销毁
+     */
+    void destroy();
+
+    /**
+     * 协程调度是否已经结束
+     * @return
+     */
+    bool isTerminate() const { return _epoller->isTerminate(); }
+
+    /**
+     * 协程是否用完了
+     * @return
+     */
+    bool full();
+
+    /**
+     * 获取最大的协程数目
+     */
+    inline uint32_t getPoolSize() { return _poolSize; }
+
+    /**
+     * 获取当前已经创建的协程数目
+     */
+    inline uint32_t getCurrentSize() { return _currentSize; }
+
+    /**
+     * 获取请求响应回来的协程数目
+     */
+    inline size_t getResponseCoroSize() { return _activeCoroQueue.size(); }
+
+    /**
+     * 获取理论上空闲的协程数目
+     */
+    inline uint32_t getFreeSize() { return _poolSize - _usedSize; }
+
+    /**
+     * 减少正在使用的协程数目
+     */
+    inline void decUsedSize() { --_usedSize; }
+
+    /**
+     * 增加正在使用的协程数目
+     */
+    inline void incUsedSize() { ++_usedSize; }
+
+    /**
+     * 是否在主协程中
+     */
+    inline bool isMainCoroutine() { return _currentCoro->getUid() == 0; }
+    
+    /**
+     * 调度器中的主协程
+     */
+    inline TC_CoroutineInfo& getMainCoroutine() { return _mainCoro; }
+
+    /**
+     * 设置主协程
+     */
+	inline void setMainCtx(fcontext_t ctx) { _mainCoro.setCtx(ctx); }
+
+    /**
+     * 当前协程的标识Id
+     */
+    inline uint32_t getCoroutineId() { return _currentCoro->getUid(); }
+
+    /**
+     * 设置当前所有协程执行完毕时的回调
+     */
+    inline void setNoCoroutineCallback(std::function<void(TC_CoroutineScheduler*)> noCoroutineCallback) { _noCoroutineCallback = noCoroutineCallback; }
+
+    friend class TC_CoroutineInfo;
+
+protected:
+
+	/**
+	 * 初始化
+	 */
+	void init();
+
+	/**
+	 * 释放所有协程资源
+	 */
+	void createCoroutineInfo(size_t poolSize);
+
+    /**
+     * 产生协程id
+     */
+    uint32_t generateId();
+
+    /**
+     * 增加协程池的大小
+     */
+    int increaseCoroPoolSize();
+
+    /**
+     * 唤醒需要运行的协程
+     */
+    void wakeup();
+
+    /**
+     * 唤醒自己放弃运行的协程
+     */
+    void wakeupbyself();
+
+    /**
+     * 唤醒休眠的协程
+     */
+    void wakeupbytimeout();
+
+    /**
+     * 放到active的协程链表中
+     */
+    void moveToActive(TC_CoroutineInfo *coro);
+
+    /**
+     * 放到avail的协程链表中
+     */
+    void moveToAvail(TC_CoroutineInfo *coro);
+
+    /**
+     * 放到inactive的协程链表中
+     */
+    void moveToInactive(TC_CoroutineInfo *coro);
+
+    /**
+     * 放到超时等待的协程链表中
+     */
+    void moveToTimeout(TC_CoroutineInfo *coro);
+
+    /**
+     * 放到空闲的协程链表中
+     */
+    void moveToFreeList(TC_CoroutineInfo *coro);
+
+private:
+
+    /*
+     * 协程池的大小
+     */
+    uint32_t                _poolSize = 1000;
+
+    /*
+     * 协程的栈空间大小
+     */
+    size_t                  _stackSize = 128*1024;
+
+    /*
+     * 当前已经创建的协程数
+     */
+    uint32_t                _currentSize;
+
+    /*
+     * 正在使用的协程数
+     */
+    uint32_t                _usedSize;
+
+    /*
+     * 产生协程Id的变量
+     */
+    uint32_t                _uniqId;
+
+    /*
+     * 主协程
+     */
+    TC_CoroutineInfo        _mainCoro;
+
+    /*
+     * 当前运行的协程
+     */
+    TC_CoroutineInfo*       _currentCoro;
+
+    /*
+     * 存放所有协程的数组指针
+     */
+    TC_CoroutineInfo**      _all_coro = NULL;
+
+    /*
+     * 活跃的协程链表
+     */
+    TC_CoroutineInfo        _active;
+
+    /*
+     * 可用的协程链表
+     */
+    TC_CoroutineInfo        _avail;
+
+    /*
+     * 不活跃的协程链表
+     */
+    TC_CoroutineInfo        _inactive;
+
+    /*
+     * 超时的协程链表
+     */
+    TC_CoroutineInfo        _timeout;
+
+    /*
+     * 空闲的协程链表
+     */
+    TC_CoroutineInfo        _free;
+
+    /*
+     * 需要激活的协程队列,其他线程使用,用来激活等待结果的协程
+     */
+	deque<uint32_t>        _activeCoroQueue;
+
+	/*
+	 * 需要激活的协程队列,本线程使用
+	 */
+    list<uint32_t>        _needActiveCoroId;
+
+    /*
+     * 存放超时的协程
+     */
+    multimap<int64_t, uint32_t> _timeoutCoroId;
+
+    /**
+     * epoller
+     */
+    TC_Epoller*             _epoller = NULL;
+
+    /**
+     * 当协程都处理完毕后的回调
+     */ 
+    std::function<void(TC_CoroutineScheduler*)> _noCoroutineCallback;
+
+    /**
+     * 是否正在运行中
+     */
+    bool                    _ready = false;
+};
+
+/**
+ * 对线程进行包装的协程类,主要用于在自己起的线程中使用协程,
+ * 使用方式:
+ * 1 业务可以继承这个类
+ * 2 实现handleCoroutine函数(协程具体执行代码), 开发在这里面可以再启动更多的其他协程
+ * 3 调用start函数, 启动线程, 同时会创建iNum个协程, 调度器中最多存在iPoolSize个协程同时运行
+ * 4 terminate结束
+ */
+class TC_Coroutine : public TC_Thread
+{
+public:
+    /**
+     * 构造函数
+     */
+    TC_Coroutine();
+
+    /**
+     * 析构函数
+     */
+    virtual ~TC_Coroutine();
+
+    /**
+     * 初始化
+     * @iNum, 表示同时会启动多少个协程,即会有多少个coroFunc运行的协程
+     * @iPoolSize,表示这个线程调度器最多包含的协程个数
+     * @iStackSize,协程的栈大小
+     */
+    void setCoroInfo(uint32_t iNum, uint32_t iPoolSize, size_t iStackSize);
+
+    /**
+     * 创建协程,在已经创建的协程中使用
+     * 返回值为协程的id,大于0,表示成功,,小于等于0,表示失败
+     */
+    uint32_t createCoroutine(const std::function<void ()> &coroFunc);
+
+    /**
+     * 当前协程自己放弃执行,会自动被调度器唤醒
+     * 在已经创建的协程中使用
+     */
+    void yield();
+
+    /**
+     * 当前协程休眠iSleepTime时间(单位:毫秒),时间到了,会自动被调度器唤醒
+     * 在已经创建的协程中使用
+     */
+    void sleep(int millseconds);
+
+    /**
+     * 获取设置的最大协程的数目
+     */
+    uint32_t getMaxCoroNum() { return _maxNum; }
+
+    /**
+     * 获取启动时,设置的协程的数目
+     */
+    uint32_t getCoroNum() { return _num; }
+
+    /**
+     * 设置协程的栈大小
+     */
+    size_t getCoroStackSize() { return _stackSize; }
+
+    /**
+     * 停止
+     */
+    void terminate();
+
+protected:
+    /**
+     * 线程处理方法
+     */
+    virtual void run();
+
+    /**
+     *  静态函数, 协程入口. 
+     */
+    static void coroEntry(TC_Coroutine *pCoro);
+
+    /**
+     * 协程运行的函数,根据_num的数目,会启动_num个这个函数
+     */
+    virtual void handle() = 0;
+
+protected:
+    /**
+     * 线程已经启动, 进入协程处理前调用
+     */
+    virtual void initialize() {}
+
+    /**
+     * 所有协程停止运行之后,线程退出之前时调用
+     */
+    virtual void destroy() {}
+
+    /**
+     * 具体的处理逻辑
+     */
+    virtual void handleCoro();
+
+protected:
+    shared_ptr<TC_CoroutineScheduler> _coroSched;
+    uint32_t            _num;
+    uint32_t            _maxNum;
+    size_t              _stackSize;
+};
+
+}
+
+#endif

+ 363 - 0
util/include/util/tc_coroutine_queue.h

@@ -0,0 +1,363 @@
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#ifndef __TC_COROUTINE_QUEUE_H_
+#define __TC_COROUTINE_QUEUE_H_
+
+#include <deque>
+#include <vector>
+#include <cassert>
+#include <mutex>
+#include <unordered_set>
+#include "util/tc_coroutine.h"
+
+using namespace std;
+
+namespace tars
+{
+    /////////////////////////////////////////////////
+/** 
+ * @file tc_coroutine_queue.h
+ * @brief 协程队列类, 用于跨协程交互数据
+ *
+ * 使用说明:
+ * - TC_CoroutineQueue的目的在于在多个协程间交互数据
+ * - 一个协程push数据到队列, 另外的协程通过调用exec来完成数据获取, 如果没有数据, 则当前协程yield, 直到有数据被唤醒
+ * - 注意可以跨不同线程的协程传递数据
+ * @author ruanshudong@qq.com
+ */
+       
+/////////////////////////////////////////////////
+/**
+ * @brief 线程/协程安全队列, 必须在协程执行, 否则会异常!
+ */
+template<typename T, typename D = deque<T> >
+class TC_CoroutineQueue
+{
+public:
+    TC_CoroutineQueue() {};
+
+    typedef D queue_type;
+
+    /**
+     * @brief 当队列有数据时, 则执行函数, 如果没有数据则协程yield, 如果调用terminate, exec才返回
+     *
+     * @param t 
+     */
+    void exec(std::function<void(const T &)>);
+
+    /**
+     * @brief 结束队列, 注意会唤醒所有等待在队列上的协程(执行exec而阻塞的协程)
+     */
+    void terminate();
+
+    /**
+     * @brief 通知等待在队列上面的线程都醒过来
+     */
+    void notifyT();
+
+    /**
+	 * @brief 放数据到队列后端. 
+	 *  
+     * @param t
+     */
+    void push_back(const T& t, bool notify = true);
+
+    /**
+	 * @brief  放数据到队列后端. 
+	 *  
+     * @param vt
+     */
+    void push_back(const queue_type &qt, bool notify = true);
+
+    /**
+	 * @brief  放数据到队列前端. 
+	 *  
+     * @param t
+     */
+    void push_front(const T& t, bool notify = true);
+
+    /**
+	 * @brief  放数据到队列前端. 
+	 *  
+     * @param vt
+     */
+    void push_front(const queue_type &qt, bool notify = true);
+
+    /**
+	 * @brief  交换数据
+	 *  
+     * @param q
+     * @param 是否等待有数据
+     * @return 有数据返回true, 无数据返回false
+     */
+    bool swap(queue_type &q);
+
+    /**
+     * @brief  队列大小.
+     *
+     * @return size_t 队列大小
+     */
+    size_t size() const;
+
+    /**
+     * @brief  清空队列
+     */
+    void clear();
+
+    /**
+     * @brief  是否数据为空.
+     *
+     * @return bool 为空返回true,否则返回false
+     */
+    bool empty() const;
+
+protected:
+	TC_CoroutineQueue(const TC_CoroutineQueue&) = delete;
+	TC_CoroutineQueue(TC_CoroutineQueue&&) = delete;
+	TC_CoroutineQueue& operator=(const TC_CoroutineQueue&) = delete;
+	TC_CoroutineQueue& operator=(TC_CoroutineQueue&&) = delete;
+
+protected:
+    /**
+     * 队列
+     */
+    queue_type          _queue;
+
+	//锁
+    mutable std::mutex _mutex;
+
+    /**
+     * 协程调度器
+     */
+	unordered_set<shared_ptr<TC_CoroutineScheduler>> _schedulers;
+
+	/**
+	 * 结束协程exec
+	 */
+	bool _terminate = false;
+};
+
+//template<typename T, typename D> T TC_CoroutineQueue<T, D>::front()
+//{
+//	std::unique_lock<std::mutex> lock(_mutex);
+//
+//	return  _queue.front();
+//}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::terminate()
+{
+	_terminate = true;
+
+	{
+		std::lock_guard<std::mutex> lock(_mutex);
+		for(auto scheduler : _schedulers)
+		{
+			scheduler->notify();
+		}
+	}
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::exec(std::function<void(const T &)> func)
+{
+	assert(TC_CoroutineScheduler::scheduler() != NULL);
+
+	auto scheduler = TC_CoroutineScheduler::scheduler();
+
+	bool flag;
+
+	do
+	{
+		T t;
+		{
+			std::lock_guard<std::mutex> lock(_mutex);
+			if (_queue.empty()) {
+				flag = false;
+			}
+			else
+			{
+				flag = true;
+				t = _queue.front();
+				_queue.pop_front();
+			}
+		}
+
+		if(!flag)
+		{
+			{
+				std::lock_guard<std::mutex> lock(_mutex);
+				_schedulers.insert(scheduler);
+			}
+
+			scheduler->yield();
+
+			{
+				std::lock_guard<std::mutex> lock(_mutex);
+				_schedulers.erase(scheduler);
+			}
+		}
+		else
+		{
+			try { func(t); } catch(...) {}
+		}
+
+	}while(!_terminate && !scheduler->isTerminate());
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::notifyT()
+{
+	{
+		std::lock_guard<std::mutex> lock(_mutex);
+
+		if(!_schedulers.empty())
+		{
+			(*_schedulers.begin())->notify();
+		}
+	}
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::push_back(const T& t, bool notify)
+{
+    if(notify) {
+	    {
+		    std::unique_lock<std::mutex> lock(_mutex);
+
+		    _queue.push_back(t);
+	    }
+
+	    notifyT();
+
+    }
+    else
+    {
+        std::lock_guard<std::mutex> lock (_mutex);
+        _queue.push_back(t);
+    }
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::push_back(const queue_type &qt, bool notify)
+{
+    if(notify) {
+	    {
+		    std::unique_lock<std::mutex> lock(_mutex);
+
+		    typename queue_type::const_iterator it = qt.begin();
+		    typename queue_type::const_iterator itEnd = qt.end();
+		    while (it != itEnd) {
+			    _queue.push_back(*it);
+			    ++it;
+		    }
+	    }
+
+	    notifyT();
+    }
+    else
+    {
+        std::lock_guard<std::mutex> lock (_mutex);
+
+        typename queue_type::const_iterator it = qt.begin();
+        typename queue_type::const_iterator itEnd = qt.end();
+        while (it != itEnd) {
+            _queue.push_back(*it);
+            ++it;
+        }
+    }
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::push_front(const T& t, bool notify)
+{
+    if(notify) {
+	    {
+		    std::unique_lock<std::mutex> lock(_mutex);
+
+		    _queue.push_front(t);
+	    }
+
+	    notifyT();
+    }
+    else
+    {
+        std::lock_guard<std::mutex> lock (_mutex);
+
+        _queue.push_front(t);
+    }
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::push_front(const queue_type &qt, bool notify)
+{
+    if(notify) {
+	    {
+		    std::unique_lock<std::mutex> lock(_mutex);
+
+		    typename queue_type::const_iterator it = qt.begin();
+		    typename queue_type::const_iterator itEnd = qt.end();
+		    while (it != itEnd) {
+			    _queue.push_front(*it);
+			    ++it;
+		    }
+	    }
+
+	    notifyT();
+    }
+    else
+    {
+        std::lock_guard<std::mutex> lock (_mutex);
+
+        typename queue_type::const_iterator it = qt.begin();
+        typename queue_type::const_iterator itEnd = qt.end();
+        while (it != itEnd) {
+            _queue.push_front(*it);
+            ++it;
+        }
+    }
+}
+
+template<typename T, typename D> bool TC_CoroutineQueue<T, D>::swap(queue_type &q)
+{
+    {
+        std::lock_guard<std::mutex> lock (_mutex);
+
+        if (_queue.empty()) {
+            return false;
+        }
+
+        q.swap(_queue);
+
+        return true;
+    }
+}
+
+template<typename T, typename D> size_t TC_CoroutineQueue<T, D>::size() const
+{
+	std::lock_guard<std::mutex> lock(_mutex);
+    return _queue.size();
+}
+
+template<typename T, typename D> void TC_CoroutineQueue<T, D>::clear()
+{
+	std::lock_guard<std::mutex> lock(_mutex);
+    _queue.clear();
+}
+
+template<typename T, typename D> bool TC_CoroutineQueue<T, D>::empty() const
+{
+	std::lock_guard<std::mutex> lock(_mutex);
+    return _queue.empty();
+}
+
+}
+#endif
+

+ 2459 - 2398
util/include/util/tc_epoll_server.h

@@ -1,2398 +1,2459 @@
-/**
- * Tencent is pleased to support the open source community by making Tars available.
- *
- * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
- *
- * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
- * in compliance with the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/BSD-3-Clause
- *
- * Unless required by applicable law or agreed to in writing, software distributed 
- * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
- * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
- * specific language governing permissions and limitations under the License.
- */
-
-#ifndef __TARS_TC_EPOLL_SERVER_H_
-#define __TARS_TC_EPOLL_SERVER_H_
-
-#include <string>
-#include <memory>
-#include <map>
-#include <unordered_map>
-#include <vector>
-#include <list>
-#include <algorithm>
-#include <functional>
-#include "util/tc_epoller.h"
-#include "util/tc_thread.h"
-#include "util/tc_clientsocket.h"
-#include "util/tc_logger.h"
-#include "util/tc_common.h"
-#include "util/tc_network_buffer.h"
-#include "util/tc_cas_queue.h"
-#include "util/tc_openssl.h"
-
-using namespace std;
-
-namespace tars
-{
-
-/////////////////////////////////////////////////
-/**
- * @file  tc_epoll_server.h
- * @brief  EpollServer类
- * @brief  EpollServer Class
- *
- */
-/////////////////////////////////////////////////
-/**
- * Server基类
- * Server Basic Class
- * 注册协议解析器
- * Register Protocol Resolver
- * 注册逻辑处理器
- * Register Logical Processor
- * 注册管理端口处理器
- * Register Management Port Processor
- */
-
-class PropertyReport;
-
-class TC_EpollServer : public TC_HandleBase
-{
-public:
-
-    enum EM_CLOSE_T
-    {
-        /**Client active shutdown*/
-        EM_CLIENT_CLOSE = 0,         //客户端主动关闭
-        /**The service-side business proactively calls 'close' to close the connection, 
-         * or the framework proactively closes the connection due to an exception.*/
-        EM_SERVER_CLOSE = 1,        //服务端业务主动调用close关闭连接,或者框架因某种异常主动关闭连接
-        /**Connection timed out, server actively closed*/
-        EM_SERVER_TIMEOUT_CLOSE = 2  //连接超时了,服务端主动关闭
-    };
-
-    enum
-    {
-        /**Empty connection timeout (ms)*/
-        MIN_EMPTY_CONN_TIMEOUT  = 2*1000,    /*空链接超时时间(ms)*/
-        /**The size of received buffer of the default data*/
-        DEFAULT_RECV_BUFFERSIZE = 64*1024    /*缺省数据接收buffer的大小*/
-    };
-
-    //定义加入到网络线程的fd类别
-    //Define the FD categories added to network threads
-    enum CONN_TYPE
-    {
-        TCP_CONNECTION = 0,
-        UDP_CONNECTION = 1,
-    };
-
-    /**
-     * 定义协议解析接口的操作对象
-     * 注意必须是线程安全的或是可以重入的
-     * Define the Operating Object of the Protocol Resolution Interface
-     * Note that it must be thread safe or reentrant
-     */
-	typedef std::function<TC_NetWorkBuffer::PACKET_TYPE(TC_NetWorkBuffer::PACKET_TYPE, vector<char>&)> header_filter_functor;
-
-    class NetThread;
-    class Connection;
-    class BindAdapter;
-    typedef TC_AutoPtr<BindAdapter> BindAdapterPtr;
-
-    class Handle;
-    typedef TC_AutoPtr<Handle> HandlePtr;
-
-	class RecvContext;
-
	/**
	 * Context of an outgoing packet.
	 * Always created from a RecvContext (via RecvContext::createSendContext()
	 * or createCloseContext()), so the reply can be routed back through the
	 * exact connection (uid/fd) the request arrived on.
	 */
	class SendContext
	{
	public:
		SendContext(const shared_ptr<RecvContext> &context, char cmd) : _context(context), _cmd(cmd)
		{
			_sbuffer = std::make_shared<TC_NetWorkBuffer::Buffer>();
		}

		// Originating request context (carries uid/fd/ip/port of the connection).
		const shared_ptr<RecvContext> &getRecvContext() { return _context; }
		// Payload buffer to be written to the network.
		const shared_ptr<TC_NetWorkBuffer::Buffer> & buffer()       { return _sbuffer; }
		// Command: 'c' = close the fd, 's' = data to send.
		char cmd() const        { return _cmd; }
		// The accessors below delegate to the originating RecvContext.
		uint32_t uid() const    { return _context->uid(); }
		int fd() const          { return _context->fd(); }
		const string &ip() const { return _context->ip(); }
		uint16_t port() const   { return _context->port(); }

		friend class RecvContext;

	protected:
		shared_ptr<RecvContext>     _context;
        /**Only meaningful for send packets. Command: 'c' = close fd; 's' = data to send.*/
		char                        _cmd;
        /**Content to be sent.*/
		shared_ptr<TC_NetWorkBuffer::Buffer> _sbuffer;
	};
-
-	////////////////////////////////////////////////////////////////////////////
	/**
	 * Context of a received packet: identifies the connection it came from
	 * (uid/ip/port/fd), the owning BindAdapter, the received bytes, and
	 * close/overload state. Reply contexts are created from it via
	 * createSendContext()/createCloseContext().
	 */
	class RecvContext : public std::enable_shared_from_this<RecvContext>
	{
	public:
		// NOTE(review): port arrives as int64_t but is stored in a uint16_t
		// member — values outside [0, 65535] would be silently truncated;
		// presumably callers always pass a valid port. TODO confirm.
		RecvContext(uint32_t uid, const string &ip, int64_t port, int fd, const BindAdapterPtr &adapter, bool isClosed = false, int closeType = EM_CLIENT_CLOSE)
			: _uid(uid), _ip(ip), _port(port), _fd(fd), _adapter(adapter), _isClosed(isClosed), _closeType(closeType), _recvTimeStamp(TNOWMS)
		{
		}
		uint32_t uid()       const       { return _uid; }
		const string &ip()   const       { return _ip; }
		uint16_t port()      const       { return _port; }
		vector<char> &buffer()           { return _rbuffer; }
		const vector<char> &buffer()  const { return _rbuffer; }

		int64_t recvTimeStamp() const    { return _recvTimeStamp; }
		bool isOverload()       const    { return _isOverload; }
		void setOverload()               { _isOverload = true; }
		bool isClosed()         const    { return _isClosed; }
		int fd()             const       { return _fd; }
		BindAdapterPtr &adapter()    { return _adapter; }
		int closeType()      const       { return _closeType; }
		void setCloseType(int closeType) { _closeType = closeType;}

		// Factories for reply contexts: 's' = send data, 'c' = close connection.
		shared_ptr<SendContext> createSendContext()  { return std::make_shared<SendContext>(shared_from_this(), 's'); }
		shared_ptr<SendContext> createCloseContext() { return std::make_shared<SendContext>(shared_from_this(), 'c'); }

	protected:
        /**Connection identifier*/
		uint32_t        _uid;
		/**IP of the remote connection*/
        string          _ip;
		/**Port of the remote connection*/
        uint16_t        _port;
		/**The fd that produced this message; used to pick the network thread when replying*/
        int				_fd;
		/**Identifies which adapter this message belongs to*/
        BindAdapterPtr  _adapter;
		/**Received payload*/
        vector<char>    _rbuffer;
		/**Whether the connection is already overloaded*/
        bool            _isOverload = false;
		/**Whether the connection is already closed*/
        bool            _isClosed   = false;
		/**For close messages, the close type:
         * 0: client closed actively,
         * 1: server closed actively,
         * 2: connection timed out and the server closed it.
         */
        int             _closeType;
		/**Timestamp (ms) at which the data was received*/
        int64_t         _recvTimeStamp;
	};
-
-//	typedef TC_CasQueue<shared_ptr<RecvContext>> recv_queue;
-	typedef TC_ThreadQueue<shared_ptr<RecvContext>> recv_queue;
-//	typedef TC_CasQueue<shared_ptr<SendContext>> send_queue;
-	typedef TC_ThreadQueue<shared_ptr<SendContext>> send_queue;
-	typedef recv_queue::queue_type recv_queue_type;
-
-	////////////////////////////////////////////////////////////////////////////
-	/**
-	 * 链接状态
-     * Connection state
-	 */
-	struct ConnStatus
-	{
-		string          ip;
-		int32_t         uid;
-		uint16_t        port;
-		int             timeout;
-		int             iLastRefreshTime;
-		size_t          recvBufferSize;
-		size_t          sendBufferSize;
-	};
-
-    ////////////////////////////////////////////////////////////////////////////
-    /**
-     * @brief 定义服务逻辑处理的接口
-     * @brief Define interfaces for logical processing of services
-     *
-     */
-    /**
-     * 服务的逻辑处理代码
-     * Logical Processing Code for Services
-     */
-    class Handle : public TC_Thread, public TC_HandleBase
-    {
-    public:
-        /**
-         * 构造, 默认没有请求, 等待10s
-         * Constructor, default no request, wait 10s
-         */
-        Handle();
-
-        /**
-         * 析构函数
-         * Destructor
-         */
-        virtual ~Handle();
-
-        /**
-         * 获取服务
-         * Get Service
-         * @return TC_EpollServer*
-         */
-		TC_EpollServer* getEpollServer() const { return _pEpollServer; };
-
-		/**
-		 * 获取adapter
-         * Get adapter
-		 * @return
-		 */
-		BindAdapter *getBindAdapter() const { return _bindAdapter; }
-
-		/**
-		 * 获取Handle的索引(0~handle个数-1)
-         * Get the index of Handle(0~handle count-1)
-		 * @return
-		 */
-		uint32_t getHandleIndex() const { return _handleIndex; }
-
-		/**
-		 * 设置网络线程
-         * Set up network thread
-		 */
-		void setNetThread(NetThread *netThread);
-
-		/**
-		 * 获取网络线程
-         * Get network thread
-		 * @return
-		 */
-		NetThread *getNetThread() { return _netThread; }
-
-		/**
-		 * 处理
-         * Process
-		 */
-		void process(shared_ptr<RecvContext> data);
-
-        /**
-         * 线程处理方法
-         * Thread processing method
-         */
-        virtual void run();
-
-    public:
-        /**
-         * 发送数据
-         * Send data
-         * @param stRecvData
-         * @param sSendBuffer
-         */
-		void sendResponse(const shared_ptr<SendContext> &data);
-
-        /**
-         * 关闭链接
-         * Close connection
-         * @param stRecvData
-         */
-        void close(const shared_ptr<RecvContext> &data);
-
-        /**
-         * 设置等待时间
-         * Set up waiting time
-         * @param iWaitTime
-         */
-        void setWaitTime(uint32_t iWaitTime);
-
-        /**
-         * 对象初始化
-         * Object initialization
-         */
-        virtual void initialize() {};
-
-        /**
-         * 唤醒handle对应的处理线程
-         * Wake up the process thread corresponding to the handle
-         */
-        virtual void notifyFilter();
-
-        /**
-         * 心跳(每处理完一个请求或者等待请求超时都会调用一次)
-         * Heartbeat(Called every time a request has been processed or the waiting request has timed out)
-         */
-        virtual void heartbeat() {}
-
-    protected:
-        /**
-         * 具体的处理逻辑
-         * Specific processing logic
-         */
-        virtual void handleImp();
-
-		/**
-		 * 处理函数
-         * Processing Function
-		 * @param stRecvData: 接收到的数据
-         * @param stRecvData: received data
-		 */
-		virtual void handle(const shared_ptr<RecvContext> &data) = 0;
-
-		/**
-		 * 处理超时数据, 即数据在队列中的时间已经超过
-		 * 默认直接关闭连接
-         * Processing timeout data, i.e. data has been queued longer than
-         * Close connection directly by default
-		 * @param stRecvData: 接收到的数据
-         * @param stRecvData: received data
-		 */
-		virtual void handleTimeout(const shared_ptr<TC_EpollServer::RecvContext> &data);
-
-		/**
-		 * 处理连接关闭通知,包括
-         * Handle connection shutdown notifications, including:
-		 * 1.close by peer
-		 * 2.recv/send fail
-		 * 3.close by timeout or overload
-		 * @param stRecvData:
-		 */
-		virtual void handleClose(const shared_ptr<TC_EpollServer::RecvContext> &data);
-
-		/**
-		 * 处理overload数据 即数据队列中长度已经超过允许值
-		 * 默认直接关闭连接
-         * Processing overload data means that the length of the data queue has exceeded the allowable value
-         * Close connection directly by default
-		 * @param stRecvData: 接收到的数据
-         * @param stRecvData: received data 
-		 */
-		virtual void handleOverload(const shared_ptr<TC_EpollServer::RecvContext> &data);
-
-        /**
-         * 处理异步回调队列
-         * Handle asynchronous callback queues
-
-         */
-        virtual void handleAsyncResponse() {}
-
-           /**
-         * handleFilter拆分的第二部分,处理用户自有数据
-         * 非游戏逻辑可忽略bExpectIdle参数
-         * The second part of handleFilter splitting, dealing with user-owned data
-         * Non-game logic ignores bExpectIdle parameter
-         */
-        virtual void handleCustomMessage(bool bExpectIdle = false) {}
-
-        /**
-         * 线程已经启动, 进入具体处理前调用
-         * Thread has been started and called before entering specific processing
-         */
-        virtual void startHandle() {}
-
-        /**
-         * 线程马上要退出时调用
-         * Call when the thread is about to exit
-         */
-        virtual void stopHandle() {}
-
-        /**
-         * 是否所有的Adpater队列都为空
-         * Whether all adapter queues are empty.
-         * @return bool
-         */
-        virtual bool allAdapterIsEmpty();
-
-		/**
-		 * 是否所有的servant都没有resp消息待处理
-         * Whether all servant don't have resp message to deal.
-		 * @return bool
-		 */
-		virtual bool allFilterIsEmpty();
-
-		/**
-		 * 设置服务
-         * Set up service
-		 * @param pEpollServer
-		 */
-		void setEpollServer(TC_EpollServer *pEpollServer);
-
-		/**
-		 * 设置Adapter
-         * Set up Apdater
-		 * @param pEpollServer
-		 */
-		void setBindAdapter(BindAdapter*  bindAdapter);
-
-		/**
-		 * 设置index
-         * Set up index
-		 * @param index
-		 */
-		void setHandleIndex(uint32_t index);
-
-		/**
-		 * 等待在队列上
-         * On the waiting queue
-		 */
-		void wait();
-
-		/**
-		 * 从队列中获取数据
-         * Receive data from the queue
-		 * @param recv
-		 * @return
-		 */
-		bool popRecvQueue(shared_ptr<RecvContext> &recv);
-
-        /**
-         * 友元类
-         * Friend Class
-         */
-        friend class BindAdapter;
-    protected:
-        /**
-         * 服务
-         * Service
-         */
-        TC_EpollServer  *_pEpollServer;
-
-		/**
-		 * handle对应的网路线程(网络线程和handle线程合并的情况下有效)
-         * Network threads corresponding to handle (valid when network threads and handle threads are merged)
-		 */
-		NetThread       *_netThread = NULL;
-
-		/**
-		 * 所属handle组
-         * The handle group to which one belongs
-		 */
-		BindAdapter*    _bindAdapter;
-
-		/**
-		 * 等待时间
-         * Waiting time
-		 */
-		uint32_t        _iWaitTime;
-
-		/**
-		 * Handle的索引
-         * Index of the Handle
-		 */
-		uint32_t        _handleIndex;
-
-    };
-
-    using close_functor = std::function<void (void*, EM_CLOSE_T )>;
-    using auth_process_wrapper_functor = std::function<bool (Connection *c, const shared_ptr<RecvContext> &recv )>;
-
-    ////////////////////////////////////////////////////////////////////////////
-    // 服务端口管理,监听socket信息
-    // Service port management, listening for socket information
-    class BindAdapter : public TC_HandleBase
-    {
-    public:
-        /**
-         * 缺省的一些定义
-         * Defualt definitions
-         */
-        enum
-        {
-            /**Flow*/
-            DEFAULT_QUEUE_CAP       = 10*1024,    /**流量*/
-            /**Queue minimum timeout (ms)*/
-            MIN_QUEUE_TIMEOUT       = 3*1000,     /**队列最小超时时间(ms)*/
-            /**Default maximum connections*/
-            DEFAULT_MAX_CONN        = 1024,       /**缺省最大连接数*/
-            /**Default queue timeout (ms)*/
-            DEFAULT_QUEUE_TIMEOUT   = 60*1000,    /**缺省的队列超时时间(ms)*/
-        };
-        /**
-         * 顺序
-         * Order
-         */
-        enum EOrder
-        {
-            ALLOW_DENY,
-            DENY_ALLOW
-        };
-
-		/**
-		 * 数据队列
-         * Data Queue
-		 */
-		struct DataQueue
-		{
-			/**
-			 * 接收的数据队列
-             * Received data queue
-			 */
-			recv_queue      _rbuffer;
-
-			/**
-			 * 锁
-             * Lock
-			 */
-			TC_ThreadLock   _monitor;
-		};
-
-        /**
-         * 构造函数
-         * Constructor
-         */
-        BindAdapter(TC_EpollServer *pEpollServer);
-
-        /**
-         * 析够函数
-         * Destructor
-         */
-        ~BindAdapter();
-
-		/**
-         * 设置需要手工监听
-         * Set requires manual listening
-         */
-		void enableManualListen() { _manualListen = true; }
-
-		/**
-		 * 是否手工监听端口
-         * Whether to manual listen the port or not
-		 * @return
-		 */
-		bool isManualListen() const { return _manualListen; }
-
-		/**
-         * 手工绑定端口
-         * Manual port binding
-         */
-		void manualListen();
-
-        /**
-         * 设置adapter name
-         * Set up adapter name
-         * @param name
-         */
-        void setName(const string &name);
-
-        /**
-         * 获取adapter name
-         * Get adapter name
-         * @return string
-         */
-        string getName() const;
-
-		/**
-		 * 增加处理线程对应的接收队列
-         * Add the corresponding receiving queue for processing threads
-		 * @return string
-		 */		
-		void initThreadRecvQueue(uint32_t handeIndex);
-
-        /**
-         * 获取queue capacity
-         * Get queue capacity
-         * @return int
-         */
-        int getQueueCapacity() const;
-
-        /**
-         * 设置queue capacity
-         * Set up queue capacity
-         * @param n
-         */
-        void setQueueCapacity(int n);
-
-        /**
-         * 设置协议名称
-         * Set up the protocol name
-         * @param name
-         */
-        void setProtocolName(const string& name);
-
-        /**
-         * 获取协议名称
-         * Get the protocol name
-         * @return const string&
-         */
-        const string& getProtocolName();
-
-        /**
-         * 是否tars协议
-         * Whether it is the tars protocol
-         * @return bool
-         */
-        bool isTarsProtocol();
-
-        /**
-         * 判断是否需要过载保护
-         * Determine whether it needs overload protection
-         * @return bool
-         */
-        int isOverloadorDiscard();
-
-        /**
-         * 设置消息在队列中的超时时间, t为毫秒
-         * (超时时间精度只能是s)
-         * Set the timeout time of the message in the queue, t is milliseconds
-         * (timeout precision can only be s)
-         * 
-         * @param t
-         */
-        void setQueueTimeout(int t);
-
-        /**
-         * 获取消息在队列中的超时时间, 毫秒
-         * Get timeout of message in queue, MS
-         * @return int
-         */
-        int getQueueTimeout() const;
-
-        /**
-         * 设置endpoint
-         * Set up endpoint
-         * @param str
-         */
-        void setEndpoint(const string &str);
-
-        /**
-         * 获取ip
-         * Get ip
-         * @return const string&
-         */
-        TC_Endpoint getEndpoint() const;
-
-        /**
-         * 监听socket
-         * Listen socket
-         * @return TC_Socket
-         */
-        TC_Socket &getSocket();
-
-        /**
-         * 设置最大连接数
-         * Set the maximum connection number
-         * @param iMaxConns
-         */
-        void setMaxConns(int iMaxConns);
-
-        /**
-         * 获取最大连接数
-         * Get the maximum connection number
-         * @return size_t
-         */
-        size_t getMaxConns() const;
-
-        /**
-         * 设置HeartBeat时间
-         * Set the HeartBeat time
-         * @param n
-         */
-        void setHeartBeatTime(time_t t);
-
-        /**
-         * 获取HeartBeat时间
-         * Get the HeartBeat time
-         * @return size_t
-         */
-        time_t getHeartBeatTime() const;
-
-        /**
-         * 设置allow deny次序
-         * Set the allow deny order
-         * @param eOrder
-         */
-        void setOrder(EOrder eOrder);
-
-        /**
-         * 设置允许ip
-         * Set allowed ip
-         * @param vtAllow
-         */
-        void setAllow(const vector<string> &vtAllow);
-
-        /**
-         * 设置禁止ip
-         * Set the disabled ip
-         * @param vtDeny
-         */
-        void setDeny(const vector<string> &vtDeny);
-
-        /**
-         * 获取允许ip
-         * Get the allowed ip
-         * @return vector<string>: ip列表
-         * @return vector<string>: ip list
-         */
-        const vector<string> &getAllow() const;
-
-         /**
-         * 获取禁止ip
-         * Get the disable ip
-         * @return vector<string>: ip列表
-         * @return vector<string>: ip list
-         */
-        const vector<string> &getDeny() const;
-
-         /**
-         * 获取allow deny次序
-         * Get the allow deny order
-         * @return EOrder
-         */
-        EOrder getOrder() const;
-
-        /**
-         * 是否Ip被允许
-         * Whether the ip is allowed or not
-         * @param ip
-         * @return bool
-         */
-        bool isIpAllow(const string& ip) const;
-
-        /**
-         * 是否超过了最大连接数
-         * Whether it exceeds the maximum connection number
-         * @return bool
-         */
-        bool isLimitMaxConnection() const;
-
-        /**
-         * 减少当前连接数
-         * Reduce current connections
-         */
-        void decreaseNowConnection();
-
-		/**
-		 * 增加当前连接数
-         * Increase current connections
-		 */
-		void increaseNowConnection();
-
-		/**
-		 * 获取所有链接状态
-         * Get all connection states
-		 * @return ConnStatus
-		 */
-		vector<ConnStatus> getConnStatus();
-
-		/**
-		 * 获取当前连接数
-         * Get current connections
-		 * @return int
-		 */
-		int getNowConnection() const;
-
-		/**
-		 * 获取服务
-         * Get service
-		 * @return TC_EpollServer*
-		 */
-		TC_EpollServer* getEpollServer() const { return _pEpollServer; };
-
-		/**
-		 * 获取对应的网络线程
-         * Get the corresponding network thread
-		 * @param fd
-		 * @return
-		 */
-		inline NetThread* getNetThreadOfFd(int fd) const { return _pEpollServer->getNetThreadOfFd(fd); }
-
-		/**
-		 * 注册协议解析器
-         * Registration Protocol parser
-		 * @param pp
-		 */
-		void setProtocol(const TC_NetWorkBuffer::protocol_functor& pf, int iHeaderLen = 0, const header_filter_functor& hf = echo_header_filter);
-
-		/**
-		 * 获取协议解析器
-         * Get Registration Protocol parser
-		 * @return protocol_functor&
-		 */
-		TC_NetWorkBuffer::protocol_functor &getProtocol();
-
-		/**
-		 * 解析包头处理对象
-         * Resolve Package Header Processing Objects
-		 * @return protocol_functor&
-		 */
-		header_filter_functor &getHeaderFilterFunctor();
-
-		/**
-		 * 增加数据到队列中
-         * Add data to the queue
-		 * @param vtRecvData
-		 * @param bPushBack 后端插入
-         * @param bPushBack Backend insert
-		 * @param sBuffer
-		 */
-		void insertRecvQueue(const shared_ptr<RecvContext> &recv);//, bool bPushBack = true);
-
-		/**
-		 * 等待数据
-         * Wait for data
-		 * @return bool
-		 */
-		bool waitForRecvQueue(uint32_t handleIndex, shared_ptr<RecvContext> &recv);
-
-		/**
-		 * 接收队列的大小
-         * Size of the received queue
-		 * @return size_t
-		 */
-		size_t getRecvBufferSize() const;
-
-		/**
-		 * 发送队列的大小
-         * Size of the sent queue
-		 * @return size_t
-		 */
-		size_t getSendBufferSize() const;
-
-		/**
-		 * add send buffer size
-		 */
-		inline void increaseSendBufferSize() { ++_iSendBufferSize; }
-
-		/**
-		 * increase send buffer size
-		 */
-		inline void decreaseSendBufferSize(size_t s = 1) { _iSendBufferSize.fetch_sub(s); }
-
-		/**
-		 * 默认的协议解析类, 直接echo
-         * Default protocol resolution class, direct echo
-		 * @param r
-		 * @param o
-		 * @return int
-		 */
-		static TC_NetWorkBuffer::PACKET_TYPE echo_protocol(TC_NetWorkBuffer &r, vector<char> &o);
-
-		/**
-		 * 默认的包头处理
-         * Default header handling
-		 * @param i
-		 * @param o
-		 * @return int
-		 */
-		static TC_NetWorkBuffer::PACKET_TYPE echo_header_filter(TC_NetWorkBuffer::PACKET_TYPE i, vector<char> &o);
-
-        /**
-         * 获取需要过滤的包头长度
-         * Get the header length that needs to be filtered
-         */
-        int getHeaderFilterLen();
-
-        /**
-         * 所属handle组的handle数(每个handle一个对象)
-         * Number of handles belonging to the handle group (one object per handle)
-         * @return int
-         */
-        int getHandleNum();
-
-		/**
-		 * 初始化处理线程,线程将会启动
-         * Initialize the processing thread, which will start
-		 */
-		template<typename T, typename ...Args>
-		void setHandle(size_t n, Args&&... args)
-		{
-			if(!_handles.empty())
-			{
-                getEpollServer()->error("[BindAdapter::setHandle] handle is not empty!");
-				return;
-			}
-
-			_iHandleNum = n;
-
-			_threadDataQueue.resize(_iHandleNum + 1);
-			_threadDataQueue[0] = std::make_shared<BindAdapter::DataQueue>();
-
-			if(_pEpollServer->isMergeHandleNetThread())
-			{
-				_iHandleNum = _pEpollServer->_netThreadNum;
-			}
-
-			for (int32_t i = 0; i < _iHandleNum ; ++i)
-			{
-				HandlePtr handle = new T(args...);
-
-				handle->setHandleIndex(i);
-
-				handle->setEpollServer(this->getEpollServer());
-
-				handle->setBindAdapter(this);
-
-				_handles.push_back(handle);
-			}
-		}
-
-		/**
-		 * 获取第几个句柄
-         * Get the index of the handle
-		 * @param index
-		 * @return
-		 */
-		HandlePtr getHandle(size_t index) {
-			assert(index <= _iHandleNum);
-			assert(getEpollServer()->isMergeHandleNetThread());
-			return _handles[index];
-		}
-
-		/*
-		 * 设置服务端积压缓存的大小限制(超过大小启用)
-         * Set the size limit of the server's backlog cache (exceeding the size enabled)
-		 */
-		void setBackPacketBuffLimit(size_t iLimitSize) { _iBackPacketBuffLimit = iLimitSize; }
-
-		/**
-		 * 获取服务端回包缓存的大小限制(超过大小启用)
-         * Get the size limit of the server-side packet back cache (exceeding the size enabled)
-		 */
-		size_t getBackPacketBuffLimit() const { return _iBackPacketBuffLimit; }
-
-		/*
-		 * 设置服务端5/s最低发送字节
-         * Set the Server 5/s Minimum Sending Bytes
-		 */
-		void setBackPacketBuffMin(size_t iMinLimit) { _iBackPacketBuffMin = iMinLimit; }
-
-		/**
-		 * 获取服务端5/s最低发送字节
-         * Get the Server 5/s Minimum Sending Bytes
-		 */
-		size_t getBackPacketBuffMin() const { return _iBackPacketBuffMin; }
-
-		/**
-		 * 获取服务端接收队列(如果_rnbuffer有多个, 则根据调用者的线程id来hash获取)
-         * Get the server receive queue (if there's more than one _rnbuffer, get from the hash based on the caller's thread id)
-         * 
-		 */
-		recv_queue &getRecvQueue(uint32_t handleIndex);
-
-		/**
-		 * 获取handles
-         * Get handles
-		 */
-		const vector<HandlePtr> &getHandles() { return _handles; }
-
-		/**
-		 * 是否是队列模式(默认是False的)
-         * Whether it is the queue mode (Defualt false)
-		 */
-		bool isQueueMode() const { return _queueMode; }
-
-		/**
-		 * 开启队列模式(同一个连接的请求, 落在同一个handle处理线程中)
-         * Open queue mode (The requests from the same connecion will fall in the same handle processing thread )
-		 */
-		void enableQueueMode() { _queueMode = true; }
-
-        /**
-         * 等待队列数据
-         * Wait for the queue data
-         */
-        void waitAtQueue(uint32_t handleIndex, uint32_t waitTime);
-
-        /**
-         * 通知某个具体handle醒过来
-         * Notify a specific handle to wake up
-         * @param handleIndex
-         */
-        void notifyHandle(uint32_t handleIndex);
-
-        /**  
-         * 设置close回调函数 
-         * Set close callback function
-         */
-        void setOnClose(const close_functor& f) { _closeFunc = f; } 
-
-        /**
-         * 注册鉴权包裹函数
-         * Regist Authentication Package Function
-         * @param apwf
-         */
-        void setAuthProcessWrapper(const auth_process_wrapper_functor& apwf) { _authWrapper = apwf; }
-
-        void setAkSk(const std::string& ak, const std::string& sk) { _accessKey = ak; _secretKey = sk; }
-
-        bool checkAkSk(const std::string& ak, const std::string& sk) { return ak == _accessKey && sk == _secretKey; }
-
-        std::string getSk(const std::string& ak) const { return (_accessKey == ak) ? _secretKey : ""; }
-
-        void setSSLCtx(const shared_ptr<TC_OpenSSL::CTX>& ctx) { _ctx = ctx; }
-        shared_ptr<TC_OpenSSL::CTX> getSSLCtx() { return _ctx; };
-
-	private:
-		/**
-		 * 获取等待的队列锁
-         * Get the waiting queue lock
-		 * @return
-		 */
-		TC_ThreadLock &getLock(uint32_t handleIndex);
-
-    public:
-
-        //统计上报的对象
-        //Count reporting objects
-        PropertyReport * _pReportQueue      = NULL;
-        PropertyReport * _pReportConRate    = NULL;
-        PropertyReport * _pReportTimeoutNum = NULL;
-
-    protected:
-        friend class TC_EpollServer;
-
-		/**
-		 * 加锁
-         * Add lock
-		 */
-		mutable std::mutex		_mutex;
-
-        /**
-         * 服务
-         * Service
-         */
-        TC_EpollServer  *_pEpollServer = NULL;
-
-		/**
-		 * Adapter所用的HandleGroup
-         * the HandleGroup used by Adapter
-		 */
-		vector<HandlePtr> _handles;
-
-        /**
-         * 协议解析
-         * Destruct the protocol
-         */
-        TC_NetWorkBuffer::protocol_functor _pf;
-
-        /**
-         * 首个数据包包头过滤
-         * First packet header filtering
-         */
-        header_filter_functor _hf;
-
-        /**
-         * adapter的名字
-         * adapter name
-         */
-        string          _name;
-
-        /**
-         * 监听fd
-         * listen fd
-         */
-        TC_Socket       _s;
-
-        /**
-         * 绑定的IP
-         * binded ip
-         */
-        TC_Endpoint     _ep;
-
-        /**
-         * 最大连接数
-         * the maximum number of connections
-         */
-        int             _iMaxConns;
-
-        /**
-         * 当前连接数
-         * the current number of connections
-         */
-        std::atomic<int> _iCurConns;
-
-        /**
-         * Handle个数
-         * the number of Handle
-         */
-        size_t          _iHandleNum;
-
-        /**
-         * 允许的Order
-         * the Allowed Order
-         */
-        volatile EOrder _eOrder;
-
-        /**
-         * 允许的ip
-         * the Allowed IP
-         */
-        vector<string>  _vtAllow;
-
-        /**
-         * 禁止的ip
-         * the Disabled IP
-         */
-        vector<string>  _vtDeny;
-
-		/**
-		 * 每个线程都有自己的队列
-		 * 0: 给共享队列模式时使用
-		 * 1~handle个数: 队列模式时使用
-         * Every thread has its own queue.
-         * 0: Use when sharing queue mode
-         * 1~handle count: Use when queue mode
-		 */
-		vector<shared_ptr<DataQueue>> _threadDataQueue;
-
-		/**
-		 * 接收队列数据总个数
-         * the total amount of the received queue data
-		 */
-		atomic<size_t>  _iRecvBufferSize{0};
-
-		/**
-		 * 发送队列数据总个数
-         * the total amount of the sent queue data
-		 */
-		atomic<size_t>  _iSendBufferSize{0};
-
-		/**
-		 * 队列最大容量
-         * the maximum capacity of the queue
-		 */
-		int             _iQueueCapacity;
-
-        /**
-         * 消息超时时间(从入队列到出队列间隔)(毫秒)
-         * Message timeout (from queue entry to queue exit interval) (milliseconds)
-         */
-        int             _iQueueTimeout;
-
-        /**
-         * 首个数据包包头长度
-         * First packet header length
-         */
-        int             _iHeaderLen;
-
-        /**
-         * 上次心跳发送时间
-         * Last heartbeat sent time
-         */
-        volatile time_t          _iHeartBeatTime;
-
-        /**
-         * 协议名称,缺省为"tars"
-         * Protocol name, default is "tars"
-         */
-        string                  _protocolName;
-
-		/**
-		 * 回包缓存限制大小
-         * Packet Back Cache Limit Size
-		 */
-		size_t					_iBackPacketBuffLimit = 0;
-
-		/**
-		 * 回包速度最低限制(5/s), 默认1K
-         * Minimum Packet Return Speed Limit (5/s), default 1K
-		 */
-		size_t					_iBackPacketBuffMin = 1024;
-
-		//队列模式
-        //Queue Mode
-		bool _queueMode 		= false;
-
-		//listen模式
-        //Listen Mode
-		bool _manualListen		= false;
-
-        /**
-         * 包裹认证函数,不能为空
-         * Package authentication function, cannot be empty
-         */
-        auth_process_wrapper_functor _authWrapper;
-
-        /**
-         * 该obj的AK SK
-         * the AK  SK of the object
-         */
-        std::string              _accessKey;
-        std::string              _secretKey;
-
-        /**
-         * ssl ctx
-         */
-	    shared_ptr<TC_OpenSSL::CTX> _ctx;
-
-        //连接关闭的回调函数 
-        //Callback function with connection closed
-        close_functor           _closeFunc;
-
-    };
-
-    ////////////////////////////////////////////////////////////////////////////
-    // 服务连接管理
-    // Service Connection Management
-    /**
-     *  建立连接的socket信息
-     *  Socket information for establishing connections
-     */
-    class Connection
-    {
-    public:
-        enum EnumConnectionType
-        {
-            EM_TCP = 0,
-            EM_UDP = 1,
-        };
-
-        /**
-         * 构造函数
-         * Constructor
-         * @param lfd
-         * @param s
-         * @param ip
-         * @param port
-         */
-        Connection(BindAdapter *pBindAdapter, int lfd, int timeout, int fd, const string& ip, uint16_t port);
-
-        /**
-         * udp连接
-         * UDP connection
-         * @param fd
-         */
-        Connection(BindAdapter *pBindAdapter, int fd);
-
-        /**
-         * 析构函数
-         * Destructor
-         */
-        virtual ~Connection();
-
-        /**
-         * 链接所属的adapter
-         * the adapter of the connection
-         */
-		BindAdapterPtr& getBindAdapter()       { return _pBindAdapter; }
-
-        /**
-         * 初始化
-         * Initialization
-         * @param id, 连接的唯一id
-         * @param id, the connection unique id
-         */
-        void init(unsigned int uid)         { _uid = uid; }
-
-        /**
-         * 获取连接超时时间
-         * Get connection timeout
-         *
-         * @return int
-         */
-        int getTimeout() const              { return _timeout; }
-
-        /**
-         * 获取线程的惟一id
-         * Get thread unique id
-         *
-         * @return unsigned int
-         */
-        uint32_t getId() const              { return _uid; }
-
-        /**
-         * 获取监听fd
-         * Get listening id
-         *
-         * @return int
-         */
-        int getListenfd() const             { return _lfd; }
-
-        /**
-         * 当前连接fd
-         * Current connection fd
-         *
-         * @return int
-         */
-        int getfd() const                   { return _sock.getfd(); }
-
-        /**
-         * 是否有效
-         * Whether it is valid
-         *
-         * @return bool
-         */
-        bool isValid() const                { return _sock.isValid();}
-
-        /**
-         * 远程IP
-         * Remote IP
-         *
-         * @return string
-         */
-        string getIp() const                { return _ip; }
-
-        /**
-         * 远程端口
-         * Remote Port
-         *
-         * @return uint16_t
-         */
-        uint16_t getPort() const            { return _port; }
-
-        /**
-         * 设置首个数据包包头需要过滤的字节数
-         * Set the number of bytes the first packet header needs to filter
-         */
-        void setHeaderFilterLen(int iHeaderLen)     { _iHeaderLen = iHeaderLen; }
-
-        /**
-         * 设置关闭,发送完当前数据就关闭连接
-         * Set shutdown to close connection after sending current data
-         */
-        bool setClose();
-
-        /**
-         * 获取连接类型
-         * Get the type of the connection
-         */
-        EnumConnectionType getType() const          { return _enType; }
-
-	    /**
-	 	* 是否是空连接
-        * Whether there's empty connection.
-	 	*/
-        bool isEmptyConn() const  {return _bEmptyConn;}
-
-        /**
-         * Init Auth State;
-         */
-        void tryInitAuthState(int initState);
-
-		/**
-		 * 接收数据buffer
-         * Receive data buffer
-		 */
-		TC_NetWorkBuffer &getRecvBuffer() { return _recvBuffer; }
-
-		/**
-		 * 发送数据buffer
-         * Send data buffer
-		 */
-		TC_NetWorkBuffer &getSendBuffer() { return _sendBuffer; }
-
-		/**
-		 * 发送buffer里面数据
-         * Send the data in the bufer
-		 * @return
-		 */
-		int sendBuffer();
-    
-        /**
-         * 直接发送裸得应答数据,业务层一般不直接使用,仅仅tcp支持
-         * send naked response data
-         * @param buffer
-         * @return int, -1:发送出错, 0:无数据, 1:发送完毕, 2:还有数据
-         * @return int, -1: sending error, 0: no data, 1: send completely, 2: data retains
-         * @return
-         */
-        int sendBufferDirect(const std::string& buff);
-
-        /**
-         * 直接发送裸得应答数据,业务层一般不直接使用,仅仅tcp支持
-         * send naked response data
-         * @param buffer
-         * @return int, -1:发送出错, 0:无数据, 1:发送完毕, 2:还有数据
-         * @return int, -1: sending error, 0: no data, 1: send completely, 2: data retains
-         * @return
-         */
-        int sendBufferDirect(const std::vector<char>& buff);
-
-	    /**
-		 * 关闭连接
-		 * Close the connection
-		 * @param fd
-		 */
-	    void close();
-
-	    friend class NetThread;
-
-    protected:
-
-		/**
-		 * 添加发送buffer
-         * Add sanding buffer
-		 * @param buffer
-		 * @return int, -1:发送出错, 0:无数据, 1:发送完毕, 2:还有数据
-         * @return int, -1: sending error, 0: no data, 1: send completely, 2: data retains
-		 */
-		int send(const shared_ptr<SendContext> &data);
-
-		/**
-		 * 读取数据
-         * Read data
-		 * @param fd
-		 * @return int, -1:接收出错, 0:接收不全, 1:接收到一个完整包
-         * @return int, -1: received error, 0: not receive completely, 1: receive a complete package
-		 */
-		int recv();
-
-		/**
-		* 接收TCP
-        * Receive TCP
-		*/
-		int recvTcp();
-
-		/**
-		* 接收Udp
-        * Receive UDP
-		*/
-		int recvUdp();
-
-        /**
-         * 解析协议
-         * Destruct protocol
-         * @param o
-         */
-        int parseProtocol(TC_NetWorkBuffer &rbuf);
-
-        /**
-         * 增加数据到队列中
-         * Add data to the queue
-         * @param vtRecvData
-         */
-		void insertRecvQueue(const shared_ptr<RecvContext> &recv);
-
-        /**
-         * 对于udp方式的连接,分配指定大小的接收缓冲区
-         * For udp-mode connections, allocate receive buffers of a specified size
-         *@param nSize
-            */
-        bool setRecvBuffer(size_t nSize=DEFAULT_RECV_BUFFERSIZE);
-
-		/**
-		 * 是否是tcp连接
-         * Whether it is TCP connection.
-		 * @return
-		 */
-		bool isTcp() const { return _lfd != -1; }
-
-    public:
-        /**
-         * 最后刷新时间
-         * Last refresh time
-         */
-        time_t              _iLastRefreshTime;
-
-    protected:
-
-        /**
-         * 适配器
-         * Adapter
-         */
-		BindAdapterPtr      _pBindAdapter;
-
-        /**
-         * TC_Socket
-         */
-        TC_Socket           _sock;
-
-        /**
-         * 连接的唯一编号
-         * the unique id of the connection
-         */
-        volatile uint32_t   _uid;
-
-        /**
-         * 监听的socket
-         * Listening socket
-         */
-        int                 _lfd;
-
-        /**
-         * 超时时间
-         * Timeout
-         */
-        int                 _timeout;
-
-        /**
-         * ip
-         */
-        string              _ip;
-
-        /**
-         * 端口
-         * Port
-         */
-        uint16_t             _port;
-
-        /**
-         * 接收数据buffer
-         * the buffer to receive data
-         */
-        TC_NetWorkBuffer     _recvBuffer;
-
-        /**
-         * 发送数据buffer
-         * the buffer to send data
-         */
-        TC_NetWorkBuffer    _sendBuffer;
-
-		/**
-		 * 发送数据
-         * Send data
-		 */
-		size_t              _sendBufferSize = 0;
-
-		/**
-		 * 检查时间
-         * Check time
-		 */
-		time_t              _lastCheckTime = 0;
-
-		/**
-		 * 发送的检查<已经发送数据, 剩余buffer大小>
-         * Check Sent <Data Sent, Remaining Buffer Size>
-		 */
-		vector<pair<size_t, size_t>> _checkSend;
-
-		/**
-		 * 需要过滤的头部字节数
-         * Number of header bytes to filter
-		 */
-		int                 _iHeaderLen;
-
-        /**
-         * 发送完当前数据就关闭连接
-         * Close connection after sending current data
-         */
-        bool                _bClose;
-
-        /**
-         * 连接类型
-         * Connection Type
-         */
-        EnumConnectionType  _enType;
-
-        bool                _bEmptyConn;
-
-        /*
-        *接收数据的临时buffer,加这个目的是对udp接收数据包大小进行设置
-        *Temporary buffer to receive data, plus this is to set the UDP receive packet size
-        */
-        char                *_pRecvBuffer = NULL;
-
-        size_t              _nRecvBufferSize;
-
-    public:
-        /*
-        *该连接的鉴权状态
-        *Authentication status of the connection
-        */
-        int                 _authState;
-        /*
-        *该连接的鉴权状态是否初始化了
-        */
-        bool                _authInit;
-
-        std::shared_ptr<TC_OpenSSL> _openssl;
-    };
-    ////////////////////////////////////////////////////////////////////////////
-    /**
-     * 带有时间链表的map
-     * Map with Time Chain Table
-     */
-    class ConnectionList
-    {
-    public:
-        /**
-         * 构造函数
-         * Constructor
-         */
-        ConnectionList(NetThread *pEpollServer);
-
-        /**
-         * 析够函数
-         * Destructor
-         */
-        ~ConnectionList()
-        {
-	        if(_vConn)
-	        {
-		        //服务停止时, 主动关闭一下连接, 这样客户端会检测到, 不需要等下一个发送包时, 发送失败才知道连接被关闭
-		        for (auto it = _tl.begin(); it != _tl.end(); ++it) {
-			        if (_vConn[it->second].first != NULL) {
-				        _vConn[it->second].first->close();
-			        }
-		        }
-		        delete[] _vConn;
-	        }
-        }
-
-        /**
-         * 初始化大小
-         * Initial size
-         * @param size
-         */
-        void init(uint32_t size, uint32_t iIndex = 0);
-
-        /**
-         * 获取惟一ID
-         * Get the unique ID
-         *
-         * @return unsigned int
-         */
-        uint32_t getUniqId();
-
-        /**
-         * 添加连接
-         * Add Connection
-         * @param cPtr
-         * @param iTimeOutStamp
-         */
-        void add(Connection *cPtr, time_t iTimeOutStamp);
-
-        /**
-         * 刷新时间链
-         * Refresh the connectiom
-         * @param uid
-         * @param iTimeOutStamp, 超时时间点
-         * @param iTimeOutStamp, Timeout Point
-         */
-        void refresh(uint32_t uid, time_t iTimeOutStamp);
-
-        /**
-         * 检查超时数据
-         * Check Timeout
-         */
-        void checkTimeout(time_t iCurTime);
-
-        /**
-         * 获取某个监听端口的连接
-         * Get a connection to a listening port
-         * @param lfd
-         * @return vector<TC_EpollServer::ConnStatus>
-         */
-        vector<ConnStatus> getConnStatus(int lfd);
-
-        /**
-         * 获取某一个连接
-         * Get a certain connection
-         * @param p
-         * @return T
-         */
-        Connection* get(uint32_t uid);
-
-        /**
-         * 删除连接
-         * Delete connection
-         * @param uid
-         */
-        void del(uint32_t uid);
-
-        /**
-         * 大小
-         * Size
-         * @return size_t
-         */
-        size_t size();
-
-    protected:
-        typedef pair<Connection*, multimap<time_t, uint32_t>::iterator> list_data;
-
-        /**
-         * 内部删除, 不加锁
-         * Internal Delete, No Lock
-         * @param uid
-         */
-        void _del(uint32_t uid);
-
-    protected:
-		/**
-		 * 无锁
-         * No Lock
-		 */
-		TC_ThreadMutex                 _mutex;
-
-		/**
-		 * 服务
-         * Service
-		 */
-		NetThread                      *_pEpollServer;
-
-        /**
-         * 总计连接数
-         * Total connection amount
-         */
-        volatile uint32_t               _total;
-
-        /**
-         * 空闲链表
-         * Empty link list
-         */
-        list<uint32_t>                  _free;
-
-        /**
-         * 空闲链元素个数
-         * number of the elements in the empty link
-         */
-        volatile size_t                 _free_size;
-
-        /**
-         * 链接
-         * Connection
-         */
-        list_data                       *_vConn;
-
-        /**
-         * 超时链表
-         * Timeout link list
-         */
-        multimap<time_t, uint32_t>      _tl;
-
-        /**
-         * 上次检查超时时间
-         * Last timeout time
-         */
-        time_t                          _lastTimeoutTime;
-
-        /**
-         * 链接ID的魔数
-         * Magic Number of Link IDs
-         */
-        uint32_t                        _iConnectionMagic;
-    };
-
-    ////////////////////////////////////////////////////////////////////////////
-    class NetThread : public TC_Thread, public TC_HandleBase
-    {
-    public:
-        
-        ////////////////////////////////////////////////////////////////////////////
-    public:
-        /**
-         * 构造函数
-         * Constructor
-         */
-        NetThread(TC_EpollServer *epollServer, int index);
-
-        /**
-         * 析构函数
-         * Destructor
-         */
-        virtual ~NetThread();
-
-		/**
-		 * 获取网络线程的index
-         * Get the index for the network threads
-		* @return
-		*/
-		int getIndex() const { return _threadIndex; }
-
-        /**
-         * 网络线程执行函数
-         * Network thread execution function
-         */
-        virtual void run();
-
-        /**
-         * 停止网络线程
-         * Stop the network thread
-         */
-        void terminate();
-
-        /**
-         * 生成epoll
-         * Generate epoll
-         */
-        void createEpoll(uint32_t maxAllConn);
-
-        /**
-         * 初始化udp监听
-         * Initialize UDP listening
-         */
-        void initUdp(const unordered_map<int, BindAdapterPtr> &listeners);
-
-        /**
-         * 是否服务结束了
-         * Whether the service is end.
-         *
-         * @return bool
-         */
-        bool isTerminate() const    { return _bTerminate; }
-
-        /**
-         * 获取Epoller对象
-         * Get the Epoller Object
-         * 
-         * @return TC_Epoller*
-         */
-        TC_Epoller* getEpoller()    { return &_epoller; }
-
-		/**
-		 * 唤醒网络线程
-         * Wake up the network thread
-		 */
-		void notify();
-
-		/**
-		 * 关闭连接
-         * Close Connection
-		 * @param uid
-		 */
-		void close(const shared_ptr<RecvContext> &data);
-
-		/**
-		* 发送数据
-        * Send data
-		* @param uid
-		* @param s
-		*/
-		void send(const shared_ptr<SendContext> &data);
-
-		/**
-		 * 获取某一监听端口的连接数
-         * Get the number of connections for a listening port
-		 * @param lfd
-		 *
-		 * @return vector<TC_EpollServer::ConnStatus>
-		 */
-		vector<TC_EpollServer::ConnStatus> getConnStatus(int lfd);
-
-        /**
-         * 获取连接数
-         * Get the number of connections
-         *
-         * @return size_t
-         */
-        size_t getConnectionCount()     { return _list.size(); }
-
-        /**
-         * 记录日志
-         * Logging
-         * @param s
-         */
-        void debug(const string &s) const;
-
-        /**
-         * INFO日志
-         * INFO LOG
-         * @param s
-         */
-        void info(const string &s) const;
-
-	    /**
-		 * TARS日志
-         * TARS LOG
-		 * @param s
-		 */
-	    void tars(const string &s) const;
-
-        /**
-         * 记录错误日志
-         * Log errors
-         * @param s
-         */
-        void error(const string &s) const;
-
-        /**
-         * 是否启用防止空链接攻击的机制
-         * Whether the mechanism to prevent null link attacks is enabled.
-         * @param bEnable
-         */
-        void enAntiEmptyConnAttack(bool bEnable);
-
-        /**
-         *设置空连接超时时间
-         *Set empty connection timeout
-         */
-        void setEmptyConnTimeout(int timeout);
-
-        /**
-         *设置udp的接收缓存区大小,单位是B,最小值为8192,最大值为DEFAULT_RECV_BUFFERSIZE
-         *Sets the size of the receiving buffer in UDP in B, with a minimum of 8192 and a maximum of DEFAULT_RECV_BUFFERSIZE
-         */
-        void setUdpRecvBufferSize(size_t nSize=DEFAULT_RECV_BUFFERSIZE);
-
-
-    protected:
-
-        /**
-         * 获取连接
-         * Get connection
-         * @param id
-         *
-         * @return ConnectionPtr
-         */
-        Connection *getConnectionPtr(uint32_t uid)      { return _list.get(uid); }
-
-        /**
-         * 添加tcp链接
-         * Add TCP connection
-         * @param cPtr
-         * @param iIndex
-         */
-        void addTcpConnection(Connection *cPtr);
-
-        /**
-         * 添加udp连接
-         * Add UDP connection
-         * @param cPtr
-         * @param index
-         */
-        void addUdpConnection(Connection *cPtr);
-
-        /**
-         * 删除链接
-         * Delete connection
-         * @param cPtr
-         * @param bEraseList 是否是超时连接的删除
-         * @param bEraseList Whether it is deletion of timeout connection
-         * @param closeType  关闭类型,0:表示客户端主动关闭;1:服务端主动关闭;2:连接超时服务端主动关闭
-         * @param closeType  Close type, 0: indicates active closure of client, 1: active closure of server, 2: active closure of connection timeout server
-         */
-        void delConnection(Connection *cPtr, bool bEraseList = true, EM_CLOSE_T closeType=EM_CLIENT_CLOSE);
-
-        /**
-         * 处理管道消息
-         * Processing Pipeline Messages
-         */
-        void processPipe();
-
-        /**
-         * 处理网络请求
-         * Processing Network Request
-         */
-        void processNet(const epoll_event &ev);
-
-        /**
-         * 空连接超时时间
-         * Empty connection timeout
-         */
-        int getEmptyConnTimeout() const;
-
-        /**
-         *是否空连接检测
-         *Empty connection detection examination
-         */
-        bool isEmptyConnCheck() const;
-
-        friend class BindAdapter;
-        friend class ConnectionList;
-        friend class TC_EpollServer;
-
-    private:
-        /**
-         * 服务
-         * Service
-         */
-        TC_EpollServer              *_epollServer;
-
-		/**
-		 * net线程的id
-         * the net thread id
-		 */
-		std::thread::id             _threadId;
-
-		/**
-		 * 线程索引
-         * the thread index
-		 */
-		int                         _threadIndex;
-
-        /**
-         * epoll
-         */
-        TC_Epoller                  _epoller;
-
-        /**
-         * 停止
-         * Stop
-         */
-        bool                        _bTerminate;
-
-		/**
-		 * 通知epoll
-         * Notify epoll
-		 */ 
-		TC_Epoller::NotifyInfo 		_notify;
-
-        /**
-         * 管理的连接链表
-         * Managed Link List
-         */
-        ConnectionList              _list;
-
-        /**
-         * 发送队列
-         * Sending Queue
-         */
-        send_queue                  _sbuffer;
-
-        /**
-         *空连接检测机制开关
-         *Switch for empty connection detection mechanism
-         */
-        bool                         _bEmptyConnAttackCheck;
-
-
-        /**
-         * 空连接超时时间,单位是毫秒,默认值2s,
-         * 该时间必须小于等于adapter自身的超时时间
-         * Empty connection timeout in milliseconds, default 2s,
-         * The time must be less than or equal to the adapter's own timeout
-         */
-        int                            _iEmptyCheckTimeout;
-
-        /**
-         * udp连接时接收包缓存大小,针对所有udp接收缓存有效
-         * Received packet cache size on UDP connection, valid for all UDP receive caches
-         */
-        size_t                         _nUdpRecvBufferSize;
-
-		/**
-		 * 通知信号
-         * Notification signal
-		 */
-		bool                        _notifySignal = false;
-    };
-    ////////////////////////////////////////////////////////////////////////////
-public:
-    /**
-     * 构造函数
-     * Constructor
-     */
-    TC_EpollServer(unsigned int iNetThreadNum = 1);
-
-    /**
-     * 析构函数
-     * Destructor
-     */
-    ~TC_EpollServer();
-
-    /**
-     * 是否启用防止空链接攻击的机制
-     * Whether mechanisms to prevent empty link attacks are enabled
-     * @param bEnable
-     */
-    void enAntiEmptyConnAttack(bool bEnable);
-
-    /**
-     *设置空连接超时时间
-     *Set empty connection timeout
-     */
-    void setEmptyConnTimeout(int timeout);
-
-    /**
-     * 设置本地日志
-     * Set local log
-     * @param plocalLogger
-     */
-    void setLocalLogger(RollWrapperInterface *pLocalLogger)       { _pLocalLogger = pLocalLogger; }
-
-	/**
-	 * 选择网络线程
-     * Select network threads
-	 * @param fd
-	 */
-	inline NetThread* getNetThreadOfFd(int fd) { return _netThreads[fd % _netThreads.size()]; }
-
-	/**
-	 * 合并handle线程和网络线程
-     * Merge handle and network threads
-	 * @param merge
-	 */
-	void setMergeHandleNetThread(bool merge) { _mergeHandleNetThread = merge; }
-
-	/**
-	 * 是否合并handle线程网络线程
-     * Whether to merge handle thread network threads
-	 * @return
-	 */
-	inline bool isMergeHandleNetThread() const { return _mergeHandleNetThread; }
-
-    /**
-     * 绑定监听socket
-     * Bind listening socket
-     * @param ls
-     */
-    int  bind(BindAdapterPtr &lsPtr);
-
-    /**
-     * 启动业务处理线程
-     * Start Business Processing Thread
-     */
-    void startHandle();
-
-    /**
-     * 生成epoll
-     * Generate epoll
-     */
-    void createEpoll();
-
-    /**
-     * 运行
-     * Run
-     */
-    void waitForShutdown();
-
-    /**
-     * 停止服务
-     * Stop Service
-     */
-    void terminate();
-
-    /**
-     * 是否服务结束了
-     * Whether the service is over
-     *
-     * @return bool
-     */
-    bool isTerminate() const    { return _bTerminate; }
-
-    /**
-     * 根据名称获取BindAdapter
-     * Get BindAdapter according to the name
-     * @param sName
-     * @return BindAdapterPtr
-     */
-    BindAdapterPtr getBindAdapter(const string &sName);
-
-	/**
-	 * 获取所有adatapters
-     * Get all adapters
-	 * @return
-	 */
-	vector<BindAdapterPtr> getBindAdapters();
-
-    /**
-     * 向网络线程添加连接
-     * Add remote connection to the network thread
-     */
-    void addConnection(Connection * cPtr, int fd, CONN_TYPE iType);
-
-	/**
-	 * 关闭连接
-     * Close connection
-	 * @param uid
-	 */
-	void close(const shared_ptr<TC_EpollServer::RecvContext> &data);
-
-	/**
-	 * 发送数据
-     * Send data
-	 * @param uid
-	 * @param s
-	 */
-	void send(const shared_ptr<SendContext> &data);
-
-    /**
-     * 获取某一监听端口的连接数
-     * Get the connection amount of a certain listening port
-     * @param lfd
-     *
-     * @return vector<TC_EpollServer::ConnStatus>
-     */
-    vector<ConnStatus> getConnStatus(int lfd);
-
-	/**
-	 * 获取监听socket信息
-     * Get the information of the listening socket
-	 *
-	 * @return map<int,ListenSocket>
-	 */
-	unordered_map<int, BindAdapterPtr> getListenSocketInfo();
-
-    /**
-     * 获取所有连接的数目
-     * Get the amount of all connections
-     *
-     * @return size_t
-     */
-    size_t getConnectionCount();
-
-    /**
-     * 记录日志
-     * Logging
-     * @param s
-     */
-    void debug(const string &s) const;
-
-    /**
-     * INFO日志
-     * INFO LOG
-     * @param s
-     */
-    void info(const string &s) const;
-
-     /**
-     * 记录错误日志
-     * Log errors
-     * @param s
-     */
-    void error(const string &s) const;
-
-	/**
-	 * tars日志
-     * tars log
-	 * @param s
-	 */
-	void tars(const string &s) const;
-
-    /**
-     * 获取网络线程的数目
-     * Get the amount of the network threads
-     */
-    unsigned int getNetThreadNum() { return _netThreadNum; }
-
-    /**
-     * 获取网络线程的指针集合
-     * Get the collection of pointers for a network thread 
-     */
-    vector<TC_EpollServer::NetThread*> getNetThread() { return _netThreads; }
-
-    /**
-     * 停止线程
-     * Stop the thread
-     */
-    void stopThread();
-
-    /**
-     * 获取所有业务线程的数目
-     * Get the amount of all the bussiness threads
-     */
-    size_t getLogicThreadNum();
-
-    // 接收新的客户端链接时的回调
-    typedef std::function<void (TC_EpollServer::Connection*)> accept_callback_functor;
-
-    /*
-     * 设置接收链接的回调
-     */
-    void setOnAccept(const accept_callback_functor& f) { _acceptFunc = f; }
-
-	//回调给应用服务
-    //Callback to application service
-	typedef std::function<void(TC_EpollServer*)> application_callback_functor;
-
-	/**
-	 * 设置waitForShutdown线程回调的心跳
-     * Set the heartbeat of the thread callback of the waitForShutdown
-	 * @param hf [description]
-	 */
-	void setCallbackFunctor(const application_callback_functor &hf) { _hf = hf; }
-
-    //网络线程发送心跳的函数
-    //Function for network threads to send heartbeats
-    typedef std::function<void(const string &)> heartbeat_callback_functor;
-
-    /**
-     * 设置netthread网络线程发送心跳的函数
-     * Function for setting netthreaded network threads to send heartbeats
-     * @param hf [description]
-     */
-    void setHeartBeatFunctor(const heartbeat_callback_functor& heartFunc) { _heartFunc = heartFunc; }
-    heartbeat_callback_functor& getHeartBeatFunctor() { return _heartFunc; }
-
-protected:
-
-    friend class BindAdapter;
-
-	/**
-	 * 接收句柄
-     * Receiving handle
-	 * @param fd
-	 * @return
-	 */
-	bool accept(int fd, int domain = AF_INET);
-
-	/**
-	 * 绑定端口
-     * Bind Port
-	 * @param ep
-	 * @param s
-	 * @param manualListen
-	 */
-	void bind(const TC_Endpoint &ep, TC_Socket &s, bool manualListen);
-
-	static void applicationCallback(TC_EpollServer *epollServer);
-private:
-    /**
-     * 网络线程
-     * Network Thread
-     */
-    std::vector<NetThread*>     _netThreads;
-
-    /*
-     * 网络线程数目
-     * Network Thread Amount
-     */
-    unsigned int                _netThreadNum;
-
-	/**
- 	* epoll
- 	*/
-	TC_Epoller                  _epoller;
-
-	/**
-	 * 通知epoll
-     * Notify epoll
-	 */
-	TC_Epoller::NotifyInfo 		_notify;
-
-    /*
-     * 服务是否停止
-     * Whether the service is stopped
-     */
-    bool                        _bTerminate;
-
-    /*
-     * 业务线程是否启动
-     * Whether the bussiness thread is started.
-     */
-    bool                        _handleStarted;
-
-	/**
-	 * 合并网络和业务线程
-     * Merge network and business threads
-	 */
-	bool                        _mergeHandleNetThread = false;
-
-    /**
-     * 本地循环日志
-     * Local Loop Log
-     */
-    RollWrapperInterface        *_pLocalLogger;
-
-	/**
-	 *
-	 */
-	vector<BindAdapterPtr>       _bindAdapters;
-
-	/**
- 	* 监听socket
-    * Listening socket
- 	*/
-	unordered_map<int, BindAdapterPtr>    _listeners;
-
-	/**
-	 * 应用回调
-     * Application callback
-	 */
-	application_callback_functor _hf;
-
-    /**
-     * 发送心跳的函数
-     * Heartbeat Sending Function
-     */
-    heartbeat_callback_functor _heartFunc;
-
-    /**
-     * 接收链接的回调函数
-     */
-    accept_callback_functor _acceptFunc;
-};
-
-typedef TC_AutoPtr<TC_EpollServer> TC_EpollServerPtr;
-
-}
-
-#endif
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <memory>
+#include <map>
+#include <unordered_map>
+#include <vector>
+#include <list>
+#include <algorithm>
+#include <functional>
+#include "util/tc_epoller.h"
+#include "util/tc_thread.h"
+#include "util/tc_clientsocket.h"
+#include "util/tc_logger.h"
+#include "util/tc_thread_pool.h"
+#include "util/tc_common.h"
+#include "util/tc_network_buffer.h"
+#include "util/tc_transceiver.h"
+#include "util/tc_cas_queue.h"
+#include "util/tc_coroutine.h"
+#include "util/tc_openssl.h"
+
+using namespace std;
+
+/**
+ * 服务模型说明:
+ * - 服务模型全面支持协程化, 一共有四种模式, 可以通过setOpenCoroutine来设置, 这四种模式是:
+ * 1 NET_THREAD_QUEUE_HANDLES_THREAD: 独立网路线程 + 独立handle线程: 网络线程负责收发包, 通过队列唤醒handle线程中处理
+ * 2 NET_THREAD_QUEUE_HANDLES_CO: 独立网路线程组 + 独立handle线程: 网络线程负责收发包, 通过队列唤醒handle线程中处理, handle线程中启动协程处理
+ * 3 NET_THREAD_MERGE_HANDLES_THREAD: 合并网路线程 + handle线程(线程个数以处理线程配置为准, 网络线程配置无效): 连接分配到不同线程中处理(如果是UDP, 则网络线程竞争接收包), 这种模式下延时最小, 相当于每个包的收发以及业务处理都在一个线程中
+ * 4 NET_THREAD_MERGE_HANDLES_CO: 合并网路线程 + handle线程(线程个数以处理线程配置为准, 网络线程配置无效): 连接分配到不同线程中处理(如果是UDP, 则网络线程竞争接收包), 每个包会启动协程来处理
+ *
+ * 设计说明:
+ * - 设计上支持协程化, 这样所有服务端处理请求的线程, 其实本质上都在协程中, 协程的调度背后使用的epoller, 参考tc_coroutine的说明
+ * - 结合到客户端rpc调用, 服务端处理过程中如果发起对另外一个服务的rpc, 这时rpc作为客户端能复用服务处理线程中的epoller, 从而使得客户端和服务端在同一个epoller中使用协程调度
+ * - 这样设计的优势是无论是rpc同步调用还是异步调用, 都在一个线程中执行, 减少了线程的切换, 同时大幅度减少了请求的延时!
+ *
+ * 队列说明:
+ * - 网络线程和业务逻辑线程之间, 存在队列DataQueue
+ * - 这些DataQueue都封装在DataBuffer类中
+ * - DataBuffer对象被BindAdapter以及处理类(Handle)所持有, 即每个BindAdapter会初始化一个DataBuffer并设置给相关的Handle
+ * - 不同服务模型下以及设置了队列模式下, 队列的使用是不同的
+ * 缺省, 非队列模式(enableQueueMode: false)
+ * 1 NET_THREAD_QUEUE_HANDLES_THREAD
+ * - DataBuffer中实际使用的DataQueue就一个, 多个Handle线程竞争该DataQueue, 以获取数据
+ * - 多个handle线程都会等待在队列上
+ * - 网络线程接收到数据后, 会唤醒等待在队列上handle线程
+ * 2 NET_THREAD_QUEUE_HANDLES_CO
+ * - DataBuffer中实际使用的DataQueue就一个, 多个Handle协程竞争该DataQueue, 以获取数据
+ * - 注意: handle线程(协程)都并不会等待在队列上, 而是通过协程调度, 如果没有数据, 协程调度阻塞
+ * - 网络线程接收数据后, 会通过协程调度器唤醒协程
+ * 3 NET_THREAD_MERGE_HANDLES_THREAD
+ * - 网络和handle都在同一个线程中, 但是都以协程模式在运行, 注意: handle协程并不是一个请求一个协程
+ * - 此时DataBuffer中实际使用的DataQueue和Handle是一对一的
+ * - Handle本质运行在协程中, 但是不会每个请求都创建一个协程来执行, 而是连续执行一定的请求后, 会yield释放协程, 让网络线程继续
+ * - Handle协程不会阻塞在队列上, 而是通过协程调度器来唤醒
+ * - 网络线程接收数据后, 会通过协程调度器唤醒协程
+ * 4 NET_THREAD_MERGE_HANDLES_CO
+ * - 网络和handle都在同一个线程中, 但是都以协程模式在运行, 注意: handle协程一个请求创建一个协程
+ * - 此时DataBuffer中实际使用的DataQueue和Handle是一对一的
+ * - Handle协程不会阻塞在队列上, 而是通过协程调度器来唤醒
+ * - 网络线程接收数据后, 会通过协程调度器唤醒协程
+ *
+ * 在队列模式下, 一个连接上来的请求都在同一个handle线程中处理!
+ */
+namespace tars
+{
+class PropertyReport;
+
+namespace detail
+{
+    /**
+    * log接口
+    */
+    class LogInterface
+    {
+    public:
+        virtual ~LogInterface() {}
+
+        /**
+         * 记录日志
+         * @param s
+         */
+        virtual void debug(const string &s) const = 0;
+
+        /**
+        * INFO日志
+        * @param s
+        */
+        virtual void info(const string &s) const = 0;
+
+        /**
+         * taf日志
+         * @param s
+         */
+        virtual void tars(const string &s) const = 0;
+
+        /**
+         * 记录错误日志
+         * @param s
+         */
+        virtual void error(const string &s) const = 0;
+
+    };
+}
+
+struct TC_EpollServer_Exception : public TC_Exception
+{
+   TC_EpollServer_Exception(const string &buffer) : TC_Exception(buffer) {};
+   ~TC_EpollServer_Exception() {};
+};
+
+class TC_EpollServer : public TC_HandleBase, public detail::LogInterface
+{
+public:
+
+    enum EM_CLOSE_T
+    {
+        EM_CLIENT_CLOSE = 0,         //客户端主动关闭
+        EM_SERVER_CLOSE = 1,        //服务端业务主动调用close关闭连接,或者框架因某种异常主动关闭连接
+        EM_SERVER_TIMEOUT_CLOSE = 2  //连接超时了,服务端主动关闭
+    };
+
+    enum
+    {
+        MIN_EMPTY_CONN_TIMEOUT  = 5 * 1000,      /*空链接超时时间(ms)*/
+        DEFAULT_RECV_BUFFERSIZE = 64 * 1024     /*缺省数据接收buffer的大小*/
+    };
+
+    //定义加入到网络线程的fd类别
+    enum CONN_TYPE
+    {
+        TCP_CONNECTION = 0,
+        UDP_CONNECTION = 1,
+    };
+
+    class RecvContext;
+    class SendContext;
+    class NetThread;
+    class ConnectionList;
+    class Connection;
+    class BindAdapter;
+    class Handle;
+
+    typedef shared_ptr<BindAdapter> BindAdapterPtr;
+    typedef shared_ptr<Handle> HandlePtr;
+
+    // typedef TC_AutoPtr<BindAdapter> BindAdapterPtr;
+    // typedef TC_AutoPtr<Handle> HandlePtr;
+
+//    using close_functor = std::function<void(void *, EM_CLOSE_T)>;
+    using header_filter_functor = std::function<TC_NetWorkBuffer::PACKET_TYPE(TC_NetWorkBuffer::PACKET_TYPE, vector<char> &)> ;
+
+    ////////////////////////////////////////////////////////////////////////////
+    /**
+    * Context of one received packet. Created by a network thread and handed to
+    * a Handle; keeps everything needed to reply: the owning net-thread index,
+    * the connection uid/fd and a weak reference to the BindAdapter.
+    */
+    class RecvContext : public std::enable_shared_from_this<RecvContext>
+    {
+    public:
+        RecvContext(int threadIndex, uint32_t uid,
+                    const TC_Socket::addr_type &addr,
+                    int fd,
+                    const BindAdapterPtr & adapter,
+                    bool isClosed = false,
+                    int closeType = EM_CLIENT_CLOSE)
+                : _threadIndex(threadIndex)
+                , _uid(uid)
+                , _addr(addr)
+                , _fd(fd)
+                , _adapter(adapter)
+                , _isClosed(isClosed)
+                , _closeType(closeType)
+                , _recvTimeStamp(TNOWMS)
+        {}
+        inline int threadIndex() const     { return _threadIndex; }
+        inline uint32_t uid() const        { return _uid; }
+        inline const TC_Socket::addr_type& addr() const      { return _addr; }
+        // ip()/port() resolve _ip/_port lazily from _addr via parseIpPort()
+        inline const string & ip() const   { parseIpPort(); return _ip; }
+        inline uint16_t port() const       { parseIpPort(); return _port; }
+        inline vector<char> & buffer()     { return _rbuffer; }
+        inline const vector<char> & buffer() const { return _rbuffer; }
+        inline int64_t recvTimeStamp() const { return _recvTimeStamp; }
+        inline bool isOverload() const     { return _isOverload; }
+        inline void setOverload()          { _isOverload = true; }
+        inline bool isClosed() const       { return _isClosed; }
+        inline int fd() const { return _fd; }
+	    // Whether the connection still exists (the client may already have closed it)
+	    bool connectionExists() const
+        {
+            auto adapter = _adapter.lock();
+            if (adapter)
+            {
+                Connection *cPtr = adapter->getNetThread(_threadIndex)->getConnectionPtr(_uid);
+                return cPtr != NULL;
+            }
+            return false;
+        }
+        //  { Connection *cPtr = _adapter->getNetThread(_threadIndex)->getConnectionPtr(_uid); return cPtr != NULL; }
+        inline BindAdapterPtr adapter()  { return _adapter.lock(); }
+        inline int closeType() const       { return _closeType; }
+        inline void setCloseType(int closeType) { _closeType = closeType; }
+        // Factories for the response / close contexts ('s' = send data, 'c' = close fd)
+        inline shared_ptr<SendContext> createSendContext()     { return std::make_shared<SendContext>(shared_from_this(), 's'); }
+        inline shared_ptr<SendContext> createCloseContext()    { return std::make_shared<SendContext>(shared_from_this(), 'c'); }
+    protected:
+        // Fills _ip/_port from _addr on demand (const: only writes mutable members)
+        void parseIpPort() const;
+    protected:
+    	int _threadIndex;       // index of the network thread that produced this message
+        uint32_t _uid;            /**connection id*/
+        TC_Socket::addr_type _addr;
+        mutable string _ip;             /**remote peer ip (filled lazily by parseIpPort)*/
+        mutable uint16_t _port;           /**remote peer port (filled lazily by parseIpPort)*/
+        int _fd;                /*fd that produced this message; used to pick the net thread when replying*/
+        weak_ptr<BindAdapter> _adapter;        /**which adapter this message belongs to*/
+        vector<char> _rbuffer;        /**received payload*/
+        bool _isOverload = false;     /**whether the server was overloaded */
+        bool _isClosed = false;       /**whether the connection has been closed*/
+        int _closeType;     /*for close messages, the close type: 0: closed by client; 1: closed by server; 2: closed by server due to timeout*/
+        int64_t _recvTimeStamp;  /**time the data was received (ms)*/
+    };
+
+    /**
+    * Context of an outgoing packet (or a close command).
+    * Always created from a RecvContext via createSendContext()/createCloseContext().
+    */
+    class SendContext
+    {
+    public:
+        SendContext(const shared_ptr<RecvContext> & context, char cmd)
+                : _context(context), _cmd(cmd)
+        {
+            _sbuffer = std::make_shared<TC_NetWorkBuffer::Buffer>();
+        }
+
+        inline const shared_ptr<RecvContext> & getRecvContext() { return _context; }
+        inline void setBuffer(const shared_ptr<TC_NetWorkBuffer::Buffer>& buff) { _sbuffer = buff; }
+        inline const shared_ptr<TC_NetWorkBuffer::Buffer> & buffer() { return _sbuffer; }
+        inline char cmd() const        { return _cmd; }
+        // Accessors below delegate to the originating RecvContext
+        inline uint32_t uid() const    { return _context->uid(); }
+        inline int fd() const          { return _context->fd(); }
+        inline const string & ip() const { return _context->ip(); }
+        inline uint16_t port() const   { return _context->port(); }
+
+        friend class RecvContext;
+
+    protected:
+        shared_ptr<RecvContext>              _context;
+        char _cmd;                                            /**command: 'c': close the fd; 's': data to send*/
+        shared_ptr<TC_NetWorkBuffer::Buffer> _sbuffer;        /**payload to send*/
+    };
+
+//    typedef TC_CasQueue<shared_ptr<RecvContext>> recv_queue;
+    typedef TC_ThreadQueue<shared_ptr<RecvContext>> recv_queue;
+//    typedef TC_CasQueue<shared_ptr<SendContext>> send_queue;
+    typedef TC_ThreadQueue<shared_ptr<SendContext>> send_queue;
+
+//    typedef recv_queue::queue_type recv_queue_type;
+
+    ////////////////////////////////////////////////////////////////////////////
+
+    /**
+     * Wrapper around the receive-data queues shared between network threads
+     * (producers) and handle threads (consumers).
+     */
+    class DataBuffer
+    {
+    public:
+        /**
+         * One receive queue.
+         */
+        class DataQueue
+        {
+        public:
+        	/**
+        	 * Wake up every thread waiting on the queue.
+        	 */
+            inline void notify() { return _rbuffer.notifyT(); }
+
+            /**
+             * Push data onto the queue, waking one waiting consumer thread.
+             * @param recv
+             */
+            inline void push_back(const shared_ptr<RecvContext> &recv ) { _rbuffer.push_back(recv); }
+
+            /**
+             * Wait on the queue.
+             * @param millseconds
+             * @return
+             */
+            inline bool wait(size_t millseconds) { return _rbuffer.wait(millseconds); }
+
+            /**
+             * Pop the front element (does not block when the queue is empty).
+             * @param data
+             * @return
+             */
+            inline bool pop_front(shared_ptr<RecvContext> &data) { return _rbuffer.pop_front(data, 0, false); }
+
+        protected:
+            /**
+             * queue of received data
+             */
+            recv_queue _rbuffer;
+        };
+
+        /**
+         * Constructor; takes the number of handle threads.
+         * @param handleNum
+         */
+        DataBuffer(int handleNum);
+
+        /**
+         * Wake up the queue used by the given handle.
+         * @param handleIndex
+         */
+        void notifyBuffer(uint32_t handleIndex);
+
+        /**
+         * Insert into the receive queue.
+         * @param recv
+         */
+        void insertRecvQueue(const shared_ptr<RecvContext> &recv);
+
+        /**
+         * Wait on the queue of the given handle.
+         * @param handleIndex
+         * @return
+         */
+        bool wait(uint32_t handleIndex);
+
+        /**
+         * Pop data for the given handle.
+         * @param handleIndex
+         * @param data
+         * @return
+         */
+        bool pop(uint32_t handleIndex, shared_ptr<RecvContext> &data);
+
+        /**
+         * Whether queue mode is enabled.
+         * @return
+         */
+        inline bool isQueueMode() const { return _queueMode; }
+
+        /**
+         * Enable queue mode (requests of one connection stick to one handle).
+         */
+        inline void enableQueueMode() { _queueMode = true; }
+
+        /**
+         * Number of items currently held in the receive queues.
+         * @return
+         */
+        inline size_t getRecvBufferSize() const { return _iRecvBufferSize; }
+
+        /**
+         * Register a coroutine scheduler for a handle.
+         * @param handleIndex
+         * @param scheduler
+         */
+        void registerScheduler(uint32_t handleIndex, const shared_ptr<TC_CoroutineScheduler> & scheduler);
+
+        /**
+         * Unregister the coroutine scheduler of a handle.
+         * @param handleIndex
+         */
+        void unregisterScheduler(uint32_t handleIndex);
+
+        /**
+         * Get the coroutine scheduler bound to a handle.
+         * @param handleIndex
+         * @return
+         */
+		const shared_ptr<TC_CoroutineScheduler> &getScheduler(uint32_t handleIndex);
+
+        /**
+         * Set the queue wait time.
+         * @param iWaitTime
+         */
+        inline void setWaitTime(uint32_t iWaitTime) { _iWaitTime = iWaitTime; }
+
+    protected:
+
+        // Maps a handle index onto one of the data queues
+        inline int index(uint32_t handleIndex) { return handleIndex % _threadDataQueue.size(); }
+
+        const shared_ptr<DataQueue> &getDataQueue(uint32_t handleIndex);
+
+    protected:
+
+        /**
+         * total number of items in the receive queues
+         */
+        atomic<size_t>                  _iRecvBufferSize {0};
+
+        /**
+         * whether queue mode is enabled (same connection pinned to one handle)
+         */
+        bool                            _queueMode = false;
+
+        /**
+         * one queue per thread:
+         * 0: used in shared-queue mode
+         * 1..handle count: used in queue mode
+         */
+        vector<shared_ptr<DataQueue>>   _threadDataQueue;
+
+        /**
+         * only used in NET_THREAD_AND_HANDLES_CO mode
+         */
+        vector<shared_ptr<TC_CoroutineScheduler>>  _schedulers;
+
+        /**
+         * wait time for queue
+         */
+        int64_t     _iWaitTime = 10000;
+    };
+
+    ////////////////////////////////////////////////////////////////////////////
+
+    /**
+    * Snapshot of one connection's status (see ConnectionList::getConnStatus).
+    */
+    struct ConnStatus
+    {
+        string ip;               // peer ip
+        int32_t uid;             // connection id
+        uint16_t port;           // peer port
+        int timeout;             // connection timeout -- presumably seconds (cf. Connection::getTimeout); confirm at fill site
+        int iLastRefreshTime;    // last refresh/activity time
+        size_t recvBufferSize;   // pending receive buffer size -- unit (bytes vs items) set by fill site, confirm
+        size_t sendBufferSize;   // pending send buffer size -- unit set by fill site, confirm
+    };
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // server-side connection management
+    /**
+    *  Socket info of one established connection.
+    */
+    class Connection
+    {
+    public:
+        /**
+         * Constructor (tcp connection)
+         * @param lfd
+         * @param s
+         * @param ip
+         * @param port
+         */
+        Connection(const shared_ptr<ConnectionList> &connlist, BindAdapter *pBindAdapter,
+                   int fd,
+                   const string & ip,
+                   uint16_t port,
+                   detail::LogInterface *logger);
+
+        /**
+         * Constructor (udp connection)
+         * @param fd
+         */
+        Connection(const shared_ptr<ConnectionList> &connlist, BindAdapter *pBindAdapter, int fd, detail::LogInterface *logger);
+
+        /**
+         * Destructor
+         */
+        virtual ~Connection();
+
+        /**
+         * Initialize
+         * @param id, unique id of the connection
+         */
+        void initialize(TC_Epoller *epoller, unsigned int uid, NetThread *netThread);
+
+        /**
+         * Get the associated network thread.
+         * @return
+         */
+        inline NetThread *getNetThread() { return _netThread; }
+
+        /**
+         * Adapter this connection belongs to.
+         */
+        inline BindAdapter* getBindAdapter() { return _pBindAdapter; }
+
+        /**
+         * registry epoll event
+         */ 
+        void registerEvent(NetThread *netThread);
+
+        /**
+         * get epoll info
+         * @return
+         */
+        inline const shared_ptr<TC_Epoller::EpollInfo> &getEpollInfo() { return _trans->getEpollInfo(); }
+
+        /**
+         * Connection timeout in seconds (the endpoint stores it in ms).
+         *
+         * @return int
+         */
+        inline int getTimeout() const { return this->_pBindAdapter->getEndpoint().getTimeout()/1000; }
+
+        /**
+         * Unique id of this connection.
+         *
+         * @return unsigned int
+         */
+        inline uint32_t getId() const { return _uid; }
+
+        /**
+         * fd of this connection.
+         *
+         * @return int
+         */
+        inline int getfd() const { return _trans->fd(); }
+
+        /**
+         * Whether the underlying transceiver is valid.
+         *
+         * @return bool
+         */
+        inline bool isValid() const { return _trans->isValid(); }
+
+        /**
+         * Remote ip.
+         *
+         * @return string
+         */
+        inline const string &getIp() const { return _ip; }
+
+        /**
+         * Remote port.
+         *
+         * @return uint16_t
+         */
+        inline uint16_t getPort() const { return _port; }
+
+        /**
+         * Set the number of header bytes to strip from the first packet.
+         */
+        inline void setHeaderFilterLen(int iHeaderLen) { _iHeaderLen = iHeaderLen; }
+
+        /**
+         * Mark for close: the connection is closed once pending data is sent.
+         */
+        bool setClose();
+
+        /**
+         * Whether this is an empty connection (no complete packet received yet).
+         */
+        inline bool isEmptyConn() const { return _bEmptyConn; }
+
+        /**
+         * Get the transceiver.
+         */ 
+        inline TC_Transceiver* getTransceiver() { return _trans.get(); }
+
+        /**
+         * Receive buffer.
+         */
+        inline TC_NetWorkBuffer & getRecvBuffer() { return _trans->getRecvBuffer(); }
+
+        /**
+         * Send buffer.
+         */
+        inline TC_NetWorkBuffer & getSendBuffer() { return _trans->getSendBuffer(); }
+
+        /**
+         * Send the data pending in the send buffer.
+         * @return
+         */
+        int sendBuffer();
+
+        /**
+         * Send naked response data directly; business code normally does not
+         * use this. TCP only.
+         * send naked response data
+         * @param buffer
+         * @return int, -1: sending error, 0: no data, 1: send completely, 2: data retains
+         * @return
+         */
+        int sendBufferDirect(const std::string& buff);
+
+        /**
+         * Queue response data for sending.
+         * @param buffer
+         * @return int, -1: sending error, 0: no data, 1: sent completely, 2: data remains
+         */
+        int send(const shared_ptr<SendContext> & data);
+
+        /**
+         * Push received data onto the adapter's queue.
+         * @param vtRecvData
+         */
+        inline void insertRecvQueue(const shared_ptr<RecvContext> & recv) { _pBindAdapter->insertRecvQueue(recv); }
+
+        /**
+         * For udp connections, allocate a receive buffer of the given size.
+         *@param nSize
+         */
+        void setUdpRecvBuffer(size_t nSize);
+
+        /**
+         * Whether this is a tcp connection.
+         * @return
+         */
+        inline bool isTcp() const { return _trans->getEndpoint().isTcp(); }
+
+        /**
+         * Whether this is a udp connection.
+         * @return
+         */
+        inline bool isUdp() const { return _trans->getEndpoint().isUdp(); }
+
+        /**
+         * Close the connection.
+         */ 
+        void close();
+
+    protected:
+        /**
+         * Transceiver callback: the connection was closed.
+         * @param fd
+         */
+        void onCloseCallback(TC_Transceiver *trans, TC_Transceiver::CloseReason reason, const string &err);
+
+        void onRequestCallback(TC_Transceiver *trans);
+
+        TC_NetWorkBuffer::PACKET_TYPE onParserCallback(TC_NetWorkBuffer& buff, TC_Transceiver *trans);
+
+        std::shared_ptr<TC_OpenSSL> onOpensslCallback(TC_Transceiver* trans);
+
+        bool handleOutputImp(const shared_ptr<TC_Epoller::EpollInfo> &data);
+        bool handleInputImp(const shared_ptr<TC_Epoller::EpollInfo> &data);
+        bool handleCloseImp(const shared_ptr<TC_Epoller::EpollInfo> &data);
+
+        /**
+         * Check outgoing traffic (prevents the send buffer from growing without
+         * bound when data is produced much faster than the network can drain
+         * it, which would effectively be a memory leak).
+         * @param sendBuffer : buffer currently being sent
+         * @param nowLeftBufferSize, size of data not yet sent
+         * @return <0: throttled, 0: ok to keep sending
+         */
+	    int checkFlow(TC_NetWorkBuffer& sendBuffer, size_t nowLeftBufferSize);
+    public:
+
+        /**
+         * last refresh time
+         */
+        time_t _iLastRefreshTime;
+
+    protected:
+        /**
+         * connection list this connection is registered in
+         */
+        weak_ptr<ConnectionList> _connList;
+
+        /**
+         * log interface
+         */
+        detail::LogInterface *_logger = NULL;
+
+        /**
+         * owning adapter
+         */
+        BindAdapter* _pBindAdapter = NULL;
+
+        /*
+        * transceiver doing the actual send/receive
+        */
+        std::unique_ptr<TC_Transceiver>         _trans;
+        
+        /**
+         * unique id of the connection
+         */
+        uint32_t _uid;
+
+        /**
+         * fd socket
+         */
+        int _fd;
+
+        /**
+         * ip
+         */
+        string _ip;
+
+        /**
+         * port
+         */
+        uint16_t _port;
+
+        /**
+         * queue of data not yet sent
+         */ 
+        list<shared_ptr<SendContext>> _messages;
+
+        /**
+         * total size of the messages held in _messages
+         */
+        size_t _messageSize = 0;
+
+        /**
+         * data sent per 5-second window (used by checkFlow)
+         */
+        size_t _accumulateBufferSize = 0;
+
+        /**
+         * time of the last flow check
+         */
+        time_t _lastCheckTime = 0;
+
+        /**
+         * flow-check samples: <bytes already sent, remaining buffer size>
+         */
+        vector<pair<size_t, size_t>> _checkSend;
+
+        /**
+         * number of header bytes to strip
+         */
+        int _iHeaderLen;
+
+        /**
+         * close the connection once the current data has been sent
+         */
+        bool _bClose;
+
+        /**
+         * whether this is an empty connection (no complete packet received yet)
+         */ 
+        bool _bEmptyConn;
+
+        /**
+         * owning network thread
+         */
+        NetThread * _netThread = NULL;
+    };
+
+    ////////////////////////////////////////////////////////////////////////////
+    /**
+    * Connection map combined with a time-ordered (timeout) list.
+    */
+    class ConnectionList
+    {
+    public:
+        /**
+         * Constructor
+         */
+        ConnectionList(detail::LogInterface *logger);
+
+        /**
+         * Destructor
+         */
+        ~ConnectionList()
+        {
+            assert(_vConn == NULL);
+        }
+
+        /**
+         * Initialize with the given capacity.
+         * @param size
+         */
+        void init(uint32_t size, uint32_t iIndex = 0);
+
+        /**
+         * Close all connections.
+         */ 
+        void close();
+
+        /**
+         * Close all connections belonging to the given adapter.
+         * @param bindFd
+         */
+		void closeConnections(weak_ptr<BindAdapter> bindAdapter);
+
+        /**
+         * Get a unique connection id.
+         *
+         * @return unsigned int
+         */
+        uint32_t getUniqId();
+
+        /**
+         * Add a connection.
+         * @param cPtr
+         * @param iTimeOutStamp
+         */
+        void add(Connection *cPtr, time_t iTimeOutStamp);
+
+        /**
+         * Refresh the connection's entry in the timeout list.
+         * @param uid
+         * @param iTimeOutStamp, absolute timeout time
+         */
+        void refresh(uint32_t uid, time_t iTimeOutStamp);
+
+        /**
+         * Delete a connection.
+         * @param cPtr
+         * @param bEraseList
+         * @param closeType
+         */
+        void delConnection(Connection *cPtr, bool bEraseList, EM_CLOSE_T closeType);
+
+        /**
+         * Set empty-connection timeout (ms); note the precision is one second.
+         * @param timeout
+         */
+        inline void setEmptyConnTimeout(int timeout) { _emptyCheckTimeout = timeout; }
+
+        /**
+         * get empty connection timeout
+         * @return
+         */
+        inline int getEmptyConnTimeout() { return _emptyCheckTimeout; }
+
+        /**
+         * Check for timed-out connections.
+         */
+        void checkTimeout();
+
+        /**
+         * Get the connections of one listening port.
+         * @param lfd
+         * @return vector<TC_EpollServer::ConnStatus>
+         */
+        vector<ConnStatus> getConnStatus(int lfd);
+
+        /**
+         * Get one connection by id.
+         * @param p
+         * @return T
+         */
+        Connection *get(uint32_t uid);
+
+        /**
+         * Delete a connection by id.
+         * @param uid
+         */
+        void del(uint32_t uid);
+
+        /**
+         * Number of active connections.
+         * @return size_t
+         */
+        inline size_t size() { return _total - _free_size; }
+
+    protected:
+        typedef pair<Connection *, multimap<time_t, uint32_t>::iterator> list_data;
+
+        /**
+         * Internal delete; caller must hold the lock.
+         * @param uid
+         */
+        void delNoLock(uint32_t uid);
+
+    protected:
+
+        /**
+         * empty-connection check timeout (s)
+         */
+        int _emptyCheckTimeout;
+
+        /**
+         * log interface
+         */
+        detail::LogInterface *_logger = NULL;
+
+        /**
+         * lock
+         */
+        TC_ThreadMutex _mutex;
+
+		/**
+		 * total number of connection slots
+		 */
+        uint32_t _total;
+
+        /**
+         * free-slot list
+         */
+        list<uint32_t> _free;
+
+        /**
+         * number of elements in the free list
+         */
+        size_t _free_size;
+
+        /**
+         * connection slots
+         */
+        list_data *_vConn = NULL;
+
+        /**
+         * timeout list (timeout time -> uid)
+         */
+        multimap<time_t, uint32_t> _tl;
+
+        /**
+         * last time the timeout check ran
+         */
+        time_t _lastTimeoutTime;
+
+        /**
+         * magic number mixed into connection ids
+         */
+        uint32_t _iConnectionMagic;
+
+    };
+
+    ////////////////////////////////////////////////////////////////////////////
+    // 服务端口管理,监听socket信息
+    class BindAdapter : public enable_shared_from_this<BindAdapter> // : public TC_HandleBase
+    {
+    public:
+        /**
+         * 缺省的一些定义
+         */
+        enum
+        {
+            DEFAULT_QUEUE_CAP = 10 * 1024,  /**流量*/
+            MIN_QUEUE_TIMEOUT = 3 * 1000,   /**队列最小超时时间(ms)*/
+            DEFAULT_MAX_CONN = 1024,       /**缺省最大连接数*/
+            DEFAULT_QUEUE_TIMEOUT = 60 * 1000,  /**缺省的队列超时时间(ms)*/
+        };
+        /**
+         * 顺序
+         */
+        enum EOrder
+        {
+            ALLOW_DENY,
+            DENY_ALLOW
+        };
+
+        /**
+         * 析够函数
+         */
+        ~BindAdapter();
+
+        /**
+         * 设置需要手工监听
+         */
+        void enableManualListen();
+
+        /**
+         * 是否手工监听端口
+         * @return
+         */
+        inline bool isManualListen() const { return this->_manualListen; }
+
+        /**
+         * 手工绑定端口
+         */
+        void manualListen();
+
+		/**
+		 * 取消监听
+		 */
+		void cancelListen();
+
+        /**
+         *
+         * @param info
+         */
+        inline void setEpollInfo(const shared_ptr<TC_Epoller::EpollInfo> &info) { _info = info; }
+
+        /**
+         * get epoll info
+         * @return
+         */
+        inline shared_ptr<TC_Epoller::EpollInfo> getEpollInfo() { return _info; }
+
+        /**
+         * 获取adapter name
+         * @return string
+         */
+        const string &getName() const { return _name; }
+
+        /**
+         * set index
+         */ 
+        void setNetThreads(const vector<NetThread*> &netThreads);
+
+        /**
+         * init udp
+         */
+        void initUdp(NetThread* netThread);
+
+        /**
+         * get index
+         */ 
+        inline const vector<NetThread*> & getNetThreads() const { return _netThreads;}
+
+        /**
+         * 获取queue capacity
+         * @return int
+         */
+        inline int getQueueCapacity() const { return _iQueueCapacity; }
+
+        /**
+         * 设置queue capacity
+         * @param n
+         */
+        inline void setQueueCapacity(int n) { _iQueueCapacity = n; }
+
+        /**
+         * 设置协议名称
+         * @param name
+         */
+        void setProtocolName(const string & name);
+
+        /**
+         * 获取协议名称
+         * @return const string&
+         */
+        const string & getProtocolName();
+
+        /**
+         * 是否taf协议
+         * @return bool
+         */
+        bool isTarsProtocol();
+
+        /**
+         * 判断是否需要过载保护
+         * @return bool
+         */
+        int isOverloadorDiscard();
+
+        /**
+         * 设置消息在队列中的超时时间, t为毫秒
+         * (超时时间精度只能是s)
+         * @param t
+         */
+        void setQueueTimeout(int t);
+
+        /**
+         * 获取消息在队列中的超时时间, 毫秒
+         * @return int
+         */
+        inline int getQueueTimeout() const { return _iQueueTimeout; }
+
+        /**
+         * 获取ip
+         * @return const string&
+         */
+        inline const TC_Endpoint & getEndpoint() const { return _ep; }
+
+        /**
+         * 监听socket
+         * @return TC_Socket
+         */
+        inline TC_Socket & getSocket() { return _s; } 
+
+        /**
+         * 设置最大连接数
+         * @param iMaxConns
+         */
+        inline void setMaxConns(int iMaxConns) { _iMaxConns = iMaxConns; }
+
+        /**
+         * 获取最大连接数
+         * @return size_t
+         */
+        inline size_t getMaxConns() const { return _iMaxConns;}
+
+        /**
+         * 设置HeartBeat时间
+         * @param n
+         */
+        inline void setHeartBeatTime(time_t t) { _iHeartBeatTime = t; }
+
+        /**
+         * 获取HeartBeat时间
+         * @return size_t
+         */
+        inline time_t getHeartBeatTime() const { return _iHeartBeatTime; }
+
+        /**
+         * 设置allow deny次序
+         * @param eOrder
+         */
+        inline void setOrder(EOrder eOrder) {  _eOrder = eOrder; }
+
+        /**
+         * 设置允许ip
+         * @param vtAllow
+         */
+        inline void setAllow(const vector<string> & vtAllow) { _vtAllow = vtAllow; }
+
+        /**
+         * 设置禁止ip
+         * @param vtDeny
+         */
+        inline void setDeny(const vector<string> & vtDeny) { _vtDeny = vtDeny; }
+
+        /**
+         * 获取允许ip
+         * @return vector<string>: ip列表
+         */
+        inline const vector<string> & getAllow() const { return _vtAllow; }
+
+        /**
+        * 获取禁止ip
+        * @return vector<string>: ip列表
+        */
+        inline const vector<string> & getDeny() const { return _vtDeny; }
+
+        /**
+        * 获取allow deny次序
+        * @return EOrder
+        */
+        inline EOrder getOrder() const { return _eOrder; }
+
+        /**
+         * 是否Ip被允许
+         * @param ip
+         * @return bool
+         */
+        bool isIpAllow(const string & ip) const;
+
+        /**
+         * 是否超过了最大连接数
+         * @return bool
+         */
+        inline bool isLimitMaxConnection() const { return (_iCurConns + 1 > (int)_iMaxConns) || (_iCurConns + 1 > (int)((uint32_t)1 << 22) - 1); }
+
+        /**
+         * 减少当前连接数
+         */
+        inline void decreaseNowConnection() { --_iCurConns; }
+
+        /**
+         * 增加当前连接数
+         */
+        inline void increaseNowConnection() { ++_iCurConns; }
+
+        /**
+         * 获取所有链接状态
+         * @return ConnStatus
+         */
+        inline vector<ConnStatus> getConnStatus() { return _epollServer->getConnStatus(_s.getfd()); }
+
+        /**
+         * 获取当前连接数
+         * @return int
+         */
+        inline int getNowConnection() const { return _iCurConns; }
+
+        /**
+         * 获取服务
+         * @return TC_EpollServer*
+         */
+        inline TC_EpollServer *getEpollServer() const { return _epollServer; }
+
+        /**
+         * 获取对应的网络线程
+         * @param fd
+         * @return
+         */
+        inline NetThread *getNetThread(size_t threadIndex) const { assert(threadIndex < _netThreads.size()); return _netThreads[threadIndex]; }
+
+        /**
+         * 注册协议解析器
+         * @param pp
+         */
+        void setProtocol(const TC_NetWorkBuffer::protocol_functor & pf, int iHeaderLen = 0, const header_filter_functor & hf = echo_header_filter);
+
+        /**
+         * 获取协议解析器
+         * @return protocol_functor&
+         */
+        inline TC_NetWorkBuffer::protocol_functor & getProtocol() { return _pf; }
+
+        /**
+         * 解析包头处理对象
+         * @return protocol_functor&
+         */
+        inline header_filter_functor & getHeaderFilterFunctor() { return _hf; }
+
+        /**
+         * 获取数据buffer
+         * @return
+         */
+        inline const shared_ptr<DataBuffer> &getDataBuffer() const { return _dataBuffer; }
+
+        /**
+         * 增加数据到队列中
+         * @param recv
+         * @param force 强制必须插入(无论是否过载, 比如close事件)
+         */
+        void insertRecvQueue(const shared_ptr<RecvContext> & recv, bool force = false);
+
+        /**
+         * 接收队列的大小
+         * @return size_t
+         */
+        inline size_t getRecvBufferSize() const { return _dataBuffer->getRecvBufferSize(); }
+ 
+        /**
+         * 发送队列的大小
+         * @return size_t
+         */
+        inline size_t getSendBufferSize() const { return _iSendBufferSize; }
+
+        /**
+         * add send buffer size(个数)
+         */
+        inline void increaseSendBufferSize() { ++_iSendBufferSize; }
+
+        /**
+         * increase send buffer size
+         */
+        inline void decreaseSendBufferSize(size_t s = 1) { _iSendBufferSize.fetch_sub(s); }
+
+        /**
+         * 默认的协议解析类, 直接echo
+         * @param r
+         * @param o
+         * @return int
+         */
+        static TC_NetWorkBuffer::PACKET_TYPE echo_protocol(TC_NetWorkBuffer & r, vector<char> & o);
+
+        /**
+         * 默认的包头处理
+         * @param i
+         * @param o
+         * @return int
+         */
+        static TC_NetWorkBuffer::PACKET_TYPE echo_header_filter(TC_NetWorkBuffer::PACKET_TYPE i, vector<char> & o);
+
+        /**
+         * 获取需要过滤的包头长度
+         */
+        inline int getHeaderFilterLen() { return _iHeaderLen; }
+
+        /**
+         * 所属handle组的handle数(每个handle一个对象)
+         * @return size_t
+         */
+        inline int getHandleNum() { return _iHandleNum; }
+
+        /**
+         * Get the index-th handle object.
+         * @param index handle index, valid range [0, getHandleNum())
+         * @return
+         */
+        HandlePtr getHandle(size_t index)
+        {
+            // _handles holds exactly _iHandleNum elements (see setHandle), so a
+            // valid index is strictly less than _iHandleNum; the previous
+            // "index <= _iHandleNum" allowed an out-of-bounds access when
+            // index == _iHandleNum.
+            assert(index < _iHandleNum);
+    //                assert(getEpollServer()->isMergeHandleNetThread());
+            return _handles[index];
+        }
+
+        /*
+         * 设置服务端积压缓存的大小限制(超过大小启用)
+         */
+        inline void setBackPacketBuffLimit(size_t iLimitSize) { _iBackPacketBuffLimit = iLimitSize; }
+
+        /**
+         * 获取服务端回包缓存的大小限制(超过大小启用)
+         */
+        inline size_t getBackPacketBuffLimit() const { return _iBackPacketBuffLimit; }
+
+        /*
+         * 设置服务端每5s最低发送字节
+         */
+        inline void setBackPacketBuffMin(size_t iMinLimit) { _iBackPacketBuffMin = iMinLimit; }
+
+        /**
+         * 获取服务端5/s最低发送字节
+         */
+        inline size_t getBackPacketBuffMin() const { return _iBackPacketBuffMin; }
+
+        /**
+         * 获取handles
+         */
+        inline vector<HandlePtr> & getHandles() { return _handles; }
+
+        /**
+         * 是否是队列模式(默认是False的)
+         */
+        inline bool isQueueMode() const { return _dataBuffer->isQueueMode(); }
+
+        /**
+         * 开启队列模式(同一个连接的请求, 落在同一个handle处理线程中)
+         */
+        inline void enableQueueMode() { return _dataBuffer->enableQueueMode(); }
+
+//        /**
+//         * 设置close回调函数
+//         */
+//        inline void setOnClose(const close_functor & f) { _closeFunc = f; }
+
+        /**
+         * 设置accesskey & secretKey & callback
+         * @param ak
+         * @param sk
+         */
+        inline void setAkSkCallback(const std::string & accesskey, const std::string & secretKey, const TC_Transceiver::onserververifyauth_callback &onverify)
+        {
+            _accessKey = accesskey;
+            _secretKey = secretKey;
+            _onVerifyCallback = onverify;
+        }
+
+        /**
+         * 检查是否key是否匹配
+         * @param accesskey
+         * @param secretKey
+         * @return
+         */
+        inline bool checkAkSk(const std::string & accesskey, const std::string & secretKey)
+        {
+            return accesskey == _accessKey && secretKey == _secretKey;
+        }
+
+        /**
+         * 根据accesskey 获取secretkey, 如果accesskey不等, 则返回空
+         * @param ak
+         * @return
+         */
+        inline std::string getSk(const std::string & accesskey) const
+        {
+            return (_accessKey == accesskey) ? _secretKey : "";
+        }
+
+        /**
+         * set openssl ctx
+         * @param ctx
+         */
+        inline void setSSLCtx(const shared_ptr<TC_OpenSSL::CTX> &ctx) { _ctx = ctx; }
+
+        /**
+         * get ssl ctx
+         * @return
+         */
+	    shared_ptr<TC_OpenSSL::CTX> getSSLCtx() { return _ctx; };
+
+        /**
+         * 构造函数
+         */
+        BindAdapter(TC_EpollServer *epollServer);
+
+        /**
+         * 设置adapter name
+         * @param name
+         */
+        void setName(const string & name) { _name = name; }
+
+        /**
+         * 设置endpoint
+         * @param str
+         */
+        inline void setEndpoint(const string & str) { _ep.parse(str); }
+
+        /**
+         * 是否是UDP端口
+         */
+        inline bool isUdp() const { return _ep.isUdp(); }
+
+        /**
+         * 初始化处理线程,线程将会启动
+         */
+        template<typename T, typename ...Args>
+        void setHandle(size_t n, Args&&... args)
+        {
+            if (!_handles.empty()) {
+                getEpollServer()->error("[BindAdapter::setHandle] handle is not empty!");
+                return;
+            }
+
+            _iHandleNum = n;
+
+            _dataBuffer.reset(new DataBuffer(_iHandleNum));
+
+            for (size_t i = 0; i < _iHandleNum; ++i)
+            {
+                HandlePtr handle = std::make_shared<T>(args...);
+
+                handle->setHandleIndex(i);
+
+                handle->setEpollServer(this->getEpollServer());
+
+                handle->setBindAdapter(this);
+
+                _handles.push_back(handle);
+            }
+        }
+
+	protected:
+    	/**
+    	 * 绑定服务器
+    	 */
+		void bind();
+
+        friend class TC_EpollServer;
+    public:
+
+        //统计上报的对象
+        PropertyReport *_pReportQueue = NULL;
+        PropertyReport *_pReportConRate = NULL;
+        PropertyReport *_pReportTimeoutNum = NULL;
+
+    protected:
+        /**
+         * epoller指针
+         */
+        TC_EpollServer*            _epollServer = NULL;
+
+        /**
+         * 加锁
+         */
+        mutable std::mutex      _mutex;
+
+        /**
+         * Adapter所用的HandleGroup
+         */
+        vector<HandlePtr>       _handles;
+
+        /**
+         * 协议解析
+         */
+        TC_NetWorkBuffer::protocol_functor _pf;
+
+        /**
+         * 首个数据包包头过滤
+         */
+        header_filter_functor   _hf;
+
+        /**
+         * adapter的名字
+         */
+        string                  _name;
+
+        /**
+         * net threads
+         */ 
+        vector<NetThread*>      _netThreads;
+
+        /**
+         * 监听fd
+         */
+        TC_Socket               _s;
+
+        /**
+         * 绑定的IP
+         */
+        TC_Endpoint             _ep;
+
+        /**
+         * epoll info
+         */
+        shared_ptr<TC_Epoller::EpollInfo>  _info;
+
+        /**
+         * 最大连接数
+         */
+        int                     _iMaxConns;
+
+        /**
+         * 当前连接数
+         */
+        std::atomic<int>        _iCurConns;
+
+        /**
+         * Handle个数
+         */
+        size_t                  _iHandleNum;
+
+        /**
+         * 允许的Order
+         */
+        EOrder         			_eOrder;
+
+        /**
+         * 允许的ip
+         */
+        vector<string>          _vtAllow;
+
+        /**
+         * 禁止的ip
+         */
+        vector<string>          _vtDeny;
+
+        /**
+         * 发送队列数据总个数
+         */
+        atomic<size_t>          _iSendBufferSize {0};
+
+        /**
+         * 数据buffer
+         */
+        shared_ptr<DataBuffer>  _dataBuffer;
+
+        /**
+         * 队列最大容量
+         */
+        int                     _iQueueCapacity;
+
+        /**
+         * 消息超时时间(从入队列到出队列间隔)(毫秒)
+         */
+        int                     _iQueueTimeout;
+
+        /**
+         * 首个数据包包头长度
+         */
+        int                     _iHeaderLen;
+
+        /**
+         * 上次心跳发送时间
+         */
+        time_t         			_iHeartBeatTime;
+
+        /**
+         * 协议名称,缺省为"tars"
+         */
+        string                  _protocolName;
+
+        /**
+         * 回包缓存限制大小
+         */
+        size_t                  _iBackPacketBuffLimit = 1024*1024;
+
+        /**
+         * 回包速度最低限制(5/s), 默认1K
+         */
+        size_t                  _iBackPacketBuffMin = 1024;
+
+        /**
+         * verify auth callback
+         */         
+        TC_Transceiver::onserververifyauth_callback _onVerifyCallback;
+
+        /**
+         * 该obj的AK SK
+         */
+        std::string             _accessKey;
+        std::string             _secretKey;
+
+        /**
+         * 是否手工监听
+         */
+        bool					_manualListen = false;
+//        /**
+//         * 创建一个udp句柄, 用来通知事后listen
+//         */
+//        TC_Socket               _udpNotify;
+//
+//        /**
+//         * 通知
+//         */
+//        TC_Epoller::EpollInfo   *_notifyInfo = NULL;
+
+        //连接关闭的回调函数
+//        close_functor           _closeFunc;
+
+        /**
+        * ssl ctx
+        */
+        shared_ptr<TC_OpenSSL::CTX> _ctx;
+    };
+
+    ////////////////////////////////////////////////////////////////////////////
+    class NetThread : public TC_Thread
+    {
+    public:
+        /**
+         * 构造函数
+         */
+        NetThread(int index, TC_EpollServer *epollServer);
+
+        /**
+         * 析构函数
+         */
+        virtual ~NetThread();
+
+        /**
+         * 获取网络线程的index
+        * @return
+        */
+        inline int getIndex() const { return _threadIndex; }
+
+        /**
+         * connection list
+         * @return
+         */
+        inline const shared_ptr<ConnectionList> &getConnectionList() const { return _list; }
+
+        /**
+         * 网络线程执行函数
+         */
+        virtual void run();
+
+        /**
+         * 停止网络线程
+         */
+        void terminate();
+
+        /**
+         * 生成epoll
+         */
+        void createEpoll(uint32_t maxAllConn);
+
+        /**
+         * 已经启动, 进入网络调度状态
+         * @return
+         */
+        bool isReady() const;
+
+        /**
+         * 获取Epoller对象
+         * @return TC_Epoller*
+         */
+        inline TC_Epoller *getEpoller() { return _epoller; }
+
+        /**
+         * 设置初始化和处理对象
+         * @param initialize
+         * @param handle
+         */
+        void setInitializeHandle(std::function<void()> initialize, std::function<void()> handle);
+
+        /**
+         * 唤醒网络线程
+         */
+        inline void notify() { assert(_scheduler); _scheduler->notify(); }
+
+        /**
+         * 关闭连接
+         * @param uid
+         */
+        void close(const shared_ptr<RecvContext> & data);
+
+        /**
+        * 发送数据
+        * @param uid
+        * @param s
+        */
+        void send(const shared_ptr<SendContext> & data);
+
+        /**
+         * 获取某一监听端口的连接数
+         * @param lfd
+         *
+         * @return vector<ConnStatus>
+         */
+        inline vector<ConnStatus> getConnStatus(int lfd) { return _list->getConnStatus(lfd); }
+
+        /**
+         * 获取连接数
+         *
+         * @return size_t
+         */
+        inline size_t getConnectionCount() { return _list->size(); }
+
+        /**
+         *设置空连接超时时间
+         */
+        inline void setEmptyConnTimeout(int timeout) { 
+            int emptyCheckTimeout = (timeout >= MIN_EMPTY_CONN_TIMEOUT) ? timeout : MIN_EMPTY_CONN_TIMEOUT;
+
+            _list->setEmptyConnTimeout(emptyCheckTimeout); 
+        }
+
+        /**
+         *设置udp的接收缓存区大小,单位是B,最小值为8192,最大值为DEFAULT_RECV_BUFFERSIZE
+         */
+        inline void setUdpRecvBufferSize(size_t nSize = DEFAULT_RECV_BUFFERSIZE) {
+            _nUdpRecvBufferSize = (nSize >= 8192 && nSize <= DEFAULT_RECV_BUFFERSIZE) ? nSize : DEFAULT_RECV_BUFFERSIZE;
+        }
+
+    protected:
+
+        /**
+         * 获取连接
+         * @param id
+         *
+         * @return ConnectionPtr
+         */
+        inline Connection *getConnectionPtr(uint32_t uid) { assert(_list); return  _list->get(uid); }
+
+        /**
+         * 添加tcp链接
+         * @param cPtr
+         * @param iIndex
+         */
+        void addTcpConnection(Connection *cPtr);
+
+        /**
+         * 添加udp连接
+         * @param cPtr
+         * @param index
+         */
+        void addUdpConnection(Connection *cPtr);
+
+        /**
+         * 删除链接
+         * @param cPtr
+         * @param bEraseList 是否是超时连接的删除
+         * @param closeType  关闭类型,0:表示客户端主动关闭;1:服务端主动关闭;2:连接超时服务端主动关闭
+         */
+        void delConnection(Connection *cPtr, bool bEraseList = true, EM_CLOSE_T closeType = EM_CLIENT_CLOSE);
+
+        /**
+         * 处理管道消息
+         */
+        void processPipe();
+
+        /**
+         * 空连接超时时间
+         */
+        inline int getEmptyConnTimeout() const { return _list->getEmptyConnTimeout(); }
+
+        /**
+         *是否空连接检测
+         */
+        inline bool isEmptyConnCheck() const { return _list->getEmptyConnTimeout() > 0; }
+
+        /**
+         * 关联adapter
+         * @param adapter
+         */
+        inline void addAdapter(BindAdapter* adapter) { _adapters.push_back(adapter); }
+
+        /**
+         * 通知关闭连接
+         * @param fd
+         */
+		void notifyCloseConnectionList(const shared_ptr<BindAdapter> &adapter);
+
+        friend class BindAdapter;
+        friend class TC_EpollServer;
+        friend class ConnectionList;
+
+    private:
+
+        /**
+         * scheduler
+         */
+		shared_ptr<TC_CoroutineScheduler> _scheduler;
+
+        /**
+         * epoller
+         */ 
+        TC_Epoller*            _epoller = NULL;
+
+        /**
+         * net线程的id
+         */
+        size_t                  _threadId;
+
+        /**
+         * 线程索引
+         */
+        int                     _threadIndex;
+
+		/**
+		 * 服务
+		 */
+		TC_EpollServer *        _epollServer = NULL;
+
+		/**
+         * 关联的adapters
+         */
+        vector<BindAdapter*>    _adapters;
+
+        /**
+         * 管理的连接链表
+         */
+        shared_ptr<ConnectionList> _list;
+
+        /**
+         * 发送队列
+         */
+        send_queue              _sbuffer;
+
+        // /**
+        //  * 空连接超时时间,单位是毫秒,默认值2s,
+        //  * 该时间必须小于等于adapter自身的超时时间
+        //  */
+        // int                     _iEmptyCheckTimeout;
+
+        /**
+         * udp连接时接收包缓存大小,针对所有udp接收缓存有效
+         */
+        size_t                  _nUdpRecvBufferSize;
+
+        /**
+         * 初始化对象
+         */
+        std::function<void()>   _initialize;
+
+        /**
+         * 处理对象
+         */
+        std::function<void()>   _handle;
+    };
+
+    /**
+    * 具体处理基类
+    */
+    class Handle// :  public TC_HandleBase
+    {
+    public:
+        /**
+         * 构造, 默认没有请求, 等待10s
+         */
+        Handle();
+
+        /**
+         * 析构函数
+         */
+        virtual ~Handle();
+
+        /**
+         * 获取服务
+         * @return TC_EpollServer*
+         */
+        inline TC_EpollServer *getEpollServer() const { return _epollServer; };
+
+        /**
+         * 获取adapter
+         * @return
+         */
+        inline BindAdapter *getBindAdapter() const { return _bindAdapter; }
+
+        /**
+         * 获取Handle的索引(0~handle个数-1)
+         * @return
+         */
+        inline uint32_t getHandleIndex() const { return _handleIndex; }
+
+        /**
+         * 设置网络线程
+         */
+        inline void setNetThread(NetThread *netThread) { _netThread = netThread; }
+
+        /**
+         * 获取网络线程
+         * @return
+         */
+        inline NetThread *getNetThread() { return _netThread; }
+
+        /**
+         * 结束
+         */
+        void terminate();
+
+        /**
+         * 设置数据队列
+         * @param data
+         */
+        inline void setDataBuffer(const shared_ptr<DataBuffer> &data) { _dataBuffer =  data; }
+
+        /**
+         * 协程处理一次
+         */
+        void handleOnceCoroutine();
+
+        /**
+         * 线程处理一次
+         */
+        void handleOnceThread();
+
+        /**
+         * 协程循环处理方法
+         */
+        void handleLoopCoroutine();
+
+        /**
+         * 线程循环处理方法
+         */
+        void handleLoopThread();
+
+        /**
+         * handle是否启动准备好(协程进入调度状态)
+         * @return
+         */
+        bool isReady() const;
+    public:
+        /**
+         * 发送数据
+         * @param stRecvData
+         * @param sSendBuffer
+         */
+        inline void sendResponse(const shared_ptr<SendContext> & data) { _epollServer->send(data); }
+
+        /**
+         * 关闭链接(tcp连接才有效)
+         * @param stRecvData
+         */
+        inline void close(const shared_ptr<RecvContext> & data) { _epollServer->close(data);}
+
+        /**
+         * 对象初始化
+         */
+        virtual void initialize() { };
+
+        /**
+         * 唤醒handle对应的处理线程
+         */
+        virtual void notifyFilter();
+
+        /**
+        * 心跳(每处理完一个请求或者等待请求超时都会调用一次)
+        */
+        virtual void heartbeat() { }
+
+        /**
+         * 协程处理
+         */
+        void handleCoroutine();
+
+        /**
+         * 设置等待队列的时间
+         * @param iWaitTime
+         */
+        void setWaitTime(uint32_t iWaitTime);
+    protected:
+        /**
+         * 处理函数
+         * @param stRecvData: 接收到的数据
+         */
+        virtual void handle(const shared_ptr<RecvContext> & data) = 0;
+
+        /**
+         * 处理超时数据, 即数据在队列中的时间已经超过
+         * 默认直接关闭连接
+         * @param stRecvData: 接收到的数据
+         */
+        virtual void handleTimeout(const shared_ptr<RecvContext> & data);
+
+        /**
+         * 处理连接关闭通知,包括
+         * 1.close by peer
+         * 2.recv/send fail
+         * 3.close by timeout or overload
+         * @param stRecvData:
+         */
+        virtual void handleClose(const shared_ptr<RecvContext> & data);
+
+        /**
+         * 处理overload数据 即数据队列中长度已经超过允许值
+         * 默认直接关闭连接
+         * @param stRecvData: 接收到的数据
+         */
+        virtual void handleOverload(const shared_ptr<RecvContext> & data);
+
+        /**
+         * 处理异步回调队列
+         */
+        virtual void handleAsyncResponse() { }
+
+        /**
+         * handleFilter拆分的第二部分,处理用户自有数据
+         * 非游戏逻辑可忽略bExpectIdle参数
+         */
+        virtual void handleCustomMessage(bool bExpectIdle = false) { }
+
+        /**
+         * 是否所有的Adpater队列都为空
+         * @return bool
+         */
+        virtual bool allAdapterIsEmpty();
+
+        /**
+         * 是否所有的servant都没有resp消息待处理
+         * @return bool
+         */
+        virtual bool allFilterIsEmpty();
+
+        /**
+         * 设置服务
+         * @param pEpollServer
+         */
+        inline void setEpollServer(TC_EpollServer *pEpollServer) { _epollServer = pEpollServer; }
+
+        /**
+         * 设置Adapter
+         * @param pEpollServer
+         */
+        inline void setBindAdapter(BindAdapter *bindAdapter) { _bindAdapter = bindAdapter; }
+
+        /**
+         * 设置index
+         * @param index
+         */
+        inline void setHandleIndex(uint32_t index) { _handleIndex = index; }
+
+        /**
+         * 友元类
+         */
+        friend class BindAdapter;
+        friend class TC_EpollServer;
+    protected:
+        /**
+         * 服务
+         */
+        TC_EpollServer *        _epollServer;
+
+        /**
+         * handle对应的网路线程(网络线程和handle线程合并的情况下有效)
+         */
+        NetThread *             _netThread = NULL;
+
+        /**
+         * 所属handle组
+         */
+        BindAdapter *           _bindAdapter;
+
+        /**
+         * 数据队列
+         */
+        shared_ptr<DataBuffer>  _dataBuffer;
+
+        /**
+         * Handle的索引
+         */
+        uint32_t                _handleIndex;
+
+//        /**
+//         * 结束
+//         */
+//        bool                    _terminate =  false;
+
+        /**
+         * 协程模式下有效
+         */
+		shared_ptr<TC_CoroutineScheduler> _scheduler;
+    };
+
+    /**
+     * 几种服务模式
+     */
+    enum SERVER_OPEN_COROUTINE
+    {
+	    //独立网路线程 + 独立handle线程: 网络线程负责收发包, 通过队列唤醒handle线程中处理
+        NET_THREAD_QUEUE_HANDLES_THREAD   = 0,
+	    //独立网路线程组 + 独立handle线程: 网络线程负责收发包, 通过队列唤醒handle线程中处理, handle线程中启动协程处理
+        NET_THREAD_QUEUE_HANDLES_CO       = 1,
+        //合并网路线程 + handle线程(线程个数以处理线程配置为准, 网络线程配置无效): 连接分配到不同线程中处理(如果是UDP, 则网络线程竞争接收包), 这种模式下延时最小, 相当于每个包的收发以及业务处理都在一个线程中
+        NET_THREAD_MERGE_HANDLES_THREAD        = 2,
+	    //合并网路线程 + handle线程(线程个数以处理线程配置为准, 网络线程配置无效): 连接分配到不同线程中处理(如果是UDP, 则网络线程竞争接收包), 每个包会启动协程来处理
+        NET_THREAD_MERGE_HANDLES_CO            = 3
+    };
+
+    /**
+     * 构造函数
+     */
+    TC_EpollServer(unsigned int iNetThreadNum=1);
+
+    /**
+     * 析构函数
+     */
+    virtual ~TC_EpollServer();
+
+    /**
+     *设置空连接超时时间(timeout>0: 启动, timeout<=0: 不做连接超时控制)
+     */
+    void setEmptyConnTimeout(int timeout);
+
+    /**
+     * 设置本地日志
+     * @param plocalLogger
+     */
+    inline void setLocalLogger(RollWrapperInterface *pLocalLogger) { _pLocalLogger = pLocalLogger; }
+
+    /**
+     * 获取server type
+     * @return
+     */
+    inline SERVER_OPEN_COROUTINE getOpenCoroutine() const { return _openCoroutine; }
+
+    /**
+     * set server type
+     * @param serverType
+     */
+    void setOpenCoroutine(SERVER_OPEN_COROUTINE serverPatten);
+
+    /**
+     * 设置单线程中协程使用的堆栈大小
+     * 注意单线程协程消耗的内存: iPoolSize * iStackSize
+     * @param iPoolSize:  协程池大小(最多多少个协程) (默认服务内置的是1w个)
+     * @param iStackSize: 每个协程的堆栈大小(比如64k: 64*1024)
+     */
+	void setCoroutineStack(uint32_t iPoolSize, size_t iStackSize);
+
+	/**
+	 * 获取协程池大小
+	 * @return
+	 */
+	uint32_t getCoroutinePoolSize() const { return _iCoroutinePoolSize; }
+
+	/**
+	 * 获取协程堆栈对消
+	 * @return
+	 */
+	size_t getCoroutineStackSize() const { return _iCoroutineStackSize; }
+
+	/**
+     * 设置线程数量(非merge的情况下有效)
+     * @param threadNum
+     */
+    void setThreadNum(int threadNum)
+    {
+        _threadNum = threadNum;
+        if (_threadNum <= 0) _threadNum = 1;
+        if (_threadNum > 15) _threadNum = 15;
+    }
+
+    /**
+     * 创建BindAdpater
+     * @param name: adapter 名称
+     * @param ep: 服务绑定地址
+     * @param handleNum: handle处理线程个数
+     * @param args: Handle构造函数需要的参数
+     */
+    template<typename T, typename ...Args>
+    BindAdapterPtr createBindAdapter(const string &name, const string &ep, size_t handleNum, Args&&... args)
+    {
+        TC_EpollServer::BindAdapterPtr adapter = getBindAdapter(name);
+        if(adapter)
+        {
+            throw TC_EpollServer_Exception("[TC_EpollServer::createBindAdapter] adapter '" + name +"' has exists.");
+        }
+
+		adapter = std::make_shared<TC_EpollServer::BindAdapter>(this);
+
+		adapter->setName(name);
+		adapter->setEndpoint(ep);
+		adapter->setMaxConns(10240);
+		adapter->setHandle<T>(handleNum, args...);
+
+        return adapter;
+    }
+
+    /**
+     * 绑定监听socket
+     * @param ls
+     */
+    int bind(BindAdapterPtr &lsPtr);
+
+    /**
+     * 初始化handle对象
+     */
+    void initHandle();
+
+    /**
+     * 启动业务处理线程
+     */
+    void startHandle();
+
+    /**
+     * 生成epoll
+     * Generate epoll
+     */
+    void createEpoll();
+
+    /**
+     * 等待处理线程/协程都启动成功
+     */
+    void waitForReady();
+
+    /**
+     * 运行
+     */
+    void waitForShutdown();
+
+    /**
+     * 停止服务
+     */
+    void terminate();
+
+    /**
+     * 终止
+     */    
+    inline bool isTerminate() const { return _epoller == NULL || _epoller->isTerminate(); }
+
+    /**
+     * 获取epoller地址
+     * @return
+     */
+    inline TC_Epoller *getEpoller() { return _epoller; }
+
+    /**
+     * 根据名称获取BindAdapter
+     * @param sName
+     * @return BindAdapterPtr
+     */
+    BindAdapterPtr getBindAdapter(const string &sName);
+
+    /**
+     * 获取所有adatapters
+     * @return
+     */
+    vector<BindAdapterPtr> getBindAdapters();
+
+    /**
+     * 关闭连接
+     * @param uid
+     */
+    void close(const shared_ptr<RecvContext> &data);
+
+    /**
+     * 发送数据
+     * @param uid
+     * @param s
+     */
+    void send(const shared_ptr<SendContext> &data);
+
+    /**
+     * 获取某一监听端口的连接数
+     * @param lfd
+     *
+     * @return vector<ConnStatus>
+     */
+    vector<ConnStatus> getConnStatus(int lfd);
+
+    /**
+     * 获取监听socket信息
+     *
+     * @return map<int,ListenSocket>
+     */
+    unordered_map<int, BindAdapterPtr> getListenSocketInfo();
+
+    /**
+     * 获取所有连接的数目
+     *
+     * @return size_t
+     */
+    size_t getConnectionCount();
+
+    /**
+     * 记录日志
+     * @param s
+     */
+    virtual void debug(const string &s) const;
+
+    /**
+     * INFO日志
+     * INFO LOG
+     * @param s
+     */
+    virtual void info(const string &s) const;
+
+    /**
+     * taf日志
+     * @param s
+     */
+    virtual void tars(const string &s) const;
+
+    /**
+    * 记录错误日志
+    * @param s
+    */
+    virtual void error(const string &s) const;
+
+    /**
+     * 获取网络线程的数目
+     */
+    inline unsigned int getNetThreadNum() const { return _netThreads.size(); }
+
+    /**
+     * 获取网络线程的指针集合
+     */
+    inline const vector<NetThread *> &getNetThread() const { return _netThreads; }
+
+    /**
+     * 停止线程
+     */
+    void stopThread();
+
+    /**
+     * 获取所有业务线程的数目
+     */
+    size_t getLogicThreadNum();
+
+    //回调给应用服务
+    typedef std::function<void(TC_EpollServer *)> application_callback_functor;
+
+    //网络线程发送心跳的函数
+    //Function for network threads to send heartbeats
+    typedef std::function<void(const string &)> heartbeat_callback_functor;
+
+    /**
+     * 设置waitForShutdown线程回调的心跳
+     * @param hf [description]
+     */
+    void setCallbackFunctor(const application_callback_functor &hf) { _hf = hf; }
+
+	/**
+	 * 设置退出前的回调
+	 * @param hf [description]
+	 */
+	void setDestroyAppFunctor(const application_callback_functor &qf) { _qf = qf; }
+
+	// 接收新的客户端链接时的回调
+	typedef std::function<void (TC_EpollServer::Connection*)> accept_callback_functor;
+
+	/*
+	 * 设置接收链接的回调
+	 */
+	void setOnAccept(const accept_callback_functor& f) { _acceptFunc = f; }
+
+    /**
+     * 设置netthread网络线程发送心跳的函数
+     * Function for setting netthreaded network threads to send heartbeats
+     * @param hf [description]
+     */
+    void setHeartBeatFunctor(const heartbeat_callback_functor &heartFunc) { _heartFunc = heartFunc; }
+
+    /**
+     * 心跳函数
+     * @return
+     */
+    heartbeat_callback_functor &getHeartBeatFunctor() { return _heartFunc; }
+
+    friend class BindAdapter;
+
+protected:
+
+    /**
+     * accept callback
+     * @param data
+     */
+    bool acceptCallback(const shared_ptr<TC_Epoller::EpollInfo> &info, weak_ptr<BindAdapter> adapter);
+
+    /**
+     * listen callback
+     * @param data
+     */
+    void listenCallback(weak_ptr<BindAdapter> adapterPtr);
+
+    /**
+     * 接收句柄
+     * @param fd
+     * @return
+     */
+    bool accept(int fd, int domain = AF_INET);
+
+    /**
+     * 通知线程ready了
+     */
+	void notifyThreadReady();
+
+    static void applicationCallback(TC_EpollServer *epollServer);
+
+private:
+    /**
+     * server 模式
+     */
+    SERVER_OPEN_COROUTINE _openCoroutine = NET_THREAD_QUEUE_HANDLES_THREAD;
+
+//    /**
+//     * 是否已经结束
+//     */
+//    bool                    _terminate = false;
+
+    /**
+     * 网络线程
+     */
+    std::vector<NetThread *> _netThreads;
+
+    /**
+     * epoll
+     */
+    TC_Epoller *_epoller = NULL;
+
+    /*
+     * 线程数量
+     */
+    int _threadNum = 1;
+
+    /**
+     * 已经准备ready的线程数
+     */
+    std::atomic<int> _readyThreadNum{0};
+
+	/**
+	 * 锁
+	 */
+	std::mutex             _readyMutex;
+
+	/**
+	 * 条件变量, 用来等待网络线程启动
+	 */
+	std::condition_variable _readyCond;
+
+		/**
+     * handle处理的线程池(merge模式下无效)
+     */
+    TC_ThreadPool _handlePool;
+
+    /**
+     * 本地循环日志
+     */
+    RollWrapperInterface *_pLocalLogger;
+
+    /**
+     *
+     */
+    vector<BindAdapterPtr> _bindAdapters;
+
+    /**
+     * 监听socket
+     */
+    unordered_map<int, BindAdapterPtr> _listeners;
+
+    /**
+     * 协程池大小
+     */
+	uint32_t _iCoroutinePoolSize = 10000;
+
+	/**
+	 * 堆栈大小
+	 */
+	size_t _iCoroutineStackSize = 64*1024;
+
+    /**
+     * 应用回调
+     */
+    application_callback_functor _hf;
+
+    /**
+     * 退出前的回调
+     */
+	application_callback_functor _qf;
+
+    /**
+     * 发送心跳的函数
+     * Heartbeat Sending Function
+     */
+    heartbeat_callback_functor _heartFunc;
+
+    /**
+     * 接收链接的回调函数
+     */
+    accept_callback_functor _acceptFunc;
+};
+
+typedef TC_AutoPtr<TC_EpollServer> TC_EpollServerPtr;
+
+}
+

+ 513 - 301
util/include/util/tc_epoller.h

@@ -1,301 +1,513 @@
-/**
- * Tencent is pleased to support the open source community by making Tars available.
- *
- * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
- *
- * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
- * in compliance with the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/BSD-3-Clause
- *
- * Unless required by applicable law or agreed to in writing, software distributed 
- * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
- * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
- * specific language governing permissions and limitations under the License.
- */
-
-#ifndef __TC_EPOLLER_H_
-#define __TC_EPOLLER_H_
-
-#include "util/tc_platform.h"
-#include "util/tc_socket.h"
-#include <cassert>
-
-#if TARGET_PLATFORM_IOS
-#include "sys/event.h"
-
-const int EPOLLIN = 0x0001;
-const int EPOLLOUT = 0x0002;
-const int EPOLLERR = 0x0004;
-
-typedef kevent64_s epoll_event;
-
-#else
-#include "sys/epoll.h"
-#endif
-
-namespace tars
-{
-/////////////////////////////////////////////////
-/** 
- * @file  tc_epoller.h 
- * @brief  epoll操作封装类 
- * @brief  Epoll operation encapsulation class
- */
-/////////////////////////////////////////////////
-
-/**
-* @brief epoll异常类
-* @brief epoll exception class
-*/
-struct TC_Epoller_Exception : public TC_Exception
-{
-   TC_Epoller_Exception(const string &buffer, int err) : TC_Exception(buffer, err) {};
-   ~TC_Epoller_Exception() {};
-};
-
-/**
- * @brief epoller操作类,已经默认采用了EPOLLET方式做触发 
- * @brief epoller operation class, EPOLLET has been used by default for triggering 
- */
-class TC_Epoller
-{
-public:
-	/**
-	 * @brief 通知epoll从wait中醒过来 
-	 * @brief Notice epoll to wake up from 'wait'
-	 */
-    class NotifyInfo
-    {
-	public:
-        NotifyInfo();
-		~NotifyInfo();
-
-		/**
-		 * 初始化
-		 * Initialization
-		 */	
-        void init(TC_Epoller *ep);
-
-		/**
-		 * 添加关联数据
-		 * Add corresponding data
-		 */ 
-		void add(uint64_t data);
-
-		/**
-		 * 通知notify醒过来
-		 * Notice notify to wake up
-		 */ 
-        void notify();
-
-		/**
-		 * 释放掉
-		 * Diposit
-		 */ 
-        void release();
-
-		/**
-		 * 获取通知fd
-		 * Get notifyFd
-		 */ 
-		int notifyFd();
-
-
-	protected:
-        //通知fd
-        TC_Socket _notify;
-		TC_Epoller *_ep;
-
-		/*Events associated with the notification handle*/
-		uint64_t _data;		//关联到通知句柄的事件
-
-    };
-
-	/**
-	 * @brief 构造函数. 
-	 * @brief Constructor Function
-	 *  
-     * @param bEt 默认是ET模式,当状态发生变化的时候才获得通知
-	 * @param bEt The default is et mode, which is notified when the status changes
-	 */
-	TC_Epoller();
-
-	/**
-     * @brief 析够函数.
-	 * @brief Destructor
-	 */
-	~TC_Epoller();
-
-	/**
-	 * @brief 生成epoll句柄. 
-	 * @brief Generate epoll handle.
-	 *  
-     * @param max_connections epoll服务需要支持的最大连接数
-	 * @param max_connections Maximum number of connections the epoll service needs to support
-	 */
-	void create(int max_connections);
-
-	/**
-	 * disable et模式
-	 */
-	void enableET(bool enable) { _enableET = enable; };
-
-	/**
-	 * @brief 释放资源
-	 *  
-     * @param 
-	 */
-	void close();
-
-	/**
-	 * @brief 添加监听句柄. 
-	 * @brief Add listening handle.
-	 *  
-     * @param fd    句柄
-	 * @param fd    handle
-     * @param data  辅助的数据, 可以后续在epoll_event中获取到
-	 * @param data  auxiliary data that can be obtained in epoll_event subsequently
-     * @param event 需要监听的事件EPOLLIN|EPOLLOUT
-	 * @param event Events to be listened on EPOLLIN|EPOLLOUT
-     *              
-	 */
-	int add(SOCKET_TYPE fd, uint64_t data, int32_t event);
-
-	/**
-	 * @brief 修改句柄事件. 
-	 * @brief Modify handle event
-	 *  
-     * @param fd    句柄
-	 * @param fd    handle
-     * @param data  辅助的数据, 可以后续在epoll_event中获取到
-	 * @param data  auxiliary data that can be obtained in epoll_event subsequently
-     * @param event 需要监听的事件EPOLLIN|EPOLLOUT
-	 * @param event Events to be listened on EPOLLIN|EPOLLOUT
-	 */
-	int mod(SOCKET_TYPE fd, uint64_t data, int32_t event);
-
-	/**
-	 * @brief 删除句柄事件. 
-	 * @brief Delete handle event.
-	 *  
-     * @param fd    句柄
-	 * @param fd    handle
-     * @param data  辅助的数据, 可以后续在epoll_event中获取到
-	 * @param data  auxiliary data that can be obtained in epoll_event subsequently
-     * @param event 需要监听的事件EPOLLIN|EPOLLOUT
-	 * @param event Events to be listened on EPOLLIN|EPOLLOUT
-	 */
-	int del(SOCKET_TYPE fd, uint64_t data, int32_t event);
-
-	/**
-	 * @brief 等待时间. 
-	 * @brief wait time
-	 *  
-	 * @param millsecond 毫秒 
-     * @return int       有事件触发的句柄数
-	 * @return int       Number of handles triggered by events
-	 */
-	int wait(int millsecond);
-
-	/**
-     * @brief 获取被触发的事件.
-	 * @brief Get the triggered handle
-	 *
-	 * @return struct epoll_event&被触发的事件
-	 * @return Struct epoll_event& triggered event
-	 */
-	epoll_event& get(int i);// { assert(_pevs != 0); return _pevs[i]; }
-
-	/**
-     * @brief 是否有读事件
-	 * @brief whether it have event to read
-	 *
-	 * @return
-	 */		
-	static bool readEvent(const epoll_event &ev);
-
-	/**
-     * @brief 是否有写事件
-	 * @brief whether it have event to write
-	 *
-	 * @return
-	 */		
-	static bool writeEvent(const epoll_event &ev);
-
-	/**
-     * @brief 是否有异常事件
-	 * @brief whether it have eception event
-	 *
-	 * @return
-	 */		
-	static bool errorEvent(const epoll_event &ev);
-
-	/**
-     * @brief 获取低位/高位数据
-	 * @brief Get low/high bit data
-	 * @param high: true:高位, false:低位
-	 * @param high: true:high level, false:low level
-	 * @return
-	 */	
-	static uint32_t getU32(const epoll_event &ev, bool high);
-
-	/**
-     * @brief 获取64bit数据
-	 * @brief Get 64 bit data
-	 * @return
-	 */	
-	static uint64_t getU64(const epoll_event &ev);
-
-protected:
-
-	/**
-	 * @brief 控制epoll,将EPOLL设为边缘触发EPOLLET模式 
-	 * @brief Control epoll, set EPOLL to Edge Trigger EPOLLET mode
-     * @param fd    句柄,在create函数时被赋值
-	 * @param fd    Handle, assigned when creating function
-     * @param data  辅助的数据, 可以后续在epoll_event中获取到
-	 * @param data  auxiliary data that can be obtained in epoll_event subsequently
-     * @param event 需要监听的事件
-	 * @param event the event to be listened
-	 * @param op    EPOLL_CTL_ADD: 注册新的fd到epfd中; 
-	 * 			    EPOLL_CTL_MOD:修改已经注册的fd的监听事件; 
-	 * 			    EPOLL_CTL_DEL:从epfd中删除一个fd; 
-	 * @param op    EPOLL_CTL_ADD:Register new FD into EPFD
-	 * 				EPOLL_CTL_MOD:Modify the monitoring events for registered fd
-	 * 				EPOLL_CTL_DEL:Delete an FD from epfd
-	 *  
-	 */
- 	int ctrl(SOCKET_TYPE fd, uint64_t data, uint32_t events, int op);
-
-protected:
-	/**
-	 * 默认开启ET模式
-	 */
-	bool    _enableET = true;
-
-    /**
-     * 	epoll
-     */
-#if TARGET_PLATFORM_WINDOWS
-	void* _iEpollfd;
-#else
-    int _iEpollfd;
-#endif
-
-	/**
-     * 最大连接数
-	 * The max amount of connections
-	 */
-	int	_max_connections;
-
-	/**
-     * 事件集
-	 * Event Set
-	 */
-	epoll_event *_pevs;
-};
-
-}
-#endif
-
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#ifndef	__TC_EPOLLER_H_
+#define __TC_EPOLLER_H_
+
+#include "util/tc_platform.h"
+#include "util/tc_socket.h"
+#include "util/tc_timer.h"
+#include "util/tc_logger.h"
+#include <cassert>
+#include <unordered_set>
+#include <unordered_map>
+#include <functional>
+
+#if TARGET_PLATFORM_IOS
+#include "sys/event.h"
+
+const int EPOLLIN = 0x0001;
+const int EPOLLOUT = 0x0004;
+const int EPOLLERR = 0x0008;
+
+typedef kevent64_s epoll_event;
+
+#else
+#include "sys/epoll.h"
+#endif
+
+
+namespace tars
+{
+/////////////////////////////////////////////////
+/** 
+ * @file  tc_epoller.h 
+ * @brief  epoll操作封装类 
+ * @brief  Epoll operation encapsulation class
+ * 使用说明如下:
+ * 1 TC_Epoller协程配合, 即tc_coroutine底层其实有一个epoller对象, 协程的切换都是通过epoller来进行的, 之所以这样设计是为了保证协程中网络IO调用的时候能顺利完成协程的切换
+ * 2 TC_Epoller继承至TC_TimerBase, 即它本身也是一个定时器, 即可以用做timer
+ * 3 TC_Epoller需要调用create来完成初始化
+ * 4 TC_Epoller中有一个子类EpollInfo, 该子类非常重要, 当需要用epoll操控某个句柄是, 你可以通过epoll.createEpollInfo创建对象
+ * 5 拥有EpollInfo对象后, 可以通过它的registerCallback来注册epoll事件以及回调, 当有对应的事件产生时, 会触发回调
+ * 6 registerCallback第二参数表示需要add的事件, 如果业务不需要registerCallback, 可以用epoller的add函数去添加事件
+ * 7 EpollInfo有一个cookie方法, 可以在EpollInfo中存入一个指针和析构函数, 会在EpollInfo析构是调用, 一般用来保证cookie设置的对象有机会被释放到
+ * 8 EpollInfo它通过epoller.createEpollInfo()创建出来后, 需要持有住, 知道句柄被close后, 调用Epoller::releaseEpollInfo来释放掉
+ * 9 TC_Epoller对象的loop方法, 会发起一个epoll wait的事件循环, 会阻塞当前线程
+ * 10 TC_Epoller对象的done方法, 会执行一次epoll wait事件, 如果没有任何事件发生, 则只会等待最后ms毫秒(参数确定)
+ * 11 TC_Epoller对象中的notify方法, 可以主动唤醒epoll wait
+ */
+/////////////////////////////////////////////////
+
+/**
+ * @brief epoller操作类,已经默认采用了EPOLLET方式做触发
+ * @brief epoller operation class, EPOLLET has been used by default for triggering 
+ */
+class TC_Epoller : public TC_TimerBase
+{
+
+public:
+    class EpollInfo : public enable_shared_from_this<EpollInfo>
+	{
+	public:
+		/**
+		 * 构造函数
+		 * @param epoller
+		 * @param fd
+		 */
+		EpollInfo(TC_Epoller* epoller, int fd) : _epoller(epoller), _fd(fd)
+		{
+		}
+
+		/**
+		 * 析构
+		 */
+		~EpollInfo();
+
+		//注意: 返回false, 表示socket有问题, 框架如果发现是false, 则epoller不再监听socket的事件
+		typedef std::function<bool(const shared_ptr<TC_Epoller::EpollInfo> &)> EVENT_CALLBACK;
+
+		/**
+		 * 句柄
+		 * @return
+		 */
+		inline int fd() { return _fd; }
+
+		/**
+		 * 是否有效
+		 * @return
+		 */
+		inline bool valid() { return _fd != INVALID_SOCKET; }
+
+		/**
+		 * 设置cookie和析构器, 可以在EpollInfo析构时调用
+		 * @param p
+		 * @param deconstructor
+		 */
+		inline void cookie(void *p, function<void(void*)> deconstructor = function<void(void*)>()) 
+		{ 
+			_cookie = p; 
+			_deconstructor = deconstructor;
+		}
+
+		/**
+		 * 获取cookie
+		 * @return
+		 */
+		inline void *cookie() { return _cookie; }
+
+		/**
+		 * 通用callback, 只要任何事件来了, 都会回到一次
+		 */
+		void setCallback(const std::function<void(const shared_ptr<TC_Epoller::EpollInfo> &)> &callback) { _callback = callback; }
+
+		/**
+		 * registry event callback
+		 * @param callbacks: <EPOLLIN/EPOLLOUT/EPOLLERR, EVENT_CALLBACK>
+		 * @param events: 需要add的事件, 如果为0, 则不add事件
+		 */
+		void registerCallback(const map<uint32_t, EVENT_CALLBACK> & callbacks, uint32_t events);
+
+		/**
+		 * 清除所有callback
+		 */
+		void clearCallback();
+
+		/**
+		 * @brief 添加监听句柄
+		 *
+		 * @param events
+		 */
+		void add(uint32_t events);
+
+		/**
+		 * @brief 修改句柄事件.
+		 *
+		 * @param events 需要监听的事件EPOLLIN|EPOLLOUT
+		 */
+		void mod(uint32_t events);
+
+		/**
+		 * @brief 删除句柄事件, 会释放EpollInfo, del后, 不能再使用EpollInfo
+		 *
+		 * @param events  通常传0
+		 */
+		void del(uint32_t events);
+
+	protected:
+
+		/**
+		 * this to data
+		 * @return
+		 */
+		inline uint64_t data() { return (uint64_t)this; }
+
+		/**
+		 * 关联的句柄无效了, 释放掉
+		 */
+		void release();
+
+		friend class TC_Epoller;
+
+	protected:
+
+	    /**
+		 * 触发事件
+		 * @param event
+		 * @return true: 事件都正常执行完, false: 事件执行有异常(句柄关闭)
+		 */
+	    bool fireEvent(uint32_t event);
+
+	protected:
+
+		TC_Epoller*				_epoller;
+
+		int						_fd;
+
+		void*					_cookie;
+
+		function<void(void*)>	_deconstructor;
+
+		EVENT_CALLBACK 	        _callbacks[3];
+
+		std::function<void(const shared_ptr<TC_Epoller::EpollInfo> &)> _callback;
+	};
+
+    /**
+     * @brief 通知epoll从wait中醒过来
+     */
+    class NotifyInfo
+    {
+    public:
+
+        /**
+         *
+         */
+        ~NotifyInfo();
+
+        /**
+         * 初始化
+         */
+        void init(TC_Epoller *epoller);
+
+        /**
+         * 获取通知fd
+         */
+        inline int notifyFd() { return _notify.getfd(); }
+
+        /**
+         * 获取epoll info, 该指针不需要自己手工delete, epoller loop or NotifyInfo析构中会释放
+         * @return
+         */
+        inline shared_ptr<EpollInfo> &getEpollInfo() { return _epollInfo; }
+
+    protected:
+        TC_Epoller* _epoller = NULL;
+
+        //
+        shared_ptr<EpollInfo>  _epollInfo;
+
+        //通知fd
+        TC_Socket   _notify;
+    };
+public:
+
+	/**
+	 * @brief 构造函数. 
+	 * @brief Constructor Function
+	 *  
+	 */
+	TC_Epoller();
+
+	/**
+     * @brief 析构函数.
+	 * @brief Destructor
+	 */
+	~TC_Epoller();
+
+	/**
+	 * 设置名称
+	 * @param name, 给测试用, 知道具体是哪个epoll
+	 */
+	void setName(const string &name = "") { _name = name; }
+
+	/**
+	 * @brief 生成epoll句柄. 
+	 * @brief Generate epoll handle.
+	 *  
+     * @param max_connections epoll服务需要支持的最大连接数(Maximum number of connections the epoll service needs to support)
+	 * @param createNotify, if you what call epoller.notify() to wakeup epoll from epoll wait, you should set createNotify to true;
+	 */
+	void create(int max_connections, bool createNotify = true);
+
+	/**
+	 * 启用/禁用ET模式
+	 */
+	void enableET(bool enable) { _enableET = enable; };
+
+	/**
+	 * @brief 释放资源
+	 *  
+     * @param 
+	 */
+	void close();
+
+	/**
+	 * @brief 添加监听句柄, 这种模式不能指定epoll关联的数据(通过EpollInfo)
+	 * 如果需要关联数据, 则需要用create函数
+	 *
+	 * @param fd
+     * @param data   data
+	 * @param events
+	 */
+    void add(SOCKET_TYPE fd, uint64_t data, uint32_t events);
+
+	/**
+	 * @brief 修改句柄事件, 这种模式不能指定epoll关联的数据(通过EpollInfo)
+	 *
+     * @param fd    句柄
+     * @param data   data
+     * @param events 需要监听的事件EPOLLIN|EPOLLOUT
+	 */
+	void mod(SOCKET_TYPE fd, uint64_t data, uint32_t events);
+
+    /**
+     * @brief 删除句柄事件, 这种模式不能指定epoll关联的数据(通过EpollInfo)
+     *
+     * @param data   data
+     * @param fd    句柄
+     */
+    void del(SOCKET_TYPE fd, uint64_t data, uint32_t events);
+
+	/**
+	 * @brief 添加监听句柄, 会创建EpollInfo, 并将EpollInfo设置为当前fd的data.
+	 * EpollInfo被TC_Epoller管理, 连接关闭时自动释放, 使用者需要保持EpollInfo的智能指针, 当fd关闭时, 调用releaseEpollInfo
+     * @param fd    句柄
+     * @return EpollInfo
+	 */
+	shared_ptr<EpollInfo> createEpollInfo(SOCKET_TYPE fd);
+
+	/**
+	 * 释放EpollInfo
+	 * @param epollInfo
+	 */
+	void releaseEpollInfo(const shared_ptr<EpollInfo> &epollInfo);
+
+	/**
+	 * @brief 等待时间. 
+	 * @brief wait time
+	 *  
+	 * @param millsecond 毫秒 
+     * @return int       有事件触发的句柄数
+	 * @return int       Number of handles triggered by events
+	 */
+	int wait(int millsecond);
+
+	/**
+     * @brief 获取被触发的事件.
+	 * @brief Get the triggered handle
+	 *
+	 * @return struct epoll_event&被触发的事件
+	 * @return Struct epoll_event& triggered event
+	 */
+	epoll_event& get(int i);
+
+	/**
+	 * 空闲处理
+	 * @param callback
+	 */
+	void idle(std::function<void()> callback) { _idleCallbacks.push_back(callback); }
+
+	/**
+	 * 循环(wait心跳间隔, 默认1000ms)
+	 */
+	void loop(uint64_t ms = 1000);
+
+	/**
+	 * 处理一次(wait心跳间隔, 默认1000ms)
+	 */
+	void done(uint64_t ms = 1000);
+
+	/**
+	 * 通知epoll wait醒过来
+	 * 注意只有针对create时, 第二个参数传入true的epoll对象才有效!
+	 */
+	void notify();
+
+	/**
+	 * 同步调用, 等待epoll的run线程中同步处理函数以后再回调!超级有用的函数!!!
+	 * 注意性能: 中间要分配句柄, 可能性能没那么高!
+	 * @param func
+	 */
+	void syncCallback(const std::function<void()> &func, int64_t millseconds = -1);
+
+	/**
+	 * 异步调用, func丢给epoll的run线程中处理函数
+	 * 注意性能: 中间要分配句柄, 可能性能没那么高!
+	 * @param func
+	 */
+	void asyncCallback(const std::function<void()>& func);
+
+	/**
+	 * 退出循环
+	 */
+	void terminate();
+
+	/**
+	 * 重置状态, 不再是terminate的
+	 */
+	void reset();
+
+	/**
+	 * 是否结束
+	 * @return
+	 */
+	bool isTerminate() const { return _terminate; }
+
+	/**
+     * @brief 是否有读事件
+	 * @brief whether there is an event to read
+	 *
+	 * @return
+	 */		
+	static bool readEvent(const epoll_event &ev);
+
+	/**
+     * @brief 是否有写事件
+	 * @brief whether there is an event to write
+	 *
+	 * @return
+	 */		
+	static bool writeEvent(const epoll_event &ev);
+
+	/**
+     * @brief 是否有异常事件
+	 * @brief whether there is an exception event
+	 *
+	 * @return
+	 */		
+	static bool errorEvent(const epoll_event &ev);
+
+	/**
+     * @brief 获取低位/高位数据
+	 * @brief Get low/high bit data
+	 * @param high: true:高位, false:低位
+	 * @param high: true:high level, false:low level
+	 * @return
+	 */	
+	static uint32_t getU32(const epoll_event &ev, bool high);
+
+	/**
+     * @brief 获取64bit数据
+	 * @brief Get 64 bit data
+	 * @return
+	 */	
+	static uint64_t getU64(const epoll_event &ev);
+
+protected:
+
+	/**
+	 * @brief 控制epoll,将EPOLL设为边缘触发EPOLLET模式 
+	 * @brief Control epoll, set EPOLL to Edge Trigger EPOLLET mode
+     * @param fd    句柄,在create函数时被赋值
+	 * @param fd    Handle, assigned when creating function
+     * @param data  辅助的数据, 可以后续在epoll_event中获取到
+	 * @param data  auxiliary data that can be obtained in epoll_event subsequently
+     * @param event 需要监听的事件
+	 * @param event the event to be listened
+	 * @param op    EPOLL_CTL_ADD: 注册新的fd到epfd中; 
+	 * 			    EPOLL_CTL_MOD:修改已经注册的fd的监听事件; 
+	 * 			    EPOLL_CTL_DEL:从epfd中删除一个fd; 
+	 * @param op    EPOLL_CTL_ADD:Register new FD into EPFD
+	 * 				EPOLL_CTL_MOD:Modify the monitoring events for registered fd
+	 * 				EPOLL_CTL_DEL:Delete an FD from epfd
+	 *  
+	 */
+	int ctrl(SOCKET_TYPE fd, uint64_t data, uint32_t events, int op);
+
+ 	/**
+ 	 * 执行定时时间
+ 	 * @param func
+ 	 */
+	virtual void onFireEvent(std::function<void()> func);
+
+	/**
+	 * 增加了一个最近的定时器, 需要触发wait唤醒, 等待到最新的时间上
+	 */
+	virtual void onAddTimer();
+
+	friend class EpollInfo;
+
+protected:
+	/**
+	 * epoll名称, 主要给debug使用
+	 */
+	string  _name;
+	/**
+	 * 是否退出循环
+	 */
+	bool    _terminate = false;
+
+	/**
+	 * 默认开启ET模式
+	 */
+	bool    _enableET = true;
+
+	/**
+	 * 通知循环退出
+	 */
+	NotifyInfo*  _notify = NULL;
+
+    /**
+     * 	epoll
+     */
+#if TARGET_PLATFORM_WINDOWS
+	void* _iEpollfd;
+#else
+    int _iEpollfd;
+#endif
+
+	/**
+     * 最大连接数
+	 * The max amount of connections
+	 */
+	int	_max_connections;
+
+	/**
+     * 事件集
+	 * Event Set
+	 */
+	epoll_event *_pevs;
+
+	/**
+	 * 空闲处理
+	 */
+	vector<std::function<void()>> _idleCallbacks;
+};
+
+}
+#endif
+

+ 7 - 0
util/include/util/tc_ex.h

@@ -106,6 +106,13 @@ public:
      */
     static int getSystemCode();
 
+    /**
+     * @brief 获取系统错误码(linux是errno, windows是GetLastError)
+     *
+     * @return 获取系统错误描述
+     */
+    static string getSystemError();
+
 private:
     void getBacktrace();
 

+ 29 - 2
util/include/util/tc_http.h

@@ -712,7 +712,7 @@ public:
 	 * get headers
 	 * @param header
 	 */
-	void getHeaders(map<string, string> &header);
+	void getHeaders(map<string, string> &header) const;
 
      /**
       * @brief 重置
@@ -1075,6 +1075,27 @@ public:
      */
 	bool incrementDecode(TC_NetWorkBuffer &buff);
 
+	/**
+	 * @brief 增量decode,输入的buffer会自动在解析过程中被清除掉,
+	 * 增量decode之前必须reset,
+	 * (网络接收的buffer直接添加到sBuffer里面即可, 然后增量解析)
+	 * (能够解析的数据TC_HttpResponse会自动从sBuffer里面消除,直到网络接收完毕或者解析返回true)
+	 * @brief Incremental decode, input buffers are automatically cleared during parsing.
+	 * Must reset before incremental decode,
+	 * (The buffers received by the network are added directly to the sBuffer and then incrementally resolved)
+	 * (The resolvable data TC_HttpResponse is automatically eliminated from the sBuffer until the network has received it or the resolving returns true)
+	 * @param buffer
+	 * @throws TC_HttpResponse_Exception, 不支持的http协议, 抛出异常
+	 * @throws TC_HttpResponse_Exception, unsupported http protocol, throwing exception
+	 * @return true:解析出一个完整的buffer
+	 *        false:还需要继续解析,如果服务器主动关闭连接的模式下
+	 *        , 也可能不需要再解析了
+	 * @return true: resolves a complete buffer
+	 *         false: parsing needs to continue; if the server actively closes the connection,
+	 *         no further parsing may be needed
+	 */
+	bool incrementDecode(TC_NetWorkBuffer::Buffer &buff);
+
     /**
      * @brief 解析http应答(采用string方式) ,
      * 注意:如果http头部没有Content-Length且非chunk模式, 则返回true
@@ -1360,7 +1381,7 @@ public:
     TC_HttpRequest()
     {
         TC_HttpRequest::reset();
-        setUserAgent("Tars-Http");
+        //setUserAgent("Tars-Http");
     }
 
     /**
@@ -1487,6 +1508,12 @@ public:
      */
     void encode(vector<char> &buffer);
 
+    /**
+     * encode buffer to TC_NetWorkBuffer
+     * @param buff
+     */
+	void encode(shared_ptr<TC_NetWorkBuffer::Buffer>& buff) ;
+
     /**
      * encode buffer to TC_NetWorkBuffer
      * @param buff

+ 501 - 523
util/include/util/tc_http_async.h

@@ -1,523 +1,501 @@
-/**
- * Tencent is pleased to support the open source community by making Tars available.
- *
- * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
- *
- * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
- * in compliance with the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/BSD-3-Clause
- *
- * Unless required by applicable law or agreed to in writing, software distributed 
- * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
- * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
- * specific language governing permissions and limitations under the License.
- */
-
-#ifndef __TC_HTTP_ASYNC_H_
-#define __TC_HTTP_ASYNC_H_
-
-#include <functional>
-#include "util/tc_platform.h"
-#include "util/tc_thread_pool.h"
-#include "util/tc_network_buffer.h"
-#include "util/tc_http.h"
-#include "util/tc_autoptr.h"
-#include "util/tc_socket.h"
-
-namespace tars
-{
-
-/////////////////////////////////////////////////
-/**
-* @file tc_http_async.h
-* @brief http异步调用类.
-* @brief HTTP asynchronous call class.
-*
-* http同步调用使用TC_HttpRequest::doRequest就可以了
-* 代码示例请参考example_http_async.cpp
-* 说明:
-*     1 背后会启动唯一的网络线程
-*     2 目前只支持http短连接
-*     3 RequestCallback回调里面, onSucc和onFailed是对应的, 每次异步请求, onSucc/onFailed其中之一会被唯一响应
-* Synchronized HTTP calls using TC_HttpRequest:: doRequest is OK
-* See example_for code examplesHttp_Async.cpp
-* Explanation:
-*     1 the only network thread will be launched behind it
-*     2 Only short HTTP connections are currently supported
-*     3 In the RequestCallback callback, onSucc and onFailed correspond. Each asynchronous request, one of onSucc/onFailed is uniquely responded to.
-* @author ruanshudong@qq.com
-*/
-/////////////////////////////////////////////////
-
-/**
- * @brief 异步线程处理类.
- * @brief Asynchronous Thread Processing Class
- */
-class TC_HttpAsync : public TC_Thread, public TC_ThreadLock
-{
-public:
-    /**
-     * @brief 异步请求回调对象
-     * @brief Asynchronous request callback object
-     * onSucc, 收到成功回包时响应
-     * onSucc, Response when a successful return is received
-     * onFailed, 失败时响应
-     * onFailed, Response on Failure
-     * onSucc和onFailed是成对出现的, 且一次请求, 只会一个被响应, 且只响应一次
-     * OnSucc and onFailed occur in pairs and only one request is responded to, and only once.
-     * onFailed被调用时, 链接就会被关闭掉
-     * The link is closed when onFailed is called, 
-     */
-    class RequestCallback : public TC_HandleBase
-    {
-    public:
-        /**
-        * 错误码
-        * Error Code
-        */
-        enum FAILED_CODE
-        {
-            /*Network Error*/
-            Failed_Net     = 0x01,      //网络出错
-            /*Error Connecting to Server*/
-            Failed_Connect = 0x02,      //连接服务器出错
-            /*overtime*/
-            Failed_Timeout = 0x03,      //超时
-            /*Interrupt receiving data*/
-            Failed_Interrupt = 0x04,    //中断接收数据
-            /*The server actively closed the link*/
-            Failed_Close    = 0x05,     //服务器主动关闭了链接
-            /*Link Timeout*/
-            Failed_ConnectTimeout = 0x06, //链接超时
-        };
-
-        /**
-         * @brief 每次收到数据且http头收全了都会调用,
-         * stHttpResponse的数据可能不是完全的http响应数据 ,只有部分body数据
-         * @brief Called every time data is received and the HTTP header is fully received,
-         * the stHttpResponse data may not be the complete HTTP response data, only part of the body data
-         * @param stHttpResponse  收到的http数据
-         * @param stHttpResponse  HTTP data received
-         * @return                true:继续收取数据, false:不收取数据了(会触发onFailed, Failed_Interrupt)
-         * @return                True: Continue collecting data, false: No data will be collected (will trigger onFailed, Failed_Interrupt)
-         */
-        virtual bool onContinue(TC_HttpResponse &stHttpResponse) { return true; }
-
-        /**
-         * @brief 完整的响应回来了.
-         * @brief Full response back
-         *
-         * @param stHttpResponse  http响应包
-         * @param stHttpResponse  HTTP response package
-         */
-        virtual void onSucc(TC_HttpResponse &stHttpResponse) = 0;
-
-        /**
-         * @brief 异常, 发生异常时, onClose也会被调用, 连接会被关闭掉
-         * @brief Exception, when an exception occurs, onClose is also called and the connection is closed
-         *
-         * @param ret, 错误码
-         * @param ret  error code
-         * @param info 异常原因
-         * @param info Exception Reason
-         */
-        virtual void onFailed(FAILED_CODE ret, const string &info) = 0;
-
-        /**
-         * @brief 连接被关闭
-         * @brief Connection closed
-         */
-        virtual void onClose() {};
-    };
-
-    typedef TC_AutoPtr<RequestCallback> RequestCallbackPtr;
-
-protected:
-    /**
-     * @brief 异步http请求(短连接)
-     * @brief Asynchronous HTTP requests (short connections)
-     */
-    class AsyncRequest : public TC_HandleBase
-    {
-    public:
-        /**
-         * @brief 构造.
-         * @brief Constructor
-         *
-         * @param stHttpRequest
-         * @param callbackPtr
-         */
-        AsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, bool bUseProxy);
-
-        /**
-         * @brief 构造.
-         * @brief Constructor
-         *
-         * @param stHttpRequest
-         * @param callbackPtr
-         * @param addr
-         */
-        AsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const string &addr);
-
-        /**
-         * @brief 析构
-         * @brief Destructor
-         */
-        ~AsyncRequest();
-
-        /**
-         * @brief 获取句柄
-         * @brief Get Handle
-         *
-         * @return int
-         */
-        int getfd() const { return _fd.getfd(); }
-
-        /**
-         * @brief 发起建立连接.
-         * @brief Initiate Connection Establishment
-         *
-         */
-        void doConnect();
-
-        /**
-         * @brief 获取系统错误提示
-         * @brief Get System Error Tips
-         * @return
-         */
-        string getError(const string &sDefault) const;
-
-        /**
-        * @brief 发生异常
-        * @brief exception occurred
-        */
-        void doException(RequestCallback::FAILED_CODE ret, const string &e);
-
-        /**
-         * @brief 发送请求
-         * @brief Send Request
-         */
-        void doRequest();
-
-        /**
-         * @brief 接收响应
-         * @brief Receive Response
-         */
-        void doReceive();
-
-        /**
-         * @brief 关闭连接
-         * @brief Close Connection
-         */
-        void doClose();
-
-        /**
-         * @brief 超时
-         * @brief overtime
-         */
-        void timeout();
-
-        /**
-         * @brief 设置唯一ID.
-         * @brief Set Unique ID.
-         *
-         * @param uniqId
-         */
-        void setUniqId(uint32_t uniqId)    { _iUniqId = uniqId;}
-
-        /**
-         * @brief 获取唯一ID.
-         * @brief Get Unique ID.
-         *
-         * @return uint32_t
-         */
-        uint32_t getUniqId() const         { return _iUniqId; }
-
-        /**
-         * @brief 设置处理请求的http异步线程.
-         * @brief Set HTTP asynchronous thread for processing requests.
-         *
-         * @param pHttpAsync :异步线程处理对象
-         * @param pHttpAsync : Asynchronous threading processes objects 
-         */
-        void setHttpAsync(TC_HttpAsync *pHttpAsync) { _pHttpAsync = pHttpAsync; }
-
-        /**
-         * @brief 设置发网络请求时绑定的ip地址.
-         * @brief Set the IP address to bind when sending network requests.
-         *
-         * @param addr
-         */
-        void setBindAddr(const struct sockaddr* addr);
-
-        /**
-         * @brief 链接是否有效
-         * @brief Is the link valid
-         */
-        bool isValid() const { return _fd.isValid(); }
-
-        /**
-         * @brief 是否链接上
-         * @brief Is it linked
-         * @return [description]
-         */
-        bool hasConnected() const { return _isConnected; }
-
-        /**
-         * @brief 设置链接状态
-         * @brief Set Link State
-         * @param isConnected [description]
-         */
-        void setConnected(bool isConnected) { _isConnected = isConnected; }
-
-        /**
-         * 处理网络事件
-         * Handling network events
-         * @param events [description]
-         */
-        void processNet(const epoll_event &ev);
-
-        /**
-         * 处理通知事件
-         * Handle notification events
-         */
-        void processNotify();
-
-    protected:
-        /**
-         * @brief 接收请求.
-         * @brief Receive requests.
-         *
-         * @param buf
-         * @param len
-         * @param flag
-         * @return int
-         */
-        int recv(void* buf, uint32_t len, uint32_t flag);
-
-        /**
-         * @brief 发送请求.
-         * @brief the sent request
-         *
-         * @param buf 发送内容
-         * @param buf the sent content
-         * @param len 发送长度
-         * @param len the sent length
-         * @param flag
-         * @return int
-         */
-        int send(const void* buf, uint32_t len, uint32_t flag);
-
-    protected:
-        TC_HttpAsync               *_pHttpAsync;
-        TC_HttpResponse             _stHttpResp;
-        TC_Socket                   _fd;
-        string                      _sHost;
-        uint32_t                    _iPort;
-        uint32_t                    _iUniqId;
-        TC_NetWorkBuffer            _sendBuffer;
-	    TC_NetWorkBuffer            _recvBuffer;
-        RequestCallbackPtr          _callbackPtr;
-        bool                        _bindAddrSet;
-        struct sockaddr             _bindAddr;
-        bool                        _bUseProxy;
-        bool                        _isConnected;
-    };
-
-    typedef TC_AutoPtr<AsyncRequest> AsyncRequestPtr;
-
-public:
-
-    typedef TC_TimeoutQueue<AsyncRequestPtr> http_queue_type;
-
-    /**
-     * @brief 构造函数
-     * @brief Constructor
-     */
-    TC_HttpAsync();
-
-    /**
-     * @brief 析构函数
-     * @brief Destructor
-     */
-    ~TC_HttpAsync();
-
-    /**
-     * @brief 异步发起请求.
-     * @brief Asynchronous Initiation of Requests
-     *
-     * @param stHttpRequest
-     * @param httpCallbackPtr
-     * @param bUseProxy,是否使用代理方式连接
-     * @param bUseProxy Whether to use proxy connection
-     */
-    void doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, bool bUseProxy = false);
-
-    /**
-     * @brief 异步发起请求.
-     * @brief Asynchronous Initiation of Requests
-     *
-     * @param stHttpRequest
-     * @param httpCallbackPtr
-     * @param addr, 请求地址, ip:port
-     * @param addr  Request Address, ip;port
-     */
-    void doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const string &addr);
-
-    /**
-     * @brief 设置proxy地址
-     * @brief Set proxy address
-     *
-     */
-    int setProxyAddr(const char* Host, uint16_t Port);
-
-    /**
-     * @brief 设置代理的地址.
-     * @brief Set the address of the agent.
-     *
-     * 不通过域名解析发送,直接发送到代理服务器的ip地址
-     * Send directly to the IP address of the proxy server without domain name resolution
-     * @param sProxyAddr 格式 192.168.1.2:2345 或者 sslproxy.qq.com:2345
-     * @param sProxyAddr format : 192.168.1.2:2345 or sslproxy.qq.com:2345
-     */
-    int setProxyAddr(const char* sProxyAddr);
-
-    /**
-     * @brief 设置绑定的地址.
-     * @brief Set Binding Address.
-     *
-     * @param sProxyAddr 格式 192.168.1.2
-     * @param sProxyAddr format: 192.168.1.2
-     */
-    int setBindAddr(const char* sBindAddr);
-
-    /**
-     * @brief 设置绑定的地址.
-     * @brief Set Binding Address.
-     *
-     * @param addr 直接用 addr 赋值
-     * @param addr Assigning values directly with addr
-     */
-    void setProxyAddr(const struct sockaddr* addr);
-
-    /**
-     * @brief 获取代理地址, 设置代理地址后才有效
-     * @brief Get proxy address, set proxy address before valid
-     * @return [description]
-     */
-    const struct sockaddr* getProxyAddr() const { return &_proxyAddr; }
-
-    /**
-     * @brief 启动异步处理.
-     * @brief Start asynchronous processing
-     *
-     * 参数已经无效(网络层有且只有一个线程)
-     * Parameters are no longer valid (network layer has one and only one thread)
-     * @param num, 异步处理的线程数
-     * @param num  Number of threads processed asynchronously
-     */
-    void start();
-
-    /**
-     * @brief 设置超时(所有请求都只能用一种超时时间).
-     * @brief Set timeout (all requests can only use one timeout)
-     *
-     * @param timeout: 毫秒, 但是具体的超时精度只能在s左右
-     * @param timeout: Milliseconds, but the exact timeout precision can only be around s
-     */
-    void setTimeout(int millsecond) { _data->setTimeout(millsecond); }
-
-    /**
-     * @brief 等待请求全部结束(等待毫秒精度在100ms左右).
-     * @brief Wait for all requests to end (millisecond precision around 100ms).
-     *
-     * @param millsecond, 毫秒 -1表示永远等待
-     * @param millsecond Milliseconds-1 means wait forever
-     */
-    void waitForAllDone(int millsecond = -1);
-
-    /**
-     * @brief 结束线程
-     * @brief End Thread
-     */
-    void terminate();
-
-protected:
-
-    // typedef TC_Functor<void, TL::TLMaker<AsyncRequestPtr, int>::Result> async_process_type;
-    typedef std::function<void(AsyncRequestPtr, int)> async_process_type;
-
-    /**
-     * @brief 超时处理.
-     * @brief Timeout handler.
-     *
-     * @param ptr
-     */
-    static void timeout(AsyncRequestPtr& ptr);
-
-    /**
-     * @brief 确保线程
-     * @brief Ensure Threads
-     * @param _threadId [description]
-     */
-    void assertThreadId() { assert(_threadId == std::this_thread::get_id()); }
-
-    /**
-     * @brief 具体的网络处理逻辑
-     * @brief Specific network processing logic
-     */
-    void run() ;
-
-    /**
-     * @brief 删除异步请求对象
-     * @brief Delete Asynchronous Request Object
-     */
-    void erase(uint32_t uniqId);
-
-    /**
-     * @brief 监控链接
-     * @brief Monitoring Links
-     * @param fd     [description]
-     * @param uniqId [description]
-     * @param events [description]
-     */
-    void addConnection(int fd, uint32_t uniqId, uint32_t events);
-
-    /**
-     * @brief 删除链接
-     * @brief Delete Link
-     * @param fd     [description]
-     * @param events [description]
-     */
-    void delConnection(int fd);
-
-    friend class AsyncRequest;
-
-protected:
-    std::thread::id              _threadId;
-
-    TC_ThreadPool               _tpool;
-
-    http_queue_type             *_data;
-
-    TC_Epoller                  _epoller;
-
-    TC_Epoller::NotifyInfo      _notify;
-
-	std::mutex                  _mutex;
-
-	deque<uint64_t>             _events;
-
-    bool                        _terminate;
-
-    struct sockaddr             _proxyAddr;
-
-    struct sockaddr             _bindAddr;
-
-    bool                        _bindAddrSet;
-};
-
-}
-#endif
-
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#ifndef __TC_HTTP_ASYNC_H_
+#define __TC_HTTP_ASYNC_H_
+
+#include <functional>
+#include "util/tc_platform.h"
+#include "util/tc_thread_pool.h"
+#include "util/tc_network_buffer.h"
+#include "util/tc_http.h"
+#include "util/tc_autoptr.h"
+#include "util/tc_socket.h"
+#include "util/tc_transceiver.h"
+#include "util/tc_openssl.h"
+
+namespace tars
+{
+
+/////////////////////////////////////////////////
+/**
+* @file tc_http_async.h
+* @brief http异步调用类.
+* @brief HTTP asynchronous call class.
+*
+* http同步调用使用TC_HttpRequest::doRequest就可以了
+* 说明:
+*     1 背后会启动唯一的网络线程
+*     2 目前只支持http短连接
+*     3 RequestCallback回调里面, onSucc和onFailed是对应的, 每次异步请求, onSucc/onFailed其中之一会被唯一响应
+*     4 支持https
+* Synchronized HTTP calls using TC_HttpRequest:: doRequest is OK
+* See example_for code examplesHttp_Async.cpp
+* Explanation:
+*     1 the only network thread will be launched behind it
+*     2 Only short HTTP connections are currently supported
+*     3 In the RequestCallback callback, onSucc and onFailed correspond. Each asynchronous request, one of onSucc/onFailed is uniquely responded to.
+*     4 support https
+* @author ruanshudong@qq.com
+*/
+/////////////////////////////////////////////////
+
+/**
+* @brief socket异常类
+*/
+struct TC_HttpAsync_Exception : public TC_Exception
+{
+    TC_HttpAsync_Exception(const string &buffer) : TC_Exception(buffer) {};
+    ~TC_HttpAsync_Exception() throw() {};
+};
+
+/**
+ * @brief 异步线程处理类.
+ * @brief Asynchronous Thread Processing Class
+ */
+class TC_HttpAsync : public TC_Thread, public TC_ThreadLock
+{
+public:
+    /**
+     * @brief 异步请求回调对象
+     * @brief Asynchronous request callback object
+     * onSucc, 收到成功回包时响应
+     * onSucc, Response when a successful return is received
+     * onFailed, 失败时响应
+     * onFailed, Response on Failure
+     * onSucc和onFailed是成对出现的, 且一次请求, 只会一个被响应, 且只响应一次
+     * OnSucc and onFailed occur in pairs and only one request is responded to, and only once.
+     * onFailed被调用时, 链接就会被关闭掉
+     * The link is closed when onFailed is called, 
+     */
+    class RequestCallback : public TC_HandleBase
+    {
+    public:
+        /**
+        * 错误码
+        * Error Code
+        */
+        enum FAILED_CODE
+        {
+            /*Network Error*/
+            Failed_Net     = 0x01,      //网络出错
+            /*Error Connecting to Server*/
+            Failed_Connect = 0x02,      //连接服务器出错
+            /*overtime*/
+            Failed_Timeout = 0x03,      //超时
+            /*Interrupt receiving data*/
+            Failed_Interrupt = 0x04,    //中断接收数据
+            /*The server actively closed the link*/
+            Failed_Close    = 0x05,     //服务器主动关闭了链接
+            /*Link Timeout*/
+            Failed_ConnectTimeout = 0x06, //链接超时
+            Failed_Request  = 0x07,      //发送出错
+        };
+
+        /**
+         * @brief 每次收到数据且http头收全了都会调用,
+         * stHttpResponse的数据可能不是完全的http响应数据 ,只有部分body数据
+         * @brief Called every time data is received and the HTTP header is fully received,
+         * the stHttpResponse data may not be the complete HTTP response data, only part of the body data
+         * @param stHttpResponse  收到的http数据
+         * @param stHttpResponse  HTTP data received
+         * @return                true:继续收取数据, false:不收取数据了(会触发onFailed, Failed_Interrupt)
+         * @return                True: Continue collecting data, false: No data will be collected (will trigger onFailed, Failed_Interrupt)
+         */
+        virtual bool onContinue(TC_HttpResponse &stHttpResponse) { return true; }
+
+        /**
+         * @brief 完整的响应回来了.
+         * @brief Full response back
+         *
+         * @param stHttpResponse  http响应包
+         * @param stHttpResponse  HTTP response package
+         */
+        virtual void onSucc(TC_HttpResponse &stHttpResponse) = 0;
+
+        /**
+         * @brief 异常, 发生异常时, onClose也会被调用, 连接会被关闭掉
+         * @brief Exception, when an exception occurs, onClose is also called and the connection is closed
+         *
+         * @param ret, 错误码
+         * @param ret  error code
+         * @param info 异常原因
+         * @param info Exception Reason
+         */
+        virtual void onFailed(FAILED_CODE ret, const string &info) = 0;
+
+        /**
+         * @brief 连接被关闭
+         * @brief Connection closed
+         */
+        virtual void onClose() {};
+    };
+
+    typedef TC_AutoPtr<RequestCallback> RequestCallbackPtr;
+
+protected:
+    /**
+     * @brief 异步http请求(短连接)
+     * @brief Asynchronous HTTP requests (short connections)
+     */
+    class AsyncRequest : public TC_HandleBase
+    {
+    public:
+        AsyncRequest() {}
+
+        /**
+         * @brief 构造.
+         * @brief Constructor
+         *
+         * @param stHttpRequest
+         * @param callbackPtr
+         */
+        ~AsyncRequest();
+
+        /**
+         * 初始化
+         * @param stHttpRequest
+         * @param callbackPtr
+         * @param ep
+         */
+        void initialize(TC_Epoller *epoller, const TC_Endpoint &ep, TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr);
+
+        /**
+         * @brief 获取句柄
+         * @brief Get Handle
+         *
+         * @return int
+         */
+        int getfd() const { return _trans->fd(); }
+
+        /**
+         * @brief 获取系统错误提示
+         * @brief Get System Error Tips
+         * @return
+         */
+        string getError(const string &sDefault) const;
+
+        /**
+        * @brief 发生异常
+        * @brief exception occurred
+        */
+        void doException(RequestCallback::FAILED_CODE ret, const string &e);
+
+        /**
+         * @brief 发送请求
+         * @brief Send Request
+         */
+        void doRequest();
+
+        /**
+         * @brief 接收响应
+         * @brief Receive Response
+         */
+        void doReceive();
+
+        /**
+         * @brief 关闭连接
+         * @brief Close Connection
+         */
+        void doClose();
+
+        /**
+         * @brief 超时
+         * @brief overtime
+         */
+        void timeout();
+
+        /**
+         * @brief 设置唯一ID.
+         * @brief Set Unique ID.
+         *
+         * @param uniqId
+         */
+        void setUniqId(uint32_t uniqId)    { _iUniqId = uniqId;}
+
+        /**
+         * @brief 获取唯一ID.
+         * @brief Get Unique ID.
+         *
+         * @return uint32_t
+         */
+        uint32_t getUniqId() const         { return _iUniqId; }
+
+        /**
+         * @brief 设置处理请求的http异步线程.
+         * @brief Set HTTP asynchronous thread for processing requests.
+         *
+         * @param pHttpAsync :异步线程处理对象
+         * @param pHttpAsync : Asynchronous thread object that processes this request
+         */
+        void setHttpAsync(TC_HttpAsync *pHttpAsync) { _pHttpAsync = pHttpAsync; }
+
+        /**
+         * @brief 设置发网络请求时绑定的ip地址.
+         * @brief Set the IP address to bind when sending network requests.
+         *
+         * @param addr
+         */
+        void setBindAddr(const TC_Socket::addr_type &bindAddr);
+
+        /**
+         * @brief 链接是否有效
+         * @brief Is the link valid
+         */
+        bool isValid() const { return _trans->isValid(); }
+
+        /**
+         * @brief 是否链接上
+         * @brief Is it linked
+         * @return [description]
+         */
+        bool hasConnected() const { return _trans->hasConnected(); }
+
+        /**
+         *
+         * @return
+         */
+        TC_Transceiver *trans() { return _trans.get(); }
+
+    protected:
+        shared_ptr<TC_ProxyInfo> onCreateCallback(TC_Transceiver* trans);
+        std::shared_ptr<TC_OpenSSL> onOpensslCallback(TC_Transceiver* trans);
+        void onCloseCallback(TC_Transceiver* trans);
+        void onConnectCallback(TC_Transceiver* trans);
+        void onRequestCallback(TC_Transceiver* trans);
+        TC_NetWorkBuffer::PACKET_TYPE onParserCallback(TC_NetWorkBuffer& buff, TC_Transceiver* trans);
+
+        friend class TC_HttpAsync;
+
+    protected:
+        TC_HttpAsync               *_pHttpAsync;
+        TC_HttpResponse             _stHttpResp;
+        uint32_t                    _iUniqId;
+        RequestCallbackPtr          _callbackPtr;
+        unique_ptr<TC_Transceiver>  _trans;
+        std::shared_ptr<TC_NetWorkBuffer::Buffer> _buff;
+	    shared_ptr<TC_OpenSSL::CTX> _ctx;
+    };
+
+    typedef TC_AutoPtr<AsyncRequest> AsyncRequestPtr;
+
+public:
+
+    typedef TC_TimeoutQueue<AsyncRequestPtr> http_queue_type;
+
+    /**
+     * @brief 构造函数
+     * @brief Constructor
+     */
+    TC_HttpAsync();
+
+    /**
+     * @brief 析构函数
+     * @brief Destructor
+     */
+    ~TC_HttpAsync();
+
+    /**
+     * 发送异步请求, 发现指定的TC_Endpoint地址
+     * @param stHttpRequest
+     * @param callbackPtr
+     * @param ep
+     */
+    void doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const TC_Endpoint &ep);
+
+    /**
+     * @brief 异步发起请求(可以使用代理, 否则使用URL地址去发送)
+     * @brief Asynchronous Initiation of Requests
+     *
+     * @param stHttpRequest
+     * @param httpCallbackPtr
+     * @param bUseProxy,是否使用代理方式连接
+     * @param bUseProxy Whether to use proxy connection
+     */
+    void doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, bool bUseProxy = false);
+
+    /**
+     * @brief 异步发起请求, 发送到指定addr的地址.
+     * @brief Asynchronous Initiation of Requests
+     *
+     * @param stHttpRequest
+     * @param httpCallbackPtr
+     * @param addr, 请求地址, ip:port
+     * @param addr  Request address, ip:port
+     */
+    void doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const string &addr);
+
+	/**
+	 * 设置ctx
+	 * @param ctx
+	 */
+	void setCtx(const shared_ptr<TC_OpenSSL::CTX> &ctx) { _ctx = ctx; }
+
+	/**
+	 * get ctx
+	 * @return
+	 */
+	shared_ptr<TC_OpenSSL::CTX> getCtx() { return _ctx; }
+
+    /**
+     * set proxy addr
+     * @param ep
+     */
+    void setProxyAddr(const TC_Endpoint &ep);
+
+    /**
+     * @brief 设置proxy地址
+     * @brief Set proxy address
+     *
+     */
+    void setProxyAddr(const char* Host, uint16_t Port);
+
+    /**
+     * @brief 设置代理的地址.
+     * @brief Set the address of the agent.
+     *
+     * 不通过域名解析发送,直接发送到代理服务器的ip地址
+     * Send directly to the IP address of the proxy server without domain name resolution
+     * @param sProxyAddr 格式 192.168.1.2:2345 或者 sslproxy.qq.com:2345
+     * @param sProxyAddr format : 192.168.1.2:2345 or sslproxy.qq.com:2345
+     */
+    void setProxyAddr(const char* sProxyAddr);
+
+    /**
+     * @brief 设置绑定的地址.
+     * @brief Set Binding Address.
+     *
+     * @param sProxyAddr 格式 192.168.1.2
+     * @param sProxyAddr format: 192.168.1.2
+     */
+    void setBindAddr(const char* sBindAddr);
+
+//    /**
+//     * @brief 设置绑定的地址.
+//     * @brief Set Binding Address.
+//     *
+//     * @param addr 直接用 addr 赋值
+//     * @param addr Assigning values directly with addr
+//     */
+//    void setProxyAddr(const struct sockaddr* addr);
+//
+//    /**
+//     * @brief 获取代理地址, 设置代理地址后才有效
+//     * @brief Get proxy address, set proxy address before valid
+//     * @return [description]
+//     */
+//    const struct sockaddr* getProxyAddr() const { return &_proxyAddr; }
+
+    /**
+     * @brief 启动异步处理.
+     * @brief Start asynchronous processing
+     *
+     * 参数已经无效(网络层有且只有一个线程)
+     * Parameters are no longer valid (network layer has one and only one thread)
+     * @param num, 异步处理的线程数
+     * @param num  Number of threads processed asynchronously
+     */
+    void start();
+
+    /**
+     * @brief 设置超时(所有请求都只能用一种超时时间).
+     * @brief Set timeout (all requests can only use one timeout)
+     *
+     * @param timeout: 毫秒, 但是具体的超时精度只能在s左右
+     * @param timeout: Milliseconds, but the actual timeout precision is only about 1 second
+     */
+    void setTimeout(int millsecond) { _data->setTimeout(millsecond); }
+
+    /**
+     * @brief 等待请求全部结束(等待毫秒精度在100ms左右).
+     * @brief Wait for all requests to end (millisecond precision around 100ms).
+     *
+     * @param millsecond, 毫秒 -1表示永远等待
+     * @param millsecond Milliseconds; -1 means wait forever
+     */
+    void waitForAllDone(int millsecond = -1);
+
+    /**
+     * @brief 结束线程
+     * @brief End Thread
+     */
+    void terminate();
+
+protected:
+
+    void addFd(AsyncRequest* asyncRequest);
+
+    bool handleCloseImp(const shared_ptr<TC_Epoller::EpollInfo> &data);
+
+    bool handleInputImp(const shared_ptr<TC_Epoller::EpollInfo> &data);
+
+    bool handleOutputImp(const shared_ptr<TC_Epoller::EpollInfo> &data);
+
+        /**
+     * @brief 超时处理.
+     * @brief Timeout handler.
+     *
+     * @param ptr
+     */
+    static void timeout(AsyncRequestPtr& ptr);
+
+    /**
+     * @brief 确保线程
+     * @brief Ensure Threads
+     * @param _threadId [description]
+     */
+    void assertThreadId() { assert(_threadId == std::this_thread::get_id()); }
+
+    /**
+     * @brief 具体的网络处理逻辑
+     * @brief Specific network processing logic
+     */
+    void run() ;
+
+    /**
+     * @brief 删除异步请求对象
+     * @brief Delete Asynchronous Request Object
+     */
+    void erase(uint32_t uniqId);
+
+    friend class AsyncRequest;
+
+protected:
+    std::thread::id              _threadId;
+
+    TC_ThreadPool               _tpool;
+
+    http_queue_type             *_data;
+
+    TC_Epoller                  _epoller;
+
+	std::mutex                  _mutex;
+
+	deque<uint64_t>             _events;
+
+    deque<uint64_t>             _erases;
+
+    unique_ptr<TC_Endpoint>     _proxyEp;
+
+    TC_Socket::addr_type        _bindAddr;
+
+	shared_ptr<TC_OpenSSL::CTX> _ctx;
+
+};
+
+}
+#endif
+

+ 3 - 3
util/include/util/tc_json.h

@@ -47,7 +47,7 @@ enum eJsonType
 	eJsonTypeNum,
 	eJsonTypeObj,
 	eJsonTypeArray,
-	eJsonTypeBoolean,
+	eJsonTypeBoolean
 };
 
 /*
@@ -100,14 +100,14 @@ public:
 	JsonValueNum(double d,bool b=false):value(d),lvalue(d),isInt(b)
 	{
 	}
-	JsonValueNum(int64_t v,bool b=true):value(v),lvalue(v),isInt(b)
+	JsonValueNum(int64_t d,bool b=true):value(d), lvalue(d),isInt(b)
 	{
 	}
 	JsonValueNum()
 	{
 		isInt=false;
 		value=0.0f;
-		lvalue=0;
+		lvalue = 0;
 	}
 	eJsonType getType()
 	{

+ 32 - 11
util/include/util/tc_logger.h

@@ -496,7 +496,7 @@ namespace tars
 		 * @param stream
 		 * @param mutex
 		 */
-		LoggerStream(const char *header, ostream *stream, ostream *estream, TC_SpinLock &mutex) : _stream(stream), _estream(estream), _mutex(mutex)
+		LoggerStream(const char *header, ostream *stream, ostream *estream, TC_ThreadMutex &mutex) : _stream(stream), _estream(estream), _mutex(mutex)
 		{
 			if (stream)
 			{
@@ -512,7 +512,7 @@ namespace tars
 		{
 			if (_stream)
 			{
-				TC_LockT<TC_SpinLock> lock(_mutex);
+				TC_LockT<TC_ThreadMutex> lock(_mutex);
 				_stream->clear();
 				(*_stream) << _buffer.str();
 				_stream->flush();
@@ -529,7 +529,20 @@ namespace tars
 			if (_stream)
 			{
 				_buffer << t;
-				// *_stream << t;
+			}
+			return *this;
+		}
+
+		/**
+		* @brief 重载<<
+		* @brief Reload<<
+		*/
+		template <typename P>
+		LoggerStream &operator<<(P &t)
+		{
+			if (_stream)
+			{
+				_buffer << t;
 			}
 			return *this;
 		}
@@ -605,7 +618,8 @@ namespace tars
 		 * 锁
 		 * Lock
 		 */
-		TC_SpinLock &_mutex;
+//		TC_SpinLock &_mutex;
+		TC_ThreadMutex &_mutex;
 	};
 
 	/**
@@ -615,6 +629,13 @@ namespace tars
 	template <typename WriteT, template <class> class RollPolicy>
 	class TC_Logger : public RollPolicy<WriteT>::RollWrapperI
 	{
+	protected:
+		/**
+		 * @brief 日志级别名称
+		 * @brief Log Level Name
+		 */
+		static const string LN[];
+
 	public:
 		/**
 		 * @brief 设置显示标题
@@ -662,12 +683,6 @@ namespace tars
 			TARS_LOG_LEVEL = 6, 
 		};
 
-		/**
-		 * @brief 日志级别名称
-		 * @brief Log Level Name
-		 */
-		static const string LN[];
-
 		/**
 		 * @brief 构造函数
 		 * @brief Constructor
@@ -1010,7 +1025,7 @@ namespace tars
 		 * Lock
 		 */
 //		std::mutex _mutex;
-		TC_SpinLock _spinMutex;
+		TC_ThreadMutex _spinMutex;
 
 		/**
 		 * 分隔符
@@ -1979,6 +1994,12 @@ namespace tars
 	};
 
 	typedef TC_Logger<TC_DefaultWriteT, TC_RollByTime> TC_DayLogger;
+
+	UTIL_DLL_API extern TC_RollLogger __global_logger_debug__;
+
+#define LOG_CONSOLE_DEBUG  \
+__global_logger_debug__.any() << TC_Common::now2msstr() <<"|" << std::this_thread::get_id() << "|" << FILE_FUNC_LINE << "|"
+
 }
 
 #endif

+ 582 - 373
util/include/util/tc_network_buffer.h

@@ -1,9 +1,21 @@
-//
-// Created by jarod on 2019-03-01.
-//
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
 
-#ifndef TAF_CPP_TC_NETWORKBUFFER_H
-#define TAF_CPP_TC_NETWORKBUFFER_H
+#ifndef TC_CPP_TC_NETWORKBUFFER_H
+#define TC_CPP_TC_NETWORKBUFFER_H
 
 #include <list>
 #include <vector>
@@ -26,6 +38,15 @@
  * @brief 网络buffer解析, 主要目的是避免buffer的copy, 提速
  * @brief Network buffer resolution, the main purpose is to avoid buffer copy, speed up
  *
+ * TC_NetWorkBuffer说明:
+ * - TC_NetWorkBuffer主要用于网络的收发, 设计的目标是减少内存copy
+ * - Buffer对象是TC_NetWorkBuffer子类, 描述了一个完整连续的空间的buffer
+ * - TC_NetWorkBuffer是由多个Buffer连接到一起构成
+ *
+ * Buffer说明:
+ * - Buffer描述了一整块连续的内存
+ * - 它有几个pos来描述: 读索引: readIdx, 写索引: writeIdx, 容量: capacity
+ * - Buffer中当前有效数据是从: [readIdx, writeIdx)
  */
 
 namespace tars
@@ -43,112 +64,266 @@ struct TC_NetWorkBuffer_Exception : public TC_Exception
 class TC_NetWorkBuffer
 {
 public:
-    ////////////////////////////////////////////////////////////////////////////
-    /**
-     * 定义协议解析的返回值
+	////////////////////////////////////////////////////////////////////////////
+	/**
+	 * 定义协议解析的返回值
 	 * Define return values for protocol resolution
-     */
-    enum PACKET_TYPE
-    {
-        PACKET_LESS = 0,
-        PACKET_FULL = 1,
-		PACKET_FULL_CLOSE = 2,  ///< get whole package, and need close connection, for example: http
-        PACKET_ERR  = -1,   
-    };
-
-    /**
-     * 定义协议解析器接口
+	 */
+	enum PACKET_TYPE
+	{
+		PACKET_LESS = 0,
+		PACKET_FULL = 1,
+		PACKET_FULL_CLOSE = 2,  ///< 收到完整包,并需要结束session,如: http close情况, 只在客户端协议解析中生成, 服务端相当于PACKET_FULL
+		PACKET_ERR  = -1,
+	};
+
+	/**
+	 * 定义协议解析器接口
 	 * Define Protocol Resolver Interface
-     */
-    typedef std::function<PACKET_TYPE(TC_NetWorkBuffer &, vector<char> &)> protocol_functor;
-
-    /**
-    * 发送buffer
-	* Send buffer
-    */
-    class Buffer
-    {
-
-    public:
-	    Buffer() { }
-	    Buffer(const vector<char> &sBuffer) : _buffer(sBuffer) {}
-	    Buffer(const char *sBuffer, size_t length) : _buffer(sBuffer, sBuffer+length) {}
-
-        void swap(vector<char> &buff, size_t pos = 0)
-        {
-	    	if(_pos != 0)
-		    {
-	    	    buff.resize(length());
-	    	    memcpy(&buff[0], buffer(), length());
-		    }
-	        else
-            {
-			    buff.swap(_buffer);
-		    }
-	        _pos = pos;
-        }
-
-        void clear()
-        {
-            _buffer.clear();
-	        _pos = 0;
-        }
-
-        bool empty() const
-        {
-            return _buffer.size() <= _pos;
-        }
-
-        void addBuffer(const vector<char> &buffer)
-        {
-            _buffer.insert(_buffer.end(), buffer.begin(), buffer.end());
-        }
-
-        void assign(const char *buffer, size_t length, size_t pos = 0)
-        {
-            _buffer.assign(buffer, buffer + length);
-	        _pos = pos;
-        }
-
-        void setBuffer(const vector<char> &buff, size_t pos = 0)
-        {
-	        _buffer  = buff;
-	        _pos     = pos;
-        }
-
-	    char *buffer() { return _buffer.data() + _pos; }
-
-        const char *buffer() const { return _buffer.data() + _pos; }
-
-        size_t length() const { return _buffer.size() - _pos; }
-
-        size_t pos() const { return _pos; }
-
-        char &at(size_t offset)
-        {
-	    	if(_pos + offset >= _buffer.size() )
-	    		throw TC_NetWorkBuffer_Exception("[TC_NetWorkBuffer::Buffer] at '" + TC_Common::tostr(offset) + "' offset overflow");
-	    	return _buffer[_pos + offset];
-        }
-
-	    char at(size_t offset) const
-	    {
-		    if(_pos + offset >= _buffer.size() )
-			    throw TC_NetWorkBuffer_Exception("[TC_NetWorkBuffer::Buffer] at '" + TC_Common::tostr(offset) + "' offset overflow");
-		    return _buffer[_pos + offset];
-	    }
-
-	    void add(uint32_t ret)
-        {
-	        _pos += ret;
-            assert(_pos <= _buffer.size());
-        }
-
-    protected:
-	    vector<char>    _buffer;
-	    size_t          _pos = 0;
-
-    };
+	 */
+	typedef std::function<PACKET_TYPE(TC_NetWorkBuffer &, vector<char> &)> protocol_functor;
+
+	/**
+	   * buffer
+	   */
+	class Buffer
+	{
+	public:
+
+		/**
+		 * buffer构造
+		 */
+		Buffer() {}
+
+		/**
+		 * 直接用数据构造
+		 * @param buff
+		 * @param len
+		 */
+		Buffer(const char *buff, size_t len) { addBuffer(buff,len); }
+
+		/**
+		 * 析构函数
+		 */
+		~Buffer()
+		{
+			if(_buffer)
+			{
+				delete[] _buffer;
+				_buffer = NULL;
+			}
+		}
+
+		/**
+		 * 清除_readIdx, _writeIdx值
+		 * 不释放buffer内存空间
+		 */
+		inline void clear()
+		{
+			_readIdx = 0;
+			_writeIdx = 0;
+		}
+
+		/**
+		 * buffer是否是空的
+		 * @return
+		 */
+		inline bool empty() const { assert(_readIdx <= _writeIdx); return _readIdx == _writeIdx; }
+
+		/**
+		 *  清除当前数据并分配空间(拥有buffer的生命周期)
+		 *  如果buffer当前容量已经大于len, 则不重新分配, 只是清除数据
+		 */
+		void alloc(size_t len);
+
+		/**
+		 * 保留当前数据, 有需要则扩展空间容量, 并移除无效数据
+		 *
+		 * @param capacity, 拓展后的空间容量, 如果当前容量>capacity, 保持
+		 */
+		void expansion(size_t capacity);
+
+		/**
+		 * 压缩空间, 剔除无效数据, 相当于把有效数据copy到头部
+		 */
+		void compact();
+
+		/**
+		 *  copy数据到空间(增加到最后)
+		 */
+		inline void addBuffer(const string &buff) { addBuffer(buff.data(), buff.size()); }
+
+		/**
+		 *  copy数据到空间(增加到最后)
+		 */
+		inline void addBuffer(const vector<char> &buff) { addBuffer(buff.data(), buff.size()); }
+
+		/**
+		 *  copy数据到空间(增加到最后)
+		 */
+		void addBuffer(const char *buff, size_t len);
+
+		/**
+		 *  替换buffer, 不做数据copy, 只是设置buff指针, 后续buff的生命周期被Buffer管理了
+		 *  @param buff, 指针
+		 *  @param len, buff长度
+		 */
+		void replaceBuffer(const char *buff, size_t len);
+
+		/**
+		 *  设置buffer, 做数据copy
+		 */
+		void setBuffer(const char *buff, size_t len);
+
+		/**
+		 * 设置buffer, 数据copy
+		 * @param buff
+		 */
+		inline void setBuffer(const vector<char> &buff)
+		{
+			setBuffer(buff.data(), buff.size());
+		}
+
+		/**
+		 * 设置buffer, 数据copy
+		 * @param buff
+		 */
+		inline void setBuffer(const string &buff)
+		{
+			setBuffer(buff.c_str(), buff.size());
+		}
+
+		/**
+		 * 可读取的buffer首地址
+		 * @return
+		 */
+		inline char *buffer() { return (char*)_buffer + _readIdx; }
+
+		/**
+		 * 可读取的buffer首地址
+		 * @return
+		 */
+		inline const char *buffer() const { return _buffer + _readIdx; }
+
+		/**
+		 * 可写入的buffer首地址
+		 * @return
+		 */
+		inline char *free() { return (char*)_buffer + _writeIdx; }
+
+		/**
+		 * 可写入的buffer首地址
+		 * @return
+		 */
+		inline const char *free() const { return _buffer + _writeIdx; }
+
+		/**
+		 * 有效buffer的长度
+		 * @return
+		 */
+		inline size_t length() const { return _writeIdx - _readIdx; }
+
+		/**
+		 * 整个buffer的容量
+		 * @return
+		 */
+		inline size_t capacity() const { return _capacity; }
+
+		/**
+		 * buffer中剩余可写入数据容量
+		 * @return
+		 */
+		inline size_t left() const { return _capacity - _writeIdx; }
+
+		/**
+		 * 读索引
+		 * @return
+		 */
+		inline size_t readIdx() const { return _readIdx; }
+
+		/**
+		 * 写索引
+		 * @return
+		 */
+		inline size_t writeIdx() const { return _writeIdx; }
+
+		/**
+		 * 获取偏移为offset的有效数据的字节
+		 * @param offset
+		 * @return
+		 */
+		inline char &at(size_t offset)
+		{
+			if(_readIdx + offset >= _writeIdx )
+				throw TC_NetWorkBuffer_Exception("[TC_NetWorkBuffer::Buffer] at '" + TC_Common::tostr(offset) + "' offset overflow");
+			return *(char*)(_buffer + _readIdx + offset);
+		}
+
+		/**
+		 * 获取偏移为offset的有效数据的字节
+		 * @param offset
+		 * @return
+		 */
+		inline char at(size_t offset) const
+		{
+			if(_readIdx + offset >= _writeIdx )
+				throw TC_NetWorkBuffer_Exception("[TC_NetWorkBuffer::Buffer] at '" + TC_Common::tostr(offset) + "' offset overflow.");
+			return *(char*)(_buffer + _readIdx + offset);
+		}
+
+		/**
+		 * 增加读索引
+		 * @param len
+		 */
+		inline void addReadIdx(uint32_t len)
+		{
+			if(_readIdx + len > _writeIdx)
+			{
+				throw TC_NetWorkBuffer_Exception("[TC_NetWorkBuffer::Buffer::addReadIdx] len:" + TC_Common::tostr(len) + " overflow.");
+			}
+
+			_readIdx += len;
+		}
+
+		/**
+		 * 增加写索引
+		 * @param len
+		 */
+		inline void addWriteIdx(uint32_t len)
+		{
+			if(len > left())
+			{
+				throw TC_NetWorkBuffer_Exception("[TC_NetWorkBuffer::Buffer::addWriteIdx] len:" + TC_Common::tostr(len) + " overflow.");
+			}
+
+//			assert(ret <= left());
+
+			_writeIdx += len;
+		}
+
+		friend class TC_NetWorkBuffer;
+	protected:
+		/**
+		 * buffer pointer, 内存空间:[0, _capacity), 实际数据: [_readIdx, _writeIdx)
+		 */
+		const char *	_buffer 	= NULL;
+
+		/**
+		 * buffer 可读数据索引, 从_readIdx开始可以读数据, 直到读取到<_writeIdx
+		 */
+		size_t          _readIdx 	= 0;
+
+		/**
+		 * buffer 可写数据索引, 从_writeIdx开始可以写数据, 直到写到<_capacity
+		 */
+		size_t			_writeIdx 	= 0;
+
+		/**
+		 * 总内存空间
+		 */
+		size_t			_capacity 	= 1024*8;
+	};
+
 
 	typedef std::list<std::shared_ptr<Buffer>>::const_iterator buffer_list_iterator;
 
@@ -358,24 +533,24 @@ public:
 		size_t                  _pos = 0;
 	};
 
-    /**
-     * 必须以connection来构造(不同服务模型中获取的对象不一样, 需要自己强制转换)
+	/**
+	 * 必须以connection来构造(不同服务模型中获取的对象不一样, 需要自己强制转换)
 	 * Must be constructed as a connection (different service models get different objects and need to cast themselves)
-     * @param buff
-     */
-    TC_NetWorkBuffer(void *connection) { _connection = connection; }
+	 * @param buff
+	 */
+	TC_NetWorkBuffer(void *connection) { _connection = connection; }
 
-    /**
-     * deconstruct
-     * @param buff
-     */    
-    ~TC_NetWorkBuffer()
-    {
-        if(_deconstruct)
-        {
-            _deconstruct(this);
-        }
-    }
+	/**
+	 * deconstruct
+	 * @param buff
+	 */
+	~TC_NetWorkBuffer()
+	{
+		if(_deconstruct)
+		{
+			_deconstruct(this);
+		}
+	}
 
 	/**
 	 * 获取connection, 不同服务模型中获取的对象不一样, 需要自己强制转换
@@ -391,19 +566,19 @@ public:
 	 */
 	void setConnection(void *connection) { _connection = connection; }
 
-    /**
-     * 设置上下文数据, 可以业务存放数据
+	/**
+	 * 设置上下文数据, 可以业务存放数据
 	 * Set up context data to allow business to store data
-     * @param buff
-     */
-    void setContextData(void *contextData, std::function<void(TC_NetWorkBuffer*)> deconstruct = std::function<void(TC_NetWorkBuffer*)>() ) { _contextData = contextData; _deconstruct = deconstruct; }
+	 * @param buff
+	 */
+	void setContextData(void *contextData, std::function<void(TC_NetWorkBuffer*)> deconstruct = std::function<void(TC_NetWorkBuffer*)>() ) { _contextData = contextData; _deconstruct = deconstruct; }
 
-    /**
-     * 获取上下文数据,  给业务存放数据
+	/**
+	 * 获取上下文数据,  给业务存放数据
 	 * Get context data, store data for business
-     * @param buff
-     */
-    void *getContextData() { return _contextData; }
+	 * @param buff
+	 */
+	void *getContextData() { return _contextData; }
 
 	/**
 	 * 增加buffer
@@ -412,12 +587,12 @@ public:
 	 */
 	void addBuffer(const std::shared_ptr<Buffer> & buff);
 
-    /**
-     * 增加buffer
+	/**
+	 * 增加buffer
 	 * Add buffer
-     * @param buff
-     */
-    void addBuffer(const std::vector<char>& buff);
+	 * @param buff
+	 */
+	void addBuffer(const std::vector<char>& buff);
 
 	/**
 	 * 增加buffer
@@ -432,12 +607,41 @@ public:
      * @param buff
      * @param length
      */
-    void addBuffer(const char* buff, size_t length);
+	void addBuffer(const char* buff, size_t length);
 
-    /**
-     * begin
-     * @return
-     */
+	/**
+	 * 获取或者创建一个Buffer, 保证返回的Buffer至少有minLeftCapacity, 最多有maxLeftCapacity的剩余容量!
+	 * @param minLeftCapacity
+	 * @param maxLeftCapacity
+	 * @return
+	 */
+	shared_ptr<Buffer> getOrCreateBuffer(size_t minLeftCapacity, size_t maxLeftCapacity);
+
+	/**
+	 * 添加数据长度(仅仅调整总体长度, 不调整BufferList中的任何内容)
+	 */
+	void addLength(size_t length);
+
+	/**
+	 * 减去长度(仅仅调整总体长度, 不调整BufferList中的任何内容)
+	 */
+	void subLength(size_t length);
+
+	/**
+	 * 根据当前buffer重新计算length信息
+	 */ 
+	void compute();
+
+	/**
+	 * 获取第一个Buffer对象, 如果不存在则返回空对象
+	 * @return
+	 */
+	shared_ptr<Buffer> getBuffer();
+
+	/**
+	 * begin
+	 * @return
+	 */
 	buffer_iterator begin() const;
 
 	/**
@@ -510,71 +714,79 @@ public:
      * 清空所有buffer
 	 * Empty all buffers
      */
-    void clearBuffers();
+	void clearBuffers();
 
-    /**
-     * 是否为空的
+	/**
+	 * 是否为空的
 	 * Is it empty
-     */
-    bool empty() const;
+	 */
+	bool empty() const;
 
-    /**
-     * 返回所有buffer累计的字节数
+	/**
+	 * 返回所有buffer累计的字节数
 	 * Returns the cumulative number of bytes for all buffers
-     * @return size_t
-     */
-    size_t getBufferLength() const;
+	 * @return size_t
+	 */
+	size_t getBufferLength() const;
 
-    /**
-     * buffer list length
-     * @return
-     */
-    size_t size() const { return _bufferList.size(); }
+	/**
+	 * buffer list length
+	 * @return
+	 */
+	size_t listSize() const { return _bufferList.size(); }
 
-    /**
-     * 获取第一块有效数据buffer的指针, 可以用来发送数据
+	/**
+	 * 获取第一块有效数据buffer的指针, 可以用来发送数据
 	 * A pointer to get the first valid data buffer that can be used to send data
-     * @return
-     */
-    pair<const char*, size_t> getBufferPointer() const;
+	 * @return
+	 */
+	pair<const char*, size_t> getBufferPointer() const;
 
-    /**
-     * 将链表上的所有buffer拼接起来
+	/**
+	 * 将链表上的所有buffer拼接起来
 	 * Stitch together all buffers on the list
-     * @return const char *, 返回第一个数据buffer的指针, 为空则返回NULL
+	 * @return const char *, 返回第一个数据buffer的指针, 为空则返回NULL
 	 * @return const char *, Returns a pointer to the first data buffer, or NULL if empty
-     */
-    const char * mergeBuffers();
+	 */
+	const char * mergeBuffers();
+
+	/**
+	 * 返回所有buffer(将所有buffer拼接起来, 注意性能)
+	 * Return all buffers (stitch all buffers together, pay attention to performance)
+	 * @param buff, return buff
+	 * @return
+	 */
+	void getBuffers(shared_ptr<Buffer> &buff) const;
 
-    /**
-     * 返回所有buffer(将所有buffer拼接起来, 注意性能)
+	/**
+	 * 返回所有buffer(将所有buffer拼接起来, 注意性能)
 	 * Return all buffers (stitch all buffers together, pay attention to performance)
-     * @return string
-     */
-    vector<char> getBuffers() const;
+	 * @return string
+	 */
+	vector<char> getBuffers() const;
 
-    /**
-     * 返回所有buffer(将所有buffer拼接起来, 注意性能)
+	/**
+	 * 返回所有buffer(将所有buffer拼接起来, 注意性能)
 	 * Return all buffers (stitch all buffers together, pay attention to performance)
-     * @return string
-     */
-    string getBuffersString() const;
+	 * @return string
+	 */
+	string getBuffersString() const;
 
-    /**
-     * 读取len字节的buffer(避免len个字节被分割到多个buffer的情况)(注意: 不往后移动)
+	/**
+	 * 读取len字节的buffer(避免len个字节被分割到多个buffer的情况)(注意: 不往后移动)
 	 * Read buffer of len bytes (to avoid splitting len bytes into multiple buffers) (Note: Do not move backwards)
-     * @param len
-     * @return
-     */
-    bool getHeader(size_t len, std::string &buffer) const;
+	 * @param len
+	 * @return
+	 */
+	bool getHeader(size_t len, std::string &buffer) const;
 
-    /**
-     * 读取len字节的buffer(避免len个字节被分割到多个buffer的情况)(注意: 不往后移动)
+	/**
+	 * 读取len字节的buffer(避免len个字节被分割到多个buffer的情况)(注意: 不往后移动)
 	 * Read buffer of len bytes (to avoid splitting len bytes into multiple buffers) (Note: Do not move backwards)
-     * @param len
-     * @return
-     */
-    bool getHeader(size_t len, std::vector<char> &buffer) const;
+	 * @param len
+	 * @return
+	 */
+	bool getHeader(size_t len, std::vector<char> &buffer) const;
 
 	/**
 	 * 读取len字节的buffer(避免len个字节被分割到多个buffer的情况)(注意: 不往后移动)
@@ -583,8 +795,8 @@ public:
 	 * @param len
 	 * @return 不够会抛异常TC_NetWorkBuffer_Exception
 	 */
-    template<typename T>
-    T getHeader(size_t len) const
+	template<typename T>
+	T getHeader(size_t len) const
 	{
 		if(getBufferLength() < len)
 		{
@@ -605,240 +817,237 @@ public:
 		return buffer;
 	}
 
-    /**
-     * 往后移动len个字节
+	/**
+	 * 往后移动len个字节
 	 * Move len bytes backward
-     * @param len
-     */
-    bool moveHeader(size_t len);
+	 * @param len
+	 */
+	bool moveHeader(size_t len);
 
-    /**
-    * 取二个字节(字节序)的整型值, 如果长度<1, 返回0
+	/**
+	* 取一个字节的整型值, 如果长度<1, 返回0
 	* Take an integer value of one byte, and return 0 if the length is less than 1
+	* @return uint8_t
-    */
-    uint8_t getValueOf1() const;
+	* @return int8_t
+	*/
+	uint8_t getValueOf1() const;
 
-    /**
-    * 取二个字节(字节序)的整型值, 如果长度<2, 返回0
+	/**
+	* 取二个字节(字节序)的整型值, 如果长度<2, 返回0
 	* Take an integer value of two bytes (byte order), and return 0 if the length is less than 2
-    * @return int16_t
-    */
-    uint16_t getValueOf2() const;
+	* @return int16_t
+	*/
+	uint16_t getValueOf2() const;
 
-    /**
-     * 取四个字节(字节序)的整型值, 如果长度<4, 返回0
+	/**
+	 * 取四个字节(字节序)的整型值, 如果长度<4, 返回0
 	 * Take an integer value of four bytes (byte order), and return 0 if the length is less than 4
-     * @return int32_t
-     */
-    uint32_t getValueOf4() const;
+	 * @return int32_t
+	 */
+	uint32_t getValueOf4() const;
 
-    /**
-     * http协议判读
+	/**
+	 * http协议判读
 	 * HTTP protocol interpretation
-     * @return
-     */
-    TC_NetWorkBuffer::PACKET_TYPE checkHttp();
+	 * @return
+	 */
+	TC_NetWorkBuffer::PACKET_TYPE checkHttp();
 
-    /**
-    * 解析一个包头是1字节的包, 把包体解析出来(解析后, 往后移动)
+	/**
+	* 解析一个包头是1字节的包, 把包体解析出来(解析后, 往后移动)
 	* Parse a package with a 1-byte header and parse out the package body (move back after parsing)
-    * 注意: buffer只返回包体, 不包括头部的1个字节的长度
+	* 注意: buffer只返回包体, 不包括头部的1个字节的长度
 	* Note: Buffer only returns the package, not including the length of one byte of the head
-    * @param buffer, 输出的buffer
+	* @param buffer, 输出的buffer
 	* @param buffer, Output buffer
-    * @param minLength, buffer最小长度, 如果小于, 则认为是错误包, 会返回PACKET_ERR
+	* @param minLength, buffer最小长度, 如果小于, 则认为是错误包, 会返回PACKET_ERR
 	* @param minLength, minimum buffer length, if less than, is considered an error package and returns PACKET_ERR
-    * @param maxLength, buffer最大长度, 如果超过, 则认为是错误包, 会返回PACKET_ERR
+	* @param maxLength, buffer最大长度, 如果超过, 则认为是错误包, 会返回PACKET_ERR
 	* @param maxLength, maximum buffer length, if exceeded, is considered an error package and returns PACKET_ERR
-    * @return PACKET_TYPE
-    */
-    PACKET_TYPE parseBufferOf1(vector<char> &buffer, uint8_t minLength, uint8_t maxLength);
+	* @return PACKET_TYPE
+	*/
+	PACKET_TYPE parseBufferOf1(vector<char> &buffer, uint8_t minLength, uint8_t maxLength);
 
-    /**
-    * 解析一个包头是2字节(字节序)的包, 把包体解析出来(解析后, 往后移动)
+	/**
+	* 解析一个包头是2字节(字节序)的包, 把包体解析出来(解析后, 往后移动)
 	* Parse a 2-byte (byte order) packet header and parse the package body (move back after parsing)
-    * 注意: buffer只返回包体, 不包括头部的2个字节的长度
+	* 注意: buffer只返回包体, 不包括头部的2个字节的长度
 	* Note: Buffer only returns the package, not including the length of 2 bytes of the header
-    * @param minLength, buffer最小长度, 如果小于, 则认为是错误包, 会返回PACKET_ERR
+	* @param minLength, buffer最小长度, 如果小于, 则认为是错误包, 会返回PACKET_ERR
 	* @param minLength, minimum buffer length, if less than, is considered an error package and returns PACKET_ERR
-    * @param maxLength, buffer最大长度, 如果超过, 则认为是错误包, 会返回PACKET_ERR
+	* @param maxLength, buffer最大长度, 如果超过, 则认为是错误包, 会返回PACKET_ERR
 	* @param maxLength, maximum buffer length, if exceeded, is considered an error package and returns PACKET_ERR
-    * @return PACKET_TYPE
-    */
-    PACKET_TYPE parseBufferOf2(vector<char> &buffer, uint16_t minLength, uint16_t maxLength);
+	* @return PACKET_TYPE
+	*/
+	PACKET_TYPE parseBufferOf2(vector<char> &buffer, uint16_t minLength, uint16_t maxLength);
 
-    /**
-    * 解析一个包头是4字节(字节序)的包, 把包体解析出来(解析后, 往后移动)
+	/**
+	* 解析一个包头是4字节(字节序)的包, 把包体解析出来(解析后, 往后移动)
 	* Parse a package whose header is a 4-byte (byte order) package and parse out the package body (move back after parsing)
-    * 注意: buffer只返回包体, 不包括头部的4个字节的长度
+	* 注意: buffer只返回包体, 不包括头部的4个字节的长度
 	* Note: Buffer only returns the package, not including the length of 4 bytes of the header
-    * @param minLength, buffer最小长度, 如果小于, 则认为是错误包, 会返回PACKET_ERR
+	* @param minLength, buffer最小长度, 如果小于, 则认为是错误包, 会返回PACKET_ERR
 	* @param minLength, minimum buffer length, if less than, is considered an error package and returns PACKET_ERR
-    * @param maxLength, buffer最大长度, 如果超过, 则认为是错误包, 会返回PACKET_ERR
+	* @param maxLength, buffer最大长度, 如果超过, 则认为是错误包, 会返回PACKET_ERR
 	* @param maxLength, maximum buffer length, if exceeded, is considered an error package and returns PACKET_ERR
-    * @return PACKET_TYPE
-     */
-    PACKET_TYPE parseBufferOf4(vector<char> &buffer, uint32_t minLength, uint32_t maxLength);
+	* @return PACKET_TYPE
+	 */
+	PACKET_TYPE parseBufferOf4(vector<char> &buffer, uint32_t minLength, uint32_t maxLength);
 
-    /**
-     * 解析二进制包, 1字节长度+包体(iMinLength<包长<iMaxLength, 否则返回PACKET_ERR)
+	/**
+	 * 解析二进制包, 1字节长度+包体(iMinLength<包长<iMaxLength, 否则返回PACKET_ERR)
 	 * Parse binary package, 1 byte length + package (iMinLength<package length<iMaxLength, otherwise return PACKET_ERR)
-     * 注意: out只返回包体, 不包括头部的1个字节的长度
+	 * 注意: out只返回包体, 不包括头部的1个字节的长度
 	 * Note: out only returns the package, not including the length of one byte of the head
-     * @param in
-     * @param out
-     * @return
-     */
-    template<uint8_t iMinLength, uint8_t iMaxLength>
-    static TC_NetWorkBuffer::PACKET_TYPE parseBinary1(TC_NetWorkBuffer&in, vector<char> &out)
-    {
-        return in.parseBufferOf1(out, iMinLength, iMaxLength);
-    }
-
-    /**
-     * 解析二进制包, 2字节长度(字节序)+包体(iMinLength<包长<iMaxLength, 否则返回PACKET_ERR)
+	 * @param in
+	 * @param out
+	 * @return
+	 */
+	template<uint8_t iMinLength, uint8_t iMaxLength>
+	static TC_NetWorkBuffer::PACKET_TYPE parseBinary1(TC_NetWorkBuffer&in, vector<char> &out)
+	{
+		return in.parseBufferOf1(out, iMinLength, iMaxLength);
+	}
+
+	/**
+	 * 解析二进制包, 2字节长度(字节序)+包体(iMinLength<包长<iMaxLength, 否则返回PACKET_ERR)
 	 * Parse binary package, 2 byte length (byte order) + package (iMinLength<package length<iMaxLength, otherwise return PACKET_ERR)
-     * 注意: out只返回包体, 不包括头部的2个字节的长度
+	 * 注意: out只返回包体, 不包括头部的2个字节的长度
 	 * Note: out returns only the package, not the length of 2 bytes of the header
-     * @param in
-     * @param out
-     * @return
-     */
-    template<uint16_t iMinLength, uint16_t iMaxLength>
-    static TC_NetWorkBuffer::PACKET_TYPE parseBinary2(TC_NetWorkBuffer&in, vector<char> &out)
-    {
-        return in.parseBufferOf2(out, iMinLength, iMaxLength);
-    }
-
-    /**
-     * 解析二进制包, 4字节长度(字节序)+包体(iMinLength<包长<iMaxLength, 否则返回PACKET_ERR)
+	 * @param in
+	 * @param out
+	 * @return
+	 */
+	template<uint16_t iMinLength, uint16_t iMaxLength>
+	static TC_NetWorkBuffer::PACKET_TYPE parseBinary2(TC_NetWorkBuffer&in, vector<char> &out)
+	{
+		return in.parseBufferOf2(out, iMinLength, iMaxLength);
+	}
+
+	/**
+	 * 解析二进制包, 4字节长度(字节序)+包体(iMinLength<包长<iMaxLength, 否则返回PACKET_ERR)
 	 * Parse binary package, 4 byte length (byte order) + package (iMinLength<package length<iMaxLength, otherwise return PACKET_ERR)
-     * 注意: out只返回包体, 不包括头部的4个字节的长度
+	 * 注意: out只返回包体, 不包括头部的4个字节的长度
 	 * Note: out only returns the package, not including the length of 4 bytes of the head
-     * @param in
-     * @param out
-     * @return
-     */
-    template<uint32_t iMinLength, uint32_t iMaxLength>
-    static TC_NetWorkBuffer::PACKET_TYPE parseBinary4(TC_NetWorkBuffer&in, vector<char> &out)
-    {
-        return in.parseBufferOf4(out, iMinLength, iMaxLength);
-    }
-
-    /**
-     * http1
-     * @param in
-     * @param out
-     * @return
-     */
-    static TC_NetWorkBuffer::PACKET_TYPE parseHttp(TC_NetWorkBuffer&in, vector<char> &out);
+	 * @param in
+	 * @param out
+	 * @return
+	 */
+	template<uint32_t iMinLength, uint32_t iMaxLength>
+	static TC_NetWorkBuffer::PACKET_TYPE parseBinary4(TC_NetWorkBuffer&in, vector<char> &out)
+	{
+		return in.parseBufferOf4(out, iMinLength, iMaxLength);
+	}
 
-    /**
-     * echo
-     * @param in
-     * @param out
-     * @return
-     */
-    static TC_NetWorkBuffer::PACKET_TYPE parseEcho(TC_NetWorkBuffer&in, vector<char> &out);
+	/**
+	 * http1
+	 * @param in
+	 * @param out
+	 * @return
+	 */
+	static TC_NetWorkBuffer::PACKET_TYPE parseHttp(TC_NetWorkBuffer&in, vector<char> &out);
 
-    /**
-    * echo
-    * @param in
-    * @param out
-    * @return
-    */
-    static TC_NetWorkBuffer::PACKET_TYPE parseJson(TC_NetWorkBuffer&in, vector<char> &out);
+	/**
+	 * echo
+	 * @param in
+	 * @param out
+	 * @return
+	 */
+	static TC_NetWorkBuffer::PACKET_TYPE parseEcho(TC_NetWorkBuffer&in, vector<char> &out);
 
 protected:
 
 	size_t getBuffers(char *buffer, size_t length) const;
 
-    template<typename T>
-    T getValue() const
-    {
-	    vector<char> buffer;
-
-        if(getHeader(sizeof(T), buffer))
-        {
-            if(sizeof(T) == 2)
-            {
-                return ntohs(*(uint16_t*)buffer.data());
-            }
-            else if(sizeof(T) == 4)
-            {
-                return ntohl(*(uint32_t*)buffer.data());
-            }
-            return *((T*)buffer.data());
-        }
-        return 0;
-    }
-
-    template<typename T>
-    TC_NetWorkBuffer::PACKET_TYPE parseBuffer(vector<char> &buffer, T minLength, T maxLength)
-    {
-        if(getBufferLength() < sizeof(T))
-        {
-            return PACKET_LESS;
-        }
-
-        if(minLength < sizeof(T))
-            minLength = sizeof(T);
-
-        T length = getValue<T>();
-
-        if(length < minLength || length > maxLength)
-        {
-            return PACKET_ERR;
-        }
-
-        if(getBufferLength() < length)
-        {
-            return PACKET_LESS;
-        }
-
-        //往后移动
+	template<typename T>
+	T getValue() const
+	{
+		vector<char> buffer;
+
+		if(getHeader(sizeof(T), buffer))
+		{
+			if(sizeof(T) == 2)
+			{
+				return ntohs(*(uint16_t*)buffer.data());
+			}
+			else if(sizeof(T) == 4)
+			{
+				return ntohl(*(uint32_t*)buffer.data());
+			}
+			return *((T*)buffer.data());
+		}
+		return 0;
+	}
+
+	template<typename T>
+	TC_NetWorkBuffer::PACKET_TYPE parseBuffer(vector<char> &buffer, T minLength, T maxLength)
+	{
+		if(getBufferLength() < sizeof(T))
+		{
+			return PACKET_LESS;
+		}
+
+		if(minLength < sizeof(T))
+			minLength = sizeof(T);
+
+		T length = getValue<T>();
+
+		if(length < minLength || length > maxLength)
+		{
+			return PACKET_ERR;
+		}
+
+		if(getBufferLength() < length)
+		{
+			return PACKET_LESS;
+		}
+
+		//往后移动
 		//move backward
-        moveHeader(sizeof(T));
+		moveHeader(sizeof(T));
 
-        //读取length长度的buffer
+		//读取length长度的buffer
 		//Read buffer of length length
-        if(!getHeader(length - sizeof(T), buffer))
-        {
-            return PACKET_LESS;
-        }
+		if(!getHeader(length - sizeof(T), buffer))
+		{
+			return PACKET_LESS;
+		}
 
-        moveHeader(length - sizeof(T));
-        return PACKET_FULL;
-    }
+		moveHeader(length - sizeof(T));
+		return PACKET_FULL;
+	}
 
 protected:
-    /**
-     * 连接信息(不同的类里面不一样)
+	/**
+	 * 连接信息(不同的类里面不一样)
 	 * Connection information (different within different classes)
-     */
-    void*   _connection = NULL;
+	 */
+	void*   _connection = NULL;
 
-    /**
-     * contextData for use
-     */
-    void*   _contextData = NULL;
+	/**
+	 * contextData for use
+	 */
+	void*   _contextData = NULL;
 
-    /**
-     * deconstruct contextData
-     */
-    std::function<void(TC_NetWorkBuffer*)> _deconstruct;
+	/**
+	 * deconstruct contextData
+	 */
+	std::function<void(TC_NetWorkBuffer*)> _deconstruct;
 
-    /**
-     * buffer list
-     */
+	/**
+	 * buffer list
+	 */
 	std::list<std::shared_ptr<Buffer>> _bufferList;
 
 	/**
 	 * buffer剩余没解析的字节总数
 	 * Total number of bytes left unresolved by buffer
 	 */
-    size_t _length = 0;
+	size_t _length = 0;
+
+	/**
+	 * 缺省的buffer, 作为第一个buff, 这样保证第一个buff不会被释放, 从而能复用空间, 避免经常性的new空间
+	 */
+	std::shared_ptr<Buffer> 	_defaultBuff;
 
 };
 

+ 1 - 1
util/include/util/tc_option.h

@@ -91,7 +91,7 @@ public:
      * @return string 标识的参数值
      * @return string Identified parameter values
      */
-    string getValue(const string &sName) const;
+    string getValue(const string &sName, const string &def = "") const;
 
     /**
      * @brief 获取所有--标识的参数.

+ 32 - 17
util/include/util/tc_port.h

@@ -23,6 +23,7 @@ typedef unsigned short mode_t;
 #endif
 
 #include <stdio.h>
+#include <atomic>
 #include <string>
 #include <vector>
 #include <functional>
@@ -37,6 +38,7 @@ namespace tars
 class TC_Port
 {
 public:
+
     /**
      * @brief 在s1的长度n中搜索s2
      * @return 搜索到的指针, 找不到返回NULL
@@ -82,35 +84,48 @@ public:
 
     static void setEnv(const std::string &name, const std::string &value);
 
-    /**
-     * exec command
-     * @param cmd
-     * @return string
-     */
-	static std::string exec(const char* cmd);
+    static std::string exec(const char* cmd);
+	static std::string exec(const char* cmd, std::string &err);
 
 	/**
-	 * exec command
-	 *
-	 * @param cmd
-	 * @param errstr, if error, get error message
-	 * @return string
+	 * 注册ctrl+c回调事件(SIGINT/CTRL_C_EVENT)
+	 * @param callback
+	 * @return size_t, 注册事件的id, 取消注册时需要
 	 */
-    static std::string exec(const char* cmd, std::string &errstr);
-	
-    static void registerCtrlC(std::function<void()> callback);
+	static size_t registerCtrlC(std::function<void()> callback);
 
-	static void registerTerm(std::function<void()> callback);
+	/**
+	 * 取消注册ctrl+c回调事件
+	 * @param callback
+	 * @return
+	 */
+	static void unregisterCtrlC(size_t id);
+
+	/**
+	 * 注册term事件的回调(SIGTERM/CTRL_SHUTDOWN_EVENT)
+	 * @param callback
+	 * @return size_t, 注册事件的id, 取消注册时需要
+	 */
+	static size_t registerTerm(std::function<void()> callback);
+
+	/**
+	 * 取消注册
+	 * @param id
+	 */
+	static void unregisterTerm(size_t id);
 
 protected:
 
-	static void registerSig(int sig, std::function<void()> callback);
+	static size_t registerSig(int sig, std::function<void()> callback);
+	static void unregisterSig(int sig, size_t id);
 
 	static void registerSig(int sig);
 
     static std::mutex   _mutex;
 
-	static unordered_map<int, vector<std::function<void()>>> _callbacks;
+    static unordered_map<int, unordered_map<size_t, std::function<void()>>> _callbacks;
+
+    static std::atomic<size_t> _callbackId;
 
 #if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
     static void sighandler( int sig_no );

+ 280 - 0
util/include/util/tc_proxy_info.h

@@ -0,0 +1,280 @@
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#ifndef TC_CPP_PROXYINFO_H
+#define TC_CPP_PROXYINFO_H
+
+#include "util/tc_clientsocket.h"
+
+namespace tars
+{
+
+/**
+ * sock4 & sock5 & http代理协议支持, 注意sock5的udp模式没有测试过!
+ * 注意: TC_ProxyInfo 中包含状态机, 使用时, 每个连接都应该有一个该对象, 并且连接断掉要调用reset重置
+ *
+ * 使用方式:
+ * 1 构建对应的代理对象, 丢给指针TC_ProxyInfo*
+ * 2 发送网络包之前, 检查isSuccess
+ * 3 如果不成功, 则调用sendProxyPacket, 构建鉴权包,发送给代理
+ * 4 接受响应包, 检查isSuccess
+ * 5 如果不成功, 则调用recvProxyPacket, 解析响应包
+ * 6 检查isSuccess, 如果不成功, 则跳转到3, 直到isSuccess
+ * 7 期间检查sendProxyPacket & recvProxyPacket的返回值, 如果为false, 则表示鉴权不通过.
+ */
+class TC_ProxyInfo
+{
+public:
+	TC_ProxyInfo(const TC_Endpoint & ep, const string & user, const string & pass)
+		: _stage(eProxy_Stage_Establish), _ep(ep), _user(user), _pass(pass)
+	{
+	}
+
+	virtual ~TC_ProxyInfo() {}
+
+	/**
+	 * 设置代理类型
+	 */
+    enum EMProxyType
+    {
+        eProxy_Type_Sock4,
+        eProxy_Type_Sock5,
+        eProxy_Type_Http,
+    };
+
+    /**
+     * 设置代理阶段
+     */
+	enum EMProxyStageType
+	{
+		eProxy_Stage_Establish,
+		eProxy_Stage_ACK1,
+		eProxy_Stage_ACK2,
+		eProxy_Stage_Connected,
+		eProxy_Stage_DisConn,
+	};
+
+	/**
+	 * 代理的基本信息
+	 */
+    struct ProxyBaseInfo
+    {
+        EMProxyType     type;
+        TC_Endpoint     ep;
+        std::string     user;
+        std::string     pass;
+    };
+
+    typedef std::function<void(EMProxyStageType)> ProxyStageChangedCallback;
+
+	/**
+	 * 创建代理对象
+	 * @param baseInfo.type 代理类型
+	 * @param baseInfo.ep   代理地址
+	 * @param baseInfo.user 用户名
+	 * @param baseInfo.pass 密码
+	 * @return
+	 */
+	static shared_ptr<TC_ProxyInfo> createProxyInfo(const ProxyBaseInfo &baseInfo);
+
+	/**
+	 * 返回代理地址
+	 * @return
+	 */
+	const TC_Endpoint & getEndpoint() const { return _ep; }
+
+	/**
+	 * 是否成功
+	 * @return
+	 */
+    bool isSuccess() { return _stage == eProxy_Stage_Connected; }
+
+    /**
+     * 重置状态
+     */
+    void reset() { _stage = eProxy_Stage_Establish; }
+
+    /**
+     * 获取错误信息, sendProxyPacket & recvProxyPacket 返回false, 则可以通过getErrMsg获取错误信息
+     * @return
+     */
+    const string &getErrMsg() { return _errMsg; }
+
+    /**
+	 * 构建鉴权包
+	 * @param buff
+	 * @param dst
+	 * @return
+	 */
+	virtual bool sendProxyPacket(vector<char> & buff, const TC_Endpoint & dst) = 0;
+
+	/**
+	 * 解析响应包
+	 * @param buff
+	 * @param length
+	 * @return
+	 */
+	virtual bool recvProxyPacket(const char *buff, size_t length) = 0;
+
+protected:
+	void onDisconnect();
+	void onConnSuccess();
+	void setProxyStage(EMProxyStageType proxyStage);
+
+protected:
+	EMProxyStageType _stage;
+	string		_errMsg;
+	TC_Endpoint _ep;
+	std::string _user;
+	std::string _pass;
+};
+
+class TC_ProxySock4 : public TC_ProxyInfo
+{
+public:
+	TC_ProxySock4(const TC_Endpoint & ep)
+		: TC_ProxyInfo(ep, "", "")
+	{
+	}
+
+protected:
+	static const int kProxy_Sock4_Req1_VN = 4;
+	static const int kProxy_Sock4_Req1_CD = 1;
+
+	static const int kProxy_Sock4_Ans1_VN = 0;
+	static const int kProxy_Sock4_Ans1_CD = 90;
+
+	struct sock4req1
+	{
+		char VN;
+		char CD;
+		char DSTPORT[2];
+		char DSTIP[4];
+		char USERID[1];
+	};
+
+	struct sock4ans1
+	{
+		char VN;
+		char CD;
+		char DSTPORT[2];
+		char DSTIP[4];
+	};
+
+public:
+	virtual bool sendProxyPacket(vector<char> & buff, const TC_Endpoint & dst);
+
+	virtual bool recvProxyPacket(const char *buff, size_t length);
+};
+
+class TC_ProxySock5 : public TC_ProxyInfo
+{
+public:
+	TC_ProxySock5(const TC_Endpoint & ep, const string & user, const string & pass)
+		: TC_ProxyInfo(ep, user, pass)
+	{
+	}
+protected:
+	static const int kProxy_Sock5_Req1_Ver = 5;
+	static const int kProxy_Sock5_Req1_nMethods = 2;
+	static const int kProxy_Sock5_Req1_nMethods0 = 0;       //NO AUTHENTICATION REQUIRED
+	static const int kProxy_Sock5_Req1_nMethods1 = 2;       //USERNAME/PASSWORD
+
+	static const int kProxy_Sock5_Req3_Ver = 5;
+	static const int kProxy_Sock5_Req3_Cmd = 1;
+	static const int kProxy_Sock5_Req3_Rsv = 0;
+	static const int kProxy_Sock5_Req3_AtypIpv4 = 1;
+	static const int kProxy_Sock5_Req3_AtypDns  = 3;
+	static const int kProxy_Sock5_Req3_AtypIpv6 = 4;
+
+	static const int kProxy_Sock5_Ans1_Ver = 5;
+	static const int kProxy_Sock5_Ans1_Method_Anonymous = 0;
+	static const int kProxy_Sock5_Ans1_Method_User = 2;
+
+	static const int kProxy_Sock5_Anthans_Ver = 1;
+	static const int kProxy_Sock5_Anthans_Status = 0;
+
+	static const int kProxy_Sock5_Ans2_Ver = 5;
+	static const int kProxy_Sock5_Ans2_Rep = 0;
+
+	static const int kProxy_IP_Length = 4;
+	static const int kProxy_PORT_Length = 2;
+
+	struct sock5req1
+	{
+		char Ver;
+		char nMethods;
+		char Methods[2];
+		//char Methods[255];
+	};
+
+	struct sock5ans1
+	{
+		char Ver;
+		char Method;
+	};
+
+	struct sock5req2
+	{
+		char Ver;
+		char Cmd;
+		char Rsv;
+		char Atyp;
+		//char other[1];
+		char DSTADDR[4];
+		char DSTPORT[2];
+	};
+
+	struct sock5ans2
+	{
+		char Ver;
+		char Rep;
+		char Rsv;
+		char Atyp;
+		char DSTADDR[4];
+		char DSTPORT[2];
+	};
+
+	struct authans
+	{
+		char Ver;
+		char Status;
+	};
+
+public:
+	virtual bool sendProxyPacket(vector<char> & buff, const TC_Endpoint & dst);
+
+	virtual bool recvProxyPacket(const char *buff, size_t length);
+
+};
+
+class TC_ProxyHttp : public TC_ProxyInfo
+{
+public:
+	TC_ProxyHttp(const TC_Endpoint & ep, const string & user, const string & pass)
+		: TC_ProxyInfo(ep, user, pass)
+	{
+	}
+
+	virtual bool sendProxyPacket(vector<char> & buff, const TC_Endpoint & dst);
+
+	virtual bool recvProxyPacket(const char *buff, size_t length);
+
+};
+
+}
+
+#endif //TC_CPP_PROXYINFO_H

+ 23 - 23
util/include/util/tc_singleton.h

@@ -104,30 +104,30 @@ public:
     }
 };
 
-template<typename T>
-class CreateUsingNew1
-{
-public:
-    /**
-	 * @brief  创建.
-	 *  
-     * @return T*
-     */
-    static T* create() 
-    { 
-        return new T; 
-    }
+// template<typename T>
+// class CreateUsingNew1
+// {
+// public:
+//     /**
+// 	 * @brief  创建.
+// 	 *  
+//      * @return T*
+//      */
+//     static T* create() 
+//     { 
+//         return new T; 
+//     }
 
-    /**
-	 * @brief 释放. 
-	 *  
-     * @param t
-     */
-    static void destroy(T *t) 
-    { 
-        delete t; 
-    }
-};
+//     /**
+// 	 * @brief 释放. 
+// 	 *  
+//      * @param t
+//      */
+//     static void destroy(T *t) 
+//     { 
+//         delete t; 
+//     }
+// };
 
 template<typename T>
 class CreateStatic

+ 39 - 17
util/include/util/tc_socket.h

@@ -57,6 +57,8 @@ namespace tars
 */
 /////////////////////////////////////////////////
 
+class TC_Endpoint;
+
 /**
 * @brief socket异常类
 */
@@ -97,6 +99,9 @@ public:
      */
     virtual ~TC_Socket();
 
+    //定义客户端地址
+    typedef pair<shared_ptr<sockaddr>, SOCKET_LEN_TYPE> addr_type;
+
     /**
      * @brief  初始化. 
      *  
@@ -417,6 +422,12 @@ public:
      */
     void setCloseWaitDefault();
 
+    /**
+     * @brief 设置reuse addr
+     * @throws TC_Socket_Exception
+     */
+    void setReuseAddr();
+
     /**
      * @brief 设置nodelay(只有在打开keepalive才有效).
      *  
@@ -468,8 +479,16 @@ public:
     void ignoreSigPipe();
 
     /**
-    * @brief 设置socket方式. 
-    *  
+     * 解析地址
+     * @param addr
+     * @param host
+     * @param port
+     */
+    static void parseAddr(const addr_type& addr, string& host, uint16_t &port);
+
+    /**
+    * @brief 设置socket方式.
+    *
     * @param fd      句柄
     * @param bBlock  true, 阻塞; false, 非阻塞
     * @throws        TC_Socket_Exception
@@ -513,19 +532,25 @@ public:
     */
     static void parseAddr(const string &sAddr, struct in6_addr &stAddr);
 
-    /**
-     * @brief: Determine whether an address is ipv6 by including the character ':'
-     *         if address is a domain name, return default(not use now)
-     * @param addr: ip address or domain name
-     * @param def_value: if address is a domain name, return default(not use now)
-     * @return: return true if addr is ipv6, false by ipv4, and default by domain name
+	/**
+	 * @brief: Determine whether an address is ipv6 by including the character ':'
+	 *         if address is a domain name, return default(not use now)
+	 * @param addr: ip address or domain name
+	 * @return: return true if addr is ipv6, false by ipv4, and default by domain name
+	 */
+	static bool addressIsIPv6(const string& addr)
+	{
+#define IPv6_ADDRESS_CHAR ':'
+		return (addr.find(IPv6_ADDRESS_CHAR) != string::npos) ? true : false;
+#undef IPv6_ADDRESS_CHAR
+	}
+
+    /**
+     * create sock addr(support ipv4/ipv6)
+     * @param addr
+     * @return
      */
-    static bool addressIsIPv6(const string& addr, bool def_value = false)
-    {
-    #define IPv6_ADDRESS_CHAR ':'
-        return (addr.find(IPv6_ADDRESS_CHAR) != string::npos) ? true : false;
-    #undef IPv6_ADDRESS_CHAR
-    }
+    static TC_Socket::addr_type createSockAddr(const char *addr);
 
     /**
     * @brief 解析地址, 从字符串(ip或域名)端口, 解析到sockaddr_in结构.
@@ -572,9 +597,6 @@ public:
     TC_Socket& operator=(const TC_Socket &tcSock);
     #endif
 
-
-protected:
-
     /**
     * @brief 连接其他服务. 
     *  

+ 70 - 3
util/include/util/tc_thread.h

@@ -30,7 +30,12 @@ namespace tars
 /** 
  * @file tc_thread.h 
  * @brief  线程类(兼容TAF4.x版本, 底层直接封装了c++11 thread, 从而跨平台兼容)
- *  
+ *
+ * 使用说明:
+ * - TC_Thread定义一个线程, 继承TC_Thread, 实现run方法, 调用start即可启动线程
+ * - 注意TC_Thread也可以定义一个协程, 继承TC_Thread, 实现run方法, 调用startCoroutine方法, 即可进入协程执行, 注意startCoroutine定义了线程退出的方式
+ * >- 一种是: 所有协程都执行完毕以后, 自动退出
+ * >- 一种是: 不自动退出, 除非显示调用TC_TC_CoroutineScheduler::terminate方法
  * @author ruanshudong@qq.com  
  */          
 /////////////////////////////////////////////////
@@ -44,6 +49,8 @@ struct TC_ThreadThreadControl_Exception : public TC_Exception
     ~TC_ThreadThreadControl_Exception() throw() {};
 };
 
+class TC_CoroutineScheduler;
+
 /**
  * @brief  线程控制类
  */
@@ -105,7 +112,7 @@ public:
  * @brief 线程基类. 
  * 线程基类,所有自定义线程继承于该类,同时实现run接口即可, 
  *  
- * 可以通过TC_ThreadContorl管理线程。
+ * 可以通过TC_ThreadControl管理线程。
  */
 class TC_Thread : public TC_Runable
 {
@@ -114,18 +121,34 @@ public:
 	/**
      * @brief  构造函数
 	 */
-	TC_Thread();
+	TC_Thread(const string &threadName = "");
 
 	/**
      * @brief  析构函数
 	 */
 	virtual ~TC_Thread();
 
+	/**
+	 * 设置线程名称(一般调试方便)
+	 * @param threadName
+	 */
+	void setThreadName(const string &threadName);
+
 	/**
      * @brief  线程运行
 	 */
 	TC_ThreadControl start();
 
+	/**
+     * @brief  启动, 并处于协程中, 当前线程进入tc_coroutine的run, 当前对象执行TC_Thread::run
+     * @param iPoolSize 协程池个数
+     * @param iStackSize 协程堆栈大小
+     * @param autoQuit 自动退出
+     * 注意: autoQuit=true时, 当前所有协程执行完毕, 则线程调度退出(线程也退出了) 
+     *      autoQuit=false时, 协程调度器不主动退出, 除非调用协程调度器的terminate
+	 */
+	TC_ThreadControl startCoroutine(uint32_t iPoolSize, size_t iStackSize, bool autoQuit = false);
+
     /**
      * @brief  获取线程控制类.
      *
@@ -133,6 +156,24 @@ public:
      */
     TC_ThreadControl getThreadControl();
 
+    /**
+     * @brief  等待当前线程结束, 不能在当前线程上调用
+     * 可以不再使用TC_ThreadControl类
+     */
+    void join();
+
+    /**
+     * 是否可以join
+     * @return
+     */
+    bool joinable();
+
+    /**
+     * @brief  detach, 不能在当前线程上调用
+     * 可以不再使用TC_ThreadControl类
+     */
+    void detach();
+
     /**
      * @brief  线程是否存活.
      *
@@ -154,6 +195,12 @@ public:
 	 */
     std::thread* getThread() { return _th; }
 
+    /**
+     * 协程模式下, 获取调度器, 非协程模式下则为NULL
+     * @return
+     */
+	const shared_ptr<TC_CoroutineScheduler> &getScheduler() { return _scheduler; }
+
 	/**
      * @brief  获取当前线程ID, 用size_t返回
 	 *
@@ -169,12 +216,27 @@ protected:
 	 */
 	static void threadEntry(TC_Thread *pThread);
 
+	/**
+	 * @brief  静态函数, 协程入口. 
+	 *  
+	 * @param pThread 线程对象
+	 */
+	static void coroutineEntry(TC_Thread *pThread, uint32_t iPoolSize, size_t iStackSize, bool autoQuit);
+
 	/**
      * @brief  运行
 	 */
     virtual void run() = 0;
 
+    friend class RunningClosure;
+
 protected:
+
+    /**
+     * 线程名称
+     */
+    string          _threadName;
+
     /**
      * 是否在运行
      */
@@ -187,6 +249,11 @@ protected:
 
     //当前线程
     std::thread     *_th;
+
+    /**
+     * 协程调度器
+     */
+	shared_ptr<TC_CoroutineScheduler> _scheduler;
 };
 
 }

+ 12 - 12
util/include/util/tc_thread_pool.h

@@ -87,7 +87,7 @@ protected:
         { }
 
         std::function<void()>   _func;
-        int64_t                _expireTime = 0;	//超时的绝对时间
+        uint64_t                _expireTime = 0;	//超时的绝对时间
     };
     typedef shared_ptr<TaskFunc> TaskFuncPtr;
 public:
@@ -183,14 +183,19 @@ public:
     }
 
     /**
-    * @brief 等待当前任务队列中, 所有工作全部结束(队列无任务).
-    *
-    * @param millsecond 等待的时间(ms), -1:永远等待
-    * @return           true, 所有工作都处理完毕
-    *                   false,超时退出
-    */
+     * @brief 等待当前任务队列中, 所有task全部结束(队列无任务).
+     *
+     * @param millsecond 等待的时间(ms), -1:永远等待
+     * @return           true, 所有工作都处理完毕 
+     *                   false,超时退出
+     */
     bool waitForAllDone(int millsecond = -1);
 
+    /**
+    * @brief 线程池是否退出
+    */
+    bool isTerminate() { return _bTerminate; }
+
 protected:
     /**
     * @brief 获取任务
@@ -199,11 +204,6 @@ protected:
     */
     bool get(TaskFuncPtr&task);
 
-    /**
-    * @brief 线程池是否退出
-    */
-    bool isTerminate() { return _bTerminate; }
-
     /**
     * @brief 线程运行态
     */

+ 34 - 2
util/include/util/tc_thread_queue.h

@@ -139,6 +139,13 @@ public:
      */
     bool empty() const;
 
+    /**
+     * @brief  无数据则等待.
+     *
+     * @return bool 非空返回true,超时返回false
+     */    
+    bool wait(size_t millsecond);
+
 protected:
 	TC_ThreadQueue(const TC_ThreadQueue&) = delete;
 	TC_ThreadQueue(TC_ThreadQueue&&) = delete;
@@ -164,9 +171,9 @@ protected:
 
 	//锁
     mutable std::mutex _mutex;
-
+   
     //lockId, 判断请求是否唤醒过
-    size_t      _lockId = 0;    
+    size_t      _lockId = 0;
 };
 
 template<typename T, typename D> T TC_ThreadQueue<T, D>::front()
@@ -405,6 +412,31 @@ template<typename T, typename D> bool TC_ThreadQueue<T, D>::empty() const
     return _queue.empty();
 }
 
+template<typename T, typename D> bool TC_ThreadQueue<T, D>::wait(size_t millsecond)
+{
+	size_t lockId = _lockId;
+
+    std::unique_lock<std::mutex> lock(_mutex);
+
+    if (_queue.empty()) {
+        if (millsecond == 0) {
+            return false;
+        }
+        if (millsecond == (size_t) -1) {
+	        _cond.wait(lock, [&] { return !_queue.empty() || hasNotify(lockId); });
+//            _cond.wait(lock);
+        }
+        else {
+            //超时了
+//	        _cond.wait_for(lock, std::chrono::milliseconds(millsecond), [&] { return !_queue.empty() || hasNotify(lockId); });
+
+            return _cond.wait_for(lock, std::chrono::milliseconds(millsecond), [&] { return !_queue.empty() || hasNotify(lockId); });
+        }
+    }  
+
+    return !_queue.empty();
+}
+
 }
 #endif
 

+ 6 - 0
util/include/util/tc_timeout_queue.h

@@ -173,6 +173,12 @@ public:
      */
     size_t size() const { std::lock_guard<std::mutex> lock(_mutex); return _data.size(); }
 
+    /**
+     * @brief is empty
+     * @return
+     */
+    bool empty() const { std::lock_guard<std::mutex> lock(_mutex); return _data.empty(); }
+
 protected:
     uint32_t                        _uniqId;
     time_t                          _timeout;

+ 4 - 4
util/include/util/tc_timeout_queue_noid.h

@@ -45,7 +45,7 @@ public:
     struct PtrInfo;
     struct NodeInfo;
 
-    typedef multimap<int64_t,NodeInfo>      time_type;
+    typedef multimap<uint64_t,NodeInfo>      time_type;
     typedef list<PtrInfo>                   list_type;
 
     struct PtrInfo
@@ -97,7 +97,7 @@ public:
      * @param timeout    超时时间 绝对时间
      * @return true  成功 false 失败
      */
-    bool push(T& ptr, int64_t timeout);
+    bool push(T& ptr, uint64_t timeout);
 
     /**
      * @brief 超时删除数据
@@ -168,7 +168,7 @@ template<typename T> bool TC_TimeoutQueueNoID<T>::pop(T & t)
 }
 
 
-template<typename T> bool TC_TimeoutQueueNoID<T>::push(T& ptr, int64_t timeout)
+template<typename T> bool TC_TimeoutQueueNoID<T>::push(T& ptr, uint64_t timeout)
 {
     PtrInfo pi;
     pi.ptr = ptr;
@@ -186,7 +186,7 @@ template<typename T> bool TC_TimeoutQueueNoID<T>::push(T& ptr, int64_t timeout)
 
 template<typename T> void TC_TimeoutQueueNoID<T>::timeout()
 {
-    int64_t iNow = TNOWMS;
+    uint64_t iNow = TNOWMS;
     while(true)
     {
         typename time_type::iterator it = _time.begin();

+ 8 - 4
util/include/util/tc_timeprovider.h

@@ -42,7 +42,7 @@ class TC_TimeProvider;
 /**
  * @brief 提供秒级别的时间
  */
-class UTIL_DLL_API TC_TimeProvider
+class UTIL_DLL_API TC_TimeProvider : public TC_Thread
 {
 public:
 
@@ -93,10 +93,10 @@ public:
      * @para timeval
      * @return void
      */
-    int64_t getNowMs();
+    uint64_t getNowMs();
 
 protected:
-	static TC_TimeProvider      *g_tp;
+	static TC_TimeProvider *g_tp;
 
 protected:
 
@@ -118,6 +118,8 @@ protected:
 
 	void addTimeOffset(timeval& tt, const int &idx);
 
+    void terminate();
+
 private:
     bool            _use_tsc;
 
@@ -127,7 +129,9 @@ private:
 
     timeval         _t[2];
 
-    uint64_t       _tsc[2];
+    uint64_t        _tsc[2];
+
+    bool            _terminate = false;
 };
 
 }

+ 178 - 102
util/include/util/tc_timer.h

@@ -1,5 +1,20 @@
-#ifndef	__TC_TIMER_H_
-#define __TC_TIMER_H_
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#pragma once
 
 #include <mutex>
 #include <condition_variable>
@@ -25,12 +40,14 @@ namespace tars
  */          
 /////////////////////////////////////////////////
 
+
 /**
  * @brief  定时器类
  */
-class TC_Timer
+class TC_TimerBase
 {
 protected:
+
 	struct Func
 	{
 		Func( uint64_t fireMillseconds, uint32_t uniqueId) :  _fireMillseconds(fireMillseconds), _uniqueId(uniqueId) { }
@@ -41,109 +58,96 @@ protected:
         uint32_t                _uniqueId = 0;
 	};
 
-	typedef std::unordered_set<uint64_t> EVENT_SET;
+    typedef std::unordered_set<uint64_t> EVENT_SET;
 
-	typedef std::unordered_map<uint64_t, shared_ptr<Func>> MAP_EVENT;
+    typedef std::unordered_map<uint64_t, shared_ptr<Func>> MAP_EVENT;
 
-	typedef map<uint64_t, unordered_set<uint64_t>> MAP_TIMER;
+    typedef map<uint64_t, unordered_set<uint64_t>> MAP_TIMER;
 
 public:
 
-	/**
-	 * 析构
-	 */
-	~TC_Timer();
-	
-	/**
-	 * @brief
-	 * @return num(RUNNER_EVENT), num(ALL_EVENT), num(REPEAT_EVENT)
-	 */
-	tuple<int64_t, int64_t, int64_t> status();
-
-	/**
-	 * 系统定时器
-	 * @param numThread, 回调线程数, 默认是1个
-	 */
-	void startTimer(int numThread = 1);
-
-	/**
-	 * 停止定时器
-	 * 如果当前有定时任务正在执行, 会等执行完毕
-	 */
-	void stopTimer();
+    /**
+     * 析构
+     */
+    virtual ~TC_TimerBase();
 
-	/**
-	 * @brief 指定fireMillseconds时间执行
-	 * @param fireMillseconds, 触发时间(毫秒)
-	 * @return 返回事件Id
-	 */
-	template <class F, class... Args>
-	int64_t postAtTime(int64_t fireMillseconds, F &&f, Args &&... args)
-	{
-		return post(create(fireMillseconds, 0,"", f, args...));
-	}
+    /**
+     * @brief 指定fireMillseconds时间执行
+     * @param fireMillseconds, 触发时间(毫秒)
+     * @return 返回事件Id
+     */
+    template <class F, class... Args>
+    int64_t postAtTime(int64_t fireMillseconds, F &&f, Args &&... args)
+    {
+	    return post(create(fireMillseconds, 0,"", f, args...));
+    }
 
-	/**
-	 * @brief 延时delayMillseconds时间执行
-	 * @param delayMillseconds, 延时时间
-	 * @return 返回事件Id
-	 */
-	template <class F, class... Args>
-	int64_t postDelayed(int64_t delayMillseconds, F &&f, Args &&... args)
-	{
-		uint64_t fireMillseconds = TC_TimeProvider::getInstance()->getNowMs() + delayMillseconds;
+    /**
+     * @brief 延时delayMillseconds时间执行
+     * @param delayMillseconds, 延时时间
+     * @return 返回事件Id
+     */
+    template <class F, class... Args>
+    int64_t postDelayed(int64_t delayMillseconds, F &&f, Args &&... args)
+    {
+        uint64_t fireMillseconds = TC_TimeProvider::getInstance()->getNowMs() + delayMillseconds;
 
-		return post(create(fireMillseconds, 0,"", f, args...));
-	}
+        return post(create(fireMillseconds, 0,"", f, args...));
+    }
 
-	/**
-	 * @brief 重复repeatTime(毫秒)执行一次, 注意repeat是上一次执行完, 才会重新触发下一次事件的计时
-	 * @param repeatTime, 重复时间
-	 * @param execNow, 第一次是否马上执行
-	 * @return 返回事件Id
-	 */
-	template <class F, class... Args>
-	int64_t postRepeated(int64_t repeatTime, bool execNow, F &&f, Args &&... args)
-	{
-		uint64_t fireMillseconds;
+    /**
+     * @brief 重复repeatTime(毫秒)执行一次, 注意repeat是上一次执行完, 才会重新触发下一次事件的计时
+     * @param repeatTime, 重复时间
+     * @param execNow, 第一次是否马上执行
+     * @return 返回事件Id
+     */
+    template <class F, class... Args>
+    int64_t postRepeated(int64_t repeatTime, bool execNow, F &&f, Args &&... args)
+    {
+        uint64_t fireMillseconds;
 
-		if(execNow) {
-			fireMillseconds = TC_TimeProvider::getInstance()->getNowMs();
-		}else {
-			fireMillseconds = TC_TimeProvider::getInstance()->getNowMs() + repeatTime;
-		}
+        if(execNow) {
+            fireMillseconds = TC_TimeProvider::getInstance()->getNowMs();
+        }else {
+            fireMillseconds = TC_TimeProvider::getInstance()->getNowMs() + repeatTime;
+        }
 
 		return post(create(fireMillseconds, repeatTime,"", f, args...),true);
 	}
 
     /**
-     * @brief 
+     * @brief
      * @param cronexpr, crontab 语法  <seconds> <minutes> <hours> <days of month> <months> <days of week> ,具体例子参考 tc_cron.h
      * @return 返回事件Id
      */
-     // CRON	Description
-     // * * * * * *	Every second
-     // */5 * * * * *	Every 5 seconds
-     // 0 */5 */2 * * *	Every 5 minutes, every 2 hours
-     // 0 */2 */2 * */2 */2	Every 2 minutes, every 2 hours, every 2 days of the week, every 2 months
-     // 0 15 10 * * * 	10:15 AM every day
-     // 0 0/5 14 * * *	Every 5 minutes starting at 2 PM and ending at 2:55 PM, every day
-     // 0 10,44 14 * 3 WED	2:10 PM and at 2:44 PM every Wednesday of March
-     // 0 15 10 * * MON-FRI	10:15 AM every Monday, Tuesday, Wednesday, Thursday and Friday
-     // 0 0 12 1/5 * *	12 PM every 5 days every month, starting on the first day of the month
-     // 0 11 11 11 11 *	Every November 11th at 11:11 AM
+    // CRON	Description
+    // * * * * * *	Every second
+    // */5 * * * * *	Every 5 seconds
+    // 0 */5 */2 * * *	Every 5 minutes, every 2 hours
+    // 0 */2 */2 * */2 */2	Every 2 minutes, every 2 hours, every 2 days of the week, every 2 months
+    // 0 15 10 * * * 	10:15 AM every day
+    // 0 0/5 14 * * *	Every 5 minutes starting at 2 PM and ending at 2:55 PM, every day
+    // 0 10,44 14 * 3 WED	2:10 PM and at 2:44 PM every Wednesday of March
+    // 0 15 10 * * MON-FRI	10:15 AM every Monday, Tuesday, Wednesday, Thursday and Friday
+    // 0 0 12 1/5 * *	12 PM every 5 days every month, starting on the first day of the month
+    // 0 11 11 11 11 *	Every November 11th at 11:11 AM
     template <class F, class... Args>
     int64_t postCron(const string&cronexpr, F&& f, Args&&... args)
     {
-        return post(create(0, 0,cronexpr, f, args...),true);
+	    return post(create(0, 0,cronexpr, f, args...),true);
     }
 
-	/**
-	 * 删除事件
-	 * @param uniqId
-	 */
-	void erase(int64_t uniqId);
-    
+    /**
+     * 删除事件
+     * @param uniqId
+     */
+    void erase(int64_t uniqId);
+
+    /**
+     * 下一次定时器的时间
+     */ 
+    int64_t nextTimer() const { return _nextTimer; }
+
     /**
      * 判断循环是否还存在
      * @param uniqId
@@ -156,13 +160,13 @@ public:
     void clear();
 
 protected:
-	template <class F, class... Args>
-	shared_ptr<Func> create(int64_t fireMillseconds, int64_t repeatTime, const string & cronexpr, F &&f, Args &&... args)
-	{
-		//定义返回值类型
-		using RetType = decltype(f(args...));
+    template <class F, class... Args>
+    shared_ptr<Func> create(int64_t fireMillseconds, int64_t repeatTime, const string & cronexpr, F &&f, Args &&... args)
+    {
+        //定义返回值类型
+        using RetType = decltype(f(args...));
 
-		auto task = std::make_shared<std::packaged_task<RetType()>>(std::bind(std::forward<F>(f), std::forward<Args>(args)...));
+        auto task = std::make_shared<std::packaged_task<RetType()>>(std::bind(std::forward<F>(f), std::forward<Args>(args)...));
 
 		shared_ptr<Func> fPtr = std::make_shared<Func>(fireMillseconds, genUniqueId());
 
@@ -211,37 +215,109 @@ protected:
             };
 		}
 
-		return fPtr;
-	}
+        return fPtr;
+    }
 
 	int64_t post(const shared_ptr<Func> &event ,bool repeat = false);
 
-	void fireEvent(const EVENT_SET &el);
+    /**
+     * 触发事件
+     * @param ms: 如果没有事件了, 默认休息时间
+     * @return >=0, 下一次等待时间(如果事件了, 则是ms+TNOWMS)
+     */
+    int64_t fireEvents(int64_t ms);
 
-	void run();
-	
+    /**
+     * 触发定时器, 并返回需要等待的时间
+     * @param el, 返回事件集
+     * @return -1: 无定时时间了, >0: 下一次需要触发的时间间隔
+     */
+    int64_t getEvents(EVENT_SET &el);
+
+    /**
+     * 实际执行事件
+     * @param func
+     */
+    virtual void onFireEvent(std::function<void()> func) = 0;
+
+    /**
+     * 增加了一个最近的定时器, 需要触发wait唤醒, 等到到最新的时间上
+     * 注意回调该函数时, 加了锁的, 因此这个函数中需要尽快处理
+     */
+    virtual void onAddTimer() = 0;
+
+    /**
+     * 生成时间的唯一id
+     * @return
+     */
 	uint32_t genUniqueId();
 
 protected:
-	std::mutex          _mutex;
+    std::mutex          _mutex;
 
-	std::condition_variable _cond;
+    std::condition_variable _cond;
 
-	bool        _terminate = false;
+    int64_t     _nextTimer = -1;//下一次需要触发的时间, <0:  表示无下一次事件
 
-	MAP_EVENT   _mapEvent;      //id, 事件
+    MAP_EVENT   _mapEvent;      //id, 事件
+
+    MAP_TIMER   _mapTimer;      //时间, 事件
 
 	MAP_EVENT   _tmpEvent;      //id, 事件
 
-	MAP_TIMER   _mapTimer;      //时间, 事件
-	
 	atomic_uint _increaseId = {0};
 	
 	set<int64_t> _repeatIds; //循环任务的所有ID
 
+};
+
+
+/**
+ * @brief  定时器类
+ */
+class TC_Timer : public TC_TimerBase
+{
+public:
+
+	/**
+	 * 析构
+	 */
+	~TC_Timer();
+
+	/**
+	 * 系统定时器
+	 * @param numThread, 回调线程数, 默认是1个
+	 */
+	void startTimer(int numThread = 1);
+
+	/**
+	 * 停止定时器
+	 * 如果当前有定时任务正在执行, 会等执行完毕
+	 */
+	void stopTimer();
+
+	/**
+	 * @brief
+	 * @return num(RUNNER_EVENT), num(ALL_EVENT), num(REPEAT_EVENT)
+	 */
+	tuple<int64_t, int64_t, int64_t> status();
+
+
+protected:
+    virtual void onFireEvent(std::function<void()> func);
+
+    /**
+     * 增加了一个最近的定时器, 需要触发wait唤醒, 等到到最新的时间上
+     */ 
+    virtual void onAddTimer();
+
+	void run();
+
+protected:
+	bool        _terminate = false;
+
 	TC_ThreadPool _tpool;
 };
-}
 
-#endif
+}
 

+ 724 - 0
util/include/util/tc_transceiver.h

@@ -0,0 +1,724 @@
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#ifndef TC_CPP_TRANSCEIVER_H
+#define TC_CPP_TRANSCEIVER_H
+
+#include <list>
+#include "util/tc_network_buffer.h"
+#include "util/tc_clientsocket.h"
+#include "util/tc_epoller.h"
+#include "util/tc_proxy_info.h"
+
+namespace tars
+{
+
+class TC_OpenSSL;
+
+/**
+* @brief 连接异常类
+*/
+struct TC_Transceiver_Exception : public TC_Exception
+{
+    TC_Transceiver_Exception(const string &sBuffer) : TC_Exception(sBuffer){};
+    TC_Transceiver_Exception(const string &sBuffer, int err) : TC_Exception(sBuffer, err){};
+    ~TC_Transceiver_Exception() throw(){};    
+};
+
+
+/**
+ * 网络传输基类
+ * 使用方式说明:
+ * 1 通过暴露注册回调的机制来和使用方交互, 配和epoller类完成网路层的收发都统一, epoller对象需要使用者自己创建和管理, 并传递给TC_Transceiver
+ * 2 即可支持客户端的网络, 也可以支持服务端的网络, 但是两者初始化过程是有区别的(initializeClient & initializeServer)
+ *   客户端网络: 发送connect时, 会创建句柄, 有oncreate_callback调用到业务层, 通过参数将TC_Epoller::EpollInfo*给业务层, 业务层在回调中根据实际情况完成epoll事件函数的注册
+ *   服务端网络: 调用者完成accept, 然后通过bindFd的形式, 将句柄传递进来, bindFd会创建TC_Epoller::EpollInfo*, 返回给调用方
+ * 3 无论是客户端网络还是服务端网络, 使用者都不用管理TC_Epoller::EpollInfo*的生命周期, 它会被TC_Transceiver管理, 连接关闭时释放
+ * 4 作为客户端, 支持sock4/sock5/http代理(通过oncreate_callback回调, 业务层将代理信息TC_ProxyInfo返回给TC_Transceiver), 也可以通过setBindAddr指定客户端发包时的绑定地址
+ * 5 在数据包的管理上, TC_Transceiver内只保存最后一个发送的数据包(可能发送了一半), 因此调用者需要维持发送的数据包队列, 几个回调函数的说明:
+ *   oncreate_callback: 只存在于客户端模式, 当TC_Transceiver创建完客户端fd时回调oncreate_callback, 使用者在回调事件中监听fd事件, 如果有代理, 则返回代理信息, 否则返回null(注意, 如果有, 则每次都要new一个新的, 因为代理是具备状态信息的)
+ *   onconnect_callback: 建立连接以后的回调, 注意对于ssl, 是指已经完成ssl握手后的回调
+ *   onsend_callback: 当TC_Transceiver内的数据都发送完毕, 则回调onsend_callback, 使得调用者可以发送下一个数据
+ *   onrequest_callback: 该回调表示网络层的连接建立完毕(如果SSL则表示SSL握手完成), 可以发送业务层队列的数据了(通过sendRequest来发送)
+ *   onopenssl_callback: 需要创建openssl的对象回调, 业务层完成openssl对象的创建(因为涉及证书管理, 因此由业务层来创建)
+ *   onparser_callback: 协议解析的回调 
+ *   onclose_callback: 关闭socket的回调(在close句柄之前回调的)
+ *   这种连接中只有最后一个数据包的模式, 保证当连接异常时,在调用者队列中还没有发送的数据能够通过其他连接来发送
+ * 6 发送数据使用sendRequest函数, 注意返回值
+ *   eRetError: 表示发送失败, 当前的数据包没有发送出去, 需要检查连接有有效性, 如果有效: isValid, 则数据需要进入使用自己的队列, onsend_callback回调的时候再发送
+ *   eRetOk: 发送成功
+ *   eRetFull: 发送了一半, 系统网络buff满了, 即当前fd不能再发送数据了, 等EPOLLOUT事件来的时候, 业务层再发送
+ *   eRetNotSend: 数据没有发送(比如连接还没有建立, ssl没有握手, 没有鉴权等), 业务层可以缓存此数据, 等可以发送时onrequest_callback会被回调
+ * 7 注册事件, 客户端在oncreate_callback回调中注册相关事件, 服务端在accept句柄并bindFd之后, 来注册事件, 通常需要注册三个事件
+ *
+ * 8 注意: 主要接口都以异常的形式对外抛出错误, 因此外部调用时注意捕获异常(一般是在注册的事件中)
+ * 9 连接直接鉴权逻辑, 即可客户端发业务包前, 会发送一个鉴权包到服务器端, 服务器收到同样解包, 鉴权通过后, 才能继续发送业务包
+ * 10 具体客户端使用方式可以参考: CommunicatorEpoll类, 服务端参考: tc_epoll_server
+ * 
+ */
class TC_Transceiver
{
public:
    /**
     * Connection status.
     */
    enum ConnectStatus
    {
        eUnconnected,
        eConnecting,
        eConnected,
    };

    /**
     * Return values of sendRequest().
     */
    enum ReturnStatus
    {
        eRetError = -1,
        eRetOk =0,
        eRetFull=1,
	    eRetNotSend = 2,
    };

    /**
     * Reason the connection was closed (passed to onclose_callback).
     */
    enum CloseReason
    {
        CR_Type             = 0,    // wrong connection type
        CR_Connect          = 1,    // connect error
        CR_ConnectTimeout   = 2,    // connect timed out
        CR_SSL              = 3,    // ssl error
        CR_PROXY_SEND       = 4,    // proxy mode: error sending a packet
        CR_PROXY_RECV       = 5,    // proxy mode: error receiving a packet
        CR_PROTOCOL         = 6,    // protocol parse error
        CR_SEND             = 7,    // send error
        CR_RECV             = 8,    // receive error
        CR_ACTIVE           = 9,    // triggered by an explicit close() call
        CR_DECONSTRUCTOR    = 10,   // closed from the destructor
	    CR_SSL_HANDSHAKE    = 11,   // ssl handshake error
    };

    /**
     * Authentication state.
     */
    enum AUTH_STATUS
    {
        eAuthInit = -127,
        eAuthSucc = 0,
    };

    enum
    {
        DEFAULT_RECV_BUFFERSIZE = 64*1024       /* default receive-buffer size */
    };

    /**
     * One socket option to be applied via setsockopt().
     */
    struct SocketOpt
    {
        int level;

        int optname;

        const void *optval;

        SOCKET_LEN_TYPE optlen;
    };

    // Called after the client fd is created, before connecting (client side only);
    // returns proxy info, or null for a direct connection.
    using oncreate_callback = std::function<shared_ptr<TC_ProxyInfo>(TC_Transceiver*)>;
    // Called to create the TC_OpenSSL object (certificate handling is up to the caller).
    using onopenssl_callback = std::function<std::shared_ptr<TC_OpenSSL>(TC_Transceiver*)>;
    // Called just before the socket is closed.
    using onclose_callback = std::function<void(TC_Transceiver*, CloseReason reason, const string &err)> ;
    // Called once the connection has been established (client side only).
    using onconnect_callback = std::function<void(TC_Transceiver*)> ;
    // Business requests may be sent now (client: connection complete; SSL: handshake complete).
	using onrequest_callback = std::function<void(TC_Transceiver*)> ;
    // Protocol-parsing callback.
	using onparser_callback = std::function<TC_NetWorkBuffer::PACKET_TYPE(TC_NetWorkBuffer&, TC_Transceiver*)> ;
	// Called after one complete packet has been parsed.
	using oncompletepackage_callback = std::function<void(TC_Transceiver*)> ;
	// Client side: build the authentication request packet.
	using onclientsendauth_callback = std::function<shared_ptr<TC_NetWorkBuffer::Buffer>(TC_Transceiver*)> ;
    // Client side: parse the authentication response (return PACKET_FULL on success).
	using onclientverifyauth_callback = std::function<TC_NetWorkBuffer::PACKET_TYPE(TC_NetWorkBuffer &, TC_Transceiver*)> ;
    // Server side: verify the auth request and produce the response packet.
	using onserververifyauth_callback = std::function<pair<TC_NetWorkBuffer::PACKET_TYPE, shared_ptr<TC_NetWorkBuffer::Buffer>>(TC_NetWorkBuffer &, TC_Transceiver*)> ;

    /**
     * Constructor.
     * @param epoller  epoller owned by the caller
     * @param ep       server-side endpoint
     */
    TC_Transceiver(TC_Epoller* epoller, const TC_Endpoint &ep);

    /**
     * Destructor.
     */
    virtual ~TC_Transceiver();

    /**
     * Initialize a client-side connection.
     * The fd is created later, when connect() is called.
     */
    void initializeClient(const oncreate_callback &oncreate, 
            const onclose_callback &onclose, 
            const onconnect_callback &onconnect, 
            const onrequest_callback &onrequest, 
            const onparser_callback &onparser, 
            const onopenssl_callback &onopenssl,
            const oncompletepackage_callback &onfinish = oncompletepackage_callback());

    /**
     * Initialize a server-side connection.
     * The fd is accepted externally and handed in via bindFd().
     */
    void initializeServer(const onclose_callback &onclose,
            const onrequest_callback &onrequest, 
            const onparser_callback &onparser, 
            const onopenssl_callback &onopenssl,
            const oncompletepackage_callback &onfinish = oncompletepackage_callback());

    /**
     * Set the local bind address (client side only; a server is already bound).
     */
    void setBindAddr(const char *host);

    /**
     * Set the local bind address (client side only; a server is already bound).
     * @param bindAddr
     */
    void setBindAddr(const TC_Socket::addr_type &bindAddr);

    /**
     * Client address; for udp this is the peer of the last received packet.
     * @return
     */
    const TC_Socket::addr_type & getClientAddr() const { return _clientAddr; }

    /**
     * Server address (for a client with a proxy configured, the proxy address).
     * @return
     */
    const TC_Socket::addr_type & getServerAddr() const { return _serverAddr; }

    /**
     * Install client auth callbacks (skip this step if no auth is needed).
     */
    void setClientAuthCallback(const onclientsendauth_callback &onsendauth, const onclientverifyauth_callback &onverify);

    /**
     * Install the server auth callback (skip this step if no auth is needed).
     */
    void setServerAuthCallback(const onserververifyauth_callback &onverify);

    /**
     * Bind an fd; this registers EPOLLIN|EPOLLOUT for it immediately
     * and takes ownership of the handle.
     * @param fd
     * @return TC_Epoller::EpollInfo*
     */
    shared_ptr<TC_Epoller::EpollInfo> bindFd(int fd);

    /**
     * Set the udp receive-buffer size (udp only).
     */
    void setUdpRecvBuffer(size_t nSize);

    /**
     * Set socket options to apply to the fd.
     */
    void setSocketOpt(const vector<SocketOpt> &socketOpts) { _socketOpts = socketOpts; }

    /**
     * Whether this is an ssl transport.
     */
    bool isSSL() const ;

    /**
     * Establish the connection.
     */
    void connect();

    /*
     * Close the connection.
     */
    void close();

	/**
	 * Send buffer.
	 * @return
	 */
	inline TC_NetWorkBuffer &getSendBuffer() { return _sendBuffer; }

	/**
	 * Receive buffer.
	 * @return
	 */
	inline TC_NetWorkBuffer &getRecvBuffer() { return _recvBuffer; }

    /**
     * Epoll registration info for this fd.
     */
    inline const shared_ptr<TC_Epoller::EpollInfo> &getEpollInfo() { return _epollInfo; }

    /**
     * Send a buffer.
     * @param buff  payload
     * @param addr  destination (required for udp server replies — must be the
     *              client's address; may be omitted otherwise)
     */
    virtual ReturnStatus sendRequest(const shared_ptr<TC_NetWorkBuffer::Buffer> &buff, const TC_Socket::addr_type& addr = TC_Socket::addr_type());

    /**
     * Whether authentication has succeeded.
     */
    bool isAuthSucc() const { return _authState == eAuthSucc; }

    /**
     * Mark authentication as successful.
     */
    void enableAuthSucc() { _authState = eAuthSucc; }

    /**
     * Current authentication state.
     */
    inline AUTH_STATUS getAuthStatus() const { return _authState; }

    /*
     * Flush data already queued on the connection; once everything is sent,
     * onrequest_callback is invoked.
     * @return void, throws TC_Transceiver_Exception
     */
    void doRequest();

    /*
     * Receive network data.
     * @return throws TC_Transceiver_Exception
     */
    virtual void doResponse() = 0;

    /*
     * File descriptor.
     * @return int
     */
    inline int fd() const { return _fd; }

    /*
     * Whether the fd is valid.
     */
    inline bool isValid() const { return (_fd != -1); }

    /*
     * Endpoint info (server address).
     */
    inline const TC_Endpoint& getEndpoint() const { return _ep; }

    /**
     * The openssl object.
     */
    inline const std::shared_ptr<TC_OpenSSL>& getOpenSSL() { return _openssl; }

    /*
     * Whether the connection to the server is established.
     */
    inline bool hasConnected() { return isValid() && (_connStatus == eConnected); }

    /*
     * Whether a connect is currently in progress.
     */
    inline bool isConnecting() { return isValid() && (_connStatus == eConnecting); }

	/**
	 * Address actually connected to (proxy address if a proxy is set).
	 * @return
	 */
	inline const TC_Endpoint &getConnectEndpoint() const { return _proxyInfo? _proxyInfo->getEndpoint() : _ep; }

    /**
     * Human-readable connection description.
     */
    inline const string& getConnectionString() const { return _desc; }

	/**
	 * @brief is ipv6 socket or not
	 * @return true if is ipv6
	 */
	inline bool isConnectIPv6() const  { return getConnectEndpoint().isIPv6(); }

    /**
     * Set the connect timeout (tcp only).
     */
    void setConnTimeout(int connTimeout) { _connTimeout = connTimeout; }

    /**
     * Connect timeout value (tcp only).
     */
    int getConnTimeout() const { return _connTimeout; };

    /**
     * Whether the connect timed out (tcp only).
     */
    inline bool isConnTimeout() const { return _isConnTimeout; }

    /**
     * Set the connect-timed-out flag.
     * @param connTimeout
     * @return
     */
    inline void setIsConnTimeout(bool connTimeout) { _isConnTimeout = connTimeout; }

protected:

    /**
     * Create the socket fd.
     */
    int createSocket(bool udp, bool isLocal = false, bool isIpv6 = false);

    /**
     * Issue the (non-blocking) connect.
     */
	bool doConnect(int, const struct sockaddr *, socklen_t len);

    /**
     * Check the result of a pending connect.
     */
    void checkConnect();

    /*
     * Switch to the connected state.
     */
    void setConnected();

	/*
	 * Hook invoked when the connected state is entered.
	 */
	void onSetConnected();

	/**
	 * Check for connect timeout.
	 */
	void checkConnectTimeout();

    /**
     ** Callback invoked when the physical connection succeeds.
     **/
    void onConnect();

	/**
	 ** Send the proxy handshake request.
	 **/
	void connectProxy();

	/**
	 * Check whether the proxy tunnel was established.
	 * @param buff
	 * @param length
	 * @return <0: failure, 0: success, 1: authentication required
	 */
	int doCheckProxy(const char *buff, size_t length);

	/**
	 * Resolve the server host name.
	 */
	void parseConnectAddress();

    /**
     * Send the authentication request.
     */
    void doAuthReq();

    /**
     * Verify the authentication response.
     */
    void doAuthCheck(TC_NetWorkBuffer *buff);

    /**
     * Run protocol parsing over received data.
     */
    int doProtocolAnalysis(TC_NetWorkBuffer *buff);

    /*
     * Internal close for tcp (udp connections are not closed here).
     */
    void tcpClose(bool deconstructor, CloseReason reason, const string &err);

    /**
     * Internal close for udp.
     */
    void udpClose();

    /*
     * Raw network send.
     * @param buf
     * @param len
     * @param flag
     * @return int
     */
    virtual int send(const void* buf, uint32_t len, uint32_t flag) = 0;

    /*
     * Raw network receive.
     * @param buf
     * @param len
     * @param flag
     *
     * @return int
     */
    virtual int recv(void* buf, uint32_t len, uint32_t flag) = 0;

    friend class CloseClourse;
protected:

    /*
     * Server-side address.
     */
    TC_Socket::addr_type               _serverAddr;

    /**
     * Client-side address.
     */
    TC_Socket::addr_type               _clientAddr;

    /**
     * Local bind address.
     */
    TC_Socket::addr_type               _bindAddr;

    /*
     * epoller (owned by the caller).
     */
    TC_Epoller*             _epoller = NULL;

    /*
     * Endpoint of the node being connected to (server address).
     */
    TC_Endpoint             _ep;

    /*
     * Connection description string.
     */
    string                  _desc;

    /*
     * Socket fd.
     */
    int                     _fd;

    /**
     * true: server side, false: client side.
     */
    bool                    _isServer;

    /**
     * Socket options to apply.
     */
    vector<SocketOpt>       _socketOpts;

    /*
     * Epoll registration info.
     */
    shared_ptr<TC_Epoller::EpollInfo>   _epollInfo;
    
    /*
     * Connection status.
     */
    ConnectStatus            _connStatus;

    /*
     * openssl object.
     */
    std::shared_ptr<TC_OpenSSL> _openssl;

    /*
     * Send buffer.
     */
	TC_NetWorkBuffer        _sendBuffer;

	/*
     * Receive buffer.
     */
    TC_NetWorkBuffer        _recvBuffer;

	/**
	 * Proxy info (null when no proxy).
	 */
	shared_ptr<TC_ProxyInfo> _proxyInfo;

    /* 
     * Authentication state.
     */
    AUTH_STATUS              _authState = eAuthInit;

    /**
     * Destination address of the last sent packet.
     */
    TC_Socket::addr_type    _lastAddr;

    /*
     * Receive cache (udp only).
     */
    shared_ptr<TC_NetWorkBuffer::Buffer> _pRecvBuffer;

    /*
     * Receive cache size (udp only).
     */
    size_t                  _nRecvBufferSize = DEFAULT_RECV_BUFFERSIZE;

    /**
     * Whether the connect timed out.
     */
    bool                    _isConnTimeout = false;

    /**
     * Connect timeout in milliseconds.
     */
    int                     _connTimeout = 5000;

    // Timer id of the pending connect-timeout timer.
    int64_t                 _connTimerId = 0;

    oncreate_callback       _createSocketCallback;

    onopenssl_callback      _onOpensslCallback;

    onconnect_callback      _onConnectCallback;

    onrequest_callback      _onRequestCallback;

    onclose_callback        _onCloseCallback;

    onparser_callback       _onParserCallback;

	oncompletepackage_callback      _onCompletePackageCallback;

    onclientsendauth_callback       _onClientSendAuthCallback;
    
    onclientverifyauth_callback     _onClientVerifyAuthCallback;

    onserververifyauth_callback     _onServerVerifyAuthCallback;
};
+
+//////////////////////////////////////////////////////////
+/**
+ * TCP 传输实现
+ */
class TC_TCPTransceiver : public TC_Transceiver
{
public:
    /**
     * Constructor.
     * @param epoller
     * @param ep  server endpoint
     */
    TC_TCPTransceiver(TC_Epoller* epoller, const TC_Endpoint &ep);

    /**
     * TCP send implementation.
     * @param buf
     * @param len
     * @param flag
     * @return int
     */
    virtual int send(const void* buf, uint32_t len, uint32_t flag);

    /**
     * TCP receive implementation.
     * @param buf
     * @param len
     * @param flag
     *
     * @return int
     */
    virtual int recv(void* buf, uint32_t len, uint32_t flag);

    /**
     * Handle received data; checks whether the receive buffer
     * holds one or more complete packets.
     * @return throws on error
     */
	virtual void doResponse();
};
+
+
+//////////////////////////////////////////////////////////
+/**
+ * SSL 传输实现
+ */
class TC_SSLTransceiver : public TC_TCPTransceiver
{
public:
    /**
     * Constructor.
     * @param epoller
     * @param ep  server endpoint
     */
    TC_SSLTransceiver(TC_Epoller* epoller, const TC_Endpoint &ep);

    /**
     * Handle received data (drives the SSL handshake/decryption path).
     * @return throws on error
     */
	virtual void doResponse();
};
+
+//////////////////////////////////////////////////////////
+/**
+ * UDP 传输实现
+ */
class TC_UDPTransceiver : public TC_Transceiver
{
public:

    /**
     * Constructor.
     */
    TC_UDPTransceiver(TC_Epoller* epoller, const TC_Endpoint &ep);

    /**
     * Destructor.
     */
    ~TC_UDPTransceiver();

    /**
     * UDP send implementation.
     * @param buf
     * @param len
     * @param flag
     * @return int
     */
    virtual int send(const void* buf, uint32_t len, uint32_t flag);

    /**
     * UDP receive implementation.
     * @param buf
     * @param len
     * @param flag
     * @return int
     */
    virtual int recv(void* buf, uint32_t len, uint32_t flag);

    /**
     * Handle received data; checks whether the receive buffer
     * holds a complete packet (udp packets are self-contained).
     * @return throws on error
     */
	virtual void doResponse();

protected:
};
+
+}
+
+#endif

+ 130 - 0
util/include/util/tc_uuid_generator.h

@@ -0,0 +1,130 @@
+
+#ifndef __TC_UUID_GENERATOR_H
+#define __TC_UUID_GENERATOR_H
+
+#include "util/tc_platform.h"
+#if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
+#include <unistd.h>
+//#include <arpa/inet.h>
+#else
+#include <process.h>
+//#include <winsock2.h>
+#endif
+
+#include <atomic>
+#include "util/tc_common.h"
+#include "util/tc_timeprovider.h"
+#include "util/tc_socket.h"
+#include "util/tc_singleton.h"
+
+using namespace taf;
+
+class TC_UUIDGenerator : public TC_Singleton<TC_UUIDGenerator>
+{
+public:
+    TC_UUIDGenerator():initOK(false)
+    {
+        init("");
+    }
+
+    void init(const string& sIP = "")
+    {
+        if (isIPV4(sIP))
+        {
+            //ip = (uint32_t)inet_addr(sIP.c_str());
+            ip = ipv4Toint(sIP);
+        }
+        else
+        {
+            //ip = (uint32_t)inet_addr(getLocalIP().c_str());
+            ip = ipv4Toint(getLocalIP().c_str());
+        }
+
+        if (ip == 0)
+        {
+            initOK = false;
+            return;
+        }
+
+        pid = (uint32_t)getpid();
+        seq = 0;
+        initOK = true;
+    }
+
+    string genID()
+    {
+        if (!initOK)
+        {
+            return "";
+        }
+        char buff[33] = {0};
+        sprintf(buff, "%08x%08x%08x%08x", ip, pid, (unsigned int)(TNOW), seq++);
+        return string(buff);
+    }
+
+protected:
+    bool isIPV4(const string& ip)
+    {
+        vector<int> vs = TC_Common::sepstr<int>(ip, ".");
+        if (vs.size() != 4)
+        {
+            return false;
+        }
+
+        for (size_t i = 0; i < vs.size(); i++)
+        {
+            if (vs[i] < 0 || vs[i] > 255)
+            {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    uint32_t ipv4Toint(const string& ip)
+    {
+        vector<int> vs = TC_Common::sepstr<int>(ip, ".");
+        if (vs.size() != 4)
+        {
+            return 0;
+        }
+
+        uint32_t ipInt = 0;
+        for (int i = 3; i >= 0; i--)
+        {
+            if (vs[i] < 0 || vs[i] > 255)
+            {
+                return 0;
+            }
+            else
+            {
+                ipInt = (ipInt << 8) + vs[i];
+            }
+        }
+
+        return ipInt;
+    }
+
+    string getLocalIP()
+    {
+        vector<string> vs = TC_Socket::getLocalHosts();
+
+        for (size_t i = 0; i < vs.size(); i++)
+        {
+            if (vs[i] != "127.0.0.1" && (!TC_Socket::addressIsIPv6(vs[i])))
+            {
+                return vs[i];
+            }
+        }
+        return "127.0.0.1";
+    }
+
+private:
+    uint32_t ip;
+    uint32_t pid;
+    std::atomic<uint32_t> seq;
+    bool initOK;
+};
+
+#endif //__TC_UUID_GENERATOR_H

+ 14 - 13
util/src/epoll_windows/src/epoll.cpp

@@ -328,19 +328,20 @@ int epoll_port_data_t::epoll_add(SOCKET sock, struct epoll_event *ev)
 
 int epoll_port_data_t::epoll_mod(SOCKET sock, struct epoll_event *ev)
 {
-    std::lock_guard<std::mutex> lck (_mutex);
-
-    epoll_sock_data_t *sock_data = get(sock);
-    if(sock_data == NULL)
     {
-        SetLastError(ERROR_NOT_FOUND);
-        return -1;
-    }
-    // printf("mod new, %d\n", sock_data->_op_count);
+        std::lock_guard<std::mutex> lck (_mutex);
 
-    sock_data->_registered_events = ev->events | EPOLLERR | EPOLLHUP;
-    sock_data->_user_data         = ev->data.u64;
-    return sock_data->submit();  
+        epoll_sock_data_t *sock_data = get(sock);
+
+        if(sock_data != NULL)
+        {
+            sock_data->_registered_events = ev->events | EPOLLERR | EPOLLHUP;
+            sock_data->_user_data         = ev->data.u64;
+            return sock_data->submit();  
+        }
+    }
+    // LOG_CONSOLE_DEBUG << endl;
+    return epoll_add(sock, ev);
 }
 
 int epoll_port_data_t::epoll_del(SOCKET sock, struct epoll_event *ev)
@@ -465,7 +466,7 @@ int epoll_port_data_t::epoll_wait(OVERLAPPED_ENTRY *entries, ULONG count, struct
             struct epoll_event *ev = events + num_events;
             ev->data.u64 = sock_data->_user_data;
             ev->events = EPOLLERR;
-            ++num_events;
+	        num_events++;
             continue;
         }
 
@@ -542,7 +543,7 @@ int epoll_port_data_t::epoll_wait(OVERLAPPED_ENTRY *entries, ULONG count, struct
             struct epoll_event *ev = events + num_events;
             ev->data.u64           = sock_data->_user_data;
             ev->events             = reported_events;
-            ++num_events;
+	        num_events++;
         }
     }
     return num_events;

+ 65 - 31
util/src/tc_clientsocket.cpp

@@ -34,11 +34,11 @@ TC_Endpoint::TC_Endpoint()
     _qos = 0;
     _weight = -1;
     _weighttype = 0;
-    _authType = 0;
+    _authType = AUTH_TYPENONE;
 	_isIPv6 = TC_Socket::addressIsIPv6(_host);
 }
 
-void TC_Endpoint::init(const string& host, int port, int timeout, EType type, int grid, int qos, int weight, unsigned int weighttype, int authType)
+void TC_Endpoint::init(const string& host, int port, int timeout, EType type, int grid, int qos, int weight, unsigned int weighttype, AUTH_TYPE authType)
 {
     _host = host;
     _port = port;
@@ -72,7 +72,7 @@ void TC_Endpoint::parse(const string &str)
     _qos = 0;
     _weight = -1;
     _weighttype = 0;
-    _authType = 0;
+    _authType = AUTH_TYPENONE;
 
     const string delim = " \t\n\r";
 
@@ -212,11 +212,15 @@ void TC_Endpoint::parse(const string &str)
             // auth type
             case 'e':
             {
+                int v = 0;
                 istringstream p(argument);
-                if (!(p >> const_cast<int&>(_authType)) || !p.eof() || _authType < 0 || _authType > 1)
+
+                if (!(p >> const_cast<int&>(v)) || !p.eof() || (v != AUTH_TYPENONE && v != AUTH_TYPELOCAL))
                 {
                     throw TC_EndpointParse_Exception("TC_Endpoint::parse -e error : " + str);
                 }
+
+                _authType = (AUTH_TYPE)v;
                 break;
             }
             default:
@@ -246,10 +250,10 @@ void TC_Endpoint::parse(const string &str)
     }
     _isIPv6 = TC_Socket::addressIsIPv6(_host);
 
-    if (_authType < 0)
-        _authType = 0;
-    else if (_authType > 0)
-        _authType = 1;
+    // if (_authType < 0)
+    //     _authType = 0;
+    // else if (_authType > 0)
+    //     _authType = 1;
 }
 
 /*************************************TC_TCPClient**************************************/
@@ -273,8 +277,8 @@ void TC_ClientSocket::init(const string &sIp, int iPort, int iTimeout)
 	if(!_epoller)
 	{
 		_epoller = new TC_Epoller();
-		_epoller->create(10);
-		_epoller->enableET(false);
+		_epoller->create(10, false);
+        _epoller->enableET(false); 
 	}
 
     _socket.close();
@@ -300,27 +304,23 @@ int TC_TCPClient::checkSocket()
         try
         {
 #if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
-	        _socket.createSocket(SOCK_STREAM, _port ? (_isIPv6 ? AF_INET6 : AF_INET) : AF_LOCAL);
-#else
-	        _socket.createSocket(SOCK_STREAM, _isIPv6 ? AF_INET6 : AF_INET);
-#endif
-
-	        _epoller->add(_socket.getfd(), 0, EPOLLOUT | EPOLLIN);
 
+	        _socket.createSocket(SOCK_STREAM, _port ? (_isIPv6 ? AF_INET6 : AF_INET) : AF_LOCAL);
 	        //设置非阻塞模式
             _socket.setblock(false);
             _socket.setNoCloseWait();
+            _socket.setKeepAlive();
+            _socket.setTcpNoDelay();
+            
+	        _epoller->add(_socket.getfd(), 0, EPOLLOUT | EPOLLIN);
             
 	        int iRet;
 
-#if TARGET_PLATFORM_LINUX
-
             if(_port == 0)
             {
                 iRet = _socket.connectNoThrow(_ip.c_str());
             }
             else
-#endif                
             {
 	            iRet = _socket.connectNoThrow(_ip, _port);
             }
@@ -331,6 +331,7 @@ int TC_TCPClient::checkSocket()
                 return EM_CONNECT;
             }
             int iRetCode = _epoller->wait(_timeout);
+
             if (iRetCode < 0)
             {
                 _socket.close();
@@ -343,16 +344,16 @@ int TC_TCPClient::checkSocket()
             }
             else
             {
-#if !TARGET_PLATFORM_WINDOWS
                 for(int i = 0; i < iRetCode; ++i)
                 {
                     const epoll_event& ev = _epoller->get(i);
+
                     if(TC_Epoller::errorEvent(ev))
                     {
                         _socket.close();
                         return EM_CONNECT;
                     }
-                    else
+                    else if(TC_Epoller::writeEvent(ev))
                     {
                         int iVal = 0;
                         socklen_t iLen = static_cast<socklen_t>(sizeof(int));
@@ -363,11 +364,40 @@ int TC_TCPClient::checkSocket()
                         }
                     }
                 }
-#endif
+            }
+            //设置为阻塞模式
+            _socket.setblock(true);
+
+#else
+	        _socket.createSocket(SOCK_STREAM, _isIPv6 ? AF_INET6 : AF_INET);
+	        //设置非阻塞模式
+            _socket.setblock(false);
+            _socket.setNoCloseWait();
+            
+	        _epoller->add(_socket.getfd(), 0, EPOLLOUT | EPOLLIN);
 
+	        int iRet = _socket.connectNoThrow(_ip, _port);
+
+            if(iRet < 0 && !TC_Socket::isInProgress())
+            {
+                _socket.close();
+                return EM_CONNECT;
+            }
+            int iRetCode = _epoller->wait(_timeout);
+            if (iRetCode < 0)
+            {
+                _socket.close();
+                return EM_SELECT;
+            }
+            else if (iRetCode == 0)
+            {
+                _socket.close();
+                return EM_TIMEOUT;
             }
+
             //设置为阻塞模式
             _socket.setblock(true);
+#endif
         }
         catch(TC_Socket_Exception &ex)
         {
@@ -431,7 +461,9 @@ int TC_TCPClient::recv(char *sRecvBuffer, size_t &iRecvLen)
     else if(TC_Epoller::readEvent(ev))
 #endif
     {
-        int iLen = _socket.recv((void*)sRecvBuffer, iRecvLen);
+	    // DEBUG_COST("before recv");
+
+	    int iLen = _socket.recv((void*)sRecvBuffer, iRecvLen);
         if (iLen < 0)
         {
             _socket.close();
@@ -460,11 +492,11 @@ int TC_TCPClient::recvBySep(string &sRecvBuffer, const string &sSep)
         return iRet;
     }
 
-    _epoller->mod(_socket.getfd(), 0, EPOLLIN);
-    
-    while(true)
+    bool succ = false;
+    while(!succ)
     {
         int iRetCode = _epoller->wait(_timeout);
+
         if (iRetCode < 0)
         {
             _socket.close();
@@ -491,7 +523,7 @@ int TC_TCPClient::recvBySep(string &sRecvBuffer, const string &sSep)
             char buffer[LEN_MAXRECV] = "\0";
 
             int len = _socket.recv((void*)&buffer, sizeof(buffer));
-            if (len < 0)
+            if (len < 0 && !TC_Socket::isPending())
             {
                 _socket.close();
                 return EM_RECV;
@@ -502,10 +534,12 @@ int TC_TCPClient::recvBySep(string &sRecvBuffer, const string &sSep)
                 return EM_CLOSE;
             }
 
-            sRecvBuffer.append(buffer, len);
+            if(len > 0)
+            {
+                sRecvBuffer.append(buffer, len);
+            }
 
-            if(sRecvBuffer.length() >= sSep.length() 
-               && sRecvBuffer.compare(sRecvBuffer.length() - sSep.length(), sSep.length(), sSep) == 0)
+            if(sRecvBuffer.length() >= sSep.length() && sRecvBuffer.compare(sRecvBuffer.length() - sSep.length(), sSep.length(), sSep) == 0)
             {
                 break;
             }
@@ -692,7 +726,7 @@ int TC_UDPClient::checkSocket()
             return EM_SOCKET;
         }
 
-	    _epoller->add(_socket.getfd(), 0, EPOLLIN | EPOLLOUT);
+	    _epoller->add(_socket.getfd(), 0, EPOLLIN);
 
 	    try
         {

+ 23 - 8
util/src/tc_common.cpp

@@ -583,10 +583,6 @@ string TC_Common::tm2str(const time_t &t, const string &sFormat)
 void TC_Common::tm2tm(const time_t &t, struct tm &tt)
 {
     tm2time(t, tt);
-    // static TimezoneHelper helper;
-    // time_t localt = t + TimezoneHelper::timezone_diff_secs;
-    // TC_Port::gmtime_r(&localt, &tt);
-
 }
 
 string TC_Common::now2str(const string &sFormat)
@@ -595,10 +591,10 @@ string TC_Common::now2str(const string &sFormat)
     return tm2str(t, sFormat.c_str());
 }
 
-string TC_Common::now2msstr()
-{
-    return ms2str(now2ms());
-}
+//string TC_Common::now2msstr()
+//{
+//    return ms2str(now2ms());
+//}
 
 string TC_Common::ms2str(int64_t ms)
 {
@@ -618,6 +614,25 @@ string TC_Common::ms2str(int64_t ms)
     return s;
 }
 
+string TC_Common::now2msstr()
+{
+    time_t t = time(NULL);
+
+    auto duration_in_ms = now2ms();
+
+    tm tt;
+
+    TC_Port::localtime_r(&t, &tt);
+
+    string s;
+    s.resize(128);
+    const char *szFormat = "%04d-%02d-%02d %02d:%02d:%02d.%03ld";
+    size_t n = snprintf(&s[0], s.size(), szFormat, tt.tm_year + 1900, tt.tm_mon + 1, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec, duration_in_ms % 1000);
+    s.resize(n);
+
+    return s;
+}
+
 string TC_Common::now2GMTstr()
 {
     time_t t = time(NULL);

+ 865 - 0
util/src/tc_coroutine.cpp

@@ -0,0 +1,865 @@
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#include "util/tc_coroutine.h"
+#include "util/tc_platform.h"
+#include "util/tc_logger.h"
+
+#if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <stdexcept>
+#include <assert.h>
+#include "util/tc_timeprovider.h"
+
+namespace tars
+{
+
+#if TARGET_PLATFORM_WINDOWS
+
+// x86_64
+// test x86_64 before i386 because icc might
+// define __i686__ for x86_64 too
+#if defined(__x86_64__) || defined(__x86_64) \
+    || defined(__amd64__) || defined(__amd64) \
+    || defined(_M_X64) || defined(_M_AMD64)
+
+// Windows seems not to provide a constant or function
+// telling the minimal stacksize
+# define MIN_STACKSIZE  8 * 1024
+#else
+# define MIN_STACKSIZE  4 * 1024
+#endif
+
+void system_info_( SYSTEM_INFO * si) {
+    ::GetSystemInfo( si);
+}
+
+SYSTEM_INFO system_info() {
+    static SYSTEM_INFO si;
+    static std::once_flag flag;
+    std::call_once( flag, static_cast< void(*)( SYSTEM_INFO *) >( system_info_), & si);
+    return si;
+}
+
+std::size_t pagesize() {
+    return static_cast< std::size_t >( system_info().dwPageSize);
+}
+
+// Windows seems not to provide a limit for the stacksize
+// libcoco uses 32k+4k bytes as minimum
+bool stack_traits::is_unbounded() {
+    return true;
+}
+
+std::size_t stack_traits::page_size() {
+    return pagesize();
+}
+
+std::size_t stack_traits::default_size() {
+    return 128 * 1024;
+}
+
+// because Windows seems not to provide a limit for minimum stacksize
+std::size_t stack_traits::minimum_size() {
+    return MIN_STACKSIZE;
+}
+
+// because Windows seems not to provide a limit for maximum stacksize
+// maximum_size() can never be called (pre-condition ! is_unbounded() )
+std::size_t stack_traits::maximum_size() {
+    assert( ! is_unbounded() );
+    return  1 * 1024 * 1024 * 1024; // 1GB
+}
+
+stack_context stack_traits::allocate(size_t size_) {
+	// calculate how many pages are required
+	const std::size_t pages(static_cast< std::size_t >( std::ceil( static_cast< float >( size_) / stack_traits::page_size() ) ) );
+	// add one page at bottom that will be used as guard-page
+	const std::size_t size__ = ( pages + 1) * stack_traits::page_size();
+
+	void * vp = ::VirtualAlloc( 0, size__, MEM_COMMIT, PAGE_READWRITE);
+	if ( ! vp) throw std::bad_alloc();
+
+	DWORD old_options;
+	const BOOL result = ::VirtualProtect(
+		vp, stack_traits::page_size(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
+	assert( FALSE != result);
+
+	stack_context sctx;
+	sctx.size = size__;
+	sctx.sp = static_cast< char * >( vp) + sctx.size;
+	return sctx;
+}
+
+void stack_traits::deallocate( stack_context & sctx)  {
+	assert( sctx.sp);
+
+	void * vp = static_cast< char * >( sctx.sp) - sctx.size;
+	::VirtualFree( vp, 0, MEM_RELEASE);
+}
+
+#else
+
+// 128kb recommended stack size
+// # define MINSIGSTKSZ (131072) 
+
+void pagesize_( std::size_t * size)  {
+    // conform to POSIX.1-2001
+    * size = ::sysconf( _SC_PAGESIZE);
+}
+
+void stacksize_limit_( rlimit * limit)  {
+    // conforming to POSIX.1-2001
+    ::getrlimit( RLIMIT_STACK, limit);
+}
+
+std::size_t pagesize()  {
+    static std::size_t size = 0;
+    static std::once_flag flag;
+    std::call_once( flag, pagesize_, & size);
+    return size;
+}
+
+rlimit stacksize_limit()  {
+    static rlimit limit;
+    static std::once_flag flag;
+    std::call_once( flag, stacksize_limit_, & limit);
+    return limit;
+}
+
+bool stack_traits::is_unbounded() {
+    return RLIM_INFINITY == stacksize_limit().rlim_max;
+}
+
+std::size_t stack_traits::page_size() {
+    return pagesize();
+}
+
+std::size_t stack_traits::default_size() {
+	return 128 * 1024;    
+}
+
+std::size_t stack_traits::minimum_size() {
+    return MINSIGSTKSZ;
+}
+
+std::size_t stack_traits::maximum_size() {
+    assert( ! is_unbounded() );
+    return static_cast< std::size_t >( stacksize_limit().rlim_max);
+}
+
+stack_context stack_traits::allocate(std::size_t size_) {
+	// calculate how many pages are required
+	const std::size_t pages(static_cast< std::size_t >( std::ceil( static_cast< float >( size_) / stack_traits::page_size() ) ) );
+	// add one page at bottom that will be used as guard-page
+	const std::size_t size__ = ( pages + 1) * stack_traits::page_size();
+
+	// conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L)
+#if defined(MAP_ANON)
+	void * vp = ::mmap( 0, size__, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+#else
+	void * vp = ::mmap( 0, size__, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+#endif
+	if ( MAP_FAILED == vp) throw std::bad_alloc();
+
+	// conforming to POSIX.1-2001
+	const int result( ::mprotect( vp, stack_traits::page_size(), PROT_NONE) );
+	assert( 0 == result);
+
+	stack_context sctx;
+	sctx.size = size__;
+	sctx.sp = static_cast< char * >( vp) + sctx.size;
+
+	return sctx;
+}
+
+void stack_traits::deallocate(stack_context & sctx) {
+	assert( sctx.sp);
+
+	void * vp = static_cast< char * >( sctx.sp) - sctx.size;
+	// conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L)
+	::munmap( vp, sctx.size);
+}
+
+#endif
+
+////////////////////////////////////////////////////////
+TC_CoroutineInfo::TC_CoroutineInfo()
+: _prev(NULL)
+, _next(NULL)
+, _scheduler(NULL)
+, _uid(0)
+, _eStatus(CORO_FREE)
+{
+}
+
+TC_CoroutineInfo::TC_CoroutineInfo(TC_CoroutineScheduler* scheduler, uint32_t iUid, stack_context stack_ctx)
+: _prev(NULL)
+, _next(NULL)
+, _scheduler(scheduler)
+, _uid(iUid)
+, _eStatus(CORO_FREE)
+, _stack_ctx(stack_ctx)
+{
+}
+
+TC_CoroutineInfo::~TC_CoroutineInfo()
+{
+}
+
+void TC_CoroutineInfo::setStackContext(stack_context stack_ctx)
+{
+	_stack_ctx = stack_ctx;
+}
+
+void TC_CoroutineInfo::registerFunc(const std::function<void ()>& callback)
+{
+    _callback           = callback;
+
+    _init_func.coroFunc = TC_CoroutineInfo::corotineProc;
+
+    _init_func.args     = this;
+
+	fcontext_t ctx      = make_fcontext(_stack_ctx.sp, _stack_ctx.size, TC_CoroutineInfo::corotineEntry);
+
+	transfer_t tf       = jump_fcontext(ctx, this);
+
+	//实际的ctx
+	this->setCtx(tf.fctx);
+}
+
+void TC_CoroutineInfo::corotineEntry(transfer_t tf)
+{
+    TC_CoroutineInfo * coro = static_cast< TC_CoroutineInfo * >(tf.data);
+
+    auto    func  = coro->_init_func.coroFunc;
+    void*    args = coro->_init_func.args;
+
+	transfer_t t = jump_fcontext(tf.fctx, NULL);
+
+	//拿到自己的协程堆栈, 当前协程结束以后, 好跳转到main
+	coro->_scheduler->setMainCtx(t.fctx);
+
+    //再跳转到具体函数
+    func(args, t);
+}
+
+void TC_CoroutineInfo::corotineProc(void * args, transfer_t t)
+{
+    TC_CoroutineInfo *coro = (TC_CoroutineInfo*)args;
+
+    try
+    {
+    	//执行具体业务代码
+	    coro->_callback();
+    }
+    catch(std::exception &ex)
+    {
+        cerr << "TC_CoroutineInfo::corotineProc exception:" << ex.what() << endl;
+    }
+    catch(...)
+    {
+        cerr << "TC_CoroutineInfo::corotineProc unknown exception." << endl;
+    }
+
+    TC_CoroutineScheduler* scheduler =  coro->getScheduler();
+    scheduler->decUsedSize();
+    scheduler->moveToFreeList(coro);
+
+    //当前业务执行完, 会跳到main
+	scheduler->switchCoro(&(scheduler->getMainCoroutine()));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+thread_local shared_ptr<TC_CoroutineScheduler> TC_CoroutineScheduler::g_scheduler;
+
+const shared_ptr<TC_CoroutineScheduler> &TC_CoroutineScheduler::create()
+{
+    if(!g_scheduler) 
+    {
+        g_scheduler = std::make_shared<TC_CoroutineScheduler>();
+    }
+    
+    return g_scheduler;
+}
+
+const shared_ptr<TC_CoroutineScheduler> &TC_CoroutineScheduler::scheduler()
+{
+    return g_scheduler;
+}
+
+void TC_CoroutineScheduler::reset()
+{
+	g_scheduler.reset();
+}
+
+TC_CoroutineScheduler::TC_CoroutineScheduler()
+: _currentSize(0)
+, _usedSize(0)
+, _uniqId(0)
+, _currentCoro(NULL)
+, _all_coro(NULL)
+{
+    // LOG_CONSOLE_DEBUG << endl;
+
+    _epoller = new TC_Epoller();
+
+    _epoller->create(10240);
+}
+
+TC_CoroutineScheduler::~TC_CoroutineScheduler()
+{
+    // LOG_CONSOLE_DEBUG << endl;
+    if(_epoller)
+	{
+		delete _epoller;
+		_epoller = NULL;
+	}
+}
+
+void TC_CoroutineScheduler::createCoroutineInfo(size_t poolSize)
+{
+	if(_all_coro != NULL)
+	{
+		delete [] _all_coro;
+	}
+
+	_all_coro = new TC_CoroutineInfo*[_poolSize+1];
+	for(size_t i = 0; i <= _poolSize; ++i)
+	{
+        //id=0不使用, 给mainCoro来使用!
+		_all_coro[i] = NULL;
+	}
+}
+
+void TC_CoroutineScheduler::setPoolStackSize(uint32_t iPoolSize, size_t iStackSize)
+{
+	_poolSize   = iPoolSize;
+	_stackSize  = iStackSize;
+}
+
+void TC_CoroutineScheduler::init()
+{
+	_usedSize   = 0;
+	_uniqId     = 0;
+
+    if(_poolSize <= 100)
+    {
+        _currentSize = _poolSize;
+    }
+    else
+    {
+        _currentSize = 100;
+    }
+
+	createCoroutineInfo(_poolSize);
+
+    TC_CoroutineInfo::CoroutineHeadInit(&_active);
+    TC_CoroutineInfo::CoroutineHeadInit(&_avail);
+    TC_CoroutineInfo::CoroutineHeadInit(&_inactive);
+    TC_CoroutineInfo::CoroutineHeadInit(&_timeout);
+    TC_CoroutineInfo::CoroutineHeadInit(&_free);
+
+    int iSucc = 0;
+
+    for(size_t i = 0; i < _currentSize; ++i)
+    {
+        //iId=0不使用, 给mainCoro使用!!!! 
+	    uint32_t iId = generateId();
+
+        assert(iId != 0);
+
+        stack_context s_ctx = stack_traits::allocate(_stackSize);
+
+	    TC_CoroutineInfo *coro = new TC_CoroutineInfo(this, iId, s_ctx);
+
+        _all_coro[iId] = coro;
+
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_free);
+
+        ++iSucc;
+    }
+
+    _currentSize = iSucc;
+
+    _mainCoro.setUid(0);
+    _mainCoro.setStatus(TC_CoroutineInfo::CORO_FREE);
+
+    _currentCoro = &_mainCoro;
+}
+
+int TC_CoroutineScheduler::increaseCoroPoolSize()
+{
+    if(_poolSize <= _currentSize)
+    	return -1;
+
+    int iInc = ((_poolSize - _currentSize) > 100) ? 100 : (_poolSize - _currentSize);
+
+    for(int i = 0; i < iInc; ++i)
+    {
+	    uint32_t iId        = generateId();
+	    stack_context s_ctx = stack_traits::allocate(_stackSize);
+
+	    TC_CoroutineInfo *coro = new TC_CoroutineInfo(this, iId, s_ctx);
+
+        _all_coro[iId] = coro;
+
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_free);
+    }
+
+    _currentSize += iInc;
+
+    return 0;
+}
+
+uint32_t TC_CoroutineScheduler::createCoroutine(const std::function<void ()> &callback)
+{
+	if(!_all_coro)
+	{
+		init();
+	}
+
+    if(_usedSize >= _currentSize || TC_CoroutineInfo::CoroutineHeadEmpty(&_free))
+    {
+        int iRet = increaseCoroPoolSize();
+
+        if(iRet != 0)
+            return 0;
+    }
+
+    TC_CoroutineInfo *coro = _free._next;
+    assert(coro != NULL);
+
+    TC_CoroutineInfo::CoroutineDel(coro);
+
+    _usedSize++;
+
+    coro->setStatus(TC_CoroutineInfo::CORO_AVAIL);
+
+    TC_CoroutineInfo::CoroutineAddTail(coro, &_avail);
+
+    coro->registerFunc(callback);
+
+    return coro->getUid();
+}
+
+bool TC_CoroutineScheduler::full()
+{
+	if(_usedSize >= _currentSize || TC_CoroutineInfo::CoroutineHeadEmpty(&_free))
+	{
+		if(_poolSize <= _currentSize)
+			return true;
+	}
+
+	return false;
+}
+
+void TC_CoroutineScheduler::notify()
+{
+	assert(_epoller);
+
+    _epoller->notify();
+}
+
+void TC_CoroutineScheduler::run()
+{
+	if(!_all_coro)
+	{
+		init();
+	}
+
+	_ready = true;
+
+	while(!_epoller->isTerminate())
+	{
+		if(_activeCoroQueue.empty() && TC_CoroutineInfo::CoroutineHeadEmpty(&_avail) && TC_CoroutineInfo::CoroutineHeadEmpty(&_active))
+		{
+			_epoller->done(1000);
+		}
+
+		//唤醒需要激活的协程
+		wakeup();
+
+		//唤醒sleep的协程
+		wakeupbytimeout();
+
+		//唤醒yield的协程
+		wakeupbyself();
+
+		int iLoop = 100;
+
+		//执行active协程, 每次执行100个, 避免占满cpu
+		while(iLoop > 0 && !TC_CoroutineInfo::CoroutineHeadEmpty(&_active))
+		{
+			TC_CoroutineInfo *coro = _active._next;
+
+			assert(coro != NULL);
+
+			switchCoro(coro);
+
+			--iLoop;
+		}
+
+		//检查yield的线程, 执行
+		if(!TC_CoroutineInfo::CoroutineHeadEmpty(&_avail))
+		{
+			TC_CoroutineInfo *coro = _avail._next;
+
+			assert(coro != NULL);
+
+			switchCoro(coro);
+		}
+
+        //没有任何可执行的协程了, 直接退出!
+        if(_usedSize == 0 && _noCoroutineCallback)
+        {
+            _noCoroutineCallback(this);
+        }
+	}
+
+	destroy();
+
+	_ready = false;
+}
+
+void TC_CoroutineScheduler::yield(bool bFlag)
+{
+    //主协程不允许yield
+    if(_currentCoro->getUid() == 0)
+    {
+        return;
+    }
+
+    if(bFlag)
+    {
+	    _needActiveCoroId.push_back(_currentCoro->getUid());
+    }
+
+    moveToInactive(_currentCoro);
+    switchCoro(&_mainCoro);
+}
+
+void TC_CoroutineScheduler::sleep(int iSleepTime)
+{
+    //主协程不允许sleep
+    if(_currentCoro->getUid() == 0)
+        return;
+
+    int64_t iNow = TNOWMS;
+    int64_t iTimeout = iNow + (iSleepTime >= 0 ? iSleepTime : -iSleepTime);
+
+    _timeoutCoroId.insert(make_pair(iTimeout, _currentCoro->getUid()));
+
+    moveToTimeout(_currentCoro);
+
+    _epoller->postAtTime(iTimeout, [](){});
+
+    switchCoro(&_mainCoro);
+}
+
+void TC_CoroutineScheduler::wakeupbyself()
+{
+    if(!_needActiveCoroId.empty() && !_epoller->isTerminate())
+    {
+        list<uint32_t>::iterator it = _needActiveCoroId.begin();
+        while(it != _needActiveCoroId.end())
+        {
+            TC_CoroutineInfo *coro = _all_coro[*it];
+
+            assert(coro != NULL);
+
+            moveToAvail(coro);
+
+            ++it;
+        }
+        _needActiveCoroId.clear();
+    }
+}
+
+void TC_CoroutineScheduler::put(uint32_t iCoroId)
+{
+    if(!_epoller->isTerminate())
+    {
+        _activeCoroQueue.push_back(iCoroId);
+
+	    _epoller->notify();
+    }
+}
+
+void TC_CoroutineScheduler::wakeup()
+{
+    if(!_activeCoroQueue.empty() && !_epoller->isTerminate())
+    {
+        deque<uint32_t> coroIds;
+
+        _activeCoroQueue.swap(coroIds);
+
+        auto it = coroIds.begin();
+
+        auto itEnd = coroIds.end();
+
+        while(it != itEnd)
+        {
+            TC_CoroutineInfo *coro = _all_coro[*it];
+
+            assert(coro != NULL);
+
+            moveToActive(coro);
+
+            ++it;
+        }
+    }
+}
+
+void TC_CoroutineScheduler::wakeupbytimeout()
+{
+    if(!_timeoutCoroId.empty() && !_epoller->isTerminate())
+    {
+        int64_t iNow = TNOWMS;
+        while(true)
+        {
+            multimap<int64_t, uint32_t>::iterator it = _timeoutCoroId.begin();
+
+            if(it == _timeoutCoroId.end() || it->first > iNow)
+                break;
+
+            TC_CoroutineInfo *coro = _all_coro[it->second];
+
+            assert(coro != NULL);
+
+            moveToActive(coro);
+
+            _timeoutCoroId.erase(it);
+        }
+
+    }
+}
+
+void TC_CoroutineScheduler::terminate()
+{
+	assert(_epoller);
+
+	_epoller->terminate();
+}
+
+uint32_t TC_CoroutineScheduler::generateId()
+{
+    uint32_t i = ++_uniqId;
+    if(i == 0) {
+        i = ++_uniqId;
+    }
+
+    assert(i <= _poolSize);
+
+    return i;
+}
+
+void TC_CoroutineScheduler::switchCoro(TC_CoroutineInfo *to)
+{
+    //跳转到to协程
+    _currentCoro = to;
+
+	transfer_t t = jump_fcontext(to->getCtx(), NULL);
+
+	//并保存协程堆栈
+	to->setCtx(t.fctx);
+}
+
+void TC_CoroutineScheduler::moveToActive(TC_CoroutineInfo *coro)
+{
+    if(coro->getStatus() == TC_CoroutineInfo::CORO_INACTIVE || coro->getStatus() == TC_CoroutineInfo::CORO_TIMEOUT)
+    {
+        TC_CoroutineInfo::CoroutineDel(coro);
+        coro->setStatus(TC_CoroutineInfo::CORO_ACTIVE);
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_active);
+    }
+    else
+    {
+    	assert(false);
+    }
+}
+
+void TC_CoroutineScheduler::moveToAvail(TC_CoroutineInfo *coro)
+{
+    if(coro->getStatus() == TC_CoroutineInfo::CORO_INACTIVE)
+    {
+        TC_CoroutineInfo::CoroutineDel(coro);
+        coro->setStatus(TC_CoroutineInfo::CORO_AVAIL);
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_avail);
+    }
+    else
+    {
+    	assert(false);
+    }
+}
+
+void TC_CoroutineScheduler::moveToInactive(TC_CoroutineInfo *coro)
+{
+    if(coro->getStatus() == TC_CoroutineInfo::CORO_ACTIVE || coro->getStatus() == TC_CoroutineInfo::CORO_AVAIL)
+    {
+        TC_CoroutineInfo::CoroutineDel(coro);
+        coro->setStatus(TC_CoroutineInfo::CORO_INACTIVE);
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_inactive);
+    }
+    else
+    {
+    	assert(false);
+    }
+}
+
+void TC_CoroutineScheduler::moveToTimeout(TC_CoroutineInfo *coro)
+{
+    if(coro->getStatus() == TC_CoroutineInfo::CORO_ACTIVE || coro->getStatus() == TC_CoroutineInfo::CORO_AVAIL)
+    {
+        TC_CoroutineInfo::CoroutineDel(coro);
+        coro->setStatus(TC_CoroutineInfo::CORO_TIMEOUT);
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_timeout);
+    }
+    else
+    {
+    	assert(false);
+    }
+}
+
+void TC_CoroutineScheduler::moveToFreeList(TC_CoroutineInfo *coro)
+{
+    if(coro->getStatus() != TC_CoroutineInfo::CORO_FREE)
+    {
+        TC_CoroutineInfo::CoroutineDel(coro);
+        coro->setStatus(TC_CoroutineInfo::CORO_FREE);
+        TC_CoroutineInfo::CoroutineAddTail(coro, &_free);
+    }
+    else
+    {
+    	assert(false);
+    }
+}
+
+void TC_CoroutineScheduler::destroy()
+{
+    if(_all_coro)
+    {
+        //id=0是保留不用的, 给mainCoro作为id用
+        assert(_all_coro[0] == NULL);
+
+        for (size_t i = 1; i <= _poolSize; i++)
+        {
+            if(_all_coro[i])
+            {
+                stack_traits::deallocate(_all_coro[i]->getStackContext());
+                delete _all_coro[i];
+                _all_coro[i] = NULL;
+            }
+        }
+        delete [] _all_coro;
+		_all_coro = NULL;
+    }
+}
+/////////////////////////////////////////////////////////
+TC_Coroutine::TC_Coroutine()
+: _coroSched(NULL)
+, _num(1)
+, _maxNum(128)
+, _stackSize(128*1024)
+{
+}
+
+TC_Coroutine::~TC_Coroutine()
+{
+    if(isAlive())
+    {
+        terminate();
+
+        getThreadControl().join();
+    }
+}
+
+void TC_Coroutine::setCoroInfo(uint32_t iNum, uint32_t iMaxNum, size_t iStackSize)
+{
+    _maxNum     = (iMaxNum > 0 ? iMaxNum : 1);
+    _num        = (iNum > 0 ? (iNum <= _maxNum ? iNum : _maxNum) : 1);
+    _stackSize  = (iStackSize >= pagesize() ? iStackSize : pagesize());
+}
+
+void TC_Coroutine::run()
+{
+    _coroSched = TC_CoroutineScheduler::create();
+
+    initialize();
+
+    handleCoro();
+
+    destroy();
+}
+
+void TC_Coroutine::terminate()
+{
+    if(_coroSched)
+    {
+        _coroSched->terminate();
+    }
+}
+
+void TC_Coroutine::handleCoro()
+{
+    _coroSched->setPoolStackSize(_maxNum, _stackSize);
+
+    _coroSched->setNoCoroutineCallback([&](TC_CoroutineScheduler *scheduler){scheduler->terminate();});
+
+	//把协程创建出来
+    for(uint32_t i = 0; i < _num; ++i)
+    {
+        _coroSched->createCoroutine(std::bind(&TC_Coroutine::coroEntry, this));
+    }
+
+
+    _coroSched->run();
+}
+
+void TC_Coroutine::coroEntry(TC_Coroutine *pCoro)
+{
+    pCoro->handle();
+}
+
+uint32_t TC_Coroutine::createCoroutine(const std::function<void ()> &coroFunc)
+{
+    return _coroSched->createCoroutine(coroFunc);
+}
+
+void TC_Coroutine::yield()
+{
+    _coroSched->yield();
+}
+
+void TC_Coroutine::sleep(int millseconds)
+{
+    _coroSched->sleep(millseconds);
+}
+
+}

+ 6 - 6
util/src/tc_encoder.cpp

@@ -119,12 +119,12 @@ string TC_Encoder::gbk2utf8(const string &sIn,int mode)
 	char* pIn = (char*)sIn.c_str();
 
 	size_t ret = iconv(cd, &pIn, &isize, &pOut, &osize);
-	if(-1 == ret && TC_Encoder::ICONV_NORMAL == mode){
-		iconv_close(cd);
-		delete []buf;
-		THROW_EXCEPTION_SYSCODE(TC_Encoder_Exception, "[TC_Encoder::gbk2utf8] iconv error");
-		return sOut;
-	}
+    if((size_t)-1 == ret && TC_Encoder::ICONV_NORMAL == mode){
+        iconv_close(cd);
+        delete []buf;
+        THROW_EXCEPTION_SYSCODE(TC_Encoder_Exception, "[TC_Encoder::gbk2utf8] iconv error");
+        return sOut;
+    }
 
 	iconv_close(cd);
 	buf[bufsize-osize]=0;

La diferencia del archivo ha sido suprimida porque es demasiado grande
+ 564 - 846
util/src/tc_epoll_server.cpp


+ 645 - 319
util/src/tc_epoller.cpp

@@ -1,319 +1,645 @@
-/**
- * Tencent is pleased to support the open source community by making Tars available.
- *
- * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
- *
- * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
- * in compliance with the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/BSD-3-Clause
- *
- * Unless required by applicable law or agreed to in writing, software distributed 
- * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
- * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
- * specific language governing permissions and limitations under the License.
- */
-#include "util/tc_epoller.h"
-#if TARGET_PLATFORM_WINDOWS
-#include "sys/epoll.h"
-#else
-#include <unistd.h>
-#endif
-
-namespace tars
-{
-
-TC_Epoller::NotifyInfo::NotifyInfo() : _ep(NULL)
-{
-}
-
-TC_Epoller::NotifyInfo::~NotifyInfo()
-{
-    _notify.close();
-}
-
-void TC_Epoller::NotifyInfo::init(TC_Epoller *ep)
-{
-    _ep = ep;
-
-	_notify.createSocket(SOCK_DGRAM, AF_INET);
-}
-
-void TC_Epoller::NotifyInfo::add(uint64_t data)
-{
-    _data = data;
-    _ep->add(_notify.getfd(), data, EPOLLIN | EPOLLOUT);
-}
-
-void TC_Epoller::NotifyInfo::notify()
-{
-    _ep->mod(_notify.getfd(), _data, EPOLLIN | EPOLLOUT);
-}
-
-void TC_Epoller::NotifyInfo::release()
-{
-    _ep->del(_notify.getfd(), 0, EPOLLIN | EPOLLOUT);
-    _notify.close();
-}
-
-int TC_Epoller::NotifyInfo::notifyFd()
-{
-    return _notify.getfd();
-}
-
-//////////////////////////////////////////////////////////////////////
-
-TC_Epoller::TC_Epoller()
-{
-#if TARGET_PLATFORM_WINDOWS
-    _iEpollfd = NULL;
-#else
-	_iEpollfd = -1;
-#endif
-	_pevs     = nullptr;
-	_max_connections = 1024;
-}
-
-TC_Epoller::~TC_Epoller()
-{
-	if(_pevs != nullptr)
-	{
-		delete[] _pevs;
-		_pevs = nullptr;
-	}
-
-#if TARGET_PLATFORM_WINDOWS
-	if (_iEpollfd != NULL)
-		epoll_close(_iEpollfd);
-#else
-	if (_iEpollfd > 0)
-		::close(_iEpollfd);
-#endif
-
-}
-
-#if TARGET_PLATFORM_IOS
-
-int TC_Epoller::ctrl(SOCKET_TYPE fd, uint64_t data, uint32_t events, int op)
-{
-    if(fd < 0) return -1;
-
-    int n = 0;
-    struct kevent64_s ev[2];
-
-	if(_enableET) {
-		op = op | EV_CLEAR;
-	}
-
-	if (events & EPOLLIN)
-    {
-        EV_SET64(&ev[n++], fd, EVFILT_READ, op, 0, 0, data, 0, 0);
-    }
-
-    if (events & EPOLLOUT)
-    {
-        EV_SET64(&ev[n++], fd, EVFILT_WRITE, op, 0, 0, data, 0, 0);
-    }
-
-    int ret = kevent64(_iEpollfd, ev, n, nullptr, 0, 0, nullptr);
-
-    if(ret == -1)
-    {
-        //一般都是析构的时候出现,有需要close就行
-//        cerr << "[TC_Epoller::ctrl] error, fd:" << fd << ", errno:" << errno  << "|"<< strerror(errno) << endl;
-        ::close(_iEpollfd);
-        _iEpollfd = 0;
-    }
-    return ret;
-}
-
-#else
-int TC_Epoller::ctrl(SOCKET_TYPE fd, uint64_t data, uint32_t events, int op)
-{
-	struct epoll_event ev;
-	ev.data.u64 = data;
-
-#if TARGET_PLATFORM_WINDOWS
-	ev.events = events;
-#else
-    if (_enableET)
-    {
-        events = events | EPOLLET;
-    }
-
-    ev.events   = events;
-#endif
-
-	return epoll_ctl(_iEpollfd, op, fd, &ev);
-}
-#endif
-
-void TC_Epoller::create(int size)
-{
-#if TARGET_PLATFORM_IOS
-    _iEpollfd = kqueue();
-#else
-	_iEpollfd = epoll_create(size);
-#endif
-    if (nullptr != _pevs)
-    {
-        delete[] _pevs;
-    }
-
-    _max_connections = 1024;
-
-    _pevs = new epoll_event[_max_connections];
-}
-
-void TC_Epoller::close()
-{
-#if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
-    ::close(_iEpollfd);
-#else
-    epoll_close(_iEpollfd);
-#endif
-    _iEpollfd = 0;
-}
-
-int TC_Epoller::add(SOCKET_TYPE fd, uint64_t data, int32_t event)
-{
-#if TARGET_PLATFORM_IOS
-    return ctrl(fd, data, event, EV_ADD|EV_ENABLE);
-#else
-    return ctrl(fd, data, event, EPOLL_CTL_ADD);
-#endif
-}
-
-int TC_Epoller::mod(SOCKET_TYPE fd, uint64_t data, int32_t event)
-{
-#if TARGET_PLATFORM_IOS
-    return add(fd, data, event);
-#else
-    return ctrl(fd, data, event, EPOLL_CTL_MOD);
-#endif
-}
-
-int TC_Epoller::del(SOCKET_TYPE fd, uint64_t data, int32_t event)
-{
-#if TARGET_PLATFORM_IOS    
-    return ctrl(fd, data, event, EV_DELETE);
-#else
-    return ctrl(fd, data, event, EPOLL_CTL_DEL);
-#endif
-}
-
-epoll_event& TC_Epoller::get(int i) 
-{ 
-	assert(_pevs != 0); 
-	return _pevs[i]; 
-}
-
-int TC_Epoller::wait(int millsecond)
-{
-#if !TARGET_PLATFORM_WINDOWS    
-retry:    
-#endif
-
-	int ret;
-#if TARGET_PLATFORM_IOS
-    struct timespec timeout;
-    timeout.tv_sec = millsecond / 1000;
-    timeout.tv_nsec = (millsecond % 1000) * 1000 * 1000;
-	ret = kevent64(_iEpollfd, nullptr, 0, _pevs, _max_connections, 0, &timeout);
-#else
-	ret = epoll_wait(_iEpollfd, _pevs, _max_connections, millsecond);
-#endif
-
-#if TARGET_PLATFORM_WINDOWS
-	return ret;
-#else
-	if(ret < 0 && errno == EINTR)
-	{
-		goto retry;
-	}
-
-	return ret;
-#endif
-}
-
-bool TC_Epoller::readEvent(const epoll_event &ev)
-{
-#if TARGET_PLATFORM_IOS
-    if (ev.filter == EVFILT_READ)
-#else
-    if (ev.events & EPOLLIN)
-#endif
-    {
-        return true;
-    }
-
-    return false;
-}
-
-bool TC_Epoller::writeEvent(const epoll_event &ev)
-{
-#if TARGET_PLATFORM_IOS
-    if (ev.filter == EVFILT_WRITE)
-#else
-    if (ev.events & EPOLLOUT)              
-#endif 
-    {
-        return true;
-    }
-
-    return false;
-}
-
-bool TC_Epoller::errorEvent(const epoll_event &ev)
-{
-#if TARGET_PLATFORM_IOS
-    if (ev.filter == EVFILT_EXCEPT)
-    {
-        return true;
-    }
-#else
-    if (ev.events & EPOLLERR || ev.events & EPOLLHUP)
-    {
-        return true;
-    }
-#endif
-    return false;
-}
-
-uint32_t TC_Epoller::getU32(const epoll_event &ev, bool high)
-{
-    uint32_t u32 = 0;
-    if(high)
-    {
-#if TARGET_PLATFORM_IOS
-        u32 = ev.udata >> 32;
-#else
-        u32 = ev.data.u64 >> 32;
-#endif        
-    }
-    else
-    {
-#if TARGET_PLATFORM_IOS
-        u32 = (uint32_t)ev.udata;
-#else    
-        u32 = ev.data.u32;
-#endif
-    }
-
-    return u32;
-}
-
-uint64_t TC_Epoller::getU64(const epoll_event &ev)
-{
-    uint64_t data;
-#if TARGET_PLATFORM_IOS
-    data = ev.udata;
-#else
-    data = ev.data.u64;
-#endif
-    return data;
-}
-
-}
-
-
+#include "util/tc_epoller.h"
+#include "util/tc_timeprovider.h"
+#include "util/tc_logger.h"
+#include <algorithm>
+
+#if TARGET_PLATFORM_WINDOWS
+#include "sys/epoll.h"
+#else
+#include <unistd.h>
+#endif
+
+
+namespace tars
+{
+
+TC_Epoller::NotifyInfo::~NotifyInfo()
+{
+	if(_epollInfo && _epoller)
+	{
+		// LOG_CONSOLE_DEBUG << this << ", fd:" << notifyFd() << endl;
+
+		_epoller->releaseEpollInfo(_epollInfo);
+
+		_notify.close();
+
+		_epollInfo = NULL;
+		_epoller    = NULL;
+	}
+}
+
+void TC_Epoller::NotifyInfo::init(TC_Epoller *epoller)
+{
+    _epoller = epoller;
+
+	//用udp句柄, 方便唤醒, tcp句柄还得构建连接后才能唤醒
+	_notify.createSocket(SOCK_DGRAM, AF_INET);
+
+    _epollInfo = _epoller->createEpollInfo(notifyFd());
+
+	// LOG_CONSOLE_DEBUG << this << ", fd:" << notifyFd() << endl;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+TC_Epoller::EpollInfo::~EpollInfo()
+{
+//	 LOG_CONSOLE_DEBUG << this << endl;
+
+	clearCallback();
+
+	if(_deconstructor)
+	{
+		_deconstructor(_cookie);
+		_cookie = NULL;
+	}
+}
+
+void TC_Epoller::EpollInfo::clearCallback()
+{
+	_callbacks[0] = EVENT_CALLBACK();
+	_callbacks[1] = EVENT_CALLBACK();
+	_callbacks[2] = EVENT_CALLBACK();
+}
+
+void TC_Epoller::EpollInfo::registerCallback(const map<uint32_t, EVENT_CALLBACK> & callbacks, uint32_t events)
+{
+	for(auto it : callbacks)
+	{
+		switch(it.first)
+		{
+			case EPOLLIN:
+				_callbacks[0]	= it.second;
+				break;
+			case EPOLLOUT:
+				_callbacks[1]	= it.second;
+				break;
+			case EPOLLERR:
+				_callbacks[2]	= it.second;
+				break;
+		}
+	}
+
+    if(events != 0)
+    {
+    	add(events);
+    }
+}
+
+bool TC_Epoller::EpollInfo::fireEvent(uint32_t event)
+{
+    try
+    {
+        auto data = shared_from_this();
+
+        if((event & EPOLLERR) && _callbacks[2])
+        {
+            _callbacks[2](data);
+
+            return false;
+        }
+
+        if((event & EPOLLIN) && _callbacks[0])
+        {
+            if (!_callbacks[0](data))
+                return false;
+        }
+
+        if((event & EPOLLOUT) && _callbacks[1])
+        {
+            if (!_callbacks[1](data))
+                return false;
+        }
+    }
+    catch(exception &ex)
+    {
+        cerr << "TC_Epoller::EpollInfo::fireEvent event:" << event << ", error: " << ex.what() << endl;
+        return false;
+    }
+    catch(...)
+    {
+        cerr << "TC_Epoller::EpollInfo::fireEvent event:" << event << ", error." << endl;
+        return false;
+    }
+
+    return true;
+}
+
+void TC_Epoller::EpollInfo::release()
+{
+	if(this->valid())
+	{
+		assert(_epoller);
+
+		//epoll不再关注该事件
+		del(0);
+
+		_fd = INVALID_SOCKET;
+	}
+}
+
+void TC_Epoller::EpollInfo::add(uint32_t events)
+{
+	if(valid())
+	{
+		_epoller->add(_fd, data(), events);
+	}
+}
+
+void TC_Epoller::EpollInfo::mod(uint32_t events)
+{
+	if(valid())
+	{
+		_epoller->mod(_fd, data(), events);
+	}
+}
+
+void TC_Epoller::EpollInfo::del(uint32_t events)
+{
+	if(valid())
+	{
+		_epoller->del(_fd, 0, events);
+	}
+}
+
+//////////////////////////////////////////////////////////////////////
+
+
+TC_Epoller::TC_Epoller()
+{
+#if TARGET_PLATFORM_WINDOWS
+    _iEpollfd = NULL;
+#else
+	_iEpollfd = -1;
+#endif
+	_pevs     = nullptr;
+	_max_connections = 1024;
+
+}
+
+TC_Epoller::~TC_Epoller()
+{
+	if(_notify != nullptr)
+	{
+        delete _notify;
+		_notify = nullptr;
+	}
+
+	if(_pevs != nullptr)
+	{
+		delete[] _pevs;
+		_pevs = nullptr;
+	}
+
+	clear();
+
+	_idleCallbacks.clear();
+
+#if TARGET_PLATFORM_WINDOWS
+	if (_iEpollfd != NULL)
+	{
+		epoll_close(_iEpollfd);
+		_iEpollfd = NULL;
+	}
+#else
+	if (_iEpollfd >= 0)
+	{
+		::close(_iEpollfd);
+		_iEpollfd = -1;
+	}
+#endif
+
+}
+
+#if TARGET_PLATFORM_IOS
+
+int TC_Epoller::ctrl(SOCKET_TYPE fd, uint64_t data, uint32_t events, int op)
+{
+    if(fd < 0) return -1;
+
+    int n = 0;
+    struct kevent64_s ev[2];
+    
+    if(_enableET)
+    {
+        op = op | EV_CLEAR;
+    }
+
+	if (events & EPOLLIN)
+    {
+        EV_SET64(&ev[n++], fd, EVFILT_READ, op, 0, 0, data, 0, 0);
+    }
+
+    if (events & EPOLLOUT)
+    {
+        EV_SET64(&ev[n++], fd, EVFILT_WRITE, op, 0, 0, data, 0, 0);
+    }
+
+    int ret = kevent64(_iEpollfd, ev, n, nullptr, 0, 0, nullptr);
+
+    if(ret == -1)
+    {
+        //一般都是析构的时候出现,有需要close就行
+//        cerr << "[TC_Epoller::ctrl] error, fd:" << fd << ", errno:" << errno  << "|"<< strerror(errno) << endl;
+		close();
+    }
+
+    return ret;
+}
+
+#else
+int TC_Epoller::ctrl(SOCKET_TYPE fd, uint64_t data, uint32_t events, int op)
+{
+	struct epoll_event ev;
+	ev.data.u64 = data;
+
+#if TARGET_PLATFORM_WINDOWS
+	ev.events = events;
+#else
+    if(_enableET)
+    {
+        ev.events = events | EPOLLET;
+    }
+    else
+    {
+        ev.events = events;
+    }
+#endif
+
+	return epoll_ctl(_iEpollfd, op, fd, &ev);
+}
+#endif
+
+void TC_Epoller::create(int size, bool createNotify)
+{
+#if TARGET_PLATFORM_IOS
+    _iEpollfd = kqueue();
+#else
+	_iEpollfd = epoll_create(size);
+#endif
+    if (nullptr != _pevs)
+    {
+        delete[] _pevs;
+    }
+
+    _max_connections = 128;
+
+    _pevs = new epoll_event[_max_connections];
+
+    if(createNotify)
+    {
+        if (_notify != NULL)
+        {
+            delete _notify;
+            _notify = NULL;
+        }
+
+        _notify = new NotifyInfo();
+        _notify->init(this);
+        _notify->getEpollInfo()->add(EPOLLIN);
+    }
+}
+
+void TC_Epoller::close()
+{
+#if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
+    ::close(_iEpollfd);
+	_iEpollfd = -1;
+
+#else
+    epoll_close(_iEpollfd);
+    _iEpollfd = NULL;
+#endif
+}
+
+shared_ptr<TC_Epoller::EpollInfo> TC_Epoller::createEpollInfo(SOCKET_TYPE fd)
+{
+	return std::make_shared<TC_Epoller::EpollInfo>(this, fd);
+}
+
+void TC_Epoller::releaseEpollInfo(const shared_ptr<TC_Epoller::EpollInfo> &epollInfo)
+{
+	if(epollInfo)
+	{
+		epollInfo->clearCallback();
+        epollInfo->release();
+	}
+}
+
+void TC_Epoller::add(SOCKET_TYPE fd, uint64_t data, uint32_t events)
+{
+#if TARGET_PLATFORM_IOS
+    ctrl(fd, data, events, EV_ADD|EV_ENABLE);
+#else
+    ctrl(fd, data, events, EPOLL_CTL_ADD);
+#endif
+}
+
+void TC_Epoller::mod(SOCKET_TYPE fd, uint64_t data, uint32_t events)
+{
+#if TARGET_PLATFORM_IOS
+    ctrl(fd, data, events, EV_ADD|EV_ENABLE);
+#else
+    ctrl(fd, data, events, EPOLL_CTL_MOD);
+#endif
+}
+
+void TC_Epoller::del(SOCKET_TYPE fd, uint64_t data, uint32_t events)
+{
+#if TARGET_PLATFORM_IOS
+    ctrl(fd, data, events, EV_DELETE);
+#else
+    ctrl(fd, data, events, EPOLL_CTL_DEL);
+#endif
+}
+
+epoll_event& TC_Epoller::get(int i) 
+{ 
+	assert(_pevs != 0); 
+	return _pevs[i]; 
+}
+
+int TC_Epoller::wait(int millsecond)
+{
+//#if !TARGET_PLATFORM_WINDOWS
+//retry:
+//#endif
+
+	int ret;
+#if TARGET_PLATFORM_IOS
+    struct timespec timeout;
+    timeout.tv_sec = millsecond / 1000;
+    timeout.tv_nsec = (millsecond % 1000) * 1000 * 1000;
+	ret = kevent64(_iEpollfd, nullptr, 0, _pevs, _max_connections, 0, &timeout);
+#else
+	ret = epoll_wait(_iEpollfd, _pevs, _max_connections, millsecond);
+#endif
+
+#if TARGET_PLATFORM_WINDOWS
+	return ret;
+#else
+	if(ret < 0 && errno == EINTR)
+	{
+	    return 0;
+//		goto retry;
+	}
+
+	return ret;
+#endif
+}
+
+bool TC_Epoller::readEvent(const epoll_event &ev)
+{
+#if TARGET_PLATFORM_IOS
+    if (ev.filter == EVFILT_READ)
+#else
+    if (ev.events & EPOLLIN)
+#endif
+    {
+        return true;
+    }
+
+    return false;
+}
+
+bool TC_Epoller::writeEvent(const epoll_event &ev)
+{
+#if TARGET_PLATFORM_IOS
+    if (ev.filter == EVFILT_WRITE)
+#else
+    if (ev.events & EPOLLOUT)              
+#endif 
+    {
+        return true;
+    }
+
+    return false;
+}
+
+bool TC_Epoller::errorEvent(const epoll_event &ev)
+{
+#if TARGET_PLATFORM_IOS
+    if (ev.filter == EVFILT_EXCEPT)
+    {
+        return true;
+    }
+#else
+    if (ev.events & EPOLLERR || ev.events & EPOLLHUP)
+    {
+        return true;
+    }
+#endif
+    return false;
+}
+
+uint32_t TC_Epoller::getU32(const epoll_event &ev, bool high)
+{
+    uint32_t u32 = 0;
+    if(high)
+    {
+#if TARGET_PLATFORM_IOS
+        u32 = ev.udata >> 32;
+#else
+        u32 = ev.data.u64 >> 32;
+#endif        
+    }
+    else
+    {
+#if TARGET_PLATFORM_IOS
+        u32 = (uint32_t)ev.udata;
+#else    
+        u32 = ev.data.u32;
+#endif
+    }
+
+    return u32;
+}
+
+uint64_t TC_Epoller::getU64(const epoll_event &ev)
+{
+    uint64_t data;
+#if TARGET_PLATFORM_IOS
+    data = ev.udata;
+#else
+    data = ev.data.u64;
+#endif
+    return data;
+}
+
+void TC_Epoller::terminate()
+{
+	//清空定时任务
+	clear();
+
+	_terminate = true;
+
+    notify();
+}
+
+void TC_Epoller::reset()
+{
+	clear();
+
+	_terminate = false;
+}
+
+void TC_Epoller::syncCallback(const std::function<void()>& func, int64_t millseconds)
+{
+	TC_Epoller::NotifyInfo syncNotify;
+	std::mutex	syncMutex;
+	std::condition_variable syncCond;
+
+	syncNotify.init(this);
+
+    map<uint32_t, TC_Epoller::EpollInfo::EVENT_CALLBACK> callbacks;
+    callbacks[EPOLLOUT] = [&](const shared_ptr<TC_Epoller::EpollInfo> &data)
+    {
+        try
+        {
+            func();
+        }
+        catch (...)
+        {
+        }
+
+        std::unique_lock<std::mutex> lock(syncMutex);
+        syncCond.notify_one();
+
+        return false;
+    };
+
+    std::unique_lock<std::mutex> lock(syncMutex);
+
+    syncNotify.getEpollInfo()->registerCallback(callbacks, EPOLLOUT);
+
+    if (millseconds >= 0)
+    {
+        syncCond.wait_for(lock, std::chrono::milliseconds(millseconds));
+    }
+    else
+    {
+        syncCond.wait(lock);
+    }
+}
+
+void TC_Epoller::asyncCallback(const std::function<void()>& func)
+{
+    TC_Epoller::NotifyInfo *syncNotify = new TC_Epoller::NotifyInfo();
+    syncNotify->init(this);
+
+    syncNotify->getEpollInfo()->cookie(syncNotify, [](void *p)
+                                       {
+                                           TC_Epoller::NotifyInfo *ni = (TC_Epoller::NotifyInfo *)p;
+                                           delete ni;
+                                       });
+    
+    map<uint32_t, TC_Epoller::EpollInfo::EVENT_CALLBACK> callbacks;
+    callbacks[EPOLLOUT] = [=](const shared_ptr<TC_Epoller::EpollInfo> &data)
+    {
+		try { func(); } catch(...) {}
+
+		//释放到自己的owner, 这样才回保证EpollInfo被自动释放
+		syncNotify->getEpollInfo().reset();
+        return false;
+    };
+
+    syncNotify->getEpollInfo()->registerCallback(callbacks, EPOLLOUT);
+}
+
+void TC_Epoller::notify()
+{
+	if(_notify)
+	{
+		_notify->getEpollInfo()->mod(EPOLLOUT);
+	}
+}
+
+void TC_Epoller::onAddTimer()
+{
+    notify();
+}
+
+void TC_Epoller::onFireEvent(std::function<void()> func)
+{
+    try {func();} catch(...){}
+}
+
+void TC_Epoller::done(uint64_t ms)
+{
+//	LOG_CONSOLE_DEBUG << "fireEvents: " << ms << endl;
+
+    //触发定时事件
+	int64_t nextTimer = fireEvents(ms);
+
+//	LOG_CONSOLE_DEBUG << "wait: " << ms << ", " << ms - TNOWMS << endl;
+
+	int num = wait(nextTimer);
+
+	list<shared_ptr<EpollInfo>> delEpollInfo;
+
+    //先处理epoll的网络事件
+    for (int i = 0; i < num; ++i)
+    {
+		if(_terminate)
+			return;
+
+        const epoll_event& ev = get(i);
+
+        EpollInfo *info = (EpollInfo*)TC_Epoller::getU64(ev);
+
+        if(info == NULL || !info->valid())
+        {
+            continue;
+        }
+
+        assert(info->_epoller == this);
+
+        //返回成智能指针, 保证EpollInfo fireEvent的过程中, 不会被释放掉
+        auto data = info->shared_from_this();
+
+        if(data->_callback)
+        {
+            try {data->_callback(data); } catch(exception &ex) {}
+        }
+
+        uint32_t events = 0;
+
+        if (TC_Epoller::errorEvent(ev)) 
+        {
+            events = EPOLLERR;
+        }
+        else
+        {
+            if (TC_Epoller::writeEvent(ev)) {
+                events |= EPOLLOUT; 
+            }
+
+            if (TC_Epoller::readEvent(ev))
+            {
+                events |= EPOLLIN;  
+            }
+        }
+
+        if(!data->fireEvent(events))
+		{
+			delEpollInfo.push_back(data);
+
+			data->release();
+		}
+
+    }
+
+	std::for_each(_idleCallbacks.begin(), _idleCallbacks.end(), [](const std::function<void()> &f){
+		try {f();} catch(...){}
+	});
+}
+
+void TC_Epoller::loop(uint64_t ms)
+{
+	while(!_terminate)
+	{
+		this->done(ms);
+	}
+}
+}
+

+ 13 - 5
util/src/tc_ex.cpp

@@ -36,7 +36,14 @@ TC_Exception::TC_Exception(const string &buffer)
 
 TC_Exception::TC_Exception(const string &buffer, int err)
 {
-	_buffer = buffer + " :" + parseError(err);
+    if(err != 0)
+    {
+    	_buffer = buffer + " :" + parseError(err);
+    }
+    else
+    {
+        _buffer = buffer;    
+    }
     _code   = err;
 }
 
@@ -112,13 +119,14 @@ string TC_Exception::parseError(int err)
 int TC_Exception::getSystemCode()
 {
 #if TARGET_PLATFORM_WINDOWS        
-    int ret = GetLastError();
-    // cout << "getSystemCode:" << ret << endl;
-
-    return ret; 
+    return GetLastError();
 #else
     return errno; 
 #endif
 }
 
+string TC_Exception::getSystemError()
+{
+    return parseError(getSystemCode());
+}
 }

+ 78 - 42
util/src/tc_http.cpp

@@ -667,7 +667,7 @@ void TC_Http::reset()
     _bIsChunked = false;
 }
 
-void TC_Http::getHeaders(map<string, string> &header)
+void TC_Http::getHeaders(map<string, string> &header) const
 {
 	for(auto it = _headers.begin(); it != _headers.end(); ++it)
 	{
@@ -1166,23 +1166,36 @@ bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer &buff)
 {
 	if(buff.empty())
 		return false;
+
+	buff.mergeBuffers();
+
+    size_t length = buff.getBufferLength();
+
+    auto sBuf = buff.getBuffer();
+
+    bool flag = incrementDecode(*sBuf.get());
+
+    buff.subLength(length - sBuf->length());
+
+    return flag;
+}
+
+bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer::Buffer &data)
+{
+	if(data.empty())
+		return false;
 	//解析头部
 	if (_headLength == 0)
 	{
-		//至少把header合并成一个buff
-		buff.mergeBuffers();
-
-		auto data = buff.getBufferPointer();
-
-		const char * p = strnstr(data.first, "\r\n\r\n", data.second);
+		const char * p = strnstr(data.buffer(), "\r\n\r\n", data.length());
 		if(p == NULL)
 		{
 			return false;
 		}
 
-		_headLength = p - data.first + 4;
+		_headLength = p - data.buffer() + 4;
 
-		_iTmpContentLength = parseResponseHeaderString(data.first, data.first + _headLength);
+		_iTmpContentLength = parseResponseHeaderString(data.buffer(), data.buffer() + _headLength);
 
 		//304的返回码中头里本来就没有Content-Length,也不会有数据体,头收全了就是真正的收全了
 		if ( (204 == _status) || (304 == _status) )
@@ -1190,7 +1203,8 @@ bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer &buff)
 			return true;
 		}
 
-		buff.moveHeader(_headLength);
+		data.addReadIdx(_headLength);
+//		buff.moveHeader(_headLength);
 
 		//重定向就认为成功了
 		if ((_status == 301 || _status == 302) && hasHeader("Location"))
@@ -1198,6 +1212,11 @@ bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer &buff)
 			return true;
 		}
 
+		if(_iTmpContentLength > 0)
+		{
+			data.expansion(_headLength + _iTmpContentLength);
+		}
+
 		//是否是chunk编码
 		_bIsChunked = checkHeader("Transfer-Encoding", "chunked");
 		if(_bIsChunked) {
@@ -1212,37 +1231,41 @@ bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer &buff)
 		{
 			const static string sep = "\r\n";
 
-			auto sit = std::search(buff.begin(), buff.end(), sep.c_str(), sep.c_str() + sep.size());
+			char* sit = std::search(data.buffer(), data.buffer() + data.length(), sep.c_str(), sep.c_str() + sep.size());
 
-			if (sit == buff.end())
+			if (sit == data.buffer() + data.length())
 			{
 				return false;
 			}
 
-			string header = buff.iteratorToIterator<string>(buff.begin(), sit);
+//			string header = buff.iteratorToIterator<string>(buff.begin(), sit);
 
-			int iChunkSize    = strtol(header.c_str(), NULL, 16);
+			int iChunkSize    = strtol(data.buffer(), &sit, 16);
 
 			if (iChunkSize <= 0)
 			{
 				break;     //所有chunk都接收完毕
 			}
 
-			if (buff.getBufferLength() < header.size() + 2 + (size_t)iChunkSize + 2)
+			if (data.length() < sit - data.buffer() + 2 + (size_t)iChunkSize + 2)
 			{
 				//没有接收完整的chunk
 				return false;
 			}
 
 			//接收到一个完整的chunk了
-			buff.moveHeader(header.size() + 2);
-			addContent(buff.getHeader<string>(iChunkSize));
+			data.addReadIdx(sit - data.buffer() + 2);
+			addContent(data.buffer(), iChunkSize);
+//			buff.moveHeader(header.size() + 2);
+//			addContent(buff.getHeader<string>(iChunkSize));
 
 			//删除一个chunk
-			buff.moveHeader(iChunkSize + 2);
+			data.addReadIdx(iChunkSize+2);
+//			buff.moveHeader(iChunkSize + 2);
 		}
 
-		buff.clearBuffers();
+		data.clear();
+//		buff.clearBuffers();
 
 		//接收到buffer长度设置好
 		setContentLength(_iRecvContentLength);
@@ -1251,12 +1274,14 @@ bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer &buff)
 	}
 	else
 	{
-		if (_iTmpContentLength == 0)
-		{
+        if (_iTmpContentLength == 0)
+        {
 			//header长度为0, 但是有body数据
-			addContent(buff.getBuffersString());
+//			addContent(buff.getBuffersString());
+			addContent(data.buffer(), data.length());
 
-			buff.clearBuffers();
+//			buff.clearBuffers();
+			data.clear();
 
 			if(_iRecvContentLength > 0) {
 				setContentLength(_iRecvContentLength);
@@ -1273,22 +1298,26 @@ bool TC_HttpResponse::incrementDecode(TC_NetWorkBuffer &buff)
 			}
 
 			//header中没长度, 但是有body数据
-			addContent(buff.getBuffersString());
+//			addContent(buff.getBuffersString());
+			addContent(data.buffer(), data.length());
 
-			buff.clearBuffers();
+//			buff.clearBuffers();
 
+			data.clear();
 			if(_iRecvContentLength > 0) {
 				setContentLength(_iRecvContentLength);
 			}
 
+//			http 收包这里收包并没有收完应该return false
 			return false;
 		}
 		else
 		{
-			//头部有长度, 接收到长度大于头部为止
-			addContent(buff.getBuffersString());
-
-			buff.clearBuffers();
+            //头部有长度, 接收到长度大于头部为止
+//			addContent(buff.getBuffersString());
+//			buff.clearBuffers();
+			addContent(data.buffer(), data.length());
+			data.clear();
 
 			//头部的长度小于接收的内容, 还需要继续增加解析后续的buffer
 			if (_iTmpContentLength > _iRecvContentLength)
@@ -1745,6 +1774,11 @@ void TC_HttpRequest::encode(TC_NetWorkBuffer &buff)
 	buff.addBuffer(std::move(encode()));
 }
 
+void TC_HttpRequest::encode(shared_ptr<TC_NetWorkBuffer::Buffer>& buff)
+{
+    buff->addBuffer(std::move(encode()));
+}
+
 bool TC_HttpRequest::decode(const string &sBuffer)
 {
     return decode(sBuffer.c_str(), sBuffer.length());
@@ -2127,19 +2161,20 @@ int TC_HttpRequest::doRequest(TC_TCPClient& tcpClient, TC_HttpResponse& stHttpRs
 
     stHttpRsp.reset();
 
-    TC_NetWorkBuffer recvBuffer(NULL);
+	TC_NetWorkBuffer::Buffer recvBuffer;
 
     while (true)
     {
-    	char buffer[8*1024];
-        size_t iRecvLen = sizeof(buffer);
+		recvBuffer.expansion(recvBuffer.length() + 8*1024);
 
-        iRet = tcpClient.recv(buffer, iRecvLen);
+		size_t iRecvLen = recvBuffer.left();
 
-        if (iRet == TC_ClientSocket::EM_SUCCESS)
-        {
-	        recvBuffer.addBuffer(buffer, iRecvLen);
-        }
+		iRet = tcpClient.recv(recvBuffer.free(), iRecvLen);
+
+		if (iRet == TC_ClientSocket::EM_SUCCESS)
+		{
+			recvBuffer.addWriteIdx(iRecvLen);
+		}
 
         switch (iRet)
         {
@@ -2186,18 +2221,19 @@ int TC_HttpRequest::doRequest(TC_HttpResponse &stHttpRsp, int iTimeout)
 
     stHttpRsp.reset();
 
-	TC_NetWorkBuffer recvBuffer(NULL);
+    TC_NetWorkBuffer::Buffer recvBuffer;
 
     while (true)
     {
-	    char buffer[8*1024];
-	    size_t iRecvLen = sizeof(buffer);
+		recvBuffer.expansion(recvBuffer.length() + 8*1024);
+
+	    size_t iRecvLen = recvBuffer.left();
 
-        iRet = tcpClient.recv(buffer, iRecvLen);
+        iRet = tcpClient.recv(recvBuffer.free(), iRecvLen);
 
         if (iRet == TC_ClientSocket::EM_SUCCESS)
         {
-	        recvBuffer.addBuffer(buffer, iRecvLen);
+        	recvBuffer.addWriteIdx(iRecvLen);
         }
 
         switch (iRet)

+ 450 - 608
util/src/tc_http_async.cpp

@@ -1,608 +1,450 @@
-/**
- * Tencent is pleased to support the open source community by making Tars available.
- *
- * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
- *
- * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
- * in compliance with the License. You may obtain a copy of the License at
- *
- * https://opensource.org/licenses/BSD-3-Clause
- *
- * Unless required by applicable law or agreed to in writing, software distributed 
- * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
- * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
- * specific language governing permissions and limitations under the License.
- */
-
-#include "util/tc_http_async.h"
-#include "util/tc_common.h"
-#include "util/tc_timeprovider.h"
-
-namespace tars
-{
-
-TC_HttpAsync::AsyncRequest::AsyncRequest(TC_HttpRequest &stHttpRequest, TC_HttpAsync::RequestCallbackPtr &callbackPtr, bool bUseProxy)
-: _pHttpAsync(NULL)
-, _iUniqId(0)
-, _sendBuffer(this)
-, _recvBuffer(this)
-, _callbackPtr(callbackPtr)
-, _bUseProxy(bUseProxy)
-, _isConnected(false)
-{
-    memset(&_bindAddr, 0, sizeof(struct sockaddr));
-    _bindAddrSet = false;
-
-    vector<char> buff;
-    stHttpRequest.encode(buff);
-
-    _sendBuffer.addBuffer(std::move(buff));
-
-    stHttpRequest.getHostPort(_sHost, _iPort);
-}
-
-TC_HttpAsync::AsyncRequest::AsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const string &addr)
-: _pHttpAsync(NULL)
-, _iUniqId(0)
-, _sendBuffer(this)
-, _recvBuffer(this)
-, _callbackPtr(callbackPtr)
-, _bUseProxy(false)
-, _isConnected(false)
-{
-    memset(&_bindAddr, 0, sizeof(struct sockaddr));
-
-    _bindAddrSet = false;
-
-	stHttpRequest.encode(_sendBuffer);
-
-    vector<string> v = TC_Common::sepstr<string>(addr, ":");
-
-    if (v.size() < 2)
-    {
-        stHttpRequest.getHostPort(_sHost, _iPort);    
-    }
-    else
-    {
-        _sHost = v[0];
-        _iPort = TC_Common::strto<uint32_t>(v[1]);    
-    }
-}
-
-TC_HttpAsync::AsyncRequest::~AsyncRequest()
-{
-    doClose();
-}
-
-void TC_HttpAsync::AsyncRequest::doClose()
-{
-    if (_pHttpAsync) _pHttpAsync->assertThreadId();
-
-    if (_fd.isValid())
-    {
-        try { if (_callbackPtr) _callbackPtr->onClose(); } catch (...) {}
-        if (_pHttpAsync)
-        {
-            _pHttpAsync->delConnection(_fd.getfd());
-            _pHttpAsync->erase(_iUniqId);
-        }
-        _fd.close();
-    }
-}
-
-void TC_HttpAsync::AsyncRequest::setBindAddr(const struct sockaddr* addr)
-{
-    memcpy(&_bindAddr, addr, sizeof(struct sockaddr));
-
-    _bindAddrSet = true;
-}
-
-void TC_HttpAsync::AsyncRequest::doConnect()
-{
-    if (_pHttpAsync) 
-        _pHttpAsync->assertThreadId();
-    _fd.createSocket();
-    _fd.setblock();
-    _fd.setTcpNoDelay();
-    _fd.setNoCloseWait();
-
-    try
-    {
-        if (_bindAddrSet)
-        {
-            _fd.bind(&_bindAddr, sizeof(_bindAddr));
-        }
-
-        int ret = 0;
-
-        if (_bUseProxy)
-            ret = _fd.connectNoThrow(_pHttpAsync->getProxyAddr());
-        else
-            ret = _fd.connectNoThrow(_sHost, _iPort);
-
-        if (ret < 0 && !TC_Socket::isInProgress())
-        {
-            doException(RequestCallback::Failed_Connect, getError("connect server error."));
-            return;
-        }
-
-        _pHttpAsync->addConnection(_fd.getfd(), _iUniqId, EPOLLIN | EPOLLOUT);
-
-    }
-    catch (exception &ex)
-    {
-        doException(RequestCallback::Failed_Connect, ex.what());
-    }
-}
-
-
-int TC_HttpAsync::AsyncRequest::recv(void* buf, uint32_t len, uint32_t flag)
-{
-    int ret = ::recv(_fd.getfd(), (char*)buf, len, flag);
-
-    if (ret == 0)
-    {
-        return 0;
-    }
-    else if (ret < 0 && TC_Socket::isPending())
-    {
-        return -1;
-    }
-    else if (ret < 0)
-    {
-        //其他网络异常
-        return -2;
-    }
-
-    //正常发送的数据
-    return ret;
-}
-
-int TC_HttpAsync::AsyncRequest::send(const void* buf, uint32_t len, uint32_t flag)
-{
-    int ret = ::send(_fd.getfd(), (char*)buf, len, flag);
-
-    if (ret < 0 && TC_Socket::isPending())
-    {
-        return -1;
-    }
-    else if (ret < 0)
-    {
-        return -2;
-    }
-    return ret;
-}
-
-void TC_HttpAsync::AsyncRequest::timeout()
-{
-    if (_pHttpAsync) _pHttpAsync->assertThreadId();
-
-    if (hasConnected())
-        doException(RequestCallback::Failed_Timeout, "timeout error.");
-    else
-        doException(RequestCallback::Failed_ConnectTimeout, "connect timeout error.");
-
-}
-
-string TC_HttpAsync::AsyncRequest::getError(const string &sDefault) const
-{
-    int ret = TC_Exception::getSystemCode();
-    if(ret!= 0)
-    {
-        return sDefault + ", ret:" + TC_Common::tostr(ret) + ", msg:" + TC_Exception::parseError(ret);
-    }
-
-    return sDefault + ", ret:" + TC_Common::tostr(ret);
-}
-
-void TC_HttpAsync::AsyncRequest::doException(RequestCallback::FAILED_CODE ret, const string &e)
-{
-    doClose();
-
-    try { if (_callbackPtr) _callbackPtr->onFailed(ret, e); } catch (...) { }
-}
-
-void TC_HttpAsync::AsyncRequest::doRequest()
-{
-    if (_pHttpAsync) _pHttpAsync->assertThreadId();
-    if (!_fd.isValid()) return;
-
-    int ret = -1;
-
-    setConnected(true);
-
-    do
-    {
-        ret = -1;
-
-        if (!_sendBuffer.empty())
-        {
-        	auto data = _sendBuffer.getBufferPointer();
-            if ((ret = this->send(data.first, data.second, 0)) > 0)
-            {
-            	_sendBuffer.moveHeader(ret);
-            }
-        }
-    } while (ret > 0 && !_sendBuffer.empty());
-
-    //网络异常
-    if (ret == -2)
-    {
-        doException(RequestCallback::Failed_Net, getError("send error."));
-    }
-}
-
-void TC_HttpAsync::AsyncRequest::doReceive()
-{
-    if (_pHttpAsync) _pHttpAsync->assertThreadId();
-    if (!_fd.isValid()) return;
-
-    int recv = 0;
-
-    char buff[8192] = {0};
-
-    do
-    {
-        if ((recv = this->recv(buff, sizeof(buff), 0)) > 0)
-        {
-        	_recvBuffer.addBuffer(buff, recv);
-        }
-    }
-    while (recv > 0);
-
-    if (recv == -2)
-    {
-        doException(RequestCallback::Failed_Net, getError("recv error"));
-    }
-    else
-    {
-        //增量decode
-	    bool ret    = _stHttpResp.incrementDecode(_recvBuffer);
-
-	    //有头部数据了
-        if (_callbackPtr && !_stHttpResp.getHeaders().empty())
-        {
-            bool bContinue = _callbackPtr->onContinue(_stHttpResp);
-            if (!bContinue)
-            {
-                doException(RequestCallback::Failed_Interrupt, getError("receive interrupt"));
-                return;
-            }
-        }
-
-        //数据接收完毕
-        if (ret)
-        {
-            //只支持短连接, 数据收取完毕, 关闭链接
-            doClose();
-            try { if (_callbackPtr) _callbackPtr->onSucc(_stHttpResp); } catch (...) { }
-            return;
-        }
-        else
-        {
-            //服务器关闭了连接
-            bool bClose = (recv == 0);
-
-            if (bClose)
-            {
-                doClose();
-
-                try { if (_callbackPtr) _callbackPtr->onSucc(_stHttpResp); } catch (...) { }
-            }
-        }
-    }
-}
-
-void TC_HttpAsync::AsyncRequest::processNet(const epoll_event &ev)
-{
-    if (TC_Epoller::errorEvent(ev))
-    {
-        doException(RequestCallback::Failed_Net, getError("net error"));
-        return;
-    }
-
-    if (TC_Epoller::readEvent(ev))
-    {
-        doReceive();
-    }
-
-    if (TC_Epoller::writeEvent(ev))
-    {
-        doRequest();
-    }
-}
-
-void TC_HttpAsync::AsyncRequest::processNotify()
-{
-    //没有建立连接, 发起连接
-    if (!isValid())
-    {
-        doConnect();
-    }
-    else
-    {
-        doRequest();
-    }
-}
-
-///////////////////////////////////////////////////////////////////////////
-#define H64(x) (((uint64_t)x) << 32)
-
-TC_HttpAsync::TC_HttpAsync() : _terminate(false)
-{
-    memset(&_proxyAddr, 0, sizeof(struct sockaddr));
-    memset(&_bindAddr, 0, sizeof(struct sockaddr));
-
-    _bindAddrSet = false;
-
-    _data = new http_queue_type(10000);
-
-    _epoller.create(20480);
-
-    _notify.init(&_epoller);
-
-    uint64_t data = H64(_notify.notifyFd()) | 0;
-    _notify.add(data);
-}
-
-TC_HttpAsync::~TC_HttpAsync()
-{
-    terminate();
-
-    delete _data;
-
-    _notify.release();
-    _epoller.close();
-
-}
-
-void TC_HttpAsync::start()
-{
-    _tpool.init(1);
-    _tpool.start();
-
-    _tpool.exec(std::bind(&TC_HttpAsync::run, this));
-}
-
-void TC_HttpAsync::waitForAllDone(int millsecond)
-{
-    time_t now = TNOW;
-
-    while (_data->size() > 0)
-    {
-        if (millsecond < 0)
-        {
-            TC_ThreadLock::Lock lock(*this);
-            timedWait(100);
-            continue;
-        }
-
-        {
-            //等待100ms
-            TC_ThreadLock::Lock lock(*this);
-            timedWait(100);
-        }
-
-        if ((TNOW - now) >= (millsecond / 1000))
-            break;
-    }
-
-    terminate();
-}
-
-void TC_HttpAsync::erase(uint32_t uniqId)
-{
-    _data->erase(uniqId);
-
-    TC_ThreadLock::Lock lock(*this);
-    notify();
-}
-
-void TC_HttpAsync::terminate()
-{
-    _terminate = true;
-
-    _notify.notify();
-
-    _tpool.waitForAllDone();
-}
-
-void TC_HttpAsync::timeout(AsyncRequestPtr& ptr)
-{
-    ptr->timeout();
-}
-
-void TC_HttpAsync::doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, bool bUseProxy)
-{
-    AsyncRequest * req = new AsyncRequest(stHttpRequest, callbackPtr, bUseProxy);
-
-    if (_bindAddrSet)
-    {
-        req->setBindAddr(&_bindAddr);
-    }
-
-    uint32_t uniqId = _data->generateId();
-
-    req->setUniqId(uniqId);
-
-    req->setHttpAsync(this);
-
-    _data->push(req, uniqId);
-
-    {
-        std::lock_guard<std::mutex> lock(_mutex);
-        _events.push_back(H64(_notify.notifyFd()) | uniqId);
-    }
-    _notify.notify();
-}
-
-void TC_HttpAsync::doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const string &addr)
-{
-    AsyncRequest * req = new AsyncRequest(stHttpRequest, callbackPtr, addr);
-
-    if (_bindAddrSet)
-    {
-        req->setBindAddr(&_bindAddr);
-    }
-
-    uint32_t uniqId = _data->generateId();
-
-    req->setUniqId(uniqId);
-
-    req->setHttpAsync(this);
-
-    _data->push(req, uniqId);
-
-    {
-        std::lock_guard<std::mutex> lock(_mutex);
-        _events.push_back(H64(_notify.notifyFd()) | uniqId);
-    }
-    _notify.notify();
-}
-
-void TC_HttpAsync::addConnection(int fd, uint32_t uniqId, uint32_t events)
-{
-    uint64_t data = H64(fd) | uniqId;
-
-    _epoller.add(fd, data, events);
-}
-
-void TC_HttpAsync::delConnection(int fd)
-{
-    _epoller.del(fd, 0, 0);
-}
-
-int TC_HttpAsync::setBindAddr(const char* sBindAddr)
-{
-    memset(&_bindAddr, 0x00, sizeof(_bindAddr));
-
-    struct sockaddr_in* p = (struct sockaddr_in *)&_bindAddr;
-
-    try
-    {
-        TC_Socket::parseAddr(sBindAddr, p->sin_addr);
-    }
-    catch (exception &ex)
-    {
-        return -1;
-    }
-
-    p->sin_family = AF_INET;
-    p->sin_port   = htons(0);
-
-    _bindAddrSet  = true;
-
-    return 0;
-}
-
-int TC_HttpAsync::setProxyAddr(const char* sProxyAddr)
-{
-    vector<string> v = TC_Common::sepstr<string>(sProxyAddr, ":");
-
-    if (v.size() < 2)
-        return -1;
-
-    return setProxyAddr(v[0].c_str(), TC_Common::strto<uint16_t>(v[1]));
-}
-
-int TC_HttpAsync::setProxyAddr(const char* sHost, uint16_t iPort)
-{
-    memset(&_proxyAddr, 0x00, sizeof(_proxyAddr));
-
-    struct sockaddr_in *p = (struct sockaddr_in *)&_proxyAddr;
-
-    try
-    {
-        TC_Socket::parseAddr(sHost, p->sin_addr);
-    }
-    catch (exception &ex)
-    {
-        return -1;
-    }
-
-    p->sin_family = AF_INET;
-    p->sin_port   = htons(iPort);
-
-    return 0;
-}
-
-void TC_HttpAsync::setProxyAddr(const struct sockaddr* addr)
-{
-    memcpy(&_proxyAddr, addr, sizeof(struct sockaddr));
-}
-
-void TC_HttpAsync::run()
-{
-    _threadId = std::this_thread::get_id();
-
-    TC_TimeoutQueue<AsyncRequestPtr>::data_functor df(&TC_HttpAsync::timeout);
-
-    int64_t lastDealTimeout = 0;
-
-    while (!_terminate)
-    {
-        try
-        {
-            int64_t now = TNOWMS;
-            if (lastDealTimeout + 500 < now)
-            {
-                lastDealTimeout = now;
-                _data->timeout(df);
-            }
-
-            int num = _epoller.wait(100);
-
-            for (int i = 0; i < num; ++i)
-            {
-                epoll_event ev = _epoller.get(i);
-
-                if (_terminate)
-                    break;
-
-                uint32_t fd = TC_Epoller::getU32(ev, true);
-
-                if ((int)fd == _notify.notifyFd())
-                {
-                    deque<uint64_t> events;
-
-                    {
-                        std::lock_guard<std::mutex> lock(_mutex);
-
-                        _events.swap(events);
-                    }
-
-                    for(auto data : events)
-                    {                    
-                        uint32_t uniqId = (uint32_t)data;
-
-                        AsyncRequestPtr ptr = _data->getAndRefresh(uniqId);
-                        if (!ptr) 
-                            continue;
-
-                        ptr->processNotify();
-                    }                                          
-                    
-                }
-                else
-                {
-
-                    uint32_t uniqId = TC_Epoller::getU32(ev, false);
-
-                    AsyncRequestPtr ptr = _data->getAndRefresh(uniqId);
-                    if (!ptr) 
-                        continue;
- 
-                    ptr->processNet(ev);
-                }
-            }
-        }
-        catch (exception &ex)
-        {
-            cerr << "[TC_HttpAsync::run] error:" << ex.what() << endl;
-        }
-    }
-}
-
-}
-
-
+#include "util/tc_http_async.h"
+#include "util/tc_common.h"
+#include "util/tc_timeprovider.h"
+
+namespace tars
+{
+
+// Destructor: nothing to do explicitly — the transceiver, buffer and
+// callback members release their own resources (RAII).
+TC_HttpAsync::AsyncRequest::~AsyncRequest()
+{
+}
+
+// Wire up one async request: remember the user callback, pick a TCP or SSL
+// transceiver for the endpoint, encode the HTTP request into the send
+// buffer, and register the transceiver lifecycle callbacks.
+void TC_HttpAsync::AsyncRequest::initialize(TC_Epoller *epoller, const TC_Endpoint &ep, TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr)
+{
+    _callbackPtr = callbackPtr;
+
+#if TAF_SSL
+    if(ep.isSSL())
+    {
+	    _trans.reset(new TC_SSLTransceiver(epoller, ep));
+    }
+    else {
+	    _trans.reset(new TC_TCPTransceiver(epoller, ep));
+    }
+#else
+	_trans.reset(new TC_TCPTransceiver(epoller, ep));
+#endif
+    _buff = std::make_shared<TC_NetWorkBuffer::Buffer>();
+
+    stHttpRequest.encode(_buff);
+
+    // callbacks: create / close / connect / writable / response-parser / ssl-ctx
+    _trans->initializeClient(std::bind(&AsyncRequest::onCreateCallback, this, std::placeholders::_1),
+                             std::bind(&AsyncRequest::onCloseCallback, this, std::placeholders::_1),
+                             std::bind(&AsyncRequest::onConnectCallback, this, std::placeholders::_1),
+                             std::bind(&AsyncRequest::onRequestCallback, this, std::placeholders::_1),
+                             std::bind(&AsyncRequest::onParserCallback, this, std::placeholders::_1, std::placeholders::_2),
+                             std::bind(&AsyncRequest::onOpensslCallback, this, std::placeholders::_1)
+    );
+}
+
+
+// Transceiver "create" hook: register this request's fd and events with the
+// owning TC_HttpAsync epoller.  This path uses no per-connection proxy, so
+// an empty proxy-info pointer is returned.
+shared_ptr<TC_ProxyInfo> TC_HttpAsync::AsyncRequest::onCreateCallback(TC_Transceiver* trans)
+{
+    _pHttpAsync->addFd(this);
+
+    // nullptr (not NULL) is the idiomatic empty value for a shared_ptr
+    return nullptr;
+}
+
+// Supply the SSL object for an SSL transceiver: reuse the context configured
+// on the owning TC_HttpAsync when present, otherwise build a default client
+// context.  Returns an empty pointer for plain TCP or non-SSL builds.
+std::shared_ptr<TC_OpenSSL> TC_HttpAsync::AsyncRequest::onOpensslCallback(TC_Transceiver* trans)
+{
+#if TAF_SSL
+	if(trans->isSSL()) {
+		if (!_pHttpAsync->getCtx()) {
+			// no global ctx configured: create a default (no cert, no verify)
+			_ctx = TC_OpenSSL::newCtx("", "", "", false, "");
+		}else
+		{
+			_ctx = _pHttpAsync->getCtx();
+		}
+		return TC_OpenSSL::newSSL(_ctx);
+	}
+	return NULL;
+#else
+	return NULL;
+#endif
+}
+
+// Connection-closed hook: notify the user callback (any exception it throws
+// is swallowed) and schedule this request's removal from the pending queue.
+void TC_HttpAsync::AsyncRequest::onCloseCallback(TC_Transceiver* trans)
+{
+//	LOG_CONSOLE_DEBUG << endl;
+
+    try { if (_callbackPtr) _callbackPtr->onClose(); } catch (...) {}
+
+    if (_pHttpAsync)
+    {
+        _pHttpAsync->erase(_iUniqId);
+    }
+}
+
+// Connection-established hook: intentionally empty — the request bytes are
+// flushed by onRequestCallback when the socket becomes writable.
+void TC_HttpAsync::AsyncRequest::onConnectCallback(TC_Transceiver* trans)
+{
+//	LOG_CONSOLE_DEBUG << endl;
+}
+
+// Writable hook: push the remaining encoded request bytes to the peer and
+// report Failed_Request to the user callback on a hard send error.
+void TC_HttpAsync::AsyncRequest::onRequestCallback(TC_Transceiver* trans)
+{
+//	LOG_CONSOLE_DEBUG << _buff->length() << endl;
+
+    if(!_buff->empty())
+    {
+        auto iRet = trans->sendRequest(_buff);
+
+	    if (iRet == TC_Transceiver::eRetError)
+        {
+            doException(RequestCallback::Failed_Request, getError("request error"));
+        }
+    }
+}
+
+// Incremental response parser driven by the transceiver's receive path.
+// Returns PACKET_LESS while more bytes are needed, PACKET_FULL_CLOSE once a
+// complete response has been delivered (only short connections supported),
+// and PACKET_ERR when the user interrupts via onContinue().
+TC_NetWorkBuffer::PACKET_TYPE TC_HttpAsync::AsyncRequest::onParserCallback(TC_NetWorkBuffer& buff, TC_Transceiver* trans)
+{
+    if(buff.empty())
+    {
+        return TC_NetWorkBuffer::PACKET_LESS;
+    }
+
+    //incremental decode of the response
+    bool ret    = _stHttpResp.incrementDecode(buff);
+
+    //header data is available: give the user a chance to abort
+    if (_callbackPtr && !_stHttpResp.getHeaders().empty())
+    {
+        bool bContinue = _callbackPtr->onContinue(_stHttpResp);
+        if (!bContinue)
+        {
+            doException(RequestCallback::Failed_Interrupt, getError("receive interrupt"));
+            return TC_NetWorkBuffer::PACKET_ERR;
+        }
+    }
+
+    //response fully received
+    if (ret)
+    {
+        try { if (_callbackPtr) _callbackPtr->onSucc(_stHttpResp); } catch (...) { }
+
+        return TC_NetWorkBuffer::PACKET_FULL_CLOSE;
+    }
+
+    return TC_NetWorkBuffer::PACKET_LESS;
+}
+
+// Forward the local bind address to the transceiver (takes effect on connect).
+void TC_HttpAsync::AsyncRequest::setBindAddr(const TC_Socket::addr_type &bindAddr)
+{
+    _trans->setBindAddr(bindAddr);
+}
+
+// Called by the owner when this request has waited too long: report a
+// connect-timeout if the connection was never established, otherwise a
+// plain request timeout.
+void TC_HttpAsync::AsyncRequest::timeout()
+{
+    if (_pHttpAsync)
+    {
+        _pHttpAsync->assertThreadId();
+    }
+
+    const bool connected = hasConnected();
+
+    doException(connected ? RequestCallback::Failed_Timeout : RequestCallback::Failed_ConnectTimeout,
+                connected ? "timeout error." : "connect timeout error.");
+}
+
+string TC_HttpAsync::AsyncRequest::getError(const string &sDefault) const
+{
+    int ret = TC_Exception::getSystemCode();
+    if(ret!= 0)
+    {
+        return sDefault + ", ret:" + TC_Common::tostr(ret) + ", msg:" + TC_Exception::parseError(ret);
+    }
+
+    return sDefault + ", ret:" + TC_Common::tostr(ret);
+}
+
+// Report a failure to the user callback; swallow anything the callback
+// itself throws so the event loop keeps running.
+void TC_HttpAsync::AsyncRequest::doException(RequestCallback::FAILED_CODE ret, const string &e)
+{
+    try { if (_callbackPtr) _callbackPtr->onFailed(ret, e); } catch (...) { }
+}
+
+///////////////////////////////////////////////////////////////////////////
+#define H64(x) (((uint64_t)x) << 32)
+
+// Build the async client: the pending-request timeout queue (10000 —
+// presumably milliseconds; TODO confirm against http_queue_type) and the
+// private epoller sized for 10240 fds.
+TC_HttpAsync::TC_HttpAsync() //: _terminate(false)
+{
+    _data = new http_queue_type(10000);
+
+    _epoller.create(10240);
+}
+
+// Stop the event loop and join the worker thread before releasing the queue.
+TC_HttpAsync::~TC_HttpAsync()
+{
+    terminate();
+
+    delete _data;
+}
+
+// Launch the single worker thread that drives the epoller loop in run().
+void TC_HttpAsync::start()
+{
+    _tpool.init(1);
+    _tpool.start();
+
+    _tpool.exec(std::bind(&TC_HttpAsync::run, this));
+}
+
+// Wait for all pending requests to finish (up to millsecond ms, or forever
+// when millsecond is negative), then terminate the event loop and worker.
+void TC_HttpAsync::waitForAllDone(int millsecond)
+{
+    int64_t start = TNOWMS;
+
+    while (!_data->empty())
+    {
+        if (millsecond < 0)
+        {
+            TC_ThreadLock::Lock lock(*this);
+            timedWait(100);
+            continue;
+        }
+
+        {
+            //wait 100ms
+            TC_ThreadLock::Lock lock(*this);
+            timedWait(100);
+        }
+
+        // compare elapsed time in milliseconds: the previous check
+        // ((TNOW - now) >= millsecond / 1000) truncated to whole seconds,
+        // so any timeout below 1000ms returned after a single 100ms wait
+        if ((TNOWMS - start) >= millsecond)
+            break;
+    }
+
+    terminate();
+}
+
+// Queue a request id for removal; ids are reaped in the epoller idle
+// callback (see run()).  NOTE(review): _erases is unlocked — this assumes
+// erase() is only ever called from the epoller thread; confirm.
+void TC_HttpAsync::erase(uint32_t uniqId)
+{
+    _erases.push_back(uniqId);
+}
+
+// Ask the epoller loop to exit and join the worker thread.
+void TC_HttpAsync::terminate()
+{
+    _epoller.terminate();
+
+    _tpool.waitForAllDone();
+}
+
+// Timeout functor applied by the request queue: only requests whose
+// connection is still live receive the timeout notification.
+void TC_HttpAsync::timeout(AsyncRequestPtr& ptr)
+{
+    if (!ptr->isValid())
+    {
+        return;
+    }
+
+    ptr->timeout();
+}
+
+// Submit one asynchronous HTTP request to the given endpoint.  The request
+// is queued under a fresh uniqId and the epoller is notified; the actual
+// connect is started by run()'s idle callback on the epoller thread.
+void TC_HttpAsync::doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const TC_Endpoint &ep)
+{
+    AsyncRequestPtr req = new AsyncRequest();
+
+    req->initialize(&_epoller, ep, stHttpRequest, callbackPtr);
+
+    if (_bindAddr.first)
+    {
+        req->setBindAddr(_bindAddr);
+    }
+
+    uint32_t uniqId = _data->generateId();
+
+    req->setUniqId(uniqId);
+
+    req->setHttpAsync(this);
+
+    _data->push(req, uniqId);
+
+    {
+        std::lock_guard<std::mutex> lock(_mutex);
+        _events.push_back(uniqId);
+    }
+
+    _epoller.notify();
+}
+
+// Route the request through the configured proxy when requested, otherwise
+// connect directly to the host:port from the request; an "https" scheme
+// selects an SSL endpoint.
+void TC_HttpAsync::doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, bool bUseProxy)
+{
+    TC_Endpoint ep;
+
+    if(bUseProxy && _proxyEp)
+    {
+        ep = *this->_proxyEp.get();
+    }
+    else
+    {
+        string sHost;
+        uint32_t iPort;
+        stHttpRequest.getHostPort(sHost, iPort);
+        ep.setHost(sHost);
+        ep.setPort(iPort);
+
+	    if(TC_Port::strcmp(stHttpRequest.getURL().getScheme().c_str(), "https") == 0) {
+		    ep.setType(TC_Endpoint::SSL);
+	    }
+    }
+
+    doAsyncRequest(stHttpRequest, callbackPtr, ep);
+}
+
+// Submit a request to an explicit "host:port" address; throws on malformed
+// input.  NOTE(review): splitting on ':' assumes a hostname/IPv4 address —
+// bare IPv6 literals are not handled; confirm callers.
+void TC_HttpAsync::doAsyncRequest(TC_HttpRequest &stHttpRequest, RequestCallbackPtr &callbackPtr, const string &addr)
+{
+    vector<string> v = TC_Common::sepstr<string>(addr, ":");
+
+    if (v.size() < 2)
+    {
+        throw TC_HttpAsync_Exception("[TC_HttpAsync::doAsyncRequest] addr is error:" + addr);
+    }
+
+    TC_Endpoint ep;
+
+    ep.setHost(v[0]);
+    ep.setPort(TC_Common::strto<uint16_t>(v[1]));
+
+	if(TC_Port::strcmp(stHttpRequest.getURL().getScheme().c_str(), "https") == 0) {
+		ep.setType(TC_Endpoint::SSL);
+	}
+
+    doAsyncRequest(stHttpRequest, callbackPtr, ep);
+}
+
+// Set the local address that future connections will bind() to.
+void TC_HttpAsync::setBindAddr(const char* sBindAddr)
+{
+    _bindAddr = TC_Socket::createSockAddr(sBindAddr);
+}
+
+// Remember the proxy endpoint used by doAsyncRequest(..., bUseProxy = true).
+void TC_HttpAsync::setProxyAddr(const TC_Endpoint &ep)
+{
+    _proxyEp.reset(new TC_Endpoint(ep));
+}
+
+// Parse "host:port" and store it as the proxy endpoint; throws on a
+// malformed address.
+void TC_HttpAsync::setProxyAddr(const char* sProxyAddr)
+{
+    vector<string> v = TC_Common::sepstr<string>(sProxyAddr, ":");
+
+    if (v.size() < 2)
+    {
+        throw TC_HttpAsync_Exception("[TC_HttpAsync::setProxyAddr] addr is error:" + string(sProxyAddr));
+    }
+
+    TC_Endpoint ep;
+    ep.setHost(v[0]);
+    ep.setPort(TC_Common::strto<uint16_t>(v[1]));
+
+    return setProxyAddr(ep);
+}
+
+// Convenience overload: build an endpoint from host + port and store it as
+// the proxy address.
+void TC_HttpAsync::setProxyAddr(const char* sHost, uint16_t iPort)
+{
+    TC_Endpoint proxy;
+
+    proxy.setHost(sHost);
+    proxy.setPort(iPort);
+
+    setProxyAddr(proxy);
+}
+
+// EPOLLERR handler: surface a network failure to the request's callback and
+// close its transceiver.  Returning false stops further event processing
+// for this fd.
+bool TC_HttpAsync::handleCloseImp(const shared_ptr<TC_Epoller::EpollInfo> &data)
+{
+    AsyncRequest* asyncRequest = (AsyncRequest*)data->cookie();
+
+    asyncRequest->doException(RequestCallback::Failed_Net, asyncRequest->getError("epoller error"));
+
+	asyncRequest->trans()->close();
+
+    return false;
+}
+
+// EPOLLIN handler: drive the transceiver's receive/parse path; any
+// exception is reported as Failed_Net and stops event processing.
+bool TC_HttpAsync::handleInputImp(const shared_ptr<TC_Epoller::EpollInfo> &data)
+{
+    AsyncRequest* asyncRequest = (AsyncRequest*)data->cookie();
+
+    try
+    {
+        asyncRequest->trans()->doResponse();
+    }
+    catch(const std::exception& e)
+    {
+        asyncRequest->doException(RequestCallback::Failed_Net, e.what());
+        return false;
+    }
+
+    return true;
+}
+
+// EPOLLOUT handler: flush pending request bytes through the transceiver;
+// any exception is reported as Failed_Net and stops event processing.
+bool TC_HttpAsync::handleOutputImp(const shared_ptr<TC_Epoller::EpollInfo> &data)
+{
+    AsyncRequest* asyncRequest = (AsyncRequest*)data->cookie();
+
+    try
+    {
+        asyncRequest->trans()->doRequest();
+    }
+    catch(const std::exception& e)
+    {
+        asyncRequest->doException(RequestCallback::Failed_Net, e.what());
+        return false;
+    }
+
+    return true;
+}
+
+// Register a request's socket with the epoller: attach the request as the
+// event cookie and hook read/write/error events to the handlers above.
+void TC_HttpAsync::addFd(AsyncRequest* asyncRequest)
+{
+    shared_ptr<TC_Epoller::EpollInfo> epollInfo = asyncRequest->trans()->getEpollInfo();
+
+    epollInfo->cookie(asyncRequest);
+
+	map<uint32_t, TC_Epoller::EpollInfo::EVENT_CALLBACK> callbacks;
+
+	callbacks[EPOLLIN] = std::bind(&TC_HttpAsync::handleInputImp, this, std::placeholders::_1);
+	callbacks[EPOLLOUT] = std::bind(&TC_HttpAsync::handleOutputImp, this, std::placeholders::_1);
+	callbacks[EPOLLERR] = std::bind(&TC_HttpAsync::handleCloseImp, this, std::placeholders::_1);
+
+	epollInfo->registerCallback(callbacks, EPOLLIN|EPOLLOUT);
+}
+
+// Worker-thread entry point: owns the epoller event loop.
+void TC_HttpAsync::run()
+{
+    _threadId = std::this_thread::get_id();
+
+    TC_TimeoutQueue<AsyncRequestPtr>::data_functor df(&TC_HttpAsync::timeout);
+
+    // every 100ms, expire overdue requests via TC_HttpAsync::timeout()
+    _epoller.postRepeated(100, false, [&](){ _data->timeout(df); });
+
+    // idle hook, run each loop iteration: start connects for newly queued
+    // requests, then reap ids scheduled by erase()
+    _epoller.idle([&]{
+        deque<uint64_t> events;
+
+        {
+            std::lock_guard<std::mutex> lock(_mutex);
+            _events.swap(events);
+        }
+
+        for(auto data : events)
+        {
+            uint32_t uniqId = (uint32_t)data;
+
+            AsyncRequestPtr ptr = _data->getAndRefresh(uniqId);
+            if (!ptr)
+            {
+                continue;
+            }
+
+            try
+            {
+                ptr->trans()->connect();
+            }
+            catch(exception &ex)
+            {
+                ptr->doException(RequestCallback::Failed_Connect, ex.what());
+            }
+        }
+
+        for(auto it : _erases)
+        {
+            _data->erase(it);
+        }
+        _erases.clear();
+    });
+
+    _epoller.loop();
+}
+
+}
+
+

+ 2 - 0
util/src/tc_logger.cpp

@@ -21,6 +21,8 @@
 namespace tars
 {
 
+TC_RollLogger __global_logger_debug__;
+
 bool TC_LoggerRoll::_bDyeingFlag = false;
 TC_SpinLock TC_LoggerRoll::_mutexDyeing;
 unordered_map<size_t, string>  TC_LoggerRoll::_mapThreadID;

+ 399 - 127
util/src/tc_network_buffer.cpp

@@ -1,8 +1,22 @@
-//
-// Created by jarod on 2019-03-01.
-//
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
 #include "util/tc_network_buffer.h"
 #include "util/tc_http.h"
+#include "util/tc_logger.h"
 #include <cmath>
 
 using namespace std;
@@ -10,6 +24,159 @@ using namespace std;
 namespace tars
 {
 
+// (Re)allocate the raw storage so at least len bytes are available, and
+// reset the read/write cursors.  Any existing content is discarded.
+void TC_NetWorkBuffer::Buffer::alloc(size_t len)
+{
+	_readIdx		= 0;
+	_writeIdx		= 0;
+
+	//first allocation: adopt the requested size as the capacity
+	if(!_buffer)
+	{
+		_capacity = len;
+	}
+
+	//current capacity too small: grow to the requested size
+	if(_capacity < len)
+	{
+		_capacity = len;
+		if(_buffer)
+		{
+			// storage came from new[]; plain `delete` here is undefined
+			// behaviour — it must be delete[]
+			delete[] _buffer;
+			_buffer = NULL;
+		}
+	}
+
+	if(!_buffer)
+	{
+		_buffer = new char[_capacity];
+	}
+}
+
+// Slide the unread bytes down to the front of the storage so all free
+// space is contiguous at the tail.  Capacity is unchanged.
+void TC_NetWorkBuffer::Buffer::compact()
+{
+	//no storage yet: allocate it
+	if(_buffer == NULL)
+	{
+		alloc(_capacity);
+	}
+
+	if(_readIdx != 0)
+	{
+		assert(_buffer);
+
+		//move the pending [readIdx, writeIdx) bytes to offset 0
+		memmove((void *)_buffer, buffer(), length());
+
+		// order matters: length() must be evaluated before _readIdx is reset
+		_writeIdx = length();
+		_readIdx = 0;
+	}
+
+}
+
+//grow the storage to at least len bytes, keeping the pending (unread) data
+//and dropping the already-consumed prefix
+void TC_NetWorkBuffer::Buffer::expansion(size_t len)
+{
+	if (_capacity < len)
+	{
+		//allocate new storage and copy the pending data across
+		char *p = new char[len];
+
+		if(_buffer)
+		{
+			if(length() > 0)
+			{
+				//there was pending data: copy it into the new storage
+				memcpy((void*)p, buffer(), length());
+
+				// note: length() is read before _readIdx is reset
+				_writeIdx = length();
+				_readIdx = 0;
+			}
+			//release the old storage
+			delete[]_buffer;
+		}
+
+		_buffer = p;
+		_capacity 	= len;
+
+	}
+	else
+	{
+		//capacity already sufficient: just compact the data
+		compact();
+	}
+}
+
+// Append len bytes, reusing existing storage when possible:
+//   1) enough free tail space        -> copy in place
+//   2) enough total capacity         -> compact, then copy
+//   3) otherwise                     -> allocate an exact-size new buffer
+void TC_NetWorkBuffer::Buffer::addBuffer(const char *buff, size_t len)
+{
+	//no storage yet: allocate it first
+	if(_buffer == NULL)
+	{
+		alloc(len);
+	}
+
+	//enough free space at the tail: copy directly
+	if(left() >= len)
+	{
+		memcpy((void *)free(), buff, len);
+		_writeIdx += len;
+	}
+	else if(_capacity >= len + length())
+	{
+		//pending + new data fit within the capacity: compact in place
+		size_t curLen = length();
+
+		memmove((void *)_buffer, buffer(), curLen);
+		memcpy((void*)(_buffer + curLen), buff, len);
+
+		_readIdx = 0;
+		_writeIdx = curLen + len;
+	}
+	else
+	{
+		//capacity exhausted: allocate a brand-new buffer.
+		//BUGFIX: length() must be captured BEFORE the indices are reset —
+		//the previous code evaluated `length() + len` after `_readIdx = 0`,
+		//overstating the stored size by the consumed prefix and setting
+		//_writeIdx/_capacity past the actual allocation.
+		size_t curLen = length();
+
+		char * p = new char[curLen + len];
+		memcpy(p, buffer(), curLen);
+		memcpy(p + curLen, buff, len);
+		if(_buffer)
+		{
+			delete[] _buffer;
+			_buffer = NULL;
+		}
+
+		_buffer 	= p;
+		_readIdx	= 0;
+		_writeIdx 	= curLen + len;
+		_capacity 	= _writeIdx;
+	}
+}
+
+// Take ownership of an externally allocated buffer and mark all len bytes
+// as readable.  The pointer must come from new[] — it is later released
+// with delete[] (see alloc/addBuffer); TODO confirm all callers honour this.
+void TC_NetWorkBuffer::Buffer::replaceBuffer(const char *buff, size_t len)
+{
+	if(_buffer)
+	{
+		delete [] _buffer;
+	}
+
+	_buffer  = buff;
+	_readIdx = 0;
+	_writeIdx= len;
+	_capacity= len;
+}
+
+// Discard the current content and make the buffer hold an exact copy of
+// buff[0..len), growing the storage first when missing or too small.
+void TC_NetWorkBuffer::Buffer::setBuffer(const char *buff, size_t len)
+{
+	bool needAlloc = (_buffer == NULL) || (_capacity <= len);
+
+	if(needAlloc)
+	{
+		alloc(len);
+	}
+
+	_readIdx  = 0;
+	_writeIdx = 0;
+
+	memcpy((void *)buffer(), buff, len);
+
+	_writeIdx = len;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
 void TC_NetWorkBuffer::addBuffer(const shared_ptr<TC_NetWorkBuffer::Buffer> & buff)
 {
 	if(buff->empty()) return;
@@ -20,27 +187,121 @@ void TC_NetWorkBuffer::addBuffer(const shared_ptr<TC_NetWorkBuffer::Buffer> & bu
 
 void TC_NetWorkBuffer::addBuffer(const vector<char>& buff)
 {
-	if(buff.empty()) return;
+	addBuffer(buff.data(), buff.size());
+}
+
+void TC_NetWorkBuffer::addBuffer(const std::string& buff)
+{
+	addBuffer(buff.c_str(), buff.size());
+}
+
+void TC_NetWorkBuffer::addBuffer(const char* buff, size_t length)
+{
+	if(buff == NULL || length == 0) return;
+
+	size_t capacity = length;
+	if(capacity < 1024)
+	{
+		capacity = 1024;
+	}
+	//多预留一些空间
+	auto data = getOrCreateBuffer(capacity, capacity * 1.5);
+
+	assert(data->left() >= length);
 
-    _bufferList.push_back(std::make_shared<Buffer>(buff));
+	memcpy((void*)data->free(), buff, length);
 
-    _length += buff.size();
+	data->addWriteIdx(length);
+
+	_length += length;
 }
 
-void TC_NetWorkBuffer::addBuffer(const std::string& buff)
+shared_ptr<TC_NetWorkBuffer::Buffer> TC_NetWorkBuffer::getOrCreateBuffer(size_t minCapacity, size_t maxCapacity)
 {
-	if(buff.empty()) return;
+	assert(minCapacity <= maxCapacity );
 
-	_bufferList.push_back(std::make_shared<Buffer>(buff.c_str(), buff.size()));
+	if(_bufferList.empty())
+	{
+		if(!_defaultBuff)
+		{
+			_defaultBuff = std::make_shared<Buffer>();
+			_defaultBuff->alloc(maxCapacity);
+		}
+		else
+		{
+//			assert(_defaultBuff->length() == 0);
+			_defaultBuff->clear();
+			_defaultBuff->expansion(maxCapacity);
+
+		}
+
+		_bufferList.push_back(_defaultBuff);
+	}
+	else
+	{
+		auto buff = _bufferList.back();
+		if(buff->left() < minCapacity)
+		{
+			//剩余空间太小了, 检查看看是否容量够, 如果够, compact一下
+			if(buff->capacity() - buff->length() >= minCapacity && buff->length() * 3 < buff->capacity())
+			{
+				buff->compact();
+			}
+			else
+			{
+				buff = std::make_shared<Buffer>();
+				buff->alloc(maxCapacity);
+				_bufferList.push_back(buff);
+			}
+		}
+	}
 
-	_length += buff.size();
+	assert(!_bufferList.empty());
+	
+	return _bufferList.back();
 }
 
-void TC_NetWorkBuffer::addBuffer(const char* buff, size_t length)
+void TC_NetWorkBuffer::addLength(size_t length)
 {
-	if(buff == NULL || length == 0) return;
+	this->_length += length;
+}
 
-	addBuffer(vector<char>(buff, buff + length));
+// Decrease the cached total readable length after data has been consumed.
+void TC_NetWorkBuffer::subLength(size_t length)
+{
+	assert(this->_length >= length);
+	
+	this->_length -= length;
+}
+
+// Drop fully-consumed buffers from the head of the chain and recompute the
+// cached total readable length from what remains.
+void TC_NetWorkBuffer::compute()
+{
+	_length = 0;
+
+	//pop exhausted buffers from the front; stop at the first non-empty one
+	while (!_bufferList.empty())
+	{
+		if((*_bufferList.begin())->empty())
+		{
+			_bufferList.erase(_bufferList.begin());
+		}
+		else
+		{
+			break;
+		}
+	}
+
+	//const ref: avoid copying (atomic ref-count churn) each shared_ptr
+	for (const auto &it : _bufferList)
+	{
+		_length += it->length();
+	}
+}
+
+// Return the first (oldest) buffer in the chain, or an empty pointer when
+// the chain is empty.
+shared_ptr<TC_NetWorkBuffer::Buffer> TC_NetWorkBuffer::getBuffer()
+{
+	return _bufferList.empty() ? nullptr : _bufferList.front();
+}
 
 TC_NetWorkBuffer::buffer_iterator TC_NetWorkBuffer::begin() const
@@ -60,52 +321,60 @@ TC_NetWorkBuffer::buffer_iterator TC_NetWorkBuffer::find(const char *str, size_t
 
 void TC_NetWorkBuffer::clearBuffers()
 {
-    _bufferList.clear();
-    _length = 0;
+	_bufferList.clear();
+	_length = 0;
 }
 
 bool TC_NetWorkBuffer::empty() const
 {
-    return _length == 0;
+//	if(_length == 0 && !_bufferList.empty())
+//	{
+//		assert(_bufferList.empty());
+//	}
+	return _length == 0;
 }
 
 size_t TC_NetWorkBuffer::getBufferLength() const
 {
-    return _length;
+	return _length;
 }
 
 pair<const char*, size_t> TC_NetWorkBuffer::getBufferPointer() const
 {
-    if(empty())
-    {
-        return make_pair((const char*)NULL, 0);
-    }
+	if(empty())
+	{
+		return make_pair((const char*)NULL, 0);
+	}
 
-    auto it = _bufferList.begin();
+	auto it = _bufferList.begin();
 
-    return make_pair((*it)->buffer(), (*it)->length());
+	return make_pair((*it)->buffer(), (*it)->length());
 }
 
 const char * TC_NetWorkBuffer::mergeBuffers()
 {
-    //merge to one buffer
-    if(_bufferList.size() > 1)
-    {
-	    std::list<std::shared_ptr<Buffer>> bufferList;
+	//merge to one buffer
+	if(_bufferList.size() > 1)
+	{
+		std::shared_ptr<Buffer> buff = std::make_shared<Buffer>();
 
-	    bufferList.push_back(std::make_shared<Buffer>(getBuffers()));
+		getBuffers(buff);
 
-        _bufferList.swap(bufferList);
-    }
+		std::list<std::shared_ptr<Buffer>> bufferList;
 
-    assert(_bufferList.size() <= 1);
+		bufferList.push_back(buff);
 
-    if(!_bufferList.empty())
-    {
-    	return (*_bufferList.begin())->buffer();
-    }
+		_bufferList.swap(bufferList);
+	}
+
+	assert(_bufferList.size() <= 1);
 
-    return NULL;
+	if(!_bufferList.empty())
+	{
+		return (*_bufferList.begin())->buffer();
+	}
+
+	return NULL;
 }
 
 size_t TC_NetWorkBuffer::getBuffers(char *buffer, size_t length) const
@@ -144,177 +413,180 @@ string TC_NetWorkBuffer::getBuffersString() const
 
 vector<char> TC_NetWorkBuffer::getBuffers() const
 {
-    vector<char> buffer;
+	vector<char> buffer;
 
-    buffer.resize(_length);
+	buffer.resize(_length);
 
 	getBuffers(&buffer[0], _length);
 
 	return buffer;
 }
 
+void TC_NetWorkBuffer::getBuffers(shared_ptr<Buffer> &buff) const
+{
+	buff->alloc(_length);
+
+	getBuffers(buff->buffer(), _length);
+
+	buff->addWriteIdx(_length);
+}
+
 bool TC_NetWorkBuffer::getHeader(size_t len, std::string &buffer) const
 {
-    if(getBufferLength() < len)
-        return false;
+	if(getBufferLength() < len)
+		return false;
 
-    buffer.clear();
+	buffer.clear();
 
-    if(len == 0)
-    {
-        return true;
-    }
+	if(len == 0)
+	{
+		return true;
+	}
 
-    buffer.resize(len);
+	buffer.resize(len);
 
 	getBuffers(&buffer[0], len);
 
-    return true;
+	return true;
 }
 
 bool TC_NetWorkBuffer::getHeader(size_t len, std::vector<char> &buffer) const
 {
-    if(getBufferLength() < len)
-        return false;
+	if(getBufferLength() < len)
+		return false;
 
-    buffer.clear();
+	buffer.clear();
 
-    if(len == 0)
-    {
-        return true;
-    }
+	if(len == 0)
+	{
+		return true;
+	}
 
-    buffer.resize(len);
+	buffer.resize(len);
 
 	getBuffers(&buffer[0], len);
 
-    return true;
+	return true;
 }
 
 bool TC_NetWorkBuffer::moveHeader(size_t len)
 {
-    if(getBufferLength() < len)
-        return false;
+	if(getBufferLength() < len)
+		return false;
 
-    if(len == 0)
-        return true;
+	if(len == 0)
+		return true;
 
-    auto it = _bufferList.begin();
+	auto it = _bufferList.begin();
 
 //    assert(it->size() >= _pos);
 
-    size_t left = (*it)->length();
+	size_t left = (*it)->length();
 
-    if(left > len)
-    {
-	    (*it)->add(len);
-        _length -= len;
-    }
-    else if(left == len)
-    {
-        _length -= len;
-        _bufferList.erase(it);
-    }
-    else
-    {
-        _length -= left;
+	if(left > len)
+	{
+		(*it)->addReadIdx(len);
+		_length -= len;
+	}
+	else if(left == len)
+	{
+		(*it)->addReadIdx(len);
+		_length -= len;
+		_bufferList.erase(it);
+	}
+	else
+	{
+		(*it)->addReadIdx(left);
+		_length -= left;
 
-        _bufferList.erase(it);
+		_bufferList.erase(it);
 
-        return moveHeader(len - left);
-    }
-    return true;
+		return moveHeader(len - left);
+	}
+	return true;
 }
 
 uint8_t TC_NetWorkBuffer::getValueOf1() const
 {
-    return getValue<uint8_t>();
+	return getValue<uint8_t>();
 }
 
 uint16_t TC_NetWorkBuffer::getValueOf2() const
 {
-    return getValue<uint16_t>();
+	return getValue<uint16_t>();
 }
 
 uint32_t TC_NetWorkBuffer::getValueOf4() const
 {
-    return getValue<uint32_t>();
+	return getValue<uint32_t>();
 }
 
 TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::parseBufferOf1(vector<char> &buffer, uint8_t minLength, uint8_t maxLength)
 {
-    return parseBuffer<uint8_t>(buffer, minLength, maxLength);
+	return parseBuffer<uint8_t>(buffer, minLength, maxLength);
 }
 
 TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::parseBufferOf2(vector<char> &buffer, uint16_t minLength, uint16_t maxLength)
 {
-    return parseBuffer<uint16_t>(buffer, minLength, maxLength);
+	return parseBuffer<uint16_t>(buffer, minLength, maxLength);
 }
 
 TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::parseBufferOf4(vector<char> &buffer, uint32_t minLength, uint32_t maxLength)
 {
-    return parseBuffer<uint32_t>(buffer, minLength, maxLength);
+	return parseBuffer<uint32_t>(buffer, minLength, maxLength);
 }
 
 TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::checkHttp()
 {
-    try
-    {
-	    bool b = TC_HttpRequest::checkRequest(*this);
+	try
+	{
+		bool b = TC_HttpRequest::checkRequest(*this);
 
-	    return b ? PACKET_FULL : PACKET_LESS;
-    }
-    catch (exception &ex)
-    {
-        return PACKET_ERR;
-    }
+		return b ? PACKET_FULL : PACKET_LESS;
+	}
+	catch (exception &ex)
+	{
+		return PACKET_ERR;
+	}
 
-    return PACKET_LESS;
+	return PACKET_LESS;
 }
 
 TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::parseHttp(TC_NetWorkBuffer&in, vector<char> &out)
 {
-    TC_NetWorkBuffer::PACKET_TYPE b = in.checkHttp();
+//	LOG_CONSOLE_DEBUG << in.mergeBuffers() << endl;
 
-    if (b == PACKET_FULL)
-    {
-        out = in.getBuffers();
+	TC_NetWorkBuffer::PACKET_TYPE b = in.checkHttp();
 
-        in.clearBuffers();
-    }
+	if (b == PACKET_FULL)
+	{
+		out = in.getBuffers();
+
+		in.clearBuffers();
+	}
 
-    return b;
+	return b;
 }
 
 
 TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::parseEcho(TC_NetWorkBuffer&in, vector<char> &out)
 {
-    try
-    {
-        out = in.getBuffers();
-        in.clearBuffers();
-        return TC_NetWorkBuffer::PACKET_FULL;
-    }
-    catch (exception &ex)
-    {
-        return TC_NetWorkBuffer::PACKET_ERR;
-    }
-
-    return TC_NetWorkBuffer::PACKET_LESS;             //表示收到的包不完全
-}
-
-TC_NetWorkBuffer::PACKET_TYPE TC_NetWorkBuffer::parseJson(TC_NetWorkBuffer&in, vector<char> &out)
-{
-    auto jsonEnd = in.find("}", 1);
-
-    if (jsonEnd != in.end())
-    {
-        out = in.getBuffers();
-        in.clearBuffers();
-        return TC_NetWorkBuffer::PACKET_FULL;   //返回1表示收到的包已经完全
-    }
+	try
+	{
+		if(in.empty())
+		{
+			return TC_NetWorkBuffer::PACKET_LESS;
+		}
+		out = in.getBuffers();
+		in.clearBuffers();
+		return TC_NetWorkBuffer::PACKET_FULL;
+	}
+	catch (exception &ex)
+	{
+		return TC_NetWorkBuffer::PACKET_ERR;
+	}
 
-    return TC_NetWorkBuffer::PACKET_ERR;        //返回-1表示收到包协议错误,框架会自动关闭当前连接
+	return TC_NetWorkBuffer::PACKET_LESS;             //表示收到的包不完全
 }
 
 }

+ 19 - 6
util/src/tc_openssl.cpp

@@ -201,12 +201,21 @@ void TC_OpenSSL::getMemData(BIO* bio, TC_NetWorkBuffer& buf)
 {
 	while (true)
 	{
-		char data[8*1024];
-		int bytes = BIO_read(bio, data, sizeof(data));
+		auto data = buf.getOrCreateBuffer(2*1024, 8*1024);
+
+		int bytes = BIO_read(bio, data->free(), data->left());
 		if (bytes <= 0)
 			return;
 
-		buf.addBuffer(data, bytes);
+		data->addWriteIdx(bytes);
+		buf.addLength(bytes);
+
+//		char data[8*1024];
+//		int bytes = BIO_read(bio, data, sizeof(data));
+//		if (bytes <= 0)
+//			return;
+
+//		buf.addBuffer(data, bytes);
 	}
 }
 
@@ -214,13 +223,17 @@ int TC_OpenSSL::doSSLRead(SSL* ssl, TC_NetWorkBuffer& out)
 {
 	while (true)
 	{
-		char plainBuf[32 * 1024];
+		auto plainBuf = out.getOrCreateBuffer(8 * 1024, 32 * 1024);
+//		char plainBuf[32 * 1024];
 
 		ERR_clear_error();
-		int bytes = SSL_read(ssl, plainBuf, sizeof plainBuf);
+//		int bytes = SSL_read(ssl, plainBuf, sizeof plainBuf);
+		int bytes = SSL_read(ssl, plainBuf->free(), plainBuf->left());
 		if (bytes > 0)
 		{
-			out.addBuffer(plainBuf, bytes);
+//			out.addBuffer(plainBuf, bytes);
+			plainBuf->addWriteIdx(bytes);
+			out.addLength(bytes);
 		}
 		else
 		{

+ 2 - 2
util/src/tc_option.cpp

@@ -74,14 +74,14 @@ void TC_Option::parse(const string &s)
     }
 }
 
-string TC_Option::getValue(const string &sName) const
+string TC_Option::getValue(const string &sName, const string &def) const
 {
     auto it = _mParam.find(sName);
     if( it != _mParam.end())
     {
         return it->second;
     }
-    return "";
+    return def;
 }
 
 bool TC_Option::hasParam(const string &sName) const

+ 106 - 40
util/src/tc_port.cpp

@@ -16,6 +16,7 @@
 
 #include "util/tc_port.h"
 #include "util/tc_common.h"
+#include "util/tc_logger.h"
 #include <thread>
 #include <string.h>
 
@@ -238,14 +239,13 @@ void TC_Port::setEnv(const string &name, const string &value)
 #endif
 }
 
-std::string TC_Port::exec(const char* cmd)
+string TC_Port::exec(const char *cmd)
 {
 	string err;
-
-	return TC_Port::exec(cmd, err);
+	return exec(cmd, err);
 }
 
-string TC_Port::exec(const char *cmd, std::string &errstr)
+std::string TC_Port::exec(const char* cmd, std::string &err)
 {
 	string fileData;
 #if TARGET_PLATFORM_WINDOWS
@@ -253,9 +253,9 @@ string TC_Port::exec(const char *cmd, std::string &errstr)
 #else
     FILE* fp = popen(cmd, "r");
 #endif
-	if (fp == NULL) {
-		errstr = "popen '" + string(cmd) + "' error.";
-		return fileData;
+	if(fp == NULL) {
+		err = "open '" + string(cmd) + "' error";
+		return "";
 	}
     static size_t buf_len = 2 * 1024 * 1024;
     char *buf = new char[buf_len];
@@ -272,10 +272,11 @@ string TC_Port::exec(const char *cmd, std::string &errstr)
 	return fileData;
 }
 
-unordered_map<int, vector<std::function<void()>>> TC_Port::_callbacks;
+unordered_map<int, unordered_map<size_t, std::function<void()>>> TC_Port::_callbacks;
 std::mutex   TC_Port::_mutex;
+std::atomic<size_t> TC_Port::_callbackId{0};
 
-void TC_Port::registerSig(int sig, std::function<void()> callback)
+size_t TC_Port::registerSig(int sig, std::function<void()> callback)
 {
 	std::lock_guard<std::mutex> lock(_mutex);
 
@@ -287,67 +288,132 @@ void TC_Port::registerSig(int sig, std::function<void()> callback)
 		registerSig(sig);
 	}
 
-	_callbacks[sig].push_back(callback);
+	size_t id = ++_callbackId;
+
+	_callbacks[sig][id] = callback;
+
+	return id;
 }
 
-void TC_Port::registerCtrlC(std::function<void()> callback)
+void TC_Port::unregisterSig(int sig, size_t id)
+{
+	std::lock_guard<std::mutex> lock(_mutex);
+	auto it = _callbacks.find(sig);
+
+	if(it != _callbacks.end())
+	{
+		it->second.erase(id);
+	}
+}
+
+size_t TC_Port::registerCtrlC(std::function<void()> callback)
 {
 #if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
-	registerSig(SIGINT, callback);
+	return registerSig(SIGINT, callback);
 #else
-	registerSig(CTRL_C_EVENT, callback);
+	return registerSig(CTRL_C_EVENT, callback);
 #endif
 }
 
-void TC_Port::registerTerm(std::function<void()> callback)
+void TC_Port::unregisterCtrlC(size_t id)
 {
 #if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
+	unregisterSig(SIGINT, id);
+#else
+	unregisterSig(CTRL_C_EVENT, id);
+#endif
+}
 
-	registerSig(SIGTERM, callback);
+size_t TC_Port::registerTerm(std::function<void()> callback)
+{
+#if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
+	return registerSig(SIGTERM, callback);
 #else
-	registerSig(CTRL_SHUTDOWN_EVENT, callback);
+	return registerSig(CTRL_SHUTDOWN_EVENT, callback);
 #endif
 }
 
+void TC_Port::unregisterTerm(size_t id)
+{
+#if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
+	unregisterSig(SIGTERM, id);
+#else
+	unregisterSig(CTRL_SHUTDOWN_EVENT, id);
+#endif
+}
 
 void TC_Port::registerSig(int sig)
 {
 #if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
-	std::thread th(signal, sig, TC_Port::sighandler);
-	th.detach();
+	signal(sig, TC_Port::sighandler);
+//    std::thread th(signal, sig, TC_Port::sighandler);
+//    th.detach();
 #else
-	std::thread th([] {SetConsoleCtrlHandler(TC_Port::HandlerRoutine, TRUE); });
-	th.detach();
+	SetConsoleCtrlHandler(TC_Port::HandlerRoutine, TRUE);
+//    std::thread th([] {SetConsoleCtrlHandler(TC_Port::HandlerRoutine, TRUE); });
+//	th.detach();
 #endif
 }
 
 #if TARGET_PLATFORM_LINUX || TARGET_PLATFORM_IOS
 void TC_Port::sighandler( int sig_no )
 {
-	std::lock_guard<std::mutex> lock(_mutex);
-
-	auto it = TC_Port::_callbacks.find(sig_no);
-	if(it != TC_Port::_callbacks.end())
-	{
-		for (auto f : it->second)
-		{
-			try { f(); } catch (...) {}
-		}
-	}
+	std::thread th([sig_no]()
+				   {
+					   unordered_map<size_t, std::function<void()>> data;
+
+					   {
+						   std::lock_guard<std::mutex> lock(_mutex);
+
+						   auto it = TC_Port::_callbacks.find(sig_no);
+						   if (it != TC_Port::_callbacks.end())
+						   {
+							   data = it->second;
+						   }
+					   }
+
+					   for (auto f : data)
+					   {
+						   try
+						   {
+							   f.second();
+						   }
+						   catch (...)
+						   {
+						   }
+					   }
+				   });
+	th.detach();
 }
 #else
 BOOL WINAPI TC_Port::HandlerRoutine(DWORD dwCtrlType)
 {
-	std::lock_guard<std::mutex> lock(_mutex);
-
-	auto it = TC_Port::_callbacks.find(dwCtrlType);
-	if(it != TC_Port::_callbacks.end())
-	{
-		for (auto f : it->second)
-		{
-			try { f(); } catch (...) {}
-		}
-	}
+	std::thread th([dwCtrlType]()
+				   {
+					   unordered_map<size_t, std::function<void()>> data;
+
+					   {
+						   std::lock_guard<std::mutex> lock(_mutex);
+
+						   auto it = TC_Port::_callbacks.find(dwCtrlType);
+						   if (it != TC_Port::_callbacks.end())
+						   {
+							   data = it->second;
+						   }
+					   }
+
+					   for (auto f : data)
+					   {
+						   try
+						   {
+							   f.second();
+						   }
+						   catch (...)
+						   {
+						   }
+					   }
+				   });
+	th.detach();
 	return TRUE;
 }
 #endif

+ 311 - 0
util/src/tc_proxy_info.cpp

@@ -0,0 +1,311 @@
+/**
+ * Tencent is pleased to support the open source community by making Tars available.
+ *
+ * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
+ *
+ * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * https://opensource.org/licenses/BSD-3-Clause
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed 
+ * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
+ * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
+ * specific language governing permissions and limitations under the License.
+ */
+
+#include "util/tc_proxy_info.h"
+#include "util/tc_http.h"
+#include "util/tc_base64.h"
+
+namespace tars
+{
+
+shared_ptr<TC_ProxyInfo> TC_ProxyInfo::createProxyInfo(const TC_ProxyInfo::ProxyBaseInfo &baseInfo)
+{
+    shared_ptr<TC_ProxyInfo> proxyInfo;
+
+    switch(baseInfo.type)
+    {
+        case eProxy_Type_Sock4:
+            proxyInfo.reset(new TC_ProxySock4(baseInfo.ep));
+            break;
+        case eProxy_Type_Sock5:
+            proxyInfo.reset(new TC_ProxySock5(baseInfo.ep, baseInfo.user, baseInfo.pass));
+            break;
+        case eProxy_Type_Http:
+            proxyInfo.reset(new TC_ProxyHttp(baseInfo.ep, baseInfo.user, baseInfo.pass));
+            break;
+    }
+    return proxyInfo;
+}
+
+void TC_ProxyInfo::onDisconnect()
+{
+	setProxyStage(eProxy_Stage_DisConn);
+}
+
+void TC_ProxyInfo::onConnSuccess()
+{
+	setProxyStage(eProxy_Stage_Connected);
+}
+
+void TC_ProxyInfo::setProxyStage(TC_ProxyInfo::EMProxyStageType proxyStage)
+{
+	if (_stage == proxyStage) {
+		return;
+	}
+
+	_stage = proxyStage;
+}
+
+////////////////////////////////////////////////////////////////////////
+bool TC_ProxySock4::sendProxyPacket(vector<char> & buff, const TC_Endpoint & dst)
+{
+	//first handshake
+	buff.push_back(kProxy_Sock4_Req1_VN);
+	buff.push_back(kProxy_Sock4_Req1_CD);
+
+	unsigned short nPort = htons(dst.getPort());
+
+	buff.insert(buff.end(), (const char *)&nPort, (const char *)&nPort + sizeof(nPort));
+
+	struct in_addr addr;
+
+	TC_Socket::parseAddr(dst.getHost(), addr);
+
+	int32_t tmpLong = addr.s_addr;
+
+	buff.insert(buff.end(), (const char *)&tmpLong, (const char *)&tmpLong + sizeof(tmpLong));
+
+	buff.push_back('a');
+	buff.push_back(0);
+
+	return true;
+}
+
+bool TC_ProxySock4::recvProxyPacket(const char *buff, size_t length)
+{
+	switch (_stage) {
+		case eProxy_Stage_Establish: {
+			//send first handshake
+			if (sizeof(struct sock4ans1) != length) {
+				_errMsg = "proxy disconnected: Establish protocol length error";
+				onDisconnect();
+				return false;
+			}
+
+			struct sock4ans1 *pSockAns1 = (struct sock4ans1 *) buff;
+			if (pSockAns1->VN != kProxy_Sock4_Ans1_VN || pSockAns1->CD != kProxy_Sock4_Ans1_CD) {
+				_errMsg = "proxy disconnected: Establish protocol version error";
+				onDisconnect();
+				return false;
+			}
+
+			//success
+			onConnSuccess();
+			return true;
+		}
+		default: {
+			assert(false);
+		}
+	}
+
+	return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+
+bool TC_ProxySock5::sendProxyPacket(vector<char> & vBuffer, const TC_Endpoint & dst)
+{
+	switch (_stage) {
+		case eProxy_Stage_DisConn:
+		case eProxy_Stage_Establish: {
+			//first handshake
+			vBuffer.push_back(kProxy_Sock5_Req1_Ver);
+			vBuffer.push_back(kProxy_Sock5_Req1_nMethods);
+			vBuffer.push_back(kProxy_Sock5_Req1_nMethods0);
+			vBuffer.push_back(kProxy_Sock5_Req1_nMethods1);
+
+			return true;
+		}
+		case eProxy_Stage_ACK1: {
+			//second handshake  user pwd
+			char nUserLength = (char) _user.size();
+			char nPwdLength = (char) _pass.size();
+
+			vBuffer.push_back(1);
+			vBuffer.push_back(nUserLength);
+			vBuffer.insert(vBuffer.end(), _user.begin(), _user.end());
+			vBuffer.push_back(nPwdLength);
+			vBuffer.insert(vBuffer.end(), _pass.begin(), _pass.end());
+
+			return true;
+		}
+		case eProxy_Stage_ACK2: {
+			//third handshake
+			vBuffer.push_back(kProxy_Sock5_Req3_Ver);
+			vBuffer.push_back(kProxy_Sock5_Req3_Cmd);
+			vBuffer.push_back(kProxy_Sock5_Req3_Rsv);
+
+			if(dst.isIPv6())
+			{
+				vBuffer.push_back(kProxy_Sock5_Req3_AtypIpv6);
+
+				//RFC 1928: ATYP 0x04 must be followed by the full 16-byte IPv6 address
+				struct in6_addr addr6;
+
+				TC_Socket::parseAddr(dst.getHost(), addr6);
+
+				vBuffer.insert(vBuffer.end(), (const char *)&addr6, (const char *)&addr6 + sizeof(addr6));
+			}
+			else
+			{
+				vBuffer.push_back(kProxy_Sock5_Req3_AtypIpv4);
+
+				struct in_addr addr;
+
+				TC_Socket::parseAddr(dst.getHost(), addr);
+
+				int32_t tmpLong = addr.s_addr;
+
+				vBuffer.insert(vBuffer.end(), (const char *)&tmpLong, (const char *)&tmpLong + sizeof(tmpLong));
+			}
+
+			unsigned short nPort = htons(dst.getPort());
+			vBuffer.insert(vBuffer.end(), (const char *)&nPort, (const char *)&nPort + sizeof(nPort));
+
+			return true;
+
+		}
+		default: {
+			assert(false);
+		}
+	}
+	return false;
+}
+
+bool TC_ProxySock5::recvProxyPacket(const char *buff, size_t length)
+{
+	switch (_stage) {
+		case eProxy_Stage_Establish: {
+			//send first handshake
+			if (sizeof(struct sock5ans1) != length) {
+				_errMsg = "proxy disconnected: Establish protocol length error";
+				onDisconnect();
+				return false;
+			}
+
+			struct sock5ans1 *pSock5Ans1 = (struct sock5ans1 *) buff;
+			if (pSock5Ans1->Ver != kProxy_Sock5_Ans1_Ver || (pSock5Ans1->Method != kProxy_Sock5_Ans1_Method_Anonymous && pSock5Ans1->Method != kProxy_Sock5_Ans1_Method_User)) 
+			{
+				_errMsg = "proxy disconnected: Establish protocol version error";
+
+				onDisconnect();
+				return false;
+			}
+
+			//need user
+			if (pSock5Ans1->Method == kProxy_Sock5_Ans1_Method_User) {
+				setProxyStage(eProxy_Stage_ACK1);
+				return true;
+			}
+			else {
+				//Anonymous
+				setProxyStage(eProxy_Stage_ACK2);
+				return true;
+			}
+		}
+		case eProxy_Stage_ACK1: {
+			//send second handshake
+			if (sizeof(struct authans) != length) {
+				_errMsg = "proxy disconnected: ACK1 protocol length error";
+				onDisconnect();
+				return false;
+			}
+			struct authans *pSock5Anthans = (struct authans *) buff;
+			if (pSock5Anthans->Ver != kProxy_Sock5_Anthans_Ver || pSock5Anthans->Status != kProxy_Sock5_Anthans_Status) {
+				_errMsg = "proxy disconnected: ACK1 protocol version error";
+				onDisconnect();
+				return false;
+			}
+
+			setProxyStage(eProxy_Stage_ACK2);
+			return true;
+		}
+		case eProxy_Stage_ACK2: {
+			if (sizeof(struct sock5ans2) != length) {
+				_errMsg = "proxy disconnected: ACK2 protocol length error";
+				onDisconnect();
+				return false;
+			}
+			struct sock5ans2 *pSock5An2 = (struct sock5ans2 *) buff;
+			if (pSock5An2->Ver != kProxy_Sock5_Ans2_Ver || pSock5An2->Rep != kProxy_Sock5_Ans2_Rep) {
+				_errMsg = "proxy disconnected: ACK2 protocol version error";
+				onDisconnect();
+				return false;
+			}
+			//success
+			onConnSuccess();
+			return true;
+		}
+		default: {
+			assert(false);
+		}
+	}
+
+	return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+
+bool TC_ProxyHttp::sendProxyPacket(vector<char> & buff, const TC_Endpoint & dst)
+{
+	switch (_stage) {
+		case eProxy_Stage_Establish: {
+			ostringstream oss;
+			//first handshake
+			std::string strRev;
+			if (_user.empty()) {
+				oss << "CONNECT " << dst.getHost() << ":" << dst.getPort()
+				    << " HTTP/1.1\r\nUser-Agent: Mozilla/4.0\r\n\r\n";
+				strRev = oss.str();
+			}
+			else {
+				oss << "CONNECT " << dst.getHost() << ":" << dst.getPort()
+				    << " HTTP/1.1\r\nUser-Agent: Mozilla/4.0\r\n";
+
+				oss << "Proxy-Authorization:Basic " << TC_Base64::encode(_user + ":" + _pass) << "\r\n\r\n";
+				strRev = oss.str();
+			}
+
+			buff.insert(buff.end(), strRev.begin(), strRev.end());
+			return true;
+		}
+		default: {
+			assert(false);
+		}
+	}
+	return false;
+}
+
+bool TC_ProxyHttp::recvProxyPacket(const char *buff, size_t length)
+{
+	switch (_stage) {
+		case eProxy_Stage_Establish: {
+			TC_HttpResponse rsp;
+			rsp.decode(buff, length);
+
+			//send first handshake
+			if (rsp.getStatus() != 200) {
+				_errMsg = "proxy disconnected: " + TC_Common::tostr(rsp.getStatus()) + ", about:" + rsp.getAbout() + " error";
+
+				onDisconnect();
+				return false;
+			}
+			//success
+			onConnSuccess();
+			return true;
+		}
+		default: {
+			assert(false);
+		}
+	}
+
+	return false;
+}
+
+}

+ 52 - 1
util/src/tc_socket.cpp

@@ -315,7 +315,7 @@ void TC_Socket::parseAddr(const string &host, struct in6_addr &stSinAddr)
 		if(rs != 0)
 		{
 			ostringstream os;
-			os << "DNSException ex:(" << strerror(errno) << ")" << rs << ":" << host << ":" << __FILE__ << ":" << __LINE__;
+			os << "DNSException ex:(" << TC_Exception::parseError(TC_Exception::getSystemCode()) << ")" << rs << ":" << host << ":" << __FILE__ << ":" << __LINE__;
 			if(info != NULL)
 			{
 				freeaddrinfo(info);
@@ -331,6 +331,45 @@ void TC_Socket::parseAddr(const string &host, struct in6_addr &stSinAddr)
     }
 }
 
+void TC_Socket::parseAddr(const addr_type& addr, string& host, uint16_t &port)
+{
+    int iDomain;
+    sockaddr_in6 *addr6 = NULL;
+    sockaddr_in *addr4 = NULL;
+    if (addr.second == sizeof(struct sockaddr_in6))
+    {
+        iDomain = AF_INET6;
+        addr6 = (sockaddr_in6 *) addr.first.get();
+    }
+    else
+    {
+        iDomain = AF_INET;
+        addr4 = (sockaddr_in *) addr.first.get();
+    }
+
+    char sAddr[INET6_ADDRSTRLEN] = "\0";
+    inet_ntop(iDomain, (AF_INET6 == iDomain) ? (void *) &(addr6->sin6_addr) : (void *) &addr4->sin_addr, sAddr, sizeof(sAddr));
+    host = sAddr;
+    port = (AF_INET6 == iDomain) ? ntohs(addr6->sin6_port) : ntohs(addr4->sin_port);
+}
+
+TC_Socket::addr_type TC_Socket::createSockAddr(const char *str)
+{
+    TC_Socket::addr_type addr;
+
+    if (TC_Socket::addressIsIPv6(str))
+    {
+        addr.first.reset( (sockaddr *)new sockaddr_in6());
+        addr.second = sizeof(struct sockaddr_in6);
+    }
+    else
+    {
+        addr.first.reset((sockaddr *) new sockaddr_in());
+        addr.second = sizeof(struct sockaddr_in);
+    }
+
+    return addr;
+}
 
 void TC_Socket::parseAddrWithPort(const string& host, int port, struct sockaddr_in& addr)
 {
@@ -598,6 +637,18 @@ void TC_Socket::setNoCloseWait()
     }
 }
 
+void TC_Socket::setReuseAddr()
+{
+    int iReuseAddr = 1;
+
+    if (setSockOpt(SO_REUSEADDR, (const void *) &iReuseAddr, sizeof(int), SOL_SOCKET) == -1)
+    {
+        THROW_EXCEPTION_SYSCODE(TC_Socket_Exception, "[TC_Socket::setReuseAddr] error");
+        // throw TC_Socket_Exception("[TC_Socket::setNoCloseWait] error", TC_Exception::getSystemCode());
+    }
+
+}
+
 void TC_Socket::setCloseWait(int delay)
 {
     linger stLinger;

+ 17 - 11
util/src/tc_spin_lock.cpp

@@ -1,10 +1,14 @@
 
 #include "util/tc_spin_lock.h"
+#include "util/tc_common.h"
+
 #include <thread>
 #include <iostream>
 #include <cassert>
 using namespace std;
 
+#define TRYS_COUNT 10
+#define TRYS_SLEEP 1
 namespace tars
 {
 
@@ -19,10 +23,16 @@ TC_SpinLock::~TC_SpinLock()
 
 void TC_SpinLock::lock() const
 {
-
-    for (; _flag.test_and_set(std::memory_order_acquire);) {
-        std::this_thread::yield();
-//        asm volatile("rep; nop":: : "memory");
+    for (size_t i = 1; _flag.test_and_set(std::memory_order_acquire); i++)
+    {
+    	if(i % TRYS_COUNT == 0)
+		{
+    		TC_Common::msleep(TRYS_SLEEP);
+		}
+    	else
+		{
+			std::this_thread::yield();
+		}
     }
 }
 
@@ -30,17 +40,13 @@ void TC_SpinLock::unlock() const
 {
     _flag.clear(std::memory_order_release);
 }
-//
+
 bool TC_SpinLock::tryLock() const
 {
-    int trys = 100;
+    int trys = TRYS_COUNT;
     for (; trys > 0 && _flag.test_and_set(std::memory_order_acquire); --trys)
     {
-        std::this_thread::yield();
-//#if TARGET_PLATFORM_LINUX
-//        asm volatile("rep; nop" ::: "memory");
-//#endif
-
+		std::this_thread::yield();
     }
 
     if (trys > 0)

+ 115 - 7
util/src/tc_thread.cpp

@@ -15,6 +15,9 @@
  */
 
 #include "util/tc_thread.h"
+#include "util/tc_port.h"
+#include "util/tc_coroutine.h"
+#include "util/tc_common.h"
 #include <sstream>
 #include <cerrno>
 #include <cassert>
@@ -61,14 +64,13 @@ void TC_ThreadControl::yield()
     std::this_thread::yield();
 }
 
-TC_Thread::TC_Thread() : _running(false), _th(NULL)
+TC_Thread::TC_Thread(const string &threadName) : _threadName(threadName), _running(false), _th(NULL)
 {
 }
 
 TC_Thread::~TC_Thread()
 {
-
-    if(_th != NULL)
+    if (_th != NULL)
     {
         //如果资源没有被detach或者被join,则自己释放
         if (_th->joinable())
@@ -81,9 +83,33 @@ TC_Thread::~TC_Thread()
     }
 }
 
+void TC_Thread::setThreadName(const string &threadName)
+{
+	_threadName = threadName;
+}
+
+class RunningClosure
+{
+public:
+	RunningClosure(TC_Thread *pThread): _pThread(pThread) {
+		pThread->_running = true;
+	}
+
+	~RunningClosure() {
+		if(!_pThread->getScheduler())
+		{
+			//非协程模式
+			_pThread->_running = false;
+		}
+	}
+protected:
+
+	TC_Thread *_pThread;
+};
+
 void TC_Thread::threadEntry(TC_Thread *pThread)
 {
-    pThread->_running = true;
+	RunningClosure r(pThread);
 
     {
         TC_ThreadLock::Lock sync(pThread->_lock);
@@ -96,15 +122,13 @@ void TC_Thread::threadEntry(TC_Thread *pThread)
     }
     catch (exception &ex)
     {
-        pThread->_running = false;
+        cerr << std::this_thread::get_id() << "|" << ex.what() << endl;
         throw ex;
     }
     catch (...)
     {
-        pThread->_running = false;
         throw;
     }
-    pThread->_running = false;
 }
 
 TC_ThreadControl TC_Thread::start()
@@ -131,6 +155,90 @@ TC_ThreadControl TC_Thread::start()
     return TC_ThreadControl(_th);
 }
 
+void TC_Thread::coroutineEntry(TC_Thread *pThread, uint32_t iPoolSize, size_t iStackSize, bool autoQuit)
+{
+	pThread->_scheduler = TC_CoroutineScheduler::create();
+
+	pThread->_scheduler->setPoolStackSize(iPoolSize, iStackSize);
+
+    if(autoQuit)
+    {
+	    pThread->_scheduler->setNoCoroutineCallback([](TC_CoroutineScheduler *scheduler){
+            scheduler->terminate();
+        });
+    }
+
+	pThread->_scheduler->createCoroutine(std::bind(TC_Thread::threadEntry, pThread));
+
+    {
+        TC_ThreadLock::Lock sync(pThread->_lock);
+        pThread->_lock.notifyAll();
+    }
+
+	pThread->_scheduler->run();
+
+    pThread->_running = false;
+
+    pThread->_scheduler.reset();
+    TC_CoroutineScheduler::reset();
+}
+
+TC_ThreadControl TC_Thread::startCoroutine(uint32_t iPoolSize, size_t iStackSize, bool autoQuit)
+{
+    TC_ThreadLock::Lock sync(_lock);
+
+    if (_running)
+    {
+        throw TC_ThreadThreadControl_Exception("[TC_Thread::startCoroutine] thread has start");
+    }
+
+    try
+    {
+        _th = new std::thread(&TC_Thread::coroutineEntry, this, iPoolSize, iStackSize, autoQuit);
+    }
+    catch(...)
+    {
+        throw TC_ThreadThreadControl_Exception("[TC_Thread::startCoroutine] thread start error");
+    }
+
+    _lock.wait();
+
+    return TC_ThreadControl(_th);
+    
+}
+
+void TC_Thread::join()
+{
+    if(!_th)
+    {
+        return;
+    }
+    if (std::this_thread::get_id() == _th->get_id())
+    {
+        throw TC_ThreadThreadControl_Exception("[TC_Thread::join] can't be called in the same thread");
+    }
+
+    if (_th->joinable())
+    {
+        _th->join();
+    }
+}
+
+bool TC_Thread::joinable()
+{
+    if(!_th)
+    {
+        return false;
+    }
+
+    return _th->joinable();
+}
+
+void TC_Thread::detach()
+{
+    if(_th)
+    {
+        _th->detach();
+    }
+}
+
 TC_ThreadControl TC_Thread::getThreadControl() 
 {
     return TC_ThreadControl(_th);

+ 8 - 1
util/src/tc_thread_pool.cpp

@@ -23,7 +23,7 @@ namespace tars
 {
 
 TC_ThreadPool::TC_ThreadPool()
-    :  _threadNum(1), _bTerminate(false)
+    :  _threadNum(1), _bTerminate(true)
 {
 }
 
@@ -46,6 +46,11 @@ void TC_ThreadPool::init(size_t num)
 
 void TC_ThreadPool::stop()
 {
+    if(_bTerminate)
+    {
+        return ;
+    }
+    
     {
         std::unique_lock<std::mutex> lock(_mutex);
 
@@ -77,6 +82,8 @@ void TC_ThreadPool::start()
         throw TC_ThreadPool_Exception("[TC_ThreadPool::start] thread pool has start!");
     }
 
+    _bTerminate = false;
+
     for (size_t i = 0; i < _threadNum; i++)
     {
         _threads.push_back(new thread(&TC_ThreadPool::run, this));

+ 19 - 18
util/src/tc_timeprovider.cpp

@@ -15,31 +15,23 @@
  */
 
 #include "util/tc_timeprovider.h"
+#include "util/tc_logger.h"
 #include <cmath>
 
 namespace tars
 {
 
-TC_TimeProvider *TC_TimeProvider::g_tp = NULL;
+TC_TimeProvider* TC_TimeProvider::g_tp = NULL;
 
 TC_TimeProvider* TC_TimeProvider::getInstance()
 {
-    if (!g_tp)
-    {
-        static std::mutex m;
-       	std::lock_guard<std::mutex> lock(m);
-
-        // static TC_ThreadMutex mutex;
-
-        // TC_LockT<TC_ThreadMutex> lock(mutex);
-
-        if (!g_tp)
-        {
+    static std::once_flag flag;
+    std::call_once(flag, []()
+        { 
             g_tp = new TC_TimeProvider();
-			std::thread t(&TC_TimeProvider::run, g_tp);
-			t.detach();
-        }
-    }
+            g_tp->start();
+        });
+
     return g_tp;
 }
 
@@ -114,7 +106,7 @@ void TC_TimeProvider::getNow(timeval *tv)
 #endif
 }
 
-int64_t TC_TimeProvider::getNowMs()
+uint64_t TC_TimeProvider::getNowMs()
 {
     struct timeval tv;
     getNow(&tv);
@@ -126,7 +118,7 @@ void TC_TimeProvider::run()
 {
     memset(_tsc, 0x00, sizeof(_tsc));
 
-    while (true)
+    while (!_terminate)
     {
         timeval& tt = _t[!_buf_idx];
 
@@ -140,6 +132,15 @@ void TC_TimeProvider::run()
     }
 }
 
+void TC_TimeProvider::terminate()
+{
+    _terminate = true;
+
+    if(joinable())
+    {
+        join();
+    }
+}
 // double TC_TimeProvider::cpuMHz()
 // {
 //     if (_cpu_cycle != 0)

+ 232 - 176
util/src/tc_timer.cpp

@@ -13,212 +13,268 @@
  * CONDITIONS OF ANY KIND, either express or implied. See the License for the 
  * specific language governing permissions and limitations under the License.
  */
-#include "util/tc_timer.h"
 
+#include "util/tc_timer.h"
+#include "util/tc_logger.h"
 
 namespace tars
 {
 
-TC_Timer::~TC_Timer()
+TC_TimerBase::~TC_TimerBase()
 {
-    stopTimer();
 }
 
-void TC_Timer::startTimer(int numThread)
+void TC_TimerBase::clear()
 {
-    if (numThread <= 0)
-    {
-        numThread = 1;
-    }
-    
-    _terminate = false;
-    
-    _tpool.init(numThread + 1);
-    _tpool.start();
-    _tpool.exec(std::bind(&TC_Timer::run, this));
+	std::lock_guard<std::mutex> lock(_mutex);
+
+	_mapEvent.clear();
+	_mapTimer.clear();
+	_repeatIds.clear();
+	_nextTimer = -1;
 }
 
-void TC_Timer::stopTimer()
+uint32_t TC_TimerBase::genUniqueId()
 {
-    if (_terminate)
-    {
-        return;
-    }
-    
-    {
-        std::unique_lock<std::mutex> lck(_mutex);
-        _terminate = true;
-        _cond.notify_all();
-    }
-    
-    _tpool.stop();
+	uint32_t i = ++_increaseId;
+	if (i == 0)
+	{
+		i = ++_increaseId;
+	}
+
+	return i;
 }
 
-bool TC_Timer::exist(int64_t uniqId,bool repeat )
+
+bool TC_TimerBase::exist(int64_t uniqId,bool repeat )
 {
-    std::lock_guard<std::mutex> lock(_mutex);
-    if (repeat)
-    {
-        return  _repeatIds.find(uniqId) != _repeatIds.end();
-    }
-    else
-    {
-        return _mapEvent.find(uniqId) != _mapEvent.end();
-    }
+	std::lock_guard<std::mutex> lock(_mutex);
+	if (repeat)
+	{
+		return  _repeatIds.find(uniqId) != _repeatIds.end();
+	}
+	else
+	{
+		return _mapEvent.find(uniqId) != _mapEvent.end();
+	}
 }
 
-void TC_Timer::clear()
+void TC_TimerBase::erase(int64_t uniqId)
 {
 	std::lock_guard<std::mutex> lock(_mutex);
+	auto it = _mapEvent.find(uniqId);
 
-	_mapEvent.clear();
-	_mapTimer.clear();
-	_repeatIds.clear();
+	//LOG_CONSOLE_DEBUG << "before erase event!" << ",uniqId=" << uniqId << "|event size :" << _mapEvent.size() << "|timer size:" << _mapTimer.size() << endl;
+	if (it != _mapEvent.end())
+	{
+		auto itEvent = _mapTimer.find(it->second->_fireMillseconds);
+		if (itEvent != _mapTimer.end())
+		{
+			itEvent->second.erase(uniqId);
+			if (itEvent->second.empty())
+			{
+				_mapTimer.erase(itEvent);
+			}
+		}
+		it->second->_func = nullptr;
+		_mapEvent.erase(it);
+	}
+	_repeatIds.erase(uniqId);
 }
 
-void TC_Timer::erase(int64_t uniqId)
-{
-    std::lock_guard<std::mutex> lock(_mutex);
-    auto it = _mapEvent.find(uniqId);
-    
-    //LOG_CONSOLE_DEBUG << "before erase event!" << ",uniqId=" << uniqId << "|event size :" << _mapEvent.size() << "|timer size:" << _mapTimer.size() << endl;
-    if (it != _mapEvent.end())
-    {
-        auto itEvent = _mapTimer.find(it->second->_fireMillseconds);
-        if (itEvent != _mapTimer.end())
-        {
-            itEvent->second.erase(uniqId);
-            if (itEvent->second.empty())
-            {
-                _mapTimer.erase(itEvent);
-            }
-        }
-        it->second->_func = nullptr;
-        _mapEvent.erase(it);
-    }
-    _repeatIds.erase(uniqId);
-//    LOG_CONSOLE_DEBUG << "after erase event!" << ",uniqId=" << uniqId << "|event size :" << _mapEvent.size() << "|timer size:" << _mapTimer.size() << endl;
-}
-
-int64_t TC_Timer::post(const shared_ptr<TC_Timer::Func> &event ,bool repeat)
-{
-    std::unique_lock<std::mutex> lock(_mutex);
-    
-    int64_t uniqId = event->_uniqueId;
-    
-    if (repeat)
-    {
-        _repeatIds.insert(uniqId);
-    }
-    
-    if (_mapEvent.find(uniqId) == _mapEvent.end())
-    {
-        _mapEvent[uniqId] = event;
-    }
-    _mapTimer[event->_fireMillseconds].insert(uniqId);
-//    LOG_CONSOLE_DEBUG << "post event! fire time:" << TC_Common::msToTimeString(event->_fireMillseconds ) << ",uniqId=" << uniqId << "|event size :"
-//                      << _mapEvent.size() << "|timer size:" << _mapTimer.size() <<",_func=" << &(event->_func)<< endl;
-    _cond.notify_one();
-    return uniqId;
-}
-
-void TC_Timer::fireEvent(const EVENT_SET &el)
-{
-    auto itList = el.begin();
-    
-    while (itList != el.end())
-    {
-        shared_ptr<Func> func;
-        
-        {
-            std::lock_guard<std::mutex> lock(_mutex);
-            
-            auto it = _mapEvent.find(*itList);
-            if (it != _mapEvent.end())
-            {
-                func = it->second;
-                _tmpEvent[it->first] = it->second;
-                _mapEvent.erase(it);
-            }
-        }
-        if (func)
-        {
-            //执行具体事件对象
-            _tpool.exec(func->_func);
-        }
-        
-        ++itList;
-    }
+int64_t TC_TimerBase::post(const shared_ptr<TC_TimerBase::Func> & event, bool repeat)
+{
+	std::unique_lock <std::mutex> lock(_mutex);
+
+	int64_t uniqId = event->_uniqueId;
+	if (repeat) {
+		_repeatIds.insert(uniqId);
+	}
+
+	if(_mapEvent.find(uniqId) == _mapEvent.end()) {
+		_mapEvent[uniqId] = event;
+	}
+
+	_mapTimer[event->_fireMillseconds].insert(uniqId);
+
+	// LOG_CONSOLE_DEBUG << "fireMillseconds:" << event->_fireMillseconds << ", " << TNOWMS << ", " << event->_fireMillseconds - TNOWMS << endl;
+
+	if (_nextTimer < 0)
+	{
+		//无下一个事件了, 且还没有wait, 不需要onAddTimer去唤醒wait(在epoll wait醒来后,把定时器处理完, 如果是重复定时器, 就会进入这种状态)
+		_nextTimer = event->_fireMillseconds;
+		onAddTimer();
+	}
+	else if(event->_fireMillseconds < (uint64_t)_nextTimer)
+	{
+		//新时间更近, 需要唤醒wait, 等到最新时间上
+		_nextTimer = (uint64_t)event->_fireMillseconds;
+		onAddTimer();
+	}
+
+	// LOG_CONSOLE_DEBUG << _nextTimer << ", " << TNOWMS - _nextTimer << endl;
+	
+	return uniqId;
 }
 
-void TC_Timer::run()
+int64_t TC_TimerBase::fireEvents(int64_t ms)
+{
+	//get timer event
+	EVENT_SET el;
+
+	//获得事件, 返回下一次时间
+	getEvents(el);
+
+	auto itList = el.begin();
+
+	while (itList != el.end())
+	{
+		shared_ptr<Func> func ;
+
+		{
+			std::lock_guard<std::mutex> lock(_mutex);
+
+			auto it = _mapEvent.find(*itList);
+
+			if (it != _mapEvent.end()) {
+
+				func = it->second;
+
+				_tmpEvent[it->first] = it->second;
+
+				_mapEvent.erase(it);
+			}
+		}
+
+		if(func) {
+			//执行具体事件对象
+			onFireEvent(func->_func);
+		}
+
+		++itList;
+	}
+
+	std::lock_guard <std::mutex> lock(_mutex);
+
+	int64_t waitTime = ms;
+
+	if(_nextTimer > 0)
+	{
+		waitTime = _nextTimer - TNOWMS;
+	}
+
+	if(waitTime < 0)
+	{
+		waitTime = 0;
+	}
+
+	return waitTime;
+}
+
+int64_t TC_TimerBase::getEvents(TC_TimerBase::EVENT_SET &el)
+{
+	std::unique_lock <std::mutex> lock(_mutex);
+
+	_nextTimer = -1;
+
+	for (auto it = _mapTimer.begin(); it != _mapTimer.end();)
+	{
+		if ((uint64_t)it->first <= TNOWMS)
+		{
+			//时间过了, 有事件需要触发了
+			el.insert(it->second.begin(), it->second.end());
+			_mapTimer.erase(it++);
+		}
+		else
+		{
+			//时间还没到
+			_nextTimer = it->first;
+			break;
+		}
+	}
+
+	return _nextTimer;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+TC_Timer::~TC_Timer()
+{
+	stopTimer();
+}
+
+void TC_Timer::startTimer(int numThread)
+{
+	if (numThread <= 0)
+	{
+		numThread = 1;
+	}
+
+	_terminate = false;
+
+	//多个线程, 其中一个给TC_Timer::run使用
+	_tpool.init(numThread + 1);
+	_tpool.start();
+	_tpool.exec(std::bind(&TC_Timer::run, this));
+}
+
+void TC_Timer::stopTimer()
+{
+	if (_terminate)
+	{
+		return;
+	}
+
+	{
+		std::unique_lock<std::mutex> lck(_mutex);
+		_terminate = true;
+		_cond.notify_all();
+	}
+
+	_tpool.stop();
+}
+
+void TC_Timer::onAddTimer()
 {
-    while (!_terminate)
-    {
-        try
-        {
-            EVENT_SET el;
-            
-            {
-                std::unique_lock<std::mutex> lock(_mutex);
-                
-                auto it = _mapTimer.begin();
-                
-                if (it != _mapTimer.end())
-                {
-                    uint64_t ms = TC_TimeProvider::getInstance()->getNowMs();
-                    
-//                    LOG_CONSOLE_DEBUG << "now ms:" << TC_Common::msToTimeString(ms ) << "|timer:" << TC_Common::msToTimeString(it->first) << endl;
-                    if (it->first <= ms)
-                    {
-                        //时间过了, 有事件需要触发了
-                        el.swap(it->second);
-                        _mapTimer.erase(it);
-                    }
-                    else
-                    {
-                        //时间还没到
-                        ms = it->first - ms;
-                        _cond.wait_for(lock, std::chrono::milliseconds(ms));
-                    }
-                }
-                else
-                {
-                    //没有任何事件, 等待一下
-                    _cond.wait_for(lock, std::chrono::milliseconds(1000));
-                }
-            }
-            
-            if (!el.empty())
-            {
-                //触发相关所有事件
-                fireEvent(el);
-            }
-            
-        }
-        catch (exception &ex)
-        {
-            cerr << ex.what() << endl;
-        }
-    }
-}
-
-uint32_t TC_Timer::genUniqueId()
-{
-    uint32_t i = ++_increaseId;
-    if (i == 0)
-    {
-        i = ++_increaseId;
-    }
-    
-    return i;
+	_cond.notify_one();
 }
 
+void TC_Timer::onFireEvent(std::function<void()> func)
+{
+	//执行具体事件对象
+	_tpool.exec(func);
+}
+
+
 tuple<int64_t, int64_t, int64_t> TC_Timer::status()
 {
-    std::unique_lock<std::mutex> lock(_mutex);
-    return make_tuple(_tpool.getJobNum(), _mapEvent.size(), _repeatIds.size());
+	std::unique_lock<std::mutex> lock(_mutex);
+	return make_tuple(_tpool.getJobNum(), _mapEvent.size(), _repeatIds.size());
 }
+
+void TC_Timer::run()
+{
+	while (!_terminate)
+	{
+		try
+		{
+			fireEvents(1000);
+
+			std::unique_lock <std::mutex> lock(_mutex);
+
+			uint64_t ms = 1000;
+			
+			if (_nextTimer > 0)
+			{
+				ms = _nextTimer - TNOWMS;
+			}
+			_cond.wait_for(lock, std::chrono::milliseconds(ms));
+		}
+		catch (exception & ex) {
+			cerr << ex.what() << endl;
+		}
+	}
+}
+
 }
 
 

+ 1272 - 0
util/src/tc_transceiver.cpp

@@ -0,0 +1,1272 @@
+#include "util/tc_transceiver.h"
+#include "util/tc_logger.h"
+#if TAF_SSL
+#include "util/tc_openssl.h"
+#endif
+#include <sstream>
+
+namespace tars
+{
+
+class CloseClourse
+{
+public:
+    CloseClourse(TC_Transceiver *trans, TC_Transceiver::CloseReason reason, const string &err) : _trans(trans), _reason(reason), _err(err)
+    {}
+
+    ~CloseClourse() {
+        _trans->tcpClose(false, _reason, _err);
+    }
+protected:
+    TC_Transceiver *_trans;
+    TC_Transceiver::CloseReason     _reason;
+    string          _err;
+};
+
+#define THROW_ERROR(x, r, y) { CloseClourse c(this, r, y); THROW_EXCEPTION_SYSCODE(x, y); }
+
+static const int BUFFER_SIZE = 16 * 1024;
+
+///////////////////////////////////////////////////////////////////////
+
+int TC_Transceiver::createSocket(bool udp, bool isLocal, bool isIpv6)
+{
+#if TARGET_PLATFORM_WINDOWS
+    int domain = (isIpv6 ? PF_INET6 : PF_INET);
+#else
+    int domain = isLocal ? PF_LOCAL : (isIpv6 ? PF_INET6 : PF_INET);
+#endif
+
+	int type = udp ? SOCK_DGRAM : SOCK_STREAM;
+
+    TC_Socket s;
+    s.createSocket(type, domain);
+
+    if(!udp)
+    {
+        s.setTcpNoDelay();
+	    s.setKeepAlive();
+	    s.setNoCloseWait();
+    }
+    else
+    {
+    	s.setRecvBufferSize(512*1024);
+		s.setSendBufferSize(512*1024);
+	}
+
+    s.setOwner(false);
+    s.setblock(false);
+	return s.getfd();
+}
+
+bool TC_Transceiver::doConnect(int fd, const struct sockaddr *addr, socklen_t len)
+{
+	bool bConnected = false;
+
+	int iRet = ::connect(fd, addr, len);
+
+	if (iRet == 0)
+	{
+		bConnected  = true;
+	}
+	else if (!TC_Socket::isInProgress())
+	{
+        THROW_ERROR(TC_Transceiver_Exception, CR_Connect, "connect error, " + _desc);//, TC_Exception::getSystemCode());
+	}
+
+//	LOG_CONSOLE_DEBUG << bConnected << endl;
+
+    return bConnected;
+}
+
+TC_Transceiver::TC_Transceiver(TC_Epoller* epoller, const TC_Endpoint &ep)
+: _epoller(epoller)
+, _ep(ep)
+, _desc(ep.toString())
+, _fd(-1)
+, _connStatus(eUnconnected)
+, _sendBuffer(this)
+, _recvBuffer(this)
+, _authState(eAuthInit)
+{
+    // LOG_CONSOLE_DEBUG << endl;
+    if (ep.isUdp())
+    {
+        _pRecvBuffer = std::make_shared<TC_NetWorkBuffer::Buffer>();
+        _nRecvBufferSize = DEFAULT_RECV_BUFFERSIZE;
+        _pRecvBuffer->alloc(_nRecvBufferSize);
+    }
+
+    _serverAddr = TC_Socket::createSockAddr(_ep.getHost().c_str());
+}
+
+TC_Transceiver::~TC_Transceiver()
+{
+    if(!isValid()) return;
+
+    if(_ep.isTcp())
+    {
+        tcpClose(true, CR_DECONSTRUCTOR, "");
+    }
+    else
+    {
+        udpClose();
+    }
+}
+
+void TC_Transceiver::initializeClient(const oncreate_callback &oncreate,
+        const onclose_callback &onclose, 
+        const onconnect_callback &onconnect, 
+        const onrequest_callback &onrequest, 
+        const onparser_callback &onparser, 
+        const onopenssl_callback &onopenssl,
+        const oncompletepackage_callback &onfinish)
+{
+    _isServer = false;
+
+    _createSocketCallback = oncreate;
+
+    _onConnectCallback = onconnect;
+
+    _onRequestCallback = onrequest;
+
+    _onCloseCallback = onclose;
+
+    _onParserCallback = onparser;
+
+	_onCompletePackageCallback = onfinish;
+
+    _onOpensslCallback = onopenssl;
+
+}
+
+void TC_Transceiver::initializeServer(const onclose_callback &onclose,
+        const onrequest_callback &onrequest, 
+        const onparser_callback &onparser, 
+        const onopenssl_callback &onopenssl,
+        const oncompletepackage_callback &onfinish)
+{
+    _isServer = true;
+
+    _connStatus = eConnected;
+
+    _onRequestCallback = onrequest;
+
+    _onCloseCallback = onclose;
+
+    _onParserCallback = onparser;
+
+	_onCompletePackageCallback = onfinish;
+
+	_onOpensslCallback = onopenssl;
+
+#if TAF_SSL
+    if (isSSL()) 
+    {
+        _openssl = _onOpensslCallback(this);
+        if (!_openssl) 
+        {
+            THROW_ERROR(TC_Transceiver_Exception, CR_SSL, "[TC_Transceiver::initializeServer create '" + _desc + "' ssl client error]");
+        }
+
+        _openssl->init(true);
+
+	    _openssl->recvBuffer()->setConnection(this);
+
+        int ret = _openssl->doHandshake(_sendBuffer);
+        if (ret != 0) 
+        {
+            THROW_ERROR(TC_Transceiver_Exception, CR_SSL_HANDSHAKE, "[TC_Transceiver::initializeServer create '" + _desc + "' ssl client error: " + _openssl->getErrMsg() + "]");
+        }
+
+        // send the encrypt data from write buffer
+        if (!_sendBuffer.empty()) 
+        {
+            doRequest();
+        }
+    }
+#endif
+}
+
+void TC_Transceiver::setClientAuthCallback(const onclientsendauth_callback &onsendauth, const onclientverifyauth_callback &onverifyauth)
+{
+    _onClientSendAuthCallback = onsendauth;
+    
+    _onClientVerifyAuthCallback = onverifyauth;
+}
+
+void TC_Transceiver::setServerAuthCallback(const onserververifyauth_callback &onverifyauth)
+{
+    _onServerVerifyAuthCallback = onverifyauth;
+}
+
+void TC_Transceiver::setBindAddr(const char *host)
+{
+    if(_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type, "setBindAddr(" + string(host) + ") only use in client, " + _desc);
+    }
+    _bindAddr = TC_Socket::createSockAddr(host);
+}
+
+void TC_Transceiver::setBindAddr(const TC_Socket::addr_type &bindAddr)
+{
+    if(_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type,"setBindAddr only use in client, " + _desc);
+    }
+    _bindAddr = bindAddr;
+}
+
+shared_ptr<TC_Epoller::EpollInfo> TC_Transceiver::bindFd(int fd)
+{
+    if(!_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type, "client should not call bindFd, " + _desc);
+    }
+    _connStatus = eConnected;
+
+    _fd = fd;
+
+    //设置套接口选项
+    for(size_t i=0; i< _socketOpts.size(); ++i)
+    {
+        setsockopt(_fd,_socketOpts[i].level,_socketOpts[i].optname, (const char*)_socketOpts[i].optval,_socketOpts[i].optlen);
+    }
+
+    _clientAddr = TC_Socket::createSockAddr(_ep.getHost().c_str());
+
+    getpeername(_fd, _clientAddr.first.get(), &_clientAddr.second);
+
+    _epollInfo = _epoller->createEpollInfo(_fd);
+
+    return _epollInfo;
+}
+
+void TC_Transceiver::setUdpRecvBuffer(size_t nSize)
+{
+    _nRecvBufferSize = nSize;
+    _pRecvBuffer->alloc(_nRecvBufferSize);
+}
+
+void TC_Transceiver::checkConnect() 
+{
+    //检查连接是否有错误
+    if(isConnecting())
+    {
+        int iVal = 0;
+        SOCKET_LEN_TYPE iLen = static_cast<SOCKET_LEN_TYPE>(sizeof(int));
+        int ret = ::getsockopt(_fd, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&iVal), &iLen);
+
+        if (ret < 0 || iVal)
+        {
+            string err = TC_Exception::parseError(iVal);
+            THROW_ERROR(TC_Transceiver_Exception, CR_Connect, "connect " + _desc + " error:" + err);
+        }
+
+        _clientAddr = TC_Socket::createSockAddr(_ep.getHost().c_str());
+
+        getpeername(_fd, _clientAddr.first.get(), &_clientAddr.second);
+
+        if(_bindAddr.first)
+        {
+            //如果服务器终止后,服务器可以第二次快速启动而不用等待一段时间
+            int iReuseAddr = 1;
+
+            setsockopt(_fd, SOL_SOCKET, SO_REUSEADDR, (const char*)&iReuseAddr, sizeof(int));
+
+            ::bind(_fd, _bindAddr.first.get(), _bindAddr.second);
+        }
+        setConnected();
+    }
+}
+
+void TC_Transceiver::parseConnectAddress()
+{
+    if (isConnectIPv6())
+    {
+        TC_Socket::parseAddrWithPort(getConnectEndpoint().getHost(), getConnectEndpoint().getPort(), *(sockaddr_in6*)_serverAddr.first.get());
+    }
+    else
+    {
+        TC_Socket::parseAddrWithPort(getConnectEndpoint().getHost(), getConnectEndpoint().getPort(), *(sockaddr_in*)_serverAddr.first.get());
+    }
+}
+
+bool TC_Transceiver::isSSL() const 
+{ 
+    return _ep.isSSL(); 
+}
+
+void TC_Transceiver::connect()
+{
+    if(_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type, "server should not call connect, " + _desc);
+    }
+
+    if(isValid())
+    {
+        return;
+    }
+
+    if(_connStatus == eConnecting || _connStatus == eConnected)
+    {
+        return;
+    }
+
+    if (_ep.isUdp())
+    {
+        _fd = createSocket(true, false, isConnectIPv6());
+
+        _connStatus = eConnected;
+
+        _epollInfo = _epoller->createEpollInfo(_fd);
+
+        _proxyInfo = _createSocketCallback(this);
+        if(_proxyInfo)
+        {
+            _desc = _proxyInfo->getEndpoint().toString();
+        }
+
+        //每次连接前都重新解析一下地址, 避免dns变了!
+        parseConnectAddress();
+    }
+    else
+    {
+        _fd = createSocket(false, false, isConnectIPv6());
+
+        _isConnTimeout = false;
+
+         _epollInfo = _epoller->createEpollInfo(_fd);
+
+        _connTimerId = _epoller->postDelayed(_connTimeout, std::bind(&TC_Transceiver::checkConnectTimeout, this));
+
+        _proxyInfo = _createSocketCallback(this);
+        if(_proxyInfo)
+        {
+            _desc = _proxyInfo->getEndpoint().toString();
+        }
+
+        //每次连接前都重新解析一下地址, 避免dns变了!
+        parseConnectAddress();
+
+        bool bConnected = doConnect(_fd, _serverAddr.first.get(), _serverAddr.second);
+        if(bConnected)
+        {
+            setConnected();
+        }
+        else
+        {
+            _connStatus     = TC_Transceiver::eConnecting;
+        }
+    }
+
+    //设置套接口选项
+    for(size_t i=0; i< _socketOpts.size(); ++i)
+    {
+        setsockopt(_fd,_socketOpts[i].level,_socketOpts[i].optname, (const char*)_socketOpts[i].optval,_socketOpts[i].optlen);
+    }
+}
+
+void TC_Transceiver::checkConnectTimeout()
+{
+    if(_connStatus != eConnected)
+    {
+        _isConnTimeout = true;
+        THROW_ERROR(TC_Transceiver_Exception, CR_ConnectTimeout, "connect timeout, " + _desc);
+    }
+}
+
+void TC_Transceiver::setConnected()
+{
+    if(_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type, "server should not call setConnected, " + _desc);
+    }
+    _connStatus = eConnected;
+
+	if(_proxyInfo)
+	{
+		connectProxy();
+	}
+	else
+	{
+		onSetConnected();
+	}
+}
+
+void TC_Transceiver::onSetConnected()
+{
+    if(_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type, "server should not call onSetConnected, " + _desc);
+    }
+	onConnect();
+
+    _onConnectCallback(this);
+
+    if (!isSSL())
+    {
+        doAuthReq();
+    }
+}
+
+void TC_Transceiver::onConnect()
+{
+    if(_isServer)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_Type, "server should not call onConnect, " + _desc);
+    }
+
+    _epoller->erase(_connTimerId);
+    _connTimerId = 0;
+#if TAF_SSL
+    if (isSSL())
+    {
+	    _openssl = _onOpensslCallback(this);
+        if (!_openssl)
+        {
+            close();
+            return;
+        }
+
+	    _openssl->init(false);
+
+	    _openssl->setReadBufferSize(1024 * 8);
+	    _openssl->setWriteBufferSize(1024 * 8);
+
+	    _openssl->recvBuffer()->setConnection(this);
+
+        int ret = _openssl->doHandshake(_sendBuffer);
+        if (ret != 0)
+        {
+            THROW_ERROR(TC_Transceiver_Exception, CR_SSL, "ssl hande shake failed, " + _desc + ", error:" + _openssl->getErrMsg());
+        }
+
+        // send the encrypt data from write buffer
+        if (!_sendBuffer.empty())
+        {
+	        doRequest();
+        }
+
+        return;
+    }
+#endif
+
+}
+
+void TC_Transceiver::doAuthReq()
+{
+    if (_ep.getAuthType() == TC_Endpoint::AUTH_TYPENONE)
+    {
+        _authState = eAuthSucc;
+        _onRequestCallback(this);
+    }
+    else 
+    {
+        //如果是客户端, 则主动发起鉴权请求
+        shared_ptr<TC_NetWorkBuffer::Buffer> buff = _onClientSendAuthCallback(this);
+
+    #if TAF_SSL
+        if(this->isSSL()) 
+        {
+            int ret = _openssl->write(buff->buffer(), (uint32_t) buff->length(), _sendBuffer);
+            if(ret != 0)
+            {
+                THROW_ERROR(TC_Transceiver_Exception, CR_SSL, "ssl write failed, " + _desc + ", error:" + _openssl->getErrMsg());
+                return ;
+            }
+        }
+        else 
+        {
+            _sendBuffer.addBuffer(buff);
+        }
+
+    #else
+        _sendBuffer.addBuffer(buff);
+    #endif
+
+        doRequest();
+    }    
+}
+
+void TC_Transceiver::connectProxy()
+{
+	assert(_proxyInfo);
+
+	vector<char> buff;
+
+	bool succ = _proxyInfo->sendProxyPacket(buff, _ep);
+    if(!succ)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_PROXY_SEND, "connect to proxy, " + _desc + ", error:" + _proxyInfo->getErrMsg());
+    }
+	_sendBuffer.addBuffer(buff);
+
+	doRequest();
+}
+
+int TC_Transceiver::doCheckProxy(const char *buff, size_t length)
+{
+	if(!_proxyInfo || _proxyInfo->isSuccess())
+		return 0;
+
+	bool succ = _proxyInfo->recvProxyPacket(buff, length);
+    if(!succ)
+    {
+        THROW_ERROR(TC_Transceiver_Exception, CR_PROXY_RECV, "connect to proxy, " + _desc + ", error:" + _proxyInfo->getErrMsg());
+    }
+
+	if(!_proxyInfo->isSuccess())
+	{
+		connectProxy();
+	}
+	else
+	{
+		onSetConnected();
+	}
+
+	return 1;
+}
+
+void TC_Transceiver::udpClose()
+{
+    if (_ep.isUdp())
+    {   
+        _epoller->releaseEpollInfo(_epollInfo);
+
+        _epollInfo.reset();
+
+        TC_Port::closeSocket(_fd);
+
+        _fd = -1;
+
+        _connStatus = eUnconnected;
+
+        _sendBuffer.clearBuffers();
+
+        _recvBuffer.clearBuffers();
+    }
+}
+
+void TC_Transceiver::close()
+{
+//	LOG_CONSOLE_DEBUG << this << endl;
+    if(!isValid()) return;
+
+    if(_ep.isTcp())
+    {
+        tcpClose(false, CR_ACTIVE, "active call");
+    }
+    else
+    {
+        udpClose();
+    }
+}
+
+void TC_Transceiver::tcpClose(bool deconstructor, CloseReason reason, const string &err)
+{
+    if(_ep.isTcp() && isValid())
+    {
+#if TAF_SSL
+        if (_openssl)
+        {
+            _openssl->release();
+            _openssl.reset();
+        }
+#endif
+
+//LOG_CONSOLE_DEBUG << this << ", " << _fd << ", " << reason << ", " << err << ", " << deconstructor << endl;
+
+		_epoller->releaseEpollInfo(_epollInfo);
+
+        _epollInfo.reset();
+
+        TC_Port::closeSocket(_fd);
+
+        _fd = -1;
+
+        _connStatus = eUnconnected;
+
+        _sendBuffer.clearBuffers();
+
+        _recvBuffer.clearBuffers();
+
+        _authState = eAuthInit;
+
+        if(!deconstructor)
+        {
+            //注意必须放在最后, 主要避免_onCloseCallback里面析构了链接, 从而导致又进入tcpClose
+            //放在最后就不会有问题了, 因为不会再进入这个函数
+            _onCloseCallback(this, reason, err);
+        }
+    }
+}
+
+void TC_Transceiver::doRequest()
+{
+	if(!isValid()) return ;
+
+    checkConnect();
+
+	//buf不为空,先发送buffer的内容
+    while(!_sendBuffer.empty())
+    {
+    	auto data = _sendBuffer.getBufferPointer();
+    	assert(data.first != NULL && data.second != 0);
+
+        int iRet = this->send(data.first, (uint32_t) data.second, 0);
+
+        if (iRet <= 0)
+        {
+            return;
+        }
+
+	    _sendBuffer.moveHeader(iRet);
+    }
+
+    if(_sendBuffer.empty()) 
+    {
+        _onRequestCallback(this);
+    }
+}
+
+TC_Transceiver::ReturnStatus TC_Transceiver::sendRequest(const shared_ptr<TC_NetWorkBuffer::Buffer> &buff, const TC_Socket::addr_type& addr)
+{
+//	LOG_CONSOLE_DEBUG << buff->length() << endl;
+
+	//空数据 直接返回成功
+	if(buff->empty()) {
+		return eRetOk;
+	}
+
+	// assert(_sendBuffer.empty());
+	//buf不为空, 表示之前的数据还没发送完, 直接返回失败, 等buffer可写了,epoll会通知写事件
+	if(!_sendBuffer.empty()) {
+		//不应该运行到这里
+		return eRetNotSend;
+	}
+
+	if(eConnected != _connStatus)
+	{
+		return eRetNotSend;
+	}
+
+	if(_proxyInfo && !_proxyInfo->isSuccess()) {
+		return eRetNotSend;
+	}
+
+    if (_ep.isTcp() && _ep.getAuthType() == TC_Endpoint::AUTH_TYPELOCAL && _authState != eAuthSucc)
+	{
+#if TAF_SSL
+		if (isSSL() && !_openssl)
+        {
+            return eRetNotSend;
+        }
+#endif
+		return eRetNotSend; // 需要鉴权但还没通过,不能发送非认证消息
+	}
+
+#if TAF_SSL
+	// 握手数据已加密,直接发送,会话数据需加密
+	if (isSSL())
+	{
+		if(!_openssl->isHandshaked()) {
+			return eRetNotSend;
+		}
+
+		int ret = _openssl->write(buff->buffer(), (uint32_t) buff->length(), _sendBuffer);
+		if(ret != 0)
+		{
+            close();
+			return eRetError;
+		}
+
+		buff->clear();
+	}
+    else
+    {
+        _sendBuffer.addBuffer(buff);
+    }
+#else
+	_sendBuffer.addBuffer(buff);
+#endif
+
+//	LOG_CONSOLE_DEBUG << _sendBuffer.getBufferLength() << endl;
+
+	_lastAddr = addr;
+	do
+	{
+		auto data = _sendBuffer.getBufferPointer();
+
+		int iRet = this->send(data.first, (uint32_t) data.second, 0);
+		if(iRet < 0)
+		{
+			if(!isValid())
+			{
+				_sendBuffer.clearBuffers();
+				return eRetError;
+			}
+			else
+			{
+                return eRetFull;
+            }
+		}
+
+		_sendBuffer.moveHeader(iRet);
+//		assert(iRet != 0);
+	}
+	while(!_sendBuffer.empty());
+
+	return eRetOk;
+}
+
+
+void TC_Transceiver::doAuthCheck(TC_NetWorkBuffer *buff)
+{
+    if (!buff->empty() && _ep.isTcp() && _ep.getAuthType() == TC_Endpoint::AUTH_TYPELOCAL && _authState != eAuthSucc)
+	{
+        TC_NetWorkBuffer::PACKET_TYPE type; 
+
+        if(_isServer)
+        {
+            //验证鉴权
+            auto ret = _onServerVerifyAuthCallback(*buff, this);
+
+            type = ret.first;
+
+            if(type == TC_NetWorkBuffer::PACKET_FULL)
+            {
+                _authState = eAuthSucc;
+                //服务器端, 鉴权通过, 可以响应包
+                sendRequest(ret.second, _serverAddr);
+            }
+        }
+        else
+        {
+            type = _onClientVerifyAuthCallback(*buff, this);
+
+            if(type == TC_NetWorkBuffer::PACKET_FULL)
+            {
+                _authState = eAuthSucc;
+                //客户端, 鉴权通过可以发送业务包了
+                _onRequestCallback(this);
+            }
+        }
+        
+		if(type == TC_NetWorkBuffer::PACKET_ERR)
+		{
+            THROW_ERROR(TC_Transceiver_Exception, CR_PROTOCOL, "[TC_Transceiver::doProtocolAnalysis, auth error]");
+		}
+	}
+}
+
+int TC_Transceiver::doProtocolAnalysis(TC_NetWorkBuffer *buff)
+{
+    doAuthCheck(buff);
+
+    TC_NetWorkBuffer::PACKET_TYPE ret;
+
+    int packetCount = 0;
+
+    try
+    {
+        do
+        {
+            ret = _onParserCallback(*buff, this);
+
+            if(ret == TC_NetWorkBuffer::PACKET_FULL || ret == TC_NetWorkBuffer::PACKET_FULL_CLOSE)
+            {
+                ++packetCount;
+            }
+
+            if(ret == TC_NetWorkBuffer::PACKET_FULL_CLOSE) {
+            	//full close模式下, 需要关闭连接
+	            tcpClose(false, CR_PROTOCOL, "protocol full close");
+            }
+
+            if(_onCompletePackageCallback) {
+            	//收到一个完整的包
+	            _onCompletePackageCallback(this);
+            }
+        }
+        while (ret == TC_NetWorkBuffer::PACKET_FULL);
+    }
+    catch (exception & ex) {
+        THROW_ERROR(TC_Transceiver_Exception, CR_PROTOCOL, "parser decode error:" + string(ex.what()) + "]");
+    }
+    catch (...) {
+        THROW_ERROR(TC_Transceiver_Exception, CR_PROTOCOL, "parser decode error");
+    }
+
+    if (ret == TC_NetWorkBuffer::PACKET_ERR)
+    {
+        string err = "parser decode error, " + _desc;
+        tcpClose(false, CR_PROTOCOL, err);
+        throw TC_Transceiver_Exception(err);
+    }
+
+    return packetCount;
+}
+
+//////////////////////////////////////////////////////////
+TC_TCPTransceiver::TC_TCPTransceiver(TC_Epoller* epoller,  const TC_Endpoint &ep)
+: TC_Transceiver(epoller, ep)
+{
+	assert(epoller);
+}
+
+//不同的内存分配机制
+#if 0
+void TC_TCPTransceiver::doResponse()
+{
+    checkConnect();
+
+	int iRet = 0;
+
+    int packetCount = 0;
+	do
+    {
+	    char buff[BUFFER_SIZE];
+
+        if ((iRet = this->recv((void*)buff, BUFFER_SIZE, 0)) > 0)
+	    {
+            int check = doCheckProxy(buff, iRet);
+            if(check != 0)
+		    {
+                _recvBuffer.clearBuffers();
+		    	return;
+		    }
+
+            _recvBuffer.addBuffer(buff, iRet);
+
+            //解析协议
+            packetCount += doProtocolAnalysis(&_recvBuffer);
+
+            //收包太多了, 中断一下, 释放线程给send等
+            if (packetCount >= 2000 && isValid())
+            {
+	            _epoller->mod(_epollInfo, EPOLLIN | EPOLLOUT);
+                break;
+            }
+
+            //接收的数据小于buffer大小, 内核会再次通知你
+            if(iRet < BUFFER_SIZE)
+            {
+                break;
+            }
+	    }
+    }
+    while (iRet>0);
+}
+
+#else
+void TC_TCPTransceiver::doResponse()
+{
+    checkConnect();
+
+	int iRet = 0;
+
+    int packetCount = 0;
+	do
+    {
+       	auto data = _recvBuffer.getOrCreateBuffer(BUFFER_SIZE/8, BUFFER_SIZE);
+
+       	uint32_t left = (uint32_t)data->left();
+
+	    if ((iRet = this->recv((void*)data->free(), left, 0)) > 0)
+	    {
+		    int check = doCheckProxy(data->free(), iRet);
+            if(check != 0)
+		    {
+                _recvBuffer.clearBuffers();
+		    	return;
+		    }
+
+            data->addWriteIdx(iRet);
+
+            _recvBuffer.addLength(iRet);
+
+            //解析协议
+            packetCount += doProtocolAnalysis(&_recvBuffer);
+
+            //收包太多了, 中断一下, 释放线程给send等
+            if (packetCount >= 2000 && isValid())
+            {
+				_epollInfo->mod(EPOLLIN | EPOLLOUT);
+//                _epoller->mod(_epollInfo, EPOLLIN | EPOLLOUT);
+                break;
+            }
+
+            //接收的数据小于buffer大小, 内核会再次通知你
+           if(iRet < (int)left)
+            {
+                break;
+            }
+	    }
+    }
+    while (iRet>0);
+}
+
+#endif
+
+// Send raw bytes on a connected TCP socket.
+// Returns the byte count reported by ::send (may be less than len), or -1
+// when the transceiver is not in the connected state. Throws
+// TC_Transceiver_Exception(CR_SEND) on a non-retryable socket error.
+int TC_TCPTransceiver::send(const void* buf, uint32_t len, uint32_t flag)
+{
+    // Data can only flow while the connection is fully established.
+    if (_connStatus != eConnected)
+    {
+        return -1;
+    }
+
+    const int bytesSent = ::send(_fd, (const char*)buf, len, flag);
+
+    if (bytesSent < 0)
+    {
+        if (!TC_Socket::isPending())
+        {
+            // Hard failure (not EAGAIN/EWOULDBLOCK): surface it to the caller.
+            THROW_ERROR(TC_Transceiver_Exception, CR_SEND, "TC_TCPTransceiver::send, " + _desc + ", fd:" + TC_Common::tostr(_fd));
+        }
+#if TARGET_PLATFORM_WINDOWS
+        // Retryable on Windows: re-arm EPOLLOUT so we learn when writable again.
+        _epollInfo->mod(EPOLLIN | EPOLLOUT);
+#endif
+    }
+
+    return bytesSent;
+}
+
+// Receive raw bytes from a connected TCP socket.
+// Returns the ::recv result; throws TC_Transceiver_Exception(CR_RECV) when
+// the peer closed the connection (0) or a non-retryable error occurred.
+// Returns -1 when the transceiver is not in the connected state.
+int TC_TCPTransceiver::recv(void* buf, uint32_t len, uint32_t flag)
+{
+    // Data can only flow while the connection is fully established.
+    if (_connStatus != eConnected)
+    {
+        return -1;
+    }
+
+    const int bytesRead = ::recv(_fd, (char*)buf, len, flag);
+
+    if (bytesRead <= 0)
+    {
+        if (bytesRead == 0 || !TC_Socket::isPending())
+        {
+            // Peer closed, or hard failure: report with the system error text.
+            int nerr = TC_Exception::getSystemCode();
+            string err = "recv error, errno:" + TC_Common::tostr(nerr) + "," + TC_Exception::parseError(nerr);
+            THROW_ERROR(TC_Transceiver_Exception, CR_RECV, err + ", " + _desc + ", fd:" + TC_Common::tostr(_fd));
+        }
+#if TARGET_PLATFORM_WINDOWS
+        // Retryable (pending) on Windows: re-arm the epoll events.
+        _epollInfo->mod(EPOLLIN | EPOLLOUT);
+#endif
+    }
+
+    return bytesRead;
+}
+/////////////////////////////////////////////////////////////////
+#if TAF_SSL
+
+// TLS-over-TCP transceiver: reuses the TCP transport as-is; the TLS work
+// happens in doResponse() through the _openssl member.
+TC_SSLTransceiver::TC_SSLTransceiver(TC_Epoller* epoller, const TC_Endpoint &ep)
+: TC_TCPTransceiver(epoller, ep)
+{
+}
+
+#if 0
+
+// NOTE(review): stack-buffer variant of the TLS read path; this is the
+// disabled (#if 0) twin of the version below, kept for reference. Unlike
+// the live twin it checks the doRequest() return value.
+void TC_SSLTransceiver::doResponse()
+{
+	checkConnect();
+
+	int iRet = 0;
+
+	int packetCount = 0;
+	do
+	{
+	    char buff[BUFFER_SIZE] = {0x00};
+	    if ((iRet = this->recv(buff, BUFFER_SIZE, 0)) > 0)
+		{
+		    int check = doCheckProxy(buff, iRet);
+			if(check != 0)
+			{
+				return;
+			}
+
+			const bool preHandshake = _openssl->isHandshaked();
+
+            // Feed ciphertext to OpenSSL; any handshake bytes that must go
+            // back to the peer are queued into _sendBuffer.
+            int ret = _openssl->read(buff, iRet, _sendBuffer);
+			if (ret != 0)
+			{
+//            	LOG_CONSOLE_DEBUG << "ret:" << ret << ", " << _openssl->getErrMsg() << endl;
+				THROW_ERROR(TC_Transceiver_Exception, CR_SSL, "[TC_SSLTransceiver::doResponse, SSL_read handshake failed: " + _desc + ", info: " + _openssl->getErrMsg() + "]");
+			}
+			else if(!_sendBuffer.empty())
+			{
+//				LOG_CONSOLE_DEBUG << "[Transceiver::doResponse SSL_read prehandshake:" << preHandshake << ", handshake:" << _openssl->isHandshaked() << ", send handshake len:" << _sendBuffer.getBufferLength() << endl;
+				int ret = doRequest();
+
+				if(ret < 0)
+				{
+					// doRequest failed and closed the fd
+					if (!isValid())
+					{
+						THROW_ERROR(TC_Transceiver_Exception, CR_SSL, "[TC_SSLTransceiver::doResponse, ssl doRequest failed: " + _desc + ", info: " + _openssl->getErrMsg() + "]");
+					}
+					else
+					{
+						return;
+					}
+				}
+			}
+
+//			LOG_CONSOLE_DEBUG << "recv length:" << iRet << ", preHandshake:" << preHandshake << endl;
+
+			if (!_openssl->isHandshaked())
+			{
+//				LOG_CONSOLE_DEBUG << "[Transceiver::doResponse not handshake, prehandshake:" << preHandshake << ", handshake:" << _openssl->isHandshaked() << endl;
+				return;
+			}
+
+			if (!preHandshake)
+			{
+				if(_isServer)
+				{
+					_onRequestCallback(this);
+				}
+				else
+				{
+					// Handshake just completed: the client sends the auth request right away.
+					doAuthReq();
+					// doAuthReq closes the fd on failure; check the fd is still valid here.
+					if (!isValid())
+					{
+						THROW_ERROR(TC_Transceiver_Exception, CR_SSL,
+								"[TC_SSLTransceiver::doResponse, doAuthReq failed: " + _desc + ", info: " +
+								_openssl->getErrMsg() + "]");
+					}
+					else
+					{
+//						LOG_CONSOLE_DEBUG << "[Transceiver::doResponse prehandshake:" << preHandshake << ", handshake:" << _openssl->isHandshaked() << endl;
+					}
+				}
+			}
+
+			TC_NetWorkBuffer *rbuf = _openssl->recvBuffer();
+
+			// Parse the protocol from the decrypted buffer.
+			packetCount += doProtocolAnalysis(rbuf);
+
+			// Too many packets in one pass: break and re-arm epoll so the
+			// thread is freed up for send() etc.
+			if (packetCount >= 1000 && isValid())
+			{
+				_epoller->mod(_epollInfo, EPOLLIN | EPOLLOUT);
+				break;
+			}
+
+			// Received less than the buffer size: the kernel will notify again.
+            if(iRet < BUFFER_SIZE)
+			{
+				break;
+			}
+		}
+	}
+	while (iRet>0);
+}
+
+#else
+
+// TLS read path (live variant): ciphertext is received into a scratch chunk
+// of _recvBuffer, decrypted through _openssl into OpenSSL's own recv buffer,
+// and the plaintext is then run through the protocol parser. The scratch
+// chunk's write index is never advanced — the raw bytes live only for the
+// duration of the _openssl->read() call.
+void TC_SSLTransceiver::doResponse()
+{
+    checkConnect();
+
+	int iRet = 0;
+
+    int packetCount = 0;
+	do
+    {
+       	auto data = _recvBuffer.getOrCreateBuffer(BUFFER_SIZE/8, BUFFER_SIZE);
+
+       	uint32_t left = (uint32_t)data->left();
+
+	    if ((iRet = this->recv((void*)data->free(), left, 0)) > 0)
+	    {
+		    int check = doCheckProxy(data->free(), iRet);
+
+            if(check != 0)
+		    {
+		    	return;
+		    }
+
+            const bool preHandshake = _openssl->isHandshaked();
+
+            // Feed ciphertext to OpenSSL; any handshake bytes that must go
+            // back to the peer are queued into _sendBuffer.
+			int ret = _openssl->read(data->free(), iRet, _sendBuffer);
+
+			if (ret != 0)
+            {
+//            	LOG_CONSOLE_DEBUG << "ret:" << ret << ", " << _openssl->getErrMsg() << endl;
+                THROW_ERROR(TC_Transceiver_Exception, CR_SSL, "[TC_SSLTransceiver::doResponse, SSL_read handshake failed: " + _desc + ", info: " + _openssl->getErrMsg() + "]");
+            }
+            else if(!_sendBuffer.empty())
+            {
+				// NOTE(review): the doRequest() return value is ignored here,
+				// unlike the disabled twin of this function which checks it
+				// together with isValid() — confirm this is intentional.
+				doRequest();
+            }
+
+            if (!_openssl->isHandshaked())
+			{
+//				LOG_CONSOLE_DEBUG << "[Transceiver::doResponse not handshake, prehandshake:" << preHandshake << ", handshake:" << _openssl->isHandshaked() << endl;
+				return;
+			}
+
+            if (!preHandshake)
+            {
+                if(_isServer)
+                {
+                    _onRequestCallback(this);
+                }
+                else
+				{
+					// Handshake just completed: the client sends the auth request right away.
+					doAuthReq();
+					// doAuthReq closes the fd on failure; check the fd is still valid here.
+					if (!isValid())
+					{
+						THROW_ERROR(TC_Transceiver_Exception, CR_SSL,
+								"[TC_SSLTransceiver::doResponse, doAuthReq failed: " + _desc + ", info: " +
+								_openssl->getErrMsg() + "]");
+					}
+					else
+					{
+//						LOG_CONSOLE_DEBUG << "[Transceiver::doResponse prehandshake:" << preHandshake << ", handshake:" << _openssl->isHandshaked() << endl;
+					}
+				}
+            }
+
+            TC_NetWorkBuffer *rbuf = _openssl->recvBuffer();
+
+			// Parse the protocol from the decrypted buffer.
+            packetCount += doProtocolAnalysis(rbuf);
+
+            // Too many packets in one pass: break and re-arm epoll so the
+            // thread is freed up for send() etc.
+            if (packetCount >= 1000 && isValid())
+            {
+				_epollInfo->mod(EPOLLIN | EPOLLOUT);
+//                _epoller->mod(_epollInfo, EPOLLIN | EPOLLOUT);
+                break;
+            }
+
+            // Received less than the chunk had room for: kernel will notify again.
+			if(iRet < left)
+			{
+                break;
+            }
+	    }
+    }
+    while (iRet>0);
+}
+
+#endif
+#endif
+
+/////////////////////////////////////////////////////////////////
+// UDP transceiver: datagram transport over the common TC_Transceiver base.
+TC_UDPTransceiver::TC_UDPTransceiver(TC_Epoller* epoller, const TC_Endpoint &ep)
+: TC_Transceiver(epoller, ep)
+{
+}
+
+// No UDP-specific resources; cleanup is handled by the base class.
+TC_UDPTransceiver::~TC_UDPTransceiver()
+{
+}
+
+
+// UDP read path: one datagram per loop iteration. The receive buffer is
+// cleared before each recv, so every datagram is parsed in isolation.
+void TC_UDPTransceiver::doResponse()
+{
+    checkConnect();
+
+    int iRet = 0;
+    int packetCount = 0;
+    do
+    {
+        _recvBuffer.clearBuffers();
+
+		auto data = _recvBuffer.getOrCreateBuffer(_nRecvBufferSize, _nRecvBufferSize);
+
+		uint32_t left = (uint32_t)data->left();
+
+		if ((iRet = this->recv((void *)data->free(), left, 0)) > 0)
+		{
+			data->addWriteIdx(iRet);
+			_recvBuffer.addLength(iRet);
+
+            // Parse the protocol from this datagram.
+            packetCount += doProtocolAnalysis(&_recvBuffer);
+
+//            LOG_CONSOLE_DEBUG << iRet << ", " << packetCount << endl;
+            // Too many packets in one pass: break and re-arm epoll so the
+            // thread is freed up for send() etc.
+            if (packetCount >= 1000 && isValid())
+            {
+				_epollInfo->mod(EPOLLIN | EPOLLOUT);
+//                _epoller->mod(_epollInfo, EPOLLIN | EPOLLOUT);
+                break;
+            }
+        }
+    }
+    while (iRet > 0);
+}
+
+// Send one UDP datagram. As a server, reply to the address of the last
+// received packet (_lastAddr); as a client, send to the configured server
+// address (_serverAddr).
+// Returns len on success (a short ::sendto still counts as the whole
+// datagram sent), 0 on EAGAIN/EWOULDBLOCK, -1 when the fd is invalid, and
+// the raw ::sendto result for any other failure.
+int TC_UDPTransceiver::send(const void* buf, uint32_t len, uint32_t flag)
+{
+    if (!isValid())
+    {
+        return -1;
+    }
+
+    // Pick the peer according to role.
+    const auto& peer = _isServer ? _lastAddr : _serverAddr;
+
+    const int sent = ::sendto(_fd, (const char*)buf, len, flag, peer.first.get(), peer.second);
+
+    if (sent > 0)
+    {
+        // UDP is fire-once: treat any positive result as a fully sent datagram.
+        return len;
+    }
+
+    if (sent < 0 && TC_Socket::isPending())
+    {
+        // EAGAIN: consider nothing sent.
+        return 0;
+    }
+
+    return sent;
+}
+
+// Receive one UDP datagram into buf, capturing the sender's address in
+// _clientAddr. Returns the ::recvfrom result; on the client side a
+// non-retryable error throws TC_Transceiver_Exception(CR_RECV).
+int TC_UDPTransceiver::recv(void* buf, uint32_t len, uint32_t flag)
+{
+    if(!isValid()) return -1;
+
+    // NOTE(review): allocates a fresh sockaddr on every call — could be hoisted.
+    _clientAddr = TC_Socket::createSockAddr(_ep.getHost().c_str());
+
+    int iRet = ::recvfrom(_fd, (char*)buf, len, flag, _clientAddr.first.get(), &_clientAddr.second); //need check from_ip & port
+//    cout << "recv :" << iRet << endl;
+
+//	if(iRet < 0)
+//	{
+//		LOG_CONSOLE_DEBUG << this << ", " << TC_Socket::isPending() << ", " << _isServer << ", recv, fd:" << _fd << ", " << _desc << ", iRet:" << iRet << ", len:" << len << endl;
+//	}
+
+	if(!_isServer)
+	{
+		// Only the client treats a hard error as fatal (it closes and rebuilds
+		// the socket); the server keeps its fd alive.
+		if (iRet < 0 && !TC_Socket::isPending())
+		{
+			THROW_ERROR(TC_Transceiver_Exception, CR_RECV, "TC_UDPTransceiver::udp recv, " + _desc + ", fd:" + TC_Common::tostr(_fd));
+			return 0; // NOTE(review): unreachable if THROW_ERROR throws — confirm macro semantics
+		}
+	}
+
+    return iRet;
+}
+
+/////////////////////////////////////////////////////////////////
+}

Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio