
Automated Testing Cases. (#73)

* tests: insert.

* tests: print assert value.

* tests: cache with datasource.

* tests: dbsource.

* core: fix conf table name bug.

* sql: remove primary key.

* daemons: fix connector path bug.

* daemons: proc absolute path.

* docker: update base image from CentOS to Ubuntu.

* docker: alter listener port.

* Update ubuntu-latest&gcc-4.9.yml

* docker: layered dev.

* dtc: rename dtc to core, rename agent-watchdog to dtc.
Yang Shuang, 1 year ago
Commit 2c501e54a0
51 files changed, 1,478 insertions(+), 235 deletions(-)
  1. .github/workflows/build-and-test.yml (+433, -0)
  2. .github/workflows/ubuntu-18.04&gcc-4.9.yml (+2, -7)
  3. .github/workflows/ubuntu-20.04&gcc-4.9.yml (+2, -7)
  4. .github/workflows/ubuntu-latest&gcc-4.9.yml (+3, -94)
  5. .travis.yml (+1, -1)
  6. conf/dtc.yaml (+4, -4)
  7. dockerfiles/devel/agent.layered.xml (+19, -0)
  8. dockerfiles/devel/agent.single.dtcd.xml (+16, -0)
  9. dockerfiles/devel/dockerfile (+26, -5)
  10. dockerfiles/devel/dtc.dbaddition.s1.yaml (+29, -0)
  11. dockerfiles/devel/dtc.dbaddition.s2.yaml (+34, -0)
  12. dockerfiles/devel/dtc.dbaddition.s3.yaml (+32, -0)
  13. dockerfiles/devel/dtc.dbaddition.yaml (+0, -18)
  14. dockerfiles/devel/dtc.layered.yaml (+34, -18)
  15. dockerfiles/devel/run.sh (+19, -0)
  16. dockerfiles/server/dockerfile (+2, -2)
  17. script/dtcd.sh (+1, -1)
  18. src/agent-watchdog/CMakeLists.txt (+3, -3)
  19. src/agent-watchdog/core_entry.cc (+39, -0)
  20. src/agent-watchdog/core_entry.h (+31, -0)
  21. src/agent-watchdog/daemons.cc (+2, -2)
  22. src/agent-watchdog/main.cc (+33, -7)
  23. src/agent/CMakeLists.txt (+2, -1)
  24. src/complex/CMakeLists.txt (+1, -1)
  25. src/complex/cm_load.cc (+0, -3)
  26. src/connector/CMakeLists.txt (+2, -2)
  27. src/core/CMakeLists.txt (+4, -4)
  28. src/core/data/container_dtcd.cc (+2, -2)
  29. src/core/lib/CMakeLists.txt (+5, -5)
  30. src/core/main.cc (+1, -1)
  31. src/daemons/helper.cc (+16, -1)
  32. src/data_lifecycle/CMakeLists.txt (+3, -3)
  33. src/hwcserver/CMakeLists.txt (+2, -2)
  34. src/libs/common/config/dbconfig.cc (+18, -8)
  35. src/libs/common/connector/connector_client.cc (+7, -7)
  36. src/libs/common/packet/packet_server.cc (+2, -2)
  37. src/libs/stat/stat_dtc.cc (+3, -3)
  38. src/libs/stat/stat_manager.cc (+1, -1)
  39. src/libs/stat/stat_tool.cc (+2, -2)
  40. src/rule/CMakeLists.txt (+2, -1)
  41. src/utils/CMakeLists.txt (+1, -1)
  42. tests/init.s1.sql (+7, -0)
  43. tests/init.s2.sql (+79, -0)
  44. tests/init.s3.sql (+7, -0)
  45. tests/test_agent_cache_only.py (+133, -0)
  46. tests/test_agent_datasource_s1.py (+49, -0)
  47. tests/test_dtcd_cache_only.py (+133, -0)
  48. tests/test_dtcd_cache_only_.py (+0, -16)
  49. tests/test_dtcd_datasource_s1.py (+49, -0)
  50. tests/test_dtcd_datasource_s2.py (+49, -0)
  51. tests/test_dtcd_datasource_s3.py (+133, -0)

+ 433 - 0
.github/workflows/build-and-test.yml

@@ -0,0 +1,433 @@
+name: build-and-test
+
+on: push
+    
+env:
+  CC: gcc-4.9
+  CXX: g++-4.9
+  secret_account: ${{ secrets.DOCKER_USERNAME }}
+
+jobs:        
+  building:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install gcc env
+        run: |
+          echo "deb http://dk.archive.ubuntu.com/ubuntu/ xenial main" | sudo tee -a /etc/apt/sources.list
+          echo "deb http://dk.archive.ubuntu.com/ubuntu/ xenial universe" | sudo tee -a /etc/apt/sources.list
+          sudo apt update
+          sudo apt install gcc-4.9 g++-4.9
+        shell: bash
+        
+      - name: Install dependency
+        run: |
+          sudo apt-get install -y make zlib1g zlib1g-dev bzip2 liblz4-dev libasan0 openssl libmxml-dev
+    
+      - name: cmake project
+        run: |
+          cd ${{github.workspace}}
+          mkdir build
+          cd build
+          cmake ../
+          make
+
+      - name: Login docker hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      
+      - name: Publish devel docker
+        run: |
+          cd ${{github.workspace}}
+          cp build/src/core/core dockerfiles/devel/
+          cp build/src/agent/dtcagent dockerfiles/devel/
+          cp build/src/agent-watchdog/dtc dockerfiles/devel/
+          cp build/src/complex/async-connector dockerfiles/devel/
+          cp build/src/connector/connector dockerfiles/devel/
+          cp build/src/data_lifecycle/data-lifecycle-manager dockerfiles/devel/
+          cp build/src/hwcserver/hwcserver dockerfiles/devel/
+          cp build/src/rule/librule.so dockerfiles/devel/
+          cp src/libs/hsql/libs/libsqlparser.so dockerfiles/devel/
+
+          cd dockerfiles/devel/
+
+          docker build -t ${{ secrets.DOCKER_USERNAME }}/devel:all .
+          docker push ${{ secrets.DOCKER_USERNAME }}/devel:all
+
+          docker build -t ${{ secrets.DOCKER_USERNAME }}/devel:latest .
+          docker push ${{ secrets.DOCKER_USERNAME }}/devel:latest
+
+# Core Junction
+  CORE:
+    needs: building
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "Core Junction."  
+
+# Agent Junction
+  AGENT:
+    needs: building
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "Agent Junction."        
+
+# Layered Junction
+  LAYERED:
+    needs: building
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "Layered Junction."          
+
+# Testing Cache Only
+  test-dtc-cache-only:
+    needs: CORE
+    runs-on: ubuntu-latest
+    services:
+      dtc:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 20015:20015
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name dtc
+        env:
+          DTC_BIN: dtc
+          DTC_ARGV: -c
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install python dependency
+        run: |
+          python -m pip install --upgrade pip
+          pip install pytest
+          pip install pymysql
+
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: "3.7"
+
+      - name: Copy conf files.
+        run: |
+          cd ${{github.workspace}}   
+          sudo cp -f conf/log4cplus.conf /usr/local/etc/ 
+          sudo cp -f dockerfiles/devel/dtc.cacheonly.yaml /usr/local/etc/dtc.yaml
+
+      - name: Run Testing Cases
+        run: |
+          sleep 5s
+          docker ps -a
+          docker logs dtc
+          cd ${{github.workspace}}/tests
+          pytest test_dtcd_cache_only.py     
+
+# Testing Cache Only via Agent.
+  test-agent-cache-only:
+    needs: AGENT
+    if: false
+    runs-on: ubuntu-latest
+    services:
+      dtc:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 20015:20015
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name dtc
+        env:
+          DTC_BIN: dtc
+          DTC_ARGV: -c        
+      agent:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 12001:12001
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name agent
+        env:
+          DTC_BIN: dtcagent
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install python dependency
+        run: |
+          python -m pip install --upgrade pip
+          pip install pytest
+          pip install pymysql
+          sudo apt update
+          sudo apt install -y net-tools
+
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: "3.7"
+
+      - name: Copy conf files.
+        run: |
+          cd ${{github.workspace}}   
+          sudo cp -f conf/log4cplus.conf /usr/local/etc/ 
+          sudo cp -f dockerfiles/devel/dtc.cacheonly.yaml /usr/local/etc/dtc.yaml
+          sudo cp -f dockerfiles/devel/agent.single.dtcd.xml /usr/local/etc/agent.xml
+
+      - name: Run Testing Cases
+        run: |
+          sleep 5s
+          docker ps -a
+          netstat -ntpl
+          docker logs agent
+          docker logs dtc
+          cd ${{github.workspace}}/tests
+          pytest test_agent_cache_only.py               
+
+# Testing Layered Storage.
+  test-agent-layered:
+    needs: LAYERED
+    runs-on: ubuntu-latest
+    services:
+      mysql:
+        image: mysql:5.7
+        ports:
+          - 3306:3306
+        env: 
+          MYSQL_ROOT_PASSWORD: 123456
+      agent:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 12001:12001
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name agent
+        env:
+          DTC_BIN: dtc
+          DTC_ARGV: -ayc
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install python dependency
+        run: |
+          python -m pip install --upgrade pip
+          pip install pytest
+          pip install pymysql
+
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: "3.7"
+
+      - name: Init db table
+        run: |
+          sleep 5s
+          mysql -h127.0.0.1 -uroot -p123456 -e "create database layer2;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "source tests/init.s1.sql;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "show tables;"    
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "desc opensource;"  
+          mysql -h127.0.0.1 -uroot -p123456 -e "create database layer3;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer3 -e "source tests/init.s1.sql;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer3 -e "show tables;"    
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer3 -e "desc opensource;"            
+
+      - name: Copy conf files.
+        run: |
+          cd ${{github.workspace}}   
+          sudo cp -f conf/log4cplus.conf /usr/local/etc/ 
+          sudo cp -f dockerfiles/devel/dtc.layered.yaml /usr/local/etc/dtc.yaml
+          sudo cp -f dockerfiles/devel/agent.layered.xml /usr/local/etc/agent.xml
+
+      - name: Run Testing Cases
+        run: |
+          sleep 5s
+          docker ps -a
+          netstat -ntpl
+          docker logs agent
+          cd ${{github.workspace}}/tests
+          echo "show databases;"
+          mysql -h127.0.0.1 -P12001 -uroot -p123456 -e "show databases;"
+          echo "use layer2;"
+          mysql -h127.0.0.1 -P12001 -uroot -p123456 -Dlayer2 -e "use layer2;"
+          echo "show tables;"
+          mysql -h127.0.0.1 -P12001 -uroot -p123456 -Dlayer2 -e "show tables;"
+#          pytest test_agent_cache_only.py           
+
+# Testing Cache with Datasource Scene 1 (Single DB and Single Table)
+  test-dtc-ds-S-db-S-table:
+    needs: CORE
+    runs-on: ubuntu-latest
+    services:
+      mysql:
+        image: mysql:5.7
+        ports:
+          - 3306:3306
+        env: 
+          MYSQL_ROOT_PASSWORD: 123456
+      dtc:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 20015:20015
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name dtc
+        env:
+          DTC_BIN: dtc
+          DTC_ARGV: -c
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install python dependency
+        run: |
+          python -m pip install --upgrade pip
+          pip install pytest
+          pip install pymysql
+
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: "3.7"
+
+      - name: Init db table
+        run: |
+          sleep 5s
+          mysql -h127.0.0.1 -uroot -p123456 -e "create database layer2;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "source tests/init.s1.sql;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "show tables;"    
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "desc opensource;"  
+
+      - name: Copy conf files.
+        run: |
+          cd ${{github.workspace}}   
+          sudo cp -f conf/log4cplus.conf /usr/local/etc/ 
+          sudo cp -f dockerfiles/devel/dtc.dbaddition.s1.yaml /usr/local/etc/dtc.yaml
+
+      - name: Run Testing Cases
+        run: |
+          sleep 5s
+          docker ps -a
+          docker logs dtc
+          cd ${{github.workspace}}/tests
+          pytest test_dtcd_datasource_s1.py               
+
+# Testing Cache with Datasource Scene 1 (Single DB and Single Table) via Agent
+  test-agent-ds-S-db-S-table:
+    needs: AGENT
+    if: false
+    runs-on: ubuntu-latest
+    services:
+      mysql:
+        image: mysql:5.7
+        ports:
+          - 3306:3306
+        env: 
+          MYSQL_ROOT_PASSWORD: 123456
+      agent:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 12001:12001
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name agent
+        env:
+          DTC_BIN: dtcagent       
+      dtc:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 20015:20015
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name dtc
+        env:
+          DTC_BIN: dtc
+          DTC_ARGV: -c
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install python dependency
+        run: |
+          python -m pip install --upgrade pip
+          pip install pytest
+          pip install pymysql
+
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: "3.7"
+
+      - name: Init db table
+        run: |
+          sleep 5s
+          mysql -h127.0.0.1 -uroot -p123456 -e "create database layer2;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "source tests/init.s1.sql;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "show tables;"    
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "desc opensource;"  
+
+      - name: Copy conf files.
+        run: |
+          cd ${{github.workspace}}   
+          sudo cp -f conf/log4cplus.conf /usr/local/etc/ 
+          sudo cp -f dockerfiles/devel/dtc.dbaddition.s1.yaml /usr/local/etc/dtc.yaml
+          sudo cp -f dockerfiles/devel/agent.single.dtcd.xml /usr/local/etc/agent.xml          
+
+      - name: Run Testing Cases
+        run: |
+          sleep 5s
+          docker ps -a
+          docker logs agent
+          docker logs dtc
+          cd ${{github.workspace}}/tests
+          pytest test_agent_datasource_s1.py                         
+
+# Testing Cache with Datasource Scene 2 (Single DB and Multi Table)
+  test-dtc-ds-S-db-M-table:
+    if: false
+    needs: CORE
+    runs-on: ubuntu-latest
+    services:
+      mysql:
+        image: mysql:5.7
+        ports:
+          - 3306:3306
+        env: 
+          MYSQL_ROOT_PASSWORD: 123456
+      dtc:
+        image: docker.io/kfysck/devel:all
+        ports:
+          - 20015:20015
+        volumes:
+          - /usr/local/etc:/etc/dtc/
+        options: --name dtc
+    steps:
+      - uses: actions/checkout@v3
+      
+      - name: Install python dependency
+        run: |
+          python -m pip install --upgrade pip
+          pip install pytest
+          pip install pymysql
+
+      - name: Set up Python 3.7
+        uses: actions/setup-python@v2
+        with:
+          python-version: "3.7"
+
+      - name: Init db table
+        run: |
+          sleep 5s
+          mysql -h127.0.0.1 -uroot -p123456 -e "create database layer2;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "source tests/init.s2.sql;"
+          mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "show tables;"    
+
+      - name: Copy conf files.
+        run: |
+          cd ${{github.workspace}}   
+          sudo cp -f conf/log4cplus.conf /usr/local/etc/ 
+          sudo cp -f dockerfiles/devel/dtc.dbaddition.s2.yaml /usr/local/etc/dtc.yaml
+
+      - name: Run Testing Cases
+        run: |
+          sleep 5s
+          docker ps -a
+          docker logs dtc
+          cd ${{github.workspace}}/tests
+          pytest test_dtcd_datasource_s2.py                     
+

+ 2 - 7
.github/workflows/ubuntu-18.04&gcc-4.9.yml

@@ -1,18 +1,13 @@
 name: ubuntu-18.04&gcc-4.9
 
-on:
-  push:
-  pull_request:
+on: pull_request
     
 env:
   CC: gcc-4.9
   CXX: g++-4.9
 
 jobs:
-  build:
-    # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.
-    # You can convert this to a matrix build if you need cross-platform coverage.
-    # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
+  build-and-test:
     runs-on: ubuntu-18.04
     
     steps:

+ 2 - 7
.github/workflows/ubuntu-20.04&gcc-4.9.yml

@@ -1,18 +1,13 @@
 name: ubuntu-20.04&gcc-4.9
 
-on:
-  push:
-  pull_request:
+on: pull_request
     
 env:
   CC: gcc-4.9
   CXX: g++-4.9
 
 jobs:
-  build:
-    # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.
-    # You can convert this to a matrix build if you need cross-platform coverage.
-    # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
+  build-and-test:
     runs-on: ubuntu-20.04
     
     steps:

+ 3 - 94
.github/workflows/ubuntu-latest&gcc-4.9.yml

@@ -1,30 +1,15 @@
 name: ubuntu-latest&gcc-4.9
 
-on:
-  push:
-  pull_request:
+on: pull_request
     
 env:
   CC: gcc-4.9
   CXX: g++-4.9
-  secret_account: ${{ secrets.DOCKER_USERNAME }}
 
-jobs:        
-  build:
-    # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.
-    # You can convert this to a matrix build if you need cross-platform coverage.
-    # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
+jobs:
+  build-and-test:
     runs-on: ubuntu-latest
     
-    services:
-      mysql:
-        image: mysql
-        ports:
-          - 3306:3306
-        env: 
-          MYSQL_ROOT_PASSWORD: 123456
-        options: --name "mysql"
-    
     steps:
     - uses: actions/checkout@v3
     
@@ -35,18 +20,10 @@ jobs:
         sudo apt update
         sudo apt install gcc-4.9 g++-4.9
       shell: bash
-
-    - name: Set up Python 3.7
-      uses: actions/setup-python@v2
-      with:
-          python-version: "3.7"
       
     - name: install dependency
       run: |
         sudo apt-get install -y make zlib1g zlib1g-dev bzip2 liblz4-dev libasan0 openssl libmxml-dev
-        python -m pip install --upgrade pip
-        pip install pytest
-        pip install pymysql
       
     - name: cmake project
       run: |
@@ -55,71 +32,3 @@ jobs:
         cd build
         cmake ../
         make
-
-    - name: login docker hub
-      if: ${{ env.secret_account != '' }}
-      uses: docker/login-action@v2
-      with:
-        username: ${{ secrets.DOCKER_USERNAME }}
-        password: ${{ secrets.DOCKER_PASSWORD }}
-    
-    - name: publish devel CacheOnly docker
-      if: ${{ env.secret_account != '' }}
-      run: |
-        cd ${{github.workspace}}
-        cp dockerfiles/devel/dtc.cacheonly.yaml dockerfiles/devel/dtc.yaml
-        cp conf/log4cplus.conf dockerfiles/devel/
-        cp build/src/core/dtcd dockerfiles/devel/
-        cp build/src/agent/dtcagent dockerfiles/devel/
-        cp build/src/agent-watchdog/agent-watchdog dockerfiles/devel/
-        cp build/src/complex/async-connector dockerfiles/devel/
-        cp build/src/connector/connector dockerfiles/devel/
-        cp build/src/data_lifecycle/data-lifecycle-manager dockerfiles/devel/
-        cp build/src/hwcserver/hwcserver dockerfiles/devel/
-
-        cd dockerfiles/devel/
-
-        docker build -t ${{ secrets.DOCKER_USERNAME }}/devel:CacheOnly .
-        docker push ${{ secrets.DOCKER_USERNAME }}/devel:CacheOnly
-
-    - name: Run devel CacheOnly docker
-      if: ${{ env.secret_account != '' }}
-      run: |
-        docker run --name CacheOnly -p 20015:20015 -d ${{ secrets.DOCKER_USERNAME }}/devel:CacheOnly /usr/local/dtc/dtcd -d
-
-    - name: publish devel Cache with Database docker
-      if: ${{ env.secret_account != '' }}
-      run: |
-        cd ${{github.workspace}}
-        cp -f dockerfiles/devel/dtc.dbaddition.yaml dockerfiles/devel/dtc.yaml
-        cd dockerfiles/devel/
-
-        docker build -t ${{ secrets.DOCKER_USERNAME }}/devel:CacheWithDB .
-        docker push ${{ secrets.DOCKER_USERNAME }}/devel:CacheWithDB
-
-    - name: publish devel Layered docker
-      if: ${{ env.secret_account != '' }}
-      run: |
-        cd ${{github.workspace}}
-        cp -f dockerfiles/devel/dtc.layered.yaml dockerfiles/devel/dtc.yaml
-        cd dockerfiles/devel/
-
-        docker build -t ${{ secrets.DOCKER_USERNAME }}/devel:Layered .
-        docker push ${{ secrets.DOCKER_USERNAME }}/devel:Layered
-
-    - name: init db table
-      if: ${{ env.secret_account != '' }}
-      run: |
-        mysql -h127.0.0.1 -uroot -p123456 -e "create database layer2;"
-        mysql -h127.0.0.1 -uroot -p123456 -e "show databases;"
-        mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "source ${{github.workspace}}/script/init_table.sql;"
-        mysql -h127.0.0.1 -uroot -p123456 -Dlayer2 -e "show tables;"
-
-    - name: Testing CacheOnly
-      if: ${{ env.secret_account != '' }}
-      run: |
-        cd ${{github.workspace}}/tests
-        docker ps -a
-        mysql -h127.0.0.1 -P20015 -uroot -proot -e "insert into opensource(uid, name) values(1, 'hello') where uid = 1;"
-        mysql -h127.0.0.1 -P20015 -uroot -proot -e "select uid, name from opensouce where uid = 1;"
-        pytest

+ 1 - 1
.travis.yml

@@ -35,7 +35,7 @@ script:
 after_success:
 - cd ../
 - cp conf/* dockerfiles/server/
-- cp build/src/core/dtcd dockerfiles/server/
+- cp build/src/core/core dockerfiles/server/
 - cp build/src/agent/agent-main dockerfiles/agent/
 - cp conf/agent.xml dockerfiles/agent/
 - cp build/src/connector/connector dockerfiles/server/

+ 4 - 4
conf/dtc.yaml

@@ -18,7 +18,7 @@ data_lifecycle:
   rule.cron: '00 01 * * * ?'
 
 connection: &connection
-  addr: 127.0.0.1:3306
+  addr: 127.0.0.1:3307
   user: username
   pwd: password 
 
@@ -34,7 +34,7 @@ primary:
       - {name: age, type: signed, size: 4}
   hot:
     logic:
-      {db: &db L2, table: *table, connection: *connection}
+      {db: &db layer2, table: *table, connection: *connection}
     real:
       - {addr: mysql-01.local:3306, user: username, pwd: password, db: {prefix: &prefix [*db, _], start: 0, last: 10}}
       - {addr: mysql-02.local:3306, user: username, pwd: password, db: {prefix: *prefix, start: 11, last: 25}}
@@ -44,9 +44,9 @@ primary:
       table: {prefix: [*table, _], start: 0, last: 9}
   full:
     logic:
-      {db: L3, table: *table, connection: *connection}
+      {db: layer3, table: *table, connection: *connection}
     real:
-      - {addr: 127.0.0.1:3306, user: username, pwd: password, db: L3}
+      - {addr: 127.0.0.1:3306, user: username, pwd: password, db: layer3}
 
 extension:
   - logic:

+ 19 - 0
dockerfiles/devel/agent.layered.xml

@@ -0,0 +1,19 @@
+<? xml version="1.0" encoding="utf-8" ?>
+<ALL>
+  <VERSION value="2"/>
+  <AGENT_CONFIG AgentId="1"/>
+  <BUSINESS_MODULE>
+    <MODULE Mid="1319" Name="test1" AccessToken="000013192869b7fcc3f362a97f72c0908a92cb6d" ListenOn="0.0.0.0:12001" Backlog="500" Client_Connections="900"
+        Preconnect="true" Server_Connections="1" Hash="chash" Timeout="3000" ReplicaEnable="true" ModuleIDC="LF" MainReport="false" InstanceReport="false" AutoRemoveReplica="true" TopPercentileEnable="false" TopPercentileDomain="127.0.0.1" TopPercentilePort="20020">
+      <CACHESHARDING  Sid="293" ShardingReplicaEnable="true" ShardingName="test">
+        <INSTANCE idc="LF" Role="replica" Enable="false" Addr="127.0.0.1:20000:1"/>
+        <INSTANCE idc="LF" Role="master" Enable="true" Addr="127.0.01:20015:1"/>
+      </CACHESHARDING>
+      <CACHESHARDING  Sid="999" ShardingReplicaEnable="false" ShardingName="complex">
+        <INSTANCE idc="LF" Role="master" Enable="true" Addr="127.0.0.1:2002:1"/>
+      </CACHESHARDING>
+    </MODULE>
+  </BUSINESS_MODULE>
+<VERSION value="2" />
+    <LOG_MODULE LogSwitch="0" RemoteLogSwitch="1" RemoteLogIP="127.0.0.1" RemoteLogPort="9997" />
+</ALL>

+ 16 - 0
dockerfiles/devel/agent.single.dtcd.xml

@@ -0,0 +1,16 @@
+<? xml version="1.0" encoding="utf-8" ?>
+<ALL>
+  <VERSION value="2"/>
+  <AGENT_CONFIG AgentId="1"/>
+  <BUSINESS_MODULE>
+    <MODULE Mid="1319" Name="test1" AccessToken="000013192869b7fcc3f362a97f72c0908a92cb6d" ListenOn="0.0.0.0:12001" Backlog="500" Client_Connections="900"
+        Preconnect="true" Server_Connections="1" Hash="chash" Timeout="3000" ReplicaEnable="true" ModuleIDC="LF" MainReport="false" InstanceReport="false" AutoRemoveReplica="true" TopPercentileEnable="false" TopPercentileDomain="127.0.0.1" TopPercentilePort="20020">
+      <CACHESHARDING  Sid="293" ShardingReplicaEnable="true" ShardingName="test">
+        <INSTANCE idc="LF" Role="replica" Enable="false" Addr="127.0.0.1:20000:1"/>
+        <INSTANCE idc="LF" Role="master" Enable="true" Addr="dtc:20015:1"/>
+      </CACHESHARDING>
+    </MODULE>
+  </BUSINESS_MODULE>
+<VERSION value="2" />
+    <LOG_MODULE LogSwitch="0" RemoteLogSwitch="1" RemoteLogIP="127.0.0.1" RemoteLogPort="9997" />
+</ALL>

+ 26 - 5
dockerfiles/devel/dockerfile

@@ -1,4 +1,4 @@
-FROM centos:centos8
+FROM ubuntu
 
 ARG basepath=/usr/local/dtc
 ARG confpath=/etc/dtc
@@ -11,13 +11,34 @@ RUN mkdir -p $basepath/log
 RUN mkdir -p $confpath
 RUN mkdir -p $logpath
 
-COPY dtcd $basepath/dtcd
+COPY core $basepath/core
 COPY dtcagent $basepath/dtcagent
-COPY agent-watchdog $basepath/agent-watchdog
+COPY dtc $basepath/dtc
 COPY async-connector $basepath/async-connector
 COPY connector $basepath/connector
 COPY data-lifecycle-manager $basepath/data-lifecycle-manager
 COPY hwcserver $basepath/hwcserver
+COPY librule.so $basepath/librule.so
+COPY libsqlparser.so $basepath/libsqlparser.so
 
-COPY dtc.yaml $confpath/dtc.yaml
-COPY log4cplus.conf $confpath/log4cplus.conf
+COPY librule.so /usr/local/lib/librule.so
+COPY libsqlparser.so /usr/local/lib/libsqlparser.so
+
+RUN apt update
+RUN apt install -y iputils-ping
+RUN apt install -y net-tools
+
+COPY run.sh $basepath/run.sh
+
+RUN chmod +x $basepath/core
+RUN chmod +x $basepath/dtcagent
+RUN chmod +x $basepath/connector
+RUN chmod +x $basepath/run.sh
+RUN chmod +x $basepath/async-connector
+
+ENV LD_LIBRARY_PATH=:/usr/local/lib
+
+CMD ["/usr/local/dtc/run.sh"]
+
+#COPY dtc.yaml $confpath/dtc.yaml
+#COPY log4cplus.conf $confpath/log4cplus.conf
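
The devel image now carries every service binary under /usr/local/dtc, places librule.so and libsqlparser.so in /usr/local/lib, and no longer bakes in dtc.yaml or log4cplus.conf; configuration is expected on a mounted /etc/dtc volume, with run.sh (added below) as the entrypoint. A hedged sketch of building the image locally, assuming the compiled binaries and shared libraries were already copied into dockerfiles/devel/ as the workflow's "Publish devel docker" step does:

  # Local build sketch; assumes core, dtcagent, dtc, async-connector, connector,
  # data-lifecycle-manager, hwcserver, librule.so, libsqlparser.so and run.sh
  # are already present in dockerfiles/devel/.
  cd dockerfiles/devel/
  docker build -t devel:local .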

+ 29 - 0
dockerfiles/devel/dtc.dbaddition.s1.yaml

@@ -0,0 +1,29 @@
+# 
+# DTC configure file. v2
+# Cache with Datasource Test cases.
+# Scene 1: Single DB Single Table
+#
+props:
+  log.level: debug
+  listener.port.dtc: 20015
+  shm.mem.size: 100 #MB
+
+connection: &connection
+  addr: 127.0.0.1:3307
+  user: username
+  pwd: password 
+
+primary:
+  table: &table opensource
+  cache:
+    field:
+      - {name: &key uid, type: signed, size: 4}
+      - {name: name, type: string, size: 50}
+      - {name: city, type: string, size: 50}
+      - {name: sex, type: signed, size: 4}
+      - {name: age, type: signed, size: 4}
+  hot:
+    logic:
+      {db: &db layer2, table: *table, connection: *connection}
+    real:
+      - {addr: mysql:3306, user: root, pwd: 123456, db: layer2}

+ 34 - 0
dockerfiles/devel/dtc.dbaddition.s2.yaml

@@ -0,0 +1,34 @@
+# 
+# DTC configure file. v2
+# Cache with Datasource Test cases.
+# Scene 2: Single DB Sharding Table
+# table name: opensource_0 ... opensource_9
+# total: 10 tables.
+#
+props:
+  log.level: debug
+  listener.port.dtc: 20015
+  shm.mem.size: 100 #MB
+
+connection: &connection
+  addr: 127.0.0.1:3307
+  user: username
+  pwd: password 
+
+primary:
+  table: &table opensource
+  cache:
+    field:
+      - {name: &key uid, type: signed, size: 4}
+      - {name: name, type: string, size: 50}
+      - {name: city, type: string, size: 50}
+      - {name: sex, type: signed, size: 4}
+      - {name: age, type: signed, size: 4}
+  hot:
+    logic:
+      {db: &db layer2, table: *table, connection: *connection}
+    real:
+      - {addr: mysql:3306, user: root, pwd: 123456, db: layer2}
+    sharding:
+      key: *key
+      table: {prefix: [*table, _], start: 0, last: 9}

+ 32 - 0
dockerfiles/devel/dtc.dbaddition.s3.yaml

@@ -0,0 +1,32 @@
+# 
+# DTC configure file. v2
+# Cache with Datasource Test cases.
+# Scene 3: Sharding DB Sharding Table
+#
+props:
+  log.level: debug
+  listener.port.dtc: 20015
+  shm.mem.size: 100 #MB
+
+connection: &connection
+  addr: 127.0.0.1:3307
+  user: username
+  pwd: password 
+
+primary:
+  table: &table opensource
+  cache:
+    field:
+      - {name: &key uid, type: signed, size: 4}
+      - {name: name, type: string, size: 50}
+      - {name: city, type: string, size: 50}
+      - {name: sex, type: signed, size: 4}
+      - {name: age, type: signed, size: 4}
+  hot:
+    logic:
+      {db: &db layer2, table: *table, connection: *connection}
+    real:
+      - {addr: mysql-01.local:3306, user: username, pwd: password, db: single}
+    sharding:
+      key: *key
+      table: {prefix: [*table, _], start: 0, last: 9}

+ 0 - 18
dockerfiles/devel/dtc.dbaddition.yaml

@@ -1,18 +0,0 @@
-cache:
-   LOG_LEVEL: debug
-   BIND_ADDR: '*:20015/tcp'
-   DTCID: 20015
-   MAX_USE_MEM_MB: 100
-   DTC_MODE: 1 #1: cache only, 0: database in addition.
-vhot:
-   addr: 127.0.0.1:3307
-   username: root
-   password: root
-   database: sharding
-data_lifecycle:
-   SingleQueryCount: 10
-   DataSQLRule: 'status = 0'
-   OperateTimeRule: '00 01 * * * ?'
-   LifeCycleTableName: 'data_lifecycle_table'
-   HotDBName: 'HOT'
-   ColdDBName: 'COLD'

+ 34 - 18
dockerfiles/devel/dtc.layered.yaml

@@ -1,18 +1,34 @@
-cache:
-   LOG_LEVEL: debug
-   BIND_ADDR: '*:20015/tcp'
-   DTCID: 20015
-   MAX_USE_MEM_MB: 100
-   DTC_MODE: 1 #1: cache only, 0: database in addition.
-vhot:
-   addr: 127.0.0.1:3307
-   username: root
-   password: root
-   database: sharding
-data_lifecycle:
-   SingleQueryCount: 10
-   DataSQLRule: 'status = 0'
-   OperateTimeRule: '00 01 * * * ?'
-   LifeCycleTableName: 'data_lifecycle_table'
-   HotDBName: 'HOT'
-   ColdDBName: 'COLD'
+# 
+# DTC configure file. v2
+# Layered Storage Test Cases.
+# Without ShardingSphere.
+#
+props:
+  log.level: debug
+  listener.port.dtc: 20015
+  shm.mem.size: 100 #MB
+
+connection: &connection
+  addr: mysql:3306
+  user: root
+  pwd: 123456 
+
+primary:
+  table: &table opensource
+  cache:
+    field:
+      - {name: &key uid, type: signed, size: 4}
+      - {name: name, type: string, size: 50}
+      - {name: city, type: string, size: 50}
+      - {name: sex, type: signed, size: 4}
+      - {name: age, type: signed, size: 4}
+  hot:
+    logic:
+      {db: &db layer2, table: *table, connection: *connection}
+    real:
+      - {addr: mysql:3306, user: root, pwd: 123456, db: layer2}
+  full:
+    logic:
+      {db: layer3, table: *table, connection: *connection}
+    real:
+      - {addr: mysql:3306, user: root, pwd: 123456, db: layer3}      
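
With this layered configuration the hot layer lives in the layer2 database and the full layer in layer3, both reached through the mysql service container as root/123456. A quick manual check through the agent, mirroring the commands the test-agent-layered job runs (assumes the agent container is up and listening on 12001):

  mysql -h127.0.0.1 -P12001 -uroot -p123456 -e "show databases;"
  mysql -h127.0.0.1 -P12001 -uroot -p123456 -Dlayer2 -e "show tables;"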

+ 19 - 0
dockerfiles/devel/run.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+
+sleep_count=0
+
+while [ $sleep_count -le 100 ]
+do
+    if [ -f "/etc/dtc/dtc.yaml" ]; then 
+        echo "Start running process: "$DTC_BIN","$DTC_ARGV
+        cd /usr/local/dtc/
+        ./$DTC_BIN $DTC_ARGV
+        break
+    else
+        echo "sleeping: "$sleep_count"s"
+        sleep 1s
+        let sleep_count+=1
+    fi
+done
+
+echo "Timeout waitting for dtc conf files."

+ 2 - 2
dockerfiles/server/dockerfile

@@ -11,11 +11,11 @@ RUN mkdir -p $basepath/log
 RUN mkdir -p $confpath
 RUN mkdir -p $logpath
 
-COPY dtcd $basepath/bin/dtcd
+COPY core $basepath/bin/core
 COPY hwcserver $basepath/bin/hwcserver
 COPY connector $basepath/bin/connector
 COPY table.yaml $confpath/table.yaml
 COPY dtc.yaml $confpath/dtc.yaml
 COPY log4cplus.conf $confpath/log4cplus.conf
 
-CMD ["/usr/local/dtc/bin/dtcd", "-d"]
+CMD ["/usr/local/dtc/bin/core", "-d"]

+ 1 - 1
script/dtcd.sh

@@ -4,7 +4,7 @@ ulimit -c unlimited
 
 DTC_BIN="dtcd_docker"
 rm -f "$DTC_BIN"
-ln -s dtcd "$DTC_BIN" 
+ln -s core "$DTC_BIN" 
 
 if [ "$1" = "stop" ] ; then
     killall -9 $DTC_BIN

+ 3 - 3
src/agent-watchdog/CMakeLists.txt

@@ -27,7 +27,7 @@ LINK_LIBRARIES(dl)
 ADD_DEFINITIONS("-g -fPIC -fpermissive -std=gnu++11")
 ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
-ADD_EXECUTABLE(agent-watchdog ${SRC_LIST1})
+ADD_EXECUTABLE(dtc ${SRC_LIST1})
 
-TARGET_LINK_LIBRARIES(agent-watchdog libstat.a libcommon.a libyaml-cpp.a liblog4cplus.a)
-redefine_file_macro(agent-watchdog)
+TARGET_LINK_LIBRARIES(dtc libstat.a libcommon.a libyaml-cpp.a liblog4cplus.a)
+redefine_file_macro(dtc)

+ 39 - 0
src/agent-watchdog/core_entry.cc

@@ -0,0 +1,39 @@
+/*
+* Copyright [2021] JD.com, Inc.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+* 
+*/
+#include "core_entry.h"
+#include <unistd.h>
+
+const char *core_name = "core";
+
+CoreEntry::CoreEntry(WatchDog *watchdog, int sec)
+	: WatchDogDaemon(watchdog, sec)
+{
+	strncpy(watchdog_object_name_, core_name, sizeof(watchdog_object_name_) < strlen(core_name)? sizeof(watchdog_object_name_): strlen(core_name));
+}
+
+CoreEntry::~CoreEntry(void)
+{
+}
+
+void CoreEntry::exec()
+{
+	char *argv[2];
+
+	argv[0] = (char *)core_name;
+	argv[1] = NULL;
+	execv(argv[0], argv);
+}

+ 31 - 0
src/agent-watchdog/core_entry.h

@@ -0,0 +1,31 @@
+/*
+* Copyright [2021] JD.com, Inc.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+* 
+*/
+#ifndef __H_CORE_ENTRY_H__
+#define __H_CORE_ENTRY_H__
+
+#include "base.h"
+#include "daemon_listener.h"
+
+class CoreEntry : public WatchDogDaemon
+{
+public:
+	CoreEntry(WatchDog *watchdog, int sec);
+	virtual ~CoreEntry();
+	virtual void exec();
+};
+
+#endif

+ 2 - 2
src/agent-watchdog/daemons.cc

@@ -104,9 +104,9 @@ void WatchDog::run_loop()
 
 	while (!watchdog_stop) {
 		int timeout = expire_micro_seconds(1000, 1);
-		log4cplus_debug("befor poll, timeout:%d, %d, %d\n", timeout, pfd[0].fd, pfd[1].fd);
+		//log4cplus_debug("befor poll, timeout:%d, %d, %d\n", timeout, pfd[0].fd, pfd[1].fd);
 		int interrupted = poll(pfd, 2, timeout);
-		log4cplus_debug("after poll, watchdog_stop:%d, interrupted:%d\n", watchdog_stop, interrupted);
+		//log4cplus_debug("after poll, watchdog_stop:%d, interrupted:%d\n", watchdog_stop, interrupted);
 		update_now_time(timeout, interrupted);
 		if (watchdog_stop)
 			break;

+ 33 - 7
src/agent-watchdog/main.cc

@@ -7,6 +7,7 @@
 #include "fulldata_entry.h"
 #include "main_entry.h"
 #include "cold_wipe_entry.h"
+#include "core_entry.h"
 #include "agent_entry.h"
 #include "proc_title.h"
 
@@ -29,6 +30,7 @@ static int show_version;
 static int load_datalife;
 static int load_agent;
 static int load_fulldata;
+static int load_core;
 int recovery_mode;
 int load_sharding;
 int load_all;
@@ -40,12 +42,13 @@ static struct option long_options[] = {
 		{ "version", no_argument, NULL, 'v' },
 		{ "data-lifecycle", no_argument, NULL,'l' },
 		{ "agent", no_argument, NULL,'a' },
-		{ "full-data", no_argument, NULL,'f' },
+		{ "async-connector", no_argument, NULL,'y' },
 		{ "sharding", no_argument, NULL,'s' },
 		{ "recovery", no_argument, NULL,'r' },
+		{ "core", no_argument, NULL,'c' },
 		{ NULL, 0, NULL, 0 } };
 
-static char short_options[] = "hvlafsr";
+static char short_options[] = "hvlaycsr";
 
 
 static int get_options(int argc, char **argv) {
@@ -75,7 +78,7 @@ static int get_options(int argc, char **argv) {
 		case 'a':
 			load_agent = 1;
 			break;
-		case 'f':
+		case 'y':
 			load_fulldata = 1;
 			break;
 		case 's':
@@ -84,7 +87,9 @@ static int get_options(int argc, char **argv) {
 		case 'r':
 			recovery_mode = 1;
 			break;					
-
+		case 'c':
+			load_core = 1;
+			break;				
 		default:
 			break;
 		}
@@ -95,13 +100,14 @@ static int get_options(int argc, char **argv) {
 
 
 static void show_usage(void) {
-	printf("Usage: agent-watchdog -[hvadfs], default load all modules.\n");
+	printf("Usage: dtc -[hvlaycsr], default load all modules.\n");
 	printf("Options:\n"); 
 	printf("  -h, --help             		: this help\n");
 	printf("  -v, --version          		: show version and exit\n");
 	printf("  -a, --agent        			: load agent module\n");
+	printf("  -c, --core        			: load dtc core module\n");
 	printf("  -l, --data-lifecycle			: load data-lifecycle module\n");
-	printf("  -f, --full-data     			: load full-data module\n");
+	printf("  -y, --async-connector			: load async-connector module\n");
 	printf("  -s, --sharding      			: load sharding module\n");
 	printf("  -r, --recovery mode  			: auto restart when crashed\n");
 
@@ -158,6 +164,21 @@ int start_data_lifecycle(WatchDog* wdog, int delay)
 	return 0;
 }
 
+int start_core(WatchDog* wdog, int delay)
+{
+	// start dtcd core main process.
+	CoreEntry *core_entry = new CoreEntry(wdog, delay);
+	if (core_entry == NULL) {
+		log4cplus_error(
+			"create CoreEntry object failed, msg: %m");
+		return -1;
+	}
+	if (core_entry->new_proc_fork() < 0)
+		return -1;
+	
+	return 0;
+}
+
 int start_agent(WatchDog* wdog, int delay)
 {
 	// start agent main process.
@@ -221,7 +242,7 @@ int main(int argc, char* argv[])
 		exit(1);
 	}
 	if (show_version) {
-		printf("This is agent-watchdog-%s\n", DA_VERSION_STR);
+		printf("This is dtc watchdog -%s\n", DA_VERSION_STR);
 		if (show_help) {
 			show_usage();
 		}
@@ -243,6 +264,11 @@ int main(int argc, char* argv[])
 			log4cplus_error("start sharding failed.");
 	}
 
+	if (load_core || load_all) {
+		if(start_core(wdog, delay) < 0)
+			log4cplus_error("start core failed.");
+	}
+
 	if (load_agent || load_all) {
 		if(start_agent(wdog, delay) < 0)
 			log4cplus_error("start full-data failed.");

+ 2 - 1
src/agent/CMakeLists.txt

@@ -27,6 +27,7 @@ INCLUDE_DIRECTORIES(
     ../rule)
 
 LINK_DIRECTORIES(
+    .
     ${PROJECT_SOURCE_DIR}/src/libs/mxml/libs
     ${PROJECT_SOURCE_DIR}/src/libs/hsql/libs
     ${PROJECT_SOURCE_DIR}/build/src/rule/
@@ -43,4 +44,4 @@ ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 ADD_EXECUTABLE(dtcagent ${SRC_LIST})
 
 TARGET_LINK_LIBRARIES(dtcagent libmxml.a librule.so)
-redefine_file_macro(agent-watchdog)
+redefine_file_macro(dtcagent)

+ 1 - 1
src/complex/CMakeLists.txt

@@ -28,7 +28,7 @@ LINK_LIBRARIES(pthread)
 LINK_LIBRARIES(dl)
 
 #编译参数
-ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,dtcd.export.lst")
+ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,core.export.lst")
 ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
 #编译为.so/.a

+ 0 - 3
src/complex/cm_load.cc

@@ -129,9 +129,6 @@ bool ConfigHelper::load_full_inst_info()
 {
 	memset(&full_instance, 0, sizeof(DBHost));
 
-	if(!dtc["COLD_MACHINE1"] || !dtc["DATABASE_CONF"])
-		return false;
-
 	if( !dtc["primary"]["full"]["logic"]["connection"]["addr"] || 
 		!dtc["primary"]["full"]["logic"]["connection"]["user"] ||
 		!dtc["primary"]["full"]["logic"]["connection"]["pwd"] ||

+ 2 - 2
src/connector/CMakeLists.txt

@@ -38,7 +38,7 @@ LINK_LIBRARIES(nsl)
 LINK_LIBRARIES(stdc++)
 LINK_LIBRARIES(dl)
 LINK_LIBRARIES(z)
-LINK_LIBRARIES(libdtcd.a)
+LINK_LIBRARIES(libcore.a)
 LINK_LIBRARIES(libcommon.a)
 LINK_LIBRARIES(libdaemons.a)
 LINK_LIBRARIES(libstat.a)
@@ -49,5 +49,5 @@ ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
 ADD_EXECUTABLE (connector ${SRC_LIST})
 
-TARGET_LINK_LIBRARIES(connector  libdtcd.a libdaemons.a libstat.a libcommon.a liblog4cplus.a  libyaml-cpp.a  libz64.a libmysqlclient.a)
+TARGET_LINK_LIBRARIES(connector  libcore.a libdaemons.a libstat.a libcommon.a liblog4cplus.a  libyaml-cpp.a  libz64.a libmysqlclient.a)
 redefine_file_macro(connector)

+ 4 - 4
src/core/CMakeLists.txt

@@ -44,12 +44,12 @@ LINK_LIBRARIES(pthread)
 LINK_LIBRARIES(dl)
 
 #编译参数
-ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,dtcd.export.lst")
+ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,core.export.lst")
 ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
 #编译为.so/.a
-ADD_EXECUTABLE(dtcd ${SRC_LIST})
+ADD_EXECUTABLE(core ${SRC_LIST})
 
 #将目标文件与库文件链接
-TARGET_LINK_LIBRARIES(dtcd libdaemons.a libstat.a libsqlparser.a libcommon.a libyaml-cpp.a liblog4cplus.a libz64.a libmysqlclient.a)
-redefine_file_macro(dtcd)
+TARGET_LINK_LIBRARIES(core libdaemons.a libstat.a libsqlparser.a libcommon.a libyaml-cpp.a liblog4cplus.a libz64.a libmysqlclient.a)
+redefine_file_macro(core)

+ 2 - 2
src/core/data/container_dtcd.cc

@@ -107,7 +107,7 @@ const char *DTCInstance::query_version_string(void)
 
 const char *DTCInstance::query_service_type(void)
 {
-	return "dtcd";
+	return "core";
 }
 
 const char *DTCInstance::query_instance_name(void)
@@ -154,7 +154,7 @@ _QueryInternalService(const char *name, const char *instance)
 	if (!name || !instance)
 		return NULL;
 
-	if (strcasecmp(name, "dtcd") != 0)
+	if (strcasecmp(name, "core") != 0)
 		return NULL;
 
 	/* not found */

+ 5 - 5
src/core/lib/CMakeLists.txt

@@ -39,13 +39,13 @@ LINK_LIBRARIES(pthread)
 LINK_LIBRARIES(dl)
 
 #编译参数
-ADD_DEFINITIONS("-g -rdynamic -fPIC -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,dtcd.export.lst")
+ADD_DEFINITIONS("-g -rdynamic -fPIC -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,core.export.lst")
 
 #编译为.so/.a
-ADD_LIBRARY(dtcd_static ${SRC_LIST})
+ADD_LIBRARY(core_static ${SRC_LIST})
 
 #将目标文件与库文件链接
-TARGET_LINK_LIBRARIES(dtcd_static libdaemons.a libstat.a libcommon.a libyaml-cpp.a liblog4cplus.a libz64.a)
-SET_TARGET_PROPERTIES(dtcd_static PROPERTIES OUTPUT_NAME "dtcd")
-SET_TARGET_PROPERTIES (dtcd_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
+TARGET_LINK_LIBRARIES(core_static libdaemons.a libstat.a libcommon.a libyaml-cpp.a liblog4cplus.a libz64.a)
+SET_TARGET_PROPERTIES(core_static PROPERTIES OUTPUT_NAME "core")
+SET_TARGET_PROPERTIES (core_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
 

+ 1 - 1
src/core/main.cc

@@ -27,7 +27,7 @@
 
 using namespace ClusterConfig;
 
-const char project_name[] = "dtcd";
+const char project_name[] = "core";
 const char usage_argv[] = "";
 
 BufferProcessAskChain *g_buffer_process_ask_instance = NULL;

+ 16 - 1
src/daemons/helper.cc

@@ -24,6 +24,7 @@
 #include "log/log.h"
 #include "dtc_global.h"
 #include <sstream>
+#include <unistd.h>
 
 WatchDogHelper::WatchDogHelper(WatchDog *watchdog, int sec, const char *path,
 			       int machine_conf, int role, int backlog,
@@ -90,7 +91,21 @@ void WatchDogHelper::exec()
 	Thread *helperThread =
 		new Thread(watchdog_object_name_, Thread::ThreadTypeProcess);
 	helperThread->initialize_thread();
-	argv[0] = (char *)connector_name[type_];
+	char filedir[260] = {0};
+	char filepath[260] = {0};
+	char fn[260] = {0};
+	snprintf(fn, sizeof(fn), "/proc/%d/exe", getpid());
+	int rv = readlink(fn, filedir, sizeof(filedir) - 1);
+	if(rv > 0)
+	{
+		filedir[rv] = '\0';
+		std::string str = filedir;
+		rv = str.rfind('/');
+		strcpy(filedir, str.substr(0, rv).c_str());
+	}
+	sprintf(filepath, "%s/%s", filedir, connector_name[type_]);
+	log4cplus_info("connector path:%s", filepath);
+	argv[0] = filepath;
 	execv(argv[0], argv);
 	log4cplus_error("helper[%s] execv error: %m", argv[0]);
 }

+ 3 - 3
src/data_lifecycle/CMakeLists.txt

@@ -51,7 +51,7 @@ LINK_LIBRARIES(nsl)
 LINK_LIBRARIES(stdc++)
 LINK_LIBRARIES(dl)
 LINK_LIBRARIES(z)
-LINK_LIBRARIES(libdtcd.a)
+LINK_LIBRARIES(libcore.a)
 LINK_LIBRARIES(libcommon.a)
 LINK_LIBRARIES(libdaemons.a)
 LINK_LIBRARIES(libstat.a)
@@ -60,7 +60,7 @@ LINK_LIBRARIES(libmysqlclient.a)
 ADD_DEFINITIONS ("-g -fPIC -fpermissive -std=gnu++11 -DOMN_PLATFORM_UNIX -Wl,--no-undefined -Xlinker -zmuldefs")
 ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 ADD_EXECUTABLE (data-lifecycle-manager ${SRC_LIST} ../connector/database_connection.cc)
-TARGET_LINK_LIBRARIES(data-lifecycle-manager libdtcd.a libdaemons.a libstat.a libcommon.a liblog4cplus.a libyaml-cpp.a libz64.a libsqlparser.a libmysqlclient.a libmxml.a)
+TARGET_LINK_LIBRARIES(data-lifecycle-manager libcore.a libdaemons.a libstat.a libcommon.a liblog4cplus.a libyaml-cpp.a libz64.a libsqlparser.a libmysqlclient.a libmxml.a)
 redefine_file_macro(data-lifecycle-manager)
 
 if(jdtestOpen)
@@ -111,7 +111,7 @@ if(jdtestOpen)
     ../libs/mxml/include
     ../libs/google_test/include
     )
-    target_link_libraries(gtest_data_lifecycle dtcd daemons stat common gtest_main gmock gtest dl pthread log4cplus sqlparser yaml-cpp z64 mysqlclient mxml)
+    target_link_libraries(gtest_data_lifecycle core daemons stat common gtest_main gmock gtest dl pthread log4cplus sqlparser yaml-cpp z64 mysqlclient mxml)
     redefine_file_macro(gtest_data_lifecycle)
     SET_TARGET_PROPERTIES(gtest_data_lifecycle PROPERTIES RUNTIME_OUTPUT_DIRECTORY "./bin")
     install(TARGETS gtest_data_lifecycle RUNTIME DESTINATION bin)

+ 2 - 2
src/hwcserver/CMakeLists.txt

@@ -40,7 +40,7 @@ LINK_LIBRARIES(nsl)
 LINK_LIBRARIES(stdc++)
 LINK_LIBRARIES(dl)
 LINK_LIBRARIES(z)
-LINK_LIBRARIES(libdtcd.a)
+LINK_LIBRARIES(libcore.a)
 LINK_LIBRARIES(libcommon.a)
 LINK_LIBRARIES(libdaemons.a)
 LINK_LIBRARIES(libstat.a)
@@ -52,5 +52,5 @@ ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
 ADD_EXECUTABLE(hwcserver  ${SRC_LIST})
 
-TARGET_LINK_LIBRARIES(hwcserver  libdtcd.a libdaemons.a libstat.a libcommon.a liblog4cplus.a  libyaml-cpp.a  libz64.a libmysqlclient.a libdtcapi.so)
+TARGET_LINK_LIBRARIES(hwcserver  libcore.a libdaemons.a libstat.a libcommon.a liblog4cplus.a  libyaml-cpp.a  libz64.a libmysqlclient.a libdtcapi.so)
 redefine_file_macro(hwcserver)

+ 18 - 8
src/libs/common/config/dbconfig.cc

@@ -380,6 +380,7 @@ int DbConfig::get_dtc_config(YAML::Node dtc_config, DTCConfig* raw, int i_server
     else{
         machineCnt = 0;
     }
+
     //Depoly
     if(dtc_config["primary"][layer]) //cache.datasource mode
     {
@@ -410,6 +411,7 @@ int DbConfig::get_dtc_config(YAML::Node dtc_config, DTCConfig* raw, int i_server
     {
         depoly = SINGLE;
     }
+
     //DB Name
     if(dtc_config["primary"][layer]) //cache.datasource mode
     {
@@ -478,15 +480,23 @@ int DbConfig::get_dtc_config(YAML::Node dtc_config, DTCConfig* raw, int i_server
                 dbMod);
             database_max_count = dbMod;
         }
-    
+
         //Table section with DATABASE_IN_ADDITION.
         YAML::Node node = dtc_config["primary"][layer]["sharding"]["table"]["prefix"];
-        if(!node)
+        if(node)
         {
-            log4cplus_error("[TABLE_CONF].table_name not defined");
+            tblName = STRDUP(get_merge_string(node).c_str());
+        }
+        else if(dtc_config["primary"]["table"])
+        {
+            tblName = STRDUP(dtc_config["primary"]["table"].as<string>().c_str());
+        }
+        else
+        {
+            log4cplus_error("table name not defined");
             return -1;
         }
-        tblName = STRDUP(get_merge_string(node).c_str());
+
         if ((depoly & 2) == 0) {
             if (strchr(tblName, '%') != NULL) {
                 log4cplus_error(
@@ -524,7 +534,7 @@ int DbConfig::get_dtc_config(YAML::Node dtc_config, DTCConfig* raw, int i_server
         YAML::Node node = dtc_config["primary"]["table"];
         if(!node)
         {
-            log4cplus_error("[TABLE_CONF].table_name not defined");
+            log4cplus_error("table name not defined");
             return -1;
         }
         tblName = STRDUP(node.as<string>().c_str());
@@ -613,13 +623,13 @@ int DbConfig::get_dtc_config(YAML::Node dtc_config, DTCConfig* raw, int i_server
             }
         }
         /* Helper number alter */
-        m->gprocs[0] = raw->get_int_val(NULL, "Procs", 10);
+        m->gprocs[0] = raw->get_int_val(NULL, "Procs", 1);
         if (m->gprocs[0] < 1)
             m->gprocs[0] = 0;
-        m->gprocs[1] = raw->get_int_val(NULL, "WriteProcs", 10);
+        m->gprocs[1] = raw->get_int_val(NULL, "WriteProcs", 1);
         if (m->gprocs[1] < 1)
             m->gprocs[1] = 0;
-        m->gprocs[2] = raw->get_int_val(NULL, "CommitProcs", 10);
+        m->gprocs[2] = raw->get_int_val(NULL, "CommitProcs", 1);
         if (m->gprocs[2] < 1)
             m->gprocs[2] = 0;
         /* Helper Queue Size */

+ 7 - 7
src/libs/common/connector/connector_client.cc

@@ -80,7 +80,7 @@ int ConnectorClient::connect_error()
     connectErrorCnt++;
     if (connectErrorCnt > maxTryConnect && ready) {
         log4cplus_debug(
-            "helper-client[%d] try connect %lu times, switch invalid.",
+            "connector[%d] try connect %lu times, switch invalid.",
             helperIdx, (unsigned long)connectErrorCnt);
         helperGroup->dec_ready_helper();
         ready = 0;
@@ -266,7 +266,7 @@ int ConnectorClient::reconnect(void)
     }
 
     if (errno != EINPROGRESS) {
-        log4cplus_error("connect helper-%s error: %m", sockpath);
+        log4cplus_error("connect connector-%s error: %m", sockpath);
         close(netfd);
         netfd = -1;
         attach_timer(helperGroup->retryList);
@@ -275,7 +275,7 @@ int ConnectorClient::reconnect(void)
         return 0;
     }
 
-    log4cplus_debug("Connectting to helper[%d]: %s", helperIdx, sockpath);
+    log4cplus_debug("Connectting to connector[%d]: %s", helperIdx, sockpath);
 
     disable_input();
     enable_output();
@@ -345,7 +345,7 @@ int ConnectorClient::recv_verify()
             supportBatchKey = 0;
             break;
         default:
-            log4cplus_error("detect helper-%s error: %d, %s",
+            log4cplus_error("detect connector-%s error: %d, %s",
                     helperGroup->sock_path(),
                     job->result_code(),
                     job->resultInfo.error_message());
@@ -355,14 +355,14 @@ int ConnectorClient::recv_verify()
     }
 
     if (supportBatchKey) {
-        log4cplus_debug("helper-%s support batch-key",
+        log4cplus_debug("connector-%s support batch-key",
                 helperGroup->sock_path());
     } else {
         if (logwarn++ == 0)
-            log4cplus_warning("helper-%s unsupported batch-key",
+            log4cplus_warning("connector-%s unsupported batch-key",
                       helperGroup->sock_path());
         else
-            log4cplus_debug("helper-%s unsupported batch-key",
+            log4cplus_debug("connector-%s unsupported batch-key",
                     helperGroup->sock_path());
     }
 

+ 2 - 2
src/libs/common/packet/packet_server.cc

@@ -90,7 +90,7 @@ int Packet::encode_detect(const DTCTableDefinition *tdef, int sn)
 	vi.set_table_hash(tdef->table_hash());
 	vi.set_serial_nr(sn);
 	// app version
-	vi.set_tag(5, "dtcd");
+	vi.set_tag(5, "core");
 	// lib version
 	vi.set_tag(6, "ctlib-v" DTC_VERSION);
 	vi.set_tag(9, tdef->field_type(0));
@@ -190,7 +190,7 @@ int Packet::encode_reload_config(const DTCTableDefinition *tdef, int sn)
 	vi.set_table_hash(tdef->table_hash());
 	vi.set_serial_nr(sn);
 	// app version
-	vi.set_tag(5, "dtcd");
+	vi.set_tag(5, "core");
 	// lib version
 	vi.set_tag(6, "ctlib-v" DTC_VERSION);
 	vi.set_tag(9, tdef->field_type(0));

+ 3 - 3
src/libs/stat/stat_dtc.cc

@@ -25,19 +25,19 @@ StatThread g_stat_mgr;
 int init_statistics(void)
 {
 	int ret;
-	ret = g_stat_mgr.init_stat_info("dtcd", STATIDX);
+	ret = g_stat_mgr.init_stat_info("core", STATIDX);
 	// -1, recreate, -2, failed
 	if (ret == -1) {
 		unlink(STATIDX);
 		char buf[64];
 		ret = g_stat_mgr.create_stat_index(
-			"dtcd", STATIDX, g_stat_definition, buf, sizeof(buf));
+			"core", STATIDX, g_stat_definition, buf, sizeof(buf));
 		if (ret != 0) {
 			log4cplus_error("CreateStatIndex failed: %s",
 					g_stat_mgr.get_error_message());
 			exit(ret);
 		}
-		ret = g_stat_mgr.init_stat_info("dtcd", STATIDX);
+		ret = g_stat_mgr.init_stat_info("core", STATIDX);
 	}
 	if (ret == 0) {
 		int v1, v2, v3;

+ 1 - 1
src/libs/stat/stat_manager.cc

@@ -113,7 +113,7 @@ int StatManager::init_stat_info(const char *name, const char *indexfile,
 	fd = open(indexfile, O_RDWR);
 	if (fd < 0) {
 		snprintf(stat_error_message_, sizeof(stat_error_message_),
-			 "cannot open index file, checking privilege and stat directory.");
+			 "cannot open index file, checking privilege and stat directory: %s", indexfile);
 		return -1;
 	}
 

+ 2 - 2
src/libs/stat/stat_tool.cc

@@ -441,7 +441,7 @@ void dump_base(void)
 void create_files(void)
 {
 	char buf[256];
-	if (g_stat_mgr.create_stat_index("dtcd", STATIDX, g_stat_definition,
+	if (g_stat_mgr.create_stat_index("core", STATIDX, g_stat_definition,
 					 buf, sizeof(buf)) < 0) {
 		fprintf(stderr, "Fail to create stat index file: %s\n", buf);
 		exit(-3);
@@ -453,7 +453,7 @@ void init(void)
 {
 	int ret;
 
-	ret = stc.init_stat_info("dtcd", STATIDX);
+	ret = stc.init_stat_info("core", STATIDX);
 	if (ret < 0) {
 		fprintf(stderr, "Cannot Initialize StatInfo: %s\n",
 			stc.get_error_message());

+ 2 - 1
src/rule/CMakeLists.txt

@@ -11,6 +11,7 @@ INCLUDE_DIRECTORIES(
 
 #添加.so/.a文件搜索路径,相当于gcc -L
 LINK_DIRECTORIES(
+    .
     ${PROJECT_SOURCE_DIR}/src/libs/log4cplus/libs
     ${PROJECT_SOURCE_DIR}/src/libs/yaml-cpp/libs
     ${PROJECT_SOURCE_DIR}/src/libs/hsql/libs)
@@ -23,7 +24,7 @@ LINK_LIBRARIES(pthread)
 LINK_LIBRARIES(dl)
 
 #编译参数
-ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,dtcd.export.lst")
+ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,core.export.lst")
 ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
 #编译为.so/.a

+ 1 - 1
src/utils/CMakeLists.txt

@@ -23,7 +23,7 @@ LINK_LIBRARIES(pthread)
 LINK_LIBRARIES(dl)
 
 #编译参数
-ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,dtcd.export.lst")
+ADD_DEFINITIONS("-g -rdynamic -fPIC -D_CORE_ -fpermissive -std=gnu++11 -D_GLIBCXX_USE_CXX11_ABI=0 -export-dynamic -Wl,--version-script,core.export.lst")
 ADD_DEFINITIONS(-Wno-builtin-macro-redefined)
 
 #编译为.so/.a

+ 7 - 0
tests/init.s1.sql

@@ -0,0 +1,7 @@
+CREATE TABLE `opensource` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;

+ 79 - 0
tests/init.s2.sql

@@ -0,0 +1,79 @@
+CREATE TABLE `opensource_0` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_1` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_2` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_3` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_4` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_5` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_6` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_7` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_8` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
+
+CREATE TABLE `opensource_9` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;
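
init.s2.sql seeds ten identically shaped shard tables, opensource_0 through opensource_9. The routing rule itself lives in the DTC/agent configuration rather than in this diff; a modulo-10 split on uid is one assumption that is consistent with test_dtcd_datasource_s2.py below, which inserts uid = 1 through DTC and reads it back from opensource_1 in MySQL. A minimal Python sketch of that assumed mapping (shard_table_for is a hypothetical helper, not part of this change):

# Hypothetical uid -> shard-table mapping, assuming a modulo-10 split on uid.
def shard_table_for(uid, shards=10):
    return "opensource_%d" % (uid % shards)

# Consistent with the s2 test reading uid = 1 back from opensource_1.
assert shard_table_for(1) == "opensource_1"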

+ 7 - 0
tests/init.s3.sql

@@ -0,0 +1,7 @@
+CREATE TABLE `opensource` (
+  `uid` int(11) ,
+  `name` varchar(50),
+  `city` varchar(50),
+  `sex` int(11) ,
+  `age` int(11)
+)DEFAULT CHARSET=utf8;

+ 133 - 0
tests/test_agent_cache_only.py

@@ -0,0 +1,133 @@
+import pytest
+import pymysql
+
+def test_insert_with_single_quotes():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, 'hello') where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    cursor.close()
+    db.close()
+    assert rowsaffected == 1
+
+'''
+def test_insert_with_double_quotes():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, \"hello\") where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''    
+
+'''
+def test_insert_remove_where_cluster():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, \"hello\")"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''
+
+'''
+def test_insert_remove_where_cluster_without_specify_key():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource values(1, \"Jack\", \"Shanghai\", 1, 18)"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''
+
+def test_select():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "hello"
+    db.close()
+
+def test_update():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "hello"
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "update opensource set name = 'Lee' where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "Lee"
+    cursor.close()
+
+    db.close()
+
+def test_delete():
+    print("----delete----")
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "delete from opensource where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 0
+    cursor.close()
+
+    db.close()    
+
+'''
+def test_check_tablename():
+'''    

+ 49 - 0
tests/test_agent_datasource_s1.py

@@ -0,0 +1,49 @@
+import pytest
+import pymysql
+
+def test_demo():
+    assert 100 == 100
+
+def test_insert_and_result():
+    db = pymysql.connect(host='127.0.0.1', port=12001, user='test', password='test', database='test')
+
+    #insert to DTC
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, 'hello') where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    #select from DTC
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    dtclen = len(results)
+    assert dtclen == 1
+    dtcuid = results[0][0]
+    dtcname = results[0][1]
+    assert dtcuid == 1
+    assert dtcname == "hello"
+    cursor.close()
+
+    db.close()
+
+    #select from datasource
+    db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', database='layer2')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    dblen = len(results)
+    assert dblen == dtclen
+    dbuid = results[0][0]
+    dbname = results[0][1]
+    assert dtcuid == dbuid
+    assert dtcname == dbname
+    cursor.close()
+
+    db.close()
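
The datasource tests above all follow one pattern: write through DTC over its MySQL protocol, read the row back through DTC, then read the same key straight from the backing MySQL database and compare the two results. A minimal sketch of that comparison step, reusing the hosts, ports, and credentials these tests connect with (the fetch_rows helper and its structure are illustrative only, not part of this change):

import pymysql

def fetch_rows(host, port, user, password, database, uid):
    # Connect, read the row for one uid, and always close the connection.
    db = pymysql.connect(host=host, port=port, user=user,
                         password=password, database=database)
    try:
        cursor = db.cursor()
        cursor.execute("select uid, name from opensource where uid = %d" % uid)
        return cursor.fetchall()
    finally:
        db.close()

# DTC agent endpoint (port 12001) versus the MySQL datasource (port 3306).
dtc_rows = fetch_rows('127.0.0.1', 12001, 'test', 'test', 'test', 1)
db_rows = fetch_rows('127.0.0.1', 3306, 'root', '123456', 'layer2', 1)
assert dtc_rows == db_rows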

+ 133 - 0
tests/test_dtcd_cache_only.py

@@ -0,0 +1,133 @@
+import pytest
+import pymysql
+
+def test_insert_with_single_quotes():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, 'hello') where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    cursor.close()
+    db.close()
+    assert rowsaffected == 1
+
+'''
+def test_insert_with_double_quotes():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, \"hello\") where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''    
+
+'''
+def test_insert_remove_where_cluster():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, \"hello\")"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''
+
+'''
+def test_insert_remove_where_cluster_without_specify_key():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource values(1, \"Jack\", \"Shanghai\", 1, 18)"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''
+
+def test_select():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "hello"
+    db.close()
+
+def test_update():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "hello"
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "update opensource set name = 'Lee' where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "Lee"
+    cursor.close()
+
+    db.close()
+
+def test_delete():
+    print("----delete----")
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "delete from opensource where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 0
+    cursor.close()
+
+    db.close()    
+
+'''
+def test_check_tablename():
+'''    

+ 0 - 16
tests/test_dtcd_cache_only_.py

@@ -1,16 +0,0 @@
-import pytest
-import pymysql
-
-def test_select():
-    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
-    cursor = db.cursor()
-    sql = "select uid, name from opensource where uid = 1"
-    cursor.execute(sql)
-    results = cursor.fetchall()
-    for row in results:
-        uid = row[0]
-        name = row[1]
-        print("uid=%s, name=%s" % (uid, name))
-        assert uid == 1
-        assert name == "hello"
-    db.close()

+ 49 - 0
tests/test_dtcd_datasource_s1.py

@@ -0,0 +1,49 @@
+import pytest
+import pymysql
+
+def test_demo():
+    assert 100 == 100
+
+def test_insert_and_result():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+
+    #insert to DTC
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, 'hello') where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    #select from DTC
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    dtclen = len(results)
+    assert dtclen == 1
+    dtcuid = results[0][0]
+    dtcname = results[0][1]
+    assert dtcuid == 1
+    assert dtcname == "hello"
+    cursor.close()
+
+    db.close()
+
+    #select from datasource
+    db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', database='layer2')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    dblen = len(results)
+    assert dblen == dtclen
+    dbuid = results[0][0]
+    dbname = results[0][1]
+    assert dtcuid == dbuid
+    assert dtcname == dbname
+    cursor.close()
+
+    db.close()

+ 49 - 0
tests/test_dtcd_datasource_s2.py

@@ -0,0 +1,49 @@
+import pytest
+import pymysql
+
+def test_demo():
+    assert 100 == 100
+
+def test_insert_and_result():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+
+    #insert to DTC
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, 'hello') where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    #select from DTC
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    dtclen = len(results)
+    assert dtclen == 1
+    dtcuid = results[0][0]
+    dtcname = results[0][1]
+    assert dtcuid == 1
+    assert dtcname == "hello"
+    cursor.close()
+
+    db.close()
+
+    #select from datasource
+    db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', database='layer2')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource_1 where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    dblen = len(results)
+    assert dblen == dtclen
+    dbuid = results[0][0]
+    dbname = results[0][1]
+    assert dtcuid == dbuid
+    assert dtcname == dbname
+    cursor.close()
+
+    db.close()

+ 133 - 0
tests/test_dtcd_datasource_s3.py

@@ -0,0 +1,133 @@
+import pytest
+import pymysql
+
+def test_insert_with_single_quotes():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, 'hello') where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    cursor.close()
+    db.close()
+    assert rowsaffected == 1
+
+'''
+def test_insert_with_double_quotes():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, \"hello\") where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''    
+
+'''
+def test_insert_remove_where_cluster():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource(uid, name) values(1, \"hello\")"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''
+
+'''
+def test_insert_remove_where_cluster_without_specify_key():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "insert into opensource values(1, \"Jack\", \"Shanghai\", 1, 18)"
+    cursor.execute(sql)
+    db.commit()
+    cursor.close()
+    db.close()
+'''
+
+def test_select():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "hello"
+    db.close()
+
+def test_update():
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "hello"
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "update opensource set name = 'Lee' where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    for row in results:
+        uid = row[0]
+        name = row[1]
+        print("uid=%s, name=%s" % (uid, name))
+        assert uid == 1
+        assert name == "Lee"
+    cursor.close()
+
+    db.close()
+
+def test_delete():
+    print("----delete----")
+    db = pymysql.connect(host='127.0.0.1', port=20015, user='test', password='test', database='test')
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "delete from opensource where uid = 1"
+    cursor.execute(sql)
+    db.commit()
+    rowsaffected = cursor.rowcount
+    print("affected rows: %s" % (rowsaffected))
+    assert rowsaffected == 1
+    cursor.close()
+
+    cursor = db.cursor()
+    sql = "select uid, name from opensource where uid = 1"
+    cursor.execute(sql)
+    results = cursor.fetchall()
+    assert len(results) == 0
+    cursor.close()
+
+    db.close()    
+
+'''
+def test_check_tablename():
+'''