diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml
index 95e18185b7..252f01cd31 100644
--- a/.github/workflows/build_packages.yaml
+++ b/.github/workflows/build_packages.yaml
@@ -51,7 +51,7 @@ jobs:
- name: prepare
run: |
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
- brew install curl zip unzip gnu-sed upx
+ brew install curl zip unzip gnu-sed upx pkg-config zmq
echo "/usr/local/bin:$PATH" >> ~/.bashrc
- name: build
run: |
diff --git a/.github/workflows/fvt_tests.yaml b/.github/workflows/fvt_tests.yaml
index 51e00c2290..83769a32e1 100644
--- a/.github/workflows/fvt_tests.yaml
+++ b/.github/workflows/fvt_tests.yaml
@@ -9,7 +9,7 @@ on:
- prereleased
jobs:
- fvt_tests:
+ fvt_tests_with_edgex:
runs-on: ubuntu-latest
steps:
@@ -21,11 +21,12 @@ jobs:
java-version: '8' # The JDK version to make available on the path.
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
architecture: x64 # (x64 or x86) - defaults to x64
- - name: set up jmeter
+ - name: install jmeter
+ timeout-minutes: 10
env:
JMETER_VERSION: 5.2.1
run: |
- wget -O /tmp/apache-jmeter.tgz http://mirror.bit.edu.cn/apache//jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ wget -O /tmp/apache-jmeter.tgz http://us.mirrors.quenda.co/apache//jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
cd /tmp && tar -xvf apache-jmeter.tgz
echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
@@ -39,17 +40,27 @@ jobs:
sudo dpkg -i emqx.deb
- uses: actions/checkout@v2
- name: build kuiper
- run: make
- - name: run emqx and kuiper
- run: sudo ./fvt_scripts/setup_env.sh
+ run: |
+ sudo apt update && sudo apt install pkg-config libczmq-dev -y
+ make build_with_edgex
+ - name: run edgex && emqx && kuiper
+ run: |
+ sudo ./fvt_scripts/setup_env.sh
+ ln -s _build/kuiper-$(git describe --tags --always)-$(uname -s | tr "[A-Z]" "[a-z]")-x86_64/log kuiper_logs
- name: run fvt tests
timeout-minutes: 5
- run: ./fvt_scripts/run_jmeter.sh
+ run: ./fvt_scripts/run_jmeter.sh with_edgex=true
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: kuiper_logs_with_edgex
+ path: ./kuiper_logs
- uses: actions/upload-artifact@v1
+ if: always()
with:
- name: jmeter.logs
+ name: jmeter_logs_with_edgex
path: ./jmeter_logs
- - name: checkout out
+ - name: check logs
run: |
sudo apt update && sudo apt install -y libxml2-utils
cd jmeter_logs
@@ -59,3 +70,184 @@ jobs:
exit 1
fi
+ fvt_tests_without_edgex:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/setup-go@v1
+ with:
+ go-version: '1.11.5'
+ - uses: actions/setup-java@v1
+ with:
+ java-version: '8' # The JDK version to make available on the path.
+ java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
+ architecture: x64 # (x64 or x86) - defaults to x64
+ - name: install jmeter
+ timeout-minutes: 10
+ env:
+ JMETER_VERSION: 5.2.1
+ run: |
+ wget -O /tmp/apache-jmeter.tgz http://us.mirrors.quenda.co/apache//jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ cd /tmp && tar -xvf apache-jmeter.tgz
+ echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ wget -O /tmp/apache-jmeter-$JMETER_VERSION/lib/ext/mqtt-xmeter-1.13-jar-with-dependencies.jar https://github.com/emqx/mqtt-jmeter/raw/master/Download/v1.13.0/mqtt-xmeter-1.13-jar-with-dependencies.jar
+ ln -s /tmp/apache-jmeter-$JMETER_VERSION /opt/jmeter
+ - name: install emqx
+ env:
+ EMQX_VERSION: v4.0.2
+ run: |
+ wget -O emqx.deb https://www.emqx.io/downloads/broker/${EMQX_VERSION}/emqx-ubuntu18.04-${EMQX_VERSION}_amd64.deb
+ sudo dpkg -i emqx.deb
+ - uses: actions/checkout@v2
+ - name: build kuiper
+ run: |
+ sudo apt update && sudo apt install pkg-config libczmq-dev -y
+ make
+ - name: run emqx && kuiper
+ run: |
+ sudo ./fvt_scripts/setup_env.sh
+ ln -s _build/kuiper-$(git describe --tags --always)-$(uname -s | tr "[A-Z]" "[a-z]")-x86_64/log kuiper_logs
+ - name: run fvt tests
+ timeout-minutes: 5
+ run: ./fvt_scripts/run_jmeter.sh with_edgex=false
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: kuiper_logs_without_edgex
+ path: ./kuiper_logs
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: jmeter_logs_without_edgex
+ path: ./jmeter_logs
+ - name: check logs
+ run: |
+ sudo apt update && sudo apt install -y libxml2-utils
+ cd jmeter_logs
+ if [ "$(xmllint --format --xpath '/testResults/sample/@rc' $(ls *.jtl) | sed -r 's/ /\n/g;' | sort -u | grep -E 'rc=\"[45][0-9][0-9]\"|rc=\"\"')" != "" ]; then
+ echo -e "---------------------------------------------\n"
+ echo "FVT tests error"
+ exit 1
+ fi
+
+ fvt_tests_for_container_in_helm:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-java@v1
+ with:
+ java-version: '8' # The JDK version to make available on the path.
+ java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
+ architecture: x64 # (x64 or x86) - defaults to x64
+ - name: install jmeter
+ timeout-minutes: 10
+ env:
+ JMETER_VERSION: 5.2.1
+ run: |
+ wget -O /tmp/apache-jmeter.tgz http://us.mirrors.quenda.co/apache//jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
+ cd /tmp && tar -xvf apache-jmeter.tgz
+ echo "jmeter.save.saveservice.output_format=xml" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ echo "jmeter.save.saveservice.response_data.on_error=true" >> /tmp/apache-jmeter-$JMETER_VERSION/user.properties
+ wget -O /tmp/apache-jmeter-$JMETER_VERSION/lib/ext/mqtt-xmeter-1.13-jar-with-dependencies.jar https://github.com/emqx/mqtt-jmeter/raw/master/Download/v1.13.0/mqtt-xmeter-1.13-jar-with-dependencies.jar
+ ln -s /tmp/apache-jmeter-$JMETER_VERSION /opt/jmeter
+ - name: setup jmeter
+ timeout-minutes: 10
+ run: |
+ wget -O "/opt/jmeter/lib/json-lib-2.4-jdk15.jar" https://repo1.maven.org/maven2/net/sf/json-lib/json-lib/2.4/json-lib-2.4-jdk15.jar
+ wget -O "/opt/jmeter/lib/commons-beanutils-1.8.0.jar" https://repo1.maven.org/maven2/commons-beanutils/commons-beanutils/1.8.0/commons-beanutils-1.8.0.jar
+ wget -O "/opt/jmeter/lib/commons-collections-3.2.1.jar" https://repo1.maven.org/maven2/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar
+ wget -O "/opt/jmeter/lib/commons-lang-2.5.jar" https://repo1.maven.org/maven2/commons-lang/commons-lang/2.5/commons-lang-2.5.jar
+ wget -O "/opt/jmeter/lib/commons-logging-1.1.1.jar" https://repo1.maven.org/maven2/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar
+ wget -O "/opt/jmeter/lib/ezmorph-1.0.6.jar" https://repo1.maven.org/maven2/net/sf/ezmorph/ezmorph/1.0.6/ezmorph-1.0.6.jar
+ - name: install docker
+ run: |
+ sudo apt-get remove docker docker-engine docker.io containerd runc
+ sudo apt-get update
+ sudo apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ sudo apt-get update
+ sudo apt-get install docker-ce docker-ce-cli containerd.io
+ - name: install k3s
+ env:
+ KUBECONFIG: "/etc/rancher/k3s/k3s.yaml"
+ run: |
+ sudo sh -c "echo \"127.0.0.1 $(hostname)\" >> /etc/hosts"
+ curl -sfL https://get.k3s.io | sh -
+ sudo chmod 644 /etc/rancher/k3s/k3s.yaml
+ kubectl cluster-info
+ - name: install helm
+ run: |
+ curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+ sudo chmod 700 get_helm.sh
+ sudo ./get_helm.sh
+ helm version
+ - name: build kuiper for docker
+ run: make docker
+ - name: run emqx on chart
+ env:
+ KUBECONFIG: "/etc/rancher/k3s/k3s.yaml"
+ timeout-minutes: 5
+ run: |
+ helm repo add emqx https://repos.emqx.io/charts
+ helm repo update
+ helm install emqx --set replicaCount=1 emqx/emqx
+ while [ "$(kubectl get StatefulSet -l app.kubernetes.io/name=emqx -o jsonpath='{.items[0].status.replicas}')" \
+ != "$(kubectl get StatefulSet -l app.kubernetes.io/name=emqx -o jsonpath='{.items[0].status.readyReplicas}')" ]; do
+ echo "waiting emqx started"
+ sleep 10
+ done
+ - name: run kuiper for chart
+ env:
+ KUBECONFIG: "/etc/rancher/k3s/k3s.yaml"
+ timeout-minutes: 5
+ run: |
+ version=$(git describe --tags --always)
+ emqx_address=$(kubectl get svc --namespace default emqx -o jsonpath="{.spec.clusterIP}")
+
+ sudo docker save emqx/kuiper:$version -o kuiper.tar.gz
+ sudo k3s ctr image import kuiper.tar.gz
+
+ sed -i -r "s/^appVersion: .*$/appVersion: ${version}/g" deploy/chart/kuiper/Chart.yaml
+ sed -i -r 's/ pullPolicy: .*$/ pullPolicy: Never/g' deploy/chart/kuiper/values.yaml
+ sed -i -r "s/ servers: \[.*\]$/ servers: \[tcp:\/\/${emqx_address}:1883\]/g" deploy/chart/kuiper/values.yaml
+
+ helm install kuiper deploy/chart/kuiper
+ while [ "$(kubectl get StatefulSet -l app.kubernetes.io/name=kuiper -o jsonpath='{.items[0].status.replicas}')" \
+ != "$(kubectl get StatefulSet -l app.kubernetes.io/name=kuiper -o jsonpath='{.items[0].status.readyReplicas}')" ]; do
+ echo "waiting kuiper started"
+ sleep 10
+ done
+ kuiper_address=$(kubectl get svc --namespace default kuiper -o jsonpath="{.spec.clusterIP}")
+ if [ $(curl -w %{http_code} -fsSL -o /dev/null $kuiper_address:9081/rules) != 200 ];then exit 1; fi
+ - name: check kuiper
+ env:
+ KUBECONFIG: "/etc/rancher/k3s/k3s.yaml"
+ timeout-minutes: 5
+ run: |
+ emqx_address=$(kubectl get svc --namespace default emqx -o jsonpath="{.spec.clusterIP}")
+ kuiper_address=$(kubectl get svc --namespace default kuiper -o jsonpath="{.spec.clusterIP}")
+ /opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_aggr_rule.jmx -Dsrv=${kuiper_address} -Dmqtt_srv=${emqx_address} -l jmeter_logs/select_aggr_rule.jtl -j jmeter_logs/select_aggr_rule.log
+ mkdir -p kuiper_logs
+ kubectl exec kuiper-0 -- cat /kuiper/log/stream.log > kuiper_logs/stream.log
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: kuiper_logs_with_helm
+ path: ./kuiper_logs
+ - uses: actions/upload-artifact@v1
+ if: always()
+ with:
+ name: jmeter_logs_with_helm
+ path: ./jmeter_logs
+ - name: check logs
+ run: |
+ sudo apt update && sudo apt install -y libxml2-utils
+ cd jmeter_logs
+ if [ "$(xmllint --format --xpath '/testResults/sample/@rc' $(ls *.jtl) | sed -r 's/ /\n/g;' | sort -u | grep -E 'rc=\"[45][0-9][0-9]\"|rc=\"\"')" != "" ]; then
+ echo -e "---------------------------------------------\n"
+ echo "FVT tests error"
+ exit 1
+ fi
\ No newline at end of file
diff --git a/.github/workflows/run_test_case.yaml b/.github/workflows/run_test_case.yaml
index bd9594189a..62ce8ff264 100644
--- a/.github/workflows/run_test_case.yaml
+++ b/.github/workflows/run_test_case.yaml
@@ -27,4 +27,5 @@ jobs:
go build --buildmode=plugin -o plugins/functions/Echo.so plugins/functions/echo.go
go build --buildmode=plugin -o plugins/functions/CountPlusOne.so plugins/functions/countPlusOne.go
go test ./...
+ go test --tags=edgex ./...
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000..97bf285c4e
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,5 @@
+FROM golang:1.13.4 AS builder
+
+COPY . /go/kuiper
+
+RUN apt update && apt install -y zip upx pkg-config libczmq-dev && make -C /go/kuiper pkg
\ No newline at end of file
diff --git a/Dockerfile-by-corss-build b/Dockerfile-by-corss-build
deleted file mode 100644
index 7080428989..0000000000
--- a/Dockerfile-by-corss-build
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM golang:1.13.4 AS builder
-
-COPY . /go/kuiper
-
-RUN apt update && apt install -y zip upx && make -C /go/kuiper pkg
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 1cac89ac46..474d524de5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,6 @@
BUILD_PATH ?= _build
PACKAGES_PATH ?= _packages
-GO111MODULE ?=
-GOPROXY ?= https://goproxy.io
-
CGO_ENABLED ?= 1
GOOS ?= ""
GOARCH ?= ""
@@ -25,7 +22,13 @@ endif
TARGET ?= emqx/kuiper
.PHONY: build
-build:
+build: build_without_edgex
+
+.PHONY:pkg
+pkg: pkg_without_edgex
+
+.PHONY: build_prepare
+build_prepare:
@mkdir -p $(BUILD_PATH)/$(PACKAGE_NAME)/bin
@mkdir -p $(BUILD_PATH)/$(PACKAGE_NAME)/etc
@mkdir -p $(BUILD_PATH)/$(PACKAGE_NAME)/etc/sources
@@ -39,19 +42,42 @@ build:
@cp -r etc/* $(BUILD_PATH)/$(PACKAGE_NAME)/etc
+.PHONY: build_without_edgex
+build_without_edgex: build_prepare
+ @if [ ! -z $(GOOS) ] && [ ! -z $(GOARCH) ] && [ $(CGO_ENABLED) == 0 ];then \
+ GO111MODULE=on GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o cli xstream/cli/main.go; \
+ GO111MODULE=on GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o server xstream/server/main.go; \
+ else \
+ GO111MODULE=on CGO_ENABLED=1 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o cli xstream/cli/main.go; \
+ GO111MODULE=on CGO_ENABLED=1 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o server xstream/server/main.go; \
+ fi
+ @if [ ! -z $$(which upx) ]; then upx ./cli; upx ./server; fi
+ @mv ./cli ./server $(BUILD_PATH)/$(PACKAGE_NAME)/bin
+ @echo "Build successfully"
+
+.PHONY: pkg_without_edgex
+pkg_without_edgex: build_without_edgex
+ @make real_pkg
+
+.PHONY: build_with_edgex
+build_with_edgex: build_prepare
@if [ ! -z $(GOOS) ] && [ ! -z $(GOARCH) ] && [ $(CGO_ENABLED) == 0 ];then \
- GO111MODULE=on GOPROXY=https://goproxy.io GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o cli xstream/cli/main.go; \
- GO111MODULE=on GOPROXY=https://goproxy.io GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o server xstream/server/main.go; \
+ GO111MODULE=on GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$(VERSION)" -tags edgex -o cli xstream/cli/main.go; \
+ GO111MODULE=on GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$(VERSION)" -tags edgex -o server xstream/server/main.go; \
else \
- GO111MODULE=on GOPROXY=https://goproxy.io CGO_ENABLED=1 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o cli xstream/cli/main.go; \
- GO111MODULE=on GOPROXY=https://goproxy.io CGO_ENABLED=1 go build -ldflags="-s -w -X main.Version=$(VERSION)" -o server xstream/server/main.go; \
+ GO111MODULE=on CGO_ENABLED=1 go build -ldflags="-s -w -X main.Version=$(VERSION)" -tags edgex -o cli xstream/cli/main.go; \
+ GO111MODULE=on CGO_ENABLED=1 go build -ldflags="-s -w -X main.Version=$(VERSION)" -tags edgex -o server xstream/server/main.go; \
fi
@if [ ! -z $$(which upx) ]; then upx ./cli; upx ./server; fi
@mv ./cli ./server $(BUILD_PATH)/$(PACKAGE_NAME)/bin
@echo "Build successfully"
-.PHONY: pkg
-pkg: build
+.PHONY: pkg_with_edgex
+pkg_with_edgex: build_with_edgex
+ @make real_pkg
+
+.PHONY: real_pkg
+real_pkg:
@mkdir -p $(PACKAGES_PATH)
@cd $(BUILD_PATH) && zip -rq $(PACKAGE_NAME).zip $(PACKAGE_NAME)
@cd $(BUILD_PATH) && tar -czf $(PACKAGE_NAME).tar.gz $(PACKAGE_NAME)
@@ -68,7 +94,7 @@ cross_build: cross_prepare
--platform=linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/ppc64le \
-t cross_build \
--output type=tar,dest=cross_build.tar \
- -f ./Dockerfile-by-corss-build .
+ -f ./Dockerfile .
@mkdir -p $(PACKAGES_PATH)
@tar -xvf cross_build.tar --wildcards linux_amd64/go/kuiper/_packages/* \
diff --git a/README-CN.md b/README-CN.md
index 351e4f59de..2f6e0e1192 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -147,11 +147,21 @@ Kuiper 可以运行在各类物联网的边缘使用场景中,比如工业物
#### 编译
-+ 编译二进制:``$ make``
++ 编译二进制:
-+ 安装文件打包:`` $ make pkg``
+ - 编译二进制文件: `$ make`
-+ Docker 镜像:``$ make docker``
+ - 编译支持 EdgeX 的二进制文件: `$ make build_with_edgex`
+
++ 安装文件打包:
+
+ - 安装文件打包:: `$ make pkg`
+
+ - 支持 EdgeX 的安装文件打包: `$ make pkg_with_edgex`
+
++ Docker 镜像:`$ make docker`
+
+ > Docker 镜像默认支持 EdgeX
如果您要实现交叉编译,请参考[此文档](docs/zh_CN/cross-compile.md)。
diff --git a/README.md b/README.md
index 4e1c838618..3dde05f838 100644
--- a/README.md
+++ b/README.md
@@ -147,9 +147,21 @@ It can be run at various IoT edge use scenarios, such as real-time processing of
#### Compile
-- Binary: ``$ make``
-- Packages: `` $ make pkg``
-- Docker images: ``$ make docker``
++ Binary:
+
+ - Binary: `$ make`
+
+ - Binary files that support EdgeX: `$ make build_with_edgex`
+
++ Packages:
+
+ - Packages: `$ make pkg`
+
+ - Packages files that support EdgeX: `$ make pkg_with_edgex`
+
++ Docker images: `$ make docker`
+
+ > Docker images support EdgeX by default
To using cross-compilation, refer to [this doc](docs/en_US/cross-compile.md).
diff --git a/deploy/chart/kuiper/Chart.yaml b/deploy/chart/kuiper/Chart.yaml
index af104ea4d8..4402bd75ba 100644
--- a/deploy/chart/kuiper/Chart.yaml
+++ b/deploy/chart/kuiper/Chart.yaml
@@ -14,8 +14,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
-version: 0.2.0
+version: 0.2.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
-appVersion: 0.2.0
+appVersion: 0.2.1
diff --git a/deploy/chart/kuiper/README.md b/deploy/chart/kuiper/README.md
index 914edf758f..5784235167 100644
--- a/deploy/chart/kuiper/README.md
+++ b/deploy/chart/kuiper/README.md
@@ -67,15 +67,7 @@ Kuiper can be deployed at k3s/k8s cluster through Helm chart. Below takes k3s as
| `nodeSelector` | Node selector | {} |
| `tolerations` | Tolerations | [] |
| `affinity` | Affinity | {} |
- | `mqtt.servers` | MQTT broker address | `[tcp://127.0.0.1:1883]` |
- | `mqtt.qos` | QoS of message subscription | 1 |
- | `mqtt.sharedSubscription` | Use shared subscription or not | true |
- | `mqtt.username` | MQTT connection user name | |
- | `mqtt.password` | MQTT connection password | |
- | `mqtt.certificationSecretName` | Secret resource name created for certification file. | |
- | `mqtt.privateKeySecretName` | Secret resource name created fro private key file | |
- | `mqtt.certificationPath` | Certification path for MQTT connection | |
- | `mqtt.privateKeyPath` | Private key path for MQTT connection | |
+ | `kuiperConfig` | Configuration file in the Kuiper `etc` directory | |
## Deploy Kuiper through Helm
@@ -169,10 +161,25 @@ Kuiper can be deployed at k3s/k8s cluster through Helm chart. Below takes k3s as
+ Open and edit `values.yaml` file
- + Set `mqtt.certificationSecretName` certification Secret resource: `mqtt.certificationSecretName: client-cert`
- + Set `mqtt.privateKeySecretName` private key Secret resource:`mqtt.privateKeySecretName: client-key`
- + Set certification file path: `mqtt.certificationPath: /var/kuiper/certificate.pem`
- + Set private key file path: `mqtt.privateKeyPath: /var/kuiper/private.pem.key`
+ ```shell
+ $ vim value.yaml
+ kuiperConfig:
+ ...
+ "mqtt_source.yaml":
+ #Global MQTT configurations
+ default:
+ qos: 1
+ sharedSubscription: true
+ servers: [tcp://127.0.0.1:1883]
+ concurrency: 1
+ #username: user1
+ #password: password
+ certificationSecretName: client-cert # Set certification Secret resource name
+ certificationPath: /var/kuiper/certificate.pem # Set certification file path
+ privateKeySecretName: client-key # Set private key Secret resource name
+ privateKeyPath: /var/kuiper/xyz-private.pem.key # Set private key file path
+ ...
+ ```
+ Deploy Kuiper through Helm
diff --git a/deploy/chart/kuiper/README_zh.md b/deploy/chart/kuiper/README_zh.md
index 79cecb7995..3a38ffe410 100644
--- a/deploy/chart/kuiper/README_zh.md
+++ b/deploy/chart/kuiper/README_zh.md
@@ -67,15 +67,7 @@ Kuiper 可以通过 Helm chart 部署在 k3s / k8s 集群上。下面以 k3s 为
| `nodeSelector` | 节点选择 | {} |
| `tolerations` | 污点容忍 | [] |
| `affinity` | 节点亲和性 | {} |
- | `mqtt.servers` | mqtt服务器的代理地址 | `[tcp://127.0.0.1:1883]` |
- | `mqtt.qos` | 消息转发的服务质量 | 1 |
- | `mqtt.sharedSubscription` | 是否使用共享订阅 | true |
- | `mqtt.username` | 连接用户名 | |
- | `mqtt.password` | 连接密码 | |
- | `mqtt.certificationSecretName` | 通过证书文件创建的 Secre 资源的名字 | |
- | `mqtt.privateKeySecretName` | 通过私钥文件创建的 Secre 资源的名字 | |
- | `mqtt.certificationPath` | 证书路径。必须是绝对路径。 | |
- | `mqtt.privateKeyPath` | 私钥路径。必须绝对路径。 | |
+ | `kuiperConfig` | Kuiper `etc` 目录下的配置文件 | |
## 通过 Helm 部署 Kuiper
@@ -169,10 +161,25 @@ Kuiper 可以通过 Helm chart 部署在 k3s / k8s 集群上。下面以 k3s 为
+ 编辑 `values.yaml` 文件
- + 设置 `mqtt.certificationSecretName` 为证书文件 Secret 资源: `mqtt.certificationSecretName: client-cert`
- + 设置 `mqtt.privateKeySecretName` 为私钥文件 Secret 资源:`mqtt.privateKeySecretName: client-key`
- + 设置证书文件部署路径:`mqtt.certificationPath: /var/kuiper/certificate.pem`
- + 设置私钥文件部署路径:`mqtt.privateKeyPath: /var/kuiper/private.pem.key`
+ ```shell
+ $ vim value.yaml
+ kuiperConfig:
+ ...
+ "mqtt_source.yaml":
+ #Global MQTT configurations
+ default:
+ qos: 1
+ sharedSubscription: true
+ servers: [tcp://127.0.0.1:1883]
+ concurrency: 1
+ #username: user1
+ #password: password
+ certificationSecretName: client-cert # 设置证书文件 Secret resource name
+ certificationPath: /var/kuiper/certificate.pem # 设置证书文件部署路径
+ privateKeySecretName: client-key # 设置私钥文件的 Secret resource name
+ privateKeyPath: /var/kuiper/xyz-private.pem.key # 设置私钥文件部署路径
+ ...
+ ```
+ 使用 Helm 部署 Kuiper
diff --git a/deploy/chart/kuiper/templates/StatefulSet.yaml b/deploy/chart/kuiper/templates/StatefulSet.yaml
index dbfe9f5d1a..0fea221c9d 100644
--- a/deploy/chart/kuiper/templates/StatefulSet.yaml
+++ b/deploy/chart/kuiper/templates/StatefulSet.yaml
@@ -36,8 +36,6 @@ spec:
labels:
{{- include "kuiper.selectorLabels" . | nindent 8 }}
spec:
- # securityContext:
- # fsGroup: 1000
volumes:
{{- if not .Values.persistence.enabled }}
- name: kuiper-data
@@ -49,40 +47,74 @@ spec:
claimName: {{ tpl . $ }}
{{- end }}
{{- end }}
- - name: mqtt
+ - name: kuiper-config
configMap:
name: {{ include "kuiper.fullname" . }}
items:
- - key: mqtt.yaml
- path: mqtt.yaml
- {{- if .Values.mqtt.certificationSecretName }}
+ - key: mqtt_source.yaml
+ path: mqtt_source.yaml
+ - key: kuiper.yaml
+ path: kuiper.yaml
+ - key: client.yaml
+ path: client.yaml
+ - key: edgex.yaml
+ path: edgex.yaml
+ - key: random.yaml
+ path: random.yaml
+ - key: zmq.yaml
+ path: zmq.yaml
+ {{ $certificationSecretName := index .Values "kuiperConfig" "mqtt_source.yaml" "default" "certificationSecretName" }}
+ {{- if $certificationSecretName }}
- name: kuiper-certification
secret:
- secretName: {{ .Values.mqtt.certificationSecretName }}
+ secretName: {{ $certificationSecretName }}
{{- end }}
- {{- if .Values.mqtt.privateKeySecretName }}
+ {{ $privateKeySecretName := index .Values "kuiperConfig" "mqtt_source.yaml" "default" "privateKeySecretName" }}
+ {{- if $privateKeySecretName }}
- name: kuiper-private-key
secret:
- secretName: {{ .Values.mqtt.privateKeySecretName }}
+ secretName: {{ $privateKeySecretName }}
{{- end }}
containers:
- name: kuiper
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ {{ $restPort := index .Values "kuiperConfig" "kuiper.yaml" "basic" "restPort" }}
+ - containerPort: {{ $restPort | default 9081 }}
+ {{ $port := index .Values "kuiperConfig" "kuiper.yaml" "basic" "port" }}
+ - containerPort: {{ $port | default 20498 }}
volumeMounts:
- name: kuiper-data
mountPath: "/kuiper/data"
- - name: mqtt
- mountPath: "/kuiper/etc/sources/mqtt.yaml"
- subPath: "mqtt.yaml"
- {{ if .Values.mqtt.certificationSecretName }}
+ - name: kuiper-config
+ mountPath: "/kuiper/etc/mqtt_source.yaml"
+ subPath: "mqtt_source.yaml"
+ - name: kuiper-config
+ mountPath: "/kuiper/etc/kuiper.yaml"
+ subPath: "kuiper.yaml"
+ - name: kuiper-config
+ mountPath: "/kuiper/etc/client.yaml"
+ subPath: "client.yaml"
+ - name: kuiper-config
+ mountPath: "/kuiper/etc/sources/edgex.yaml"
+ subPath: "edgex.yaml"
+ - name: kuiper-config
+ mountPath: "/kuiper/etc/sources/random.yaml"
+ subPath: "random.yaml"
+ - name: kuiper-config
+ mountPath: "/kuiper/etc/sources/zmq.yaml"
+ subPath: "zmq.yaml"
+ {{ $certificationSecretName := index .Values "kuiperConfig" "mqtt_source.yaml" "default" "certificationSecretName" }}
+ {{- if $certificationSecretName }}
- name: kuiper-certification
- mountPath: {{ .Values.mqtt.certificationPath | default "/var/kuiper/certificate.pem" }}
+ mountPath: {{ index .Values "kuiperConfig" "mqtt_source.yaml" "default" "certificationPath" | default "/var/kuiper/certificate.pem" }}
readOnly: true
{{ end }}
- {{ if .Values.mqtt.privateKeySecretName }}
+ {{ $privateKeySecretName := index .Values "kuiperConfig" "mqtt_source.yaml" "default" "privateKeySecretName" }}
+ {{- if $privateKeySecretName }}
- name: kuiper-private-key
- mountPath: {{ .Values.mqtt.privateKeyPath | default "/var/kuiper/private.pem.key" }}
+ mountPath: {{ index .Values "kuiperConfig" "mqtt_source.yaml" "default" "privateKeyPath" | default "/var/kuiper/private.pem.key" }}
readOnly: true
{{ end }}
resources:
diff --git a/deploy/chart/kuiper/templates/configmap.yaml b/deploy/chart/kuiper/templates/configmap.yaml
index c41016cde2..3bb4c141aa 100644
--- a/deploy/chart/kuiper/templates/configmap.yaml
+++ b/deploy/chart/kuiper/templates/configmap.yaml
@@ -6,34 +6,21 @@ metadata:
labels:
{{- include "kuiper.labels" . | nindent 4 }}
data:
- "random.yaml": |
- default:
- interval: 1000
- pattern:
- count: 50
- ext:
- interval: 300
- seed: 1
- pattern:
- count: 50
- "zmq.yaml": |
- #Global Zmq configurations
- default:
- server: tcp://127.0.0.1:5563
- "mqtt.yaml": |
- #Global MQTT configurations
- default:
- {{- toYaml .Values.mqtt | nindent 8 }}
- #Override the global configurations
- demo_conf: #Conf_key
- qos: 0
- servers: [tcp://10.211.55.6:1883, tcp://127.0.0.1]
- "client.yaml": |
- basic:
- host: 127.0.0.1
- port: 20498
- "kuiper.yaml": |
- basic:
- # true|false, with debug level, it prints more debug info
- debug: false
- port: 20498
\ No newline at end of file
+ "client.yaml": |
+ {{ $client := index .Values "kuiperConfig" "client.yaml" }}
+ {{- toYaml $client | nindent 6 }}
+ "kuiper.yaml": |
+ {{ $kuiper := index .Values "kuiperConfig" "kuiper.yaml" }}
+ {{- toYaml $kuiper | nindent 6 }}
+ "mqtt_source.yaml": |
+ {{ $mqtt := index .Values "kuiperConfig" "mqtt_source.yaml" }}
+ {{- toYaml $mqtt | nindent 6 }}
+ "edgex.yaml": |
+ {{ $edgex := index .Values "kuiperConfig" "sources/edgex.yaml" }}
+ {{- toYaml $edgex | nindent 6 }}
+ "random.yaml": |
+ {{ $random := index .Values "kuiperConfig" "sources/random.yaml" }}
+ {{- toYaml $random | nindent 6 }}
+ "zmq.yaml": |
+ {{ $zmq := index .Values "kuiperConfig" "sources/zmq.yaml" }}
+ {{- toYaml $zmq | nindent 6 }}
\ No newline at end of file
diff --git a/deploy/chart/kuiper/templates/sevice.yaml b/deploy/chart/kuiper/templates/sevice.yaml
index 7362101b98..13a69031c5 100644
--- a/deploy/chart/kuiper/templates/sevice.yaml
+++ b/deploy/chart/kuiper/templates/sevice.yaml
@@ -1,5 +1,29 @@
apiVersion: v1
kind: Service
+metadata:
+ name: {{ include "kuiper.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kuiper.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - name: rest-port
+ {{ $restPort := index .Values "kuiperConfig" "kuiper.yaml" "basic" "restPort" }}
+ port: {{ $restPort | default 9081 }}
+ protocol: TCP
+ targetPort: {{ $restPort | default 9081 }}
+ - name: port
+ {{ $port := index .Values "kuiperConfig" "kuiper.yaml" "basic" "port" }}
+ port: {{ $port | default 20498 }}
+ protocol: TCP
+ targetPort: {{ $port | default 20498 }}
+ selector:
+ {{- include "kuiper.selectorLabels" . | nindent 4 }}
+
+---
+apiVersion: v1
+kind: Service
metadata:
name: {{ include "kuiper.fullname" . }}-headless
namespace: {{ .Release.Namespace }}
diff --git a/deploy/chart/kuiper/values.yaml b/deploy/chart/kuiper/values.yaml
index 43b5e640bc..170960701f 100644
--- a/deploy/chart/kuiper/values.yaml
+++ b/deploy/chart/kuiper/values.yaml
@@ -8,6 +8,9 @@ image:
repository: emqx/kuiper
pullPolicy: IfNotPresent
+service:
+ type: ClusterIP
+
persistence:
enabled: false
size: 20Mi
@@ -38,14 +41,65 @@ tolerations: []
affinity: {}
-mqtt:
- #Global MQTT configurations
- qos: 1
- sharedSubscription: true
- servers: [tcp://127.0.0.1:1883]
- #username: user1
- #password: password
- certificationSecretName: client-cert
- privateKeySecretName: client-key
- #certificationPath: /var/kuiper/certificate.pem
- #privateKeyPath: /var/kuiper/private.pem.key
+kuiperConfig:
+ "client.yaml":
+ basic:
+ host: 127.0.0.1
+ port: 20498
+ "kuiper.yaml":
+ basic:
+ # true|false, with debug level, it prints more debug info
+ debug: false
+ port: 20498
+ restPort: 9081
+ prometheus: false
+ prometheusPort: 20499
+ "mqtt_source.yaml":
+ #Global MQTT configurations
+ default:
+ qos: 1
+ sharedSubscription: true
+ servers: [tcp://127.0.0.1:1883]
+ concurrency: 1
+ #username: user1
+ #password: password
+ #certificationSecretName: Secret resource name created for certification file
+ #certificationPath: /var/kuiper/xyz-certificate.pem
+ #privateKeySecretName: Secret resource name created for certification file
+ #privateKeyPath: /var/kuiper/xyz-private.pem.key
+ #Override the global configurations
+ demo_conf: #Conf_key
+ qos: 0
+ servers: [tcp://10.211.55.6:1883, tcp://127.0.0.1]
+ "sources/edgex.yaml":
+ #Global Edgex configurations
+ default:
+ protocol: tcp
+ server: localhost
+ port: 5570
+ topic: events
+ serviceServer: http://localhost:10080
+ # optional:
+ # ClientId: client1
+ # Username: user1
+ # Password: password
+ #Override the global configurations
+ application_conf: #Conf_key
+ protocol: tcp
+ server: localhost
+ port: 5571
+ topic: application
+ "sources/random.yaml":
+ default:
+ interval: 1000
+ pattern:
+ count: 50
+ ext:
+ interval: 300
+ seed: 1
+ pattern:
+ count: 50
+ "sources/zmq.yaml":
+ #Global Zmq configurations
+ default:
+ server: tcp://127.0.0.1:5563
diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile
index 5294c4146c..19db4d75ee 100644
--- a/deploy/docker/Dockerfile
+++ b/deploy/docker/Dockerfile
@@ -4,17 +4,19 @@ COPY . /go/kuiper
WORKDIR /go/kuiper
-RUN apk add upx gcc make git libc-dev binutils-gold pkgconfig zeromq-dev && make
+RUN apk add upx gcc make git libc-dev binutils-gold pkgconfig zeromq-dev && make build_with_edgex
FROM alpine:3.10
COPY --from=builder /go/kuiper/_build/kuiper-* /kuiper/
COPY ./deploy/docker/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-RUN apk add sed
+RUN apk add sed libzmq
WORKDIR /kuiper
+EXPOSE 9081 20498
+
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["./bin/server"]
\ No newline at end of file
diff --git a/deploy/docker/README.md b/deploy/docker/README.md
index 1ed195f849..8ea0b914a4 100644
--- a/deploy/docker/README.md
+++ b/deploy/docker/README.md
@@ -144,19 +144,32 @@ docker run -d --name kuiper -e MQTT_BROKER_ADDRESS=$MQTT_BROKER_ADDRESS emqx/kui
### Configuration
-Use the environment variable to configure `etc/sources/mqtt.yaml` on the Kuiper container.
-
-| Options | Default | Mapped |
-| ---------------------------| ------------------ | ------------------------- |
-| MQTT_BROKER_ADDRESS | tcp://127.0.0.1:1883 | default.servers |
-| MQTT_BROKER_SHARED_SUBSCRIPTION | true | default.sharedSubscription |
-| MQTT_BROKER_QOS | 1 | default.qos |
-| MQTT_BROKER_USERNAME | | default.username |
-| MQTT_BROKER_PASSWORD | | default.password |
-| MQTT_BROKER_CER_PATH | | default.certificationPath |
-| MQTT_BROKER_KEY_PATH | | default.privateKeyPath |
-
-If you want to configure more options, you can mount the configuration file into Kuiper container.
+Use the environment variable to configure `etc/mqtt_source.yaml` on the Kuiper container.
+
+| Options | Default | Mapped |
+| ------------------------------- | --------------------- | --------------------------- |
+| MQTT_BROKER_ADDRESS | tcp://127.0.0.1:1883 | default.servers |
+| MQTT_BROKER_SHARED_SUBSCRIPTION | true | default.sharedSubscription |
+| MQTT_BROKER_QOS | 1 | default.qos |
+| MQTT_BROKER_USERNAME | | default.username |
+| MQTT_BROKER_PASSWORD | | default.password |
+| MQTT_BROKER_CER_PATH | | default.certificationPath |
+| MQTT_BROKER_KEY_PATH | | default.privateKeyPath |
+
+Use the environment variable to configure `etc/sources/edgex.yaml` on the Kuiper container.
+
+| Options | Default | Mapped |
+| ---------------------------| -------------------------| ------------------------- |
+| EDGEX_PROTOCOL | tcp | default.protocol |
+| EDGEX_SERVER | localhost | default.server |
+| EDGEX_PORT | 5570 | default.port |
+| EDGEX_TOPIC | events | default.topic |
+| EDGEX_SERVICE_SERVER | http://localhost:10080 | default.serviceServer |
+
+If you want to configure more options, you can mount the configuration file into Kuiper container, like this:
+```
+$ docker run --name kuiper -v /path/to/mqtt_source.yaml:/kuiper/etc/mqtt_source.yaml -v /path/to/edgex.yaml:/kuiper/etc/sources/edgex.yaml emqx/kuiper:$tag
+```
# More
diff --git a/deploy/docker/docker-entrypoint.sh b/deploy/docker/docker-entrypoint.sh
index 71fa5131ee..1533e737f5 100755
--- a/deploy/docker/docker-entrypoint.sh
+++ b/deploy/docker/docker-entrypoint.sh
@@ -12,43 +12,75 @@ CONFIG="$KUIPER_HOME/etc/mqtt_source.yaml"
if [ ! -z "$MQTT_BROKER_ADDRESS" ]; then
sed -i '/default:/ ,/servers/{/servers/d}' $CONFIG
sed -i "/default:/a\ servers: [$MQTT_BROKER_ADDRESS]" $CONFIG
- echo "default.servers = $MQTT_BROKER_ADDRESS"
+ echo "mqtt.default.servers = $MQTT_BROKER_ADDRESS"
fi
if [ ! -z "$MQTT_BROKER_SHARED_SUBSCRIPTION" ]; then
sed -i '/default:/ ,/sharedSubscription/{/sharedSubscription/d}' $CONFIG
sed -i "/default:/a\ sharedSubscription: $MQTT_BROKER_SHARED_SUBSCRIPTION" $CONFIG
- echo "default.sharedSubscription = $MQTT_BROKER_SHARED_SUBSCRIPTION"
+ echo "mqtt.default.sharedSubscription = $MQTT_BROKER_SHARED_SUBSCRIPTION"
fi
if [ ! -z "$MQTT_BROKER_QOS" ]; then
sed -i '/default:/ ,/qos/{/qos/d}' $CONFIG
sed -i "/default:/a\ qos: $MQTT_BROKER_QOS" $CONFIG
- echo "default.qos = $MQTT_BROKER_QOS"
+ echo "mqtt.default.qos = $MQTT_BROKER_QOS"
fi
if [ ! -z "$MQTT_BROKER_USERNAME" ]; then
sed -i '/default:/ ,/username/{/username/d}' $CONFIG
sed -i "/default:/a\ username: $MQTT_BROKER_USERNAME" $CONFIG
- echo "default.username = $MQTT_BROKER_USERNAME"
+ echo "mqtt.default.username = $MQTT_BROKER_USERNAME"
fi
if [ ! -z "$MQTT_BROKER_PASSWORD" ]; then
sed -i '/default:/ ,/password/{/password/d}' $CONFIG
sed -i "/default:/a\ password: $MQTT_BROKER_PASSWORD" $CONFIG
- echo "default.password = $MQTT_BROKER_PASSWORD"
+ echo "mqtt.default.password = $MQTT_BROKER_PASSWORD"
fi
if [ ! -z "$MQTT_BROKER_CER_PATH" ]; then
sed -i '/default:/ ,/certificationPath/{/certificationPath/d}' $CONFIG
sed -i "/default:/a\ certificationPath: $MQTT_BROKER_CER_PATH" $CONFIG
- echo "default.certificationPath = $MQTT_BROKER_CER_PATH"
+ echo "mqtt.default.certificationPath = $MQTT_BROKER_CER_PATH"
fi
if [ ! -z "$MQTT_BROKER_KEY_PATH" ]; then
sed -i '/default:/ ,/privateKeyPath/{/privateKeyPath/d}' $CONFIG
sed -i "/default:/a\ privateKeyPath: $MQTT_BROKER_KEY_PATH" $CONFIG
- echo "default.privateKeyPath = $MQTT_BROKER_KEY_PATH"
+ echo "mqtt.default.privateKeyPath = $MQTT_BROKER_KEY_PATH"
+fi
+
+EDGEX_CONFIG="$KUIPER_HOME/etc/sources/edgex.yaml"
+
+if [ ! -z "$EDGEX_PROTOCOL" ]; then
+ sed -i '/default:/ ,/protocol/{/protocol/d}' $EDGEX_CONFIG
+ sed -i "/default:/a\ protocol: $EDGEX_PROTOCOL" $EDGEX_CONFIG
+ echo "edgex.default.protocol = $EDGEX_PROTOCOL"
+fi
+
+if [ ! -z "$EDGEX_SERVER" ]; then
+ sed -i '/default:/ ,/server/{/server/d}' $EDGEX_CONFIG
+ sed -i "/default:/a\ server: $EDGEX_SERVER" $EDGEX_CONFIG
+ echo "edgex.default.server = $EDGEX_SERVER"
+fi
+
+if [ ! -z "$EDGEX_PORT" ]; then
+ sed -i '/default:/ ,/port/{/port/d}' $EDGEX_CONFIG
+ sed -i "/default:/a\ port: $EDGEX_PORT" $EDGEX_CONFIG
+ echo "edgex.default.port = $EDGEX_PORT"
+fi
+
+if [ ! -z "$EDGEX_TOPIC" ]; then
+ sed -i '/default:/ ,/topic/{/topic/d}' $EDGEX_CONFIG
+ sed -i "/default:/a\ topic: $EDGEX_TOPIC" $EDGEX_CONFIG
+ echo "edgex.default.topic = $EDGEX_TOPIC"
+fi
+
+if [ ! -z "$EDGEX_SERVICE_SERVER" ]; then
+ sed -i '/default:/ ,/serviceServer/{/serviceServer/d}' $EDGEX_CONFIG
+ sed -i "/default:/a\ serviceServer: $EDGEX_SERVICE_SERVER" $EDGEX_CONFIG
+ echo "edgex.default.serviceServer = $EDGEX_SERVICE_SERVER"
fi
exec "$@"
diff --git a/docs/en_US/README.md b/docs/en_US/README.md
deleted file mode 100644
index 4edb4cf805..0000000000
--- a/docs/en_US/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# A lightweight IoT edge analytic software
-
-## Highlight
-
-A SQL based lightweight IoT analytics/streaming software running at resource constrained edge devices.
-- Native run with small overhead ( ~7MB package), support Linux/Windows/Mac OS
-- SQL based, easy to use
-- Built-in support for MQTT source
-- Extension - user can customize the rule engine
-- RESTful APIs for rules management
-
-## Document
-English
-- [Getting started](./getting_started.md)
-- [Reference guide](./reference.md)
-
-中文
-- [入门教程](https://docs.emqx.io/kuiper/cn/getting_started.html)
-- [参考指南](https://docs.emqx.io/kuiper/cn/reference.html)
-
-## Build from source code
-
-#### Prepare
-
-+ Go version >= 1.11
-
-#### Build binary file
-
-+ Build binary file
-
- ```shell
- $ make
- ```
-
-+ Cross build binary file
-
- ```shell
- $ GOOS=linux GOARCH=arm make
- ```
-
-#### Get the compressed file
-
-+ Get the compressed files
-
- ```
- $ make pkg
- ```
-
-+ Get the cross-build compressed file
-
- ```
- $ GOOS=linux GOARCH=arm make pkg
- ```
-
diff --git a/docs/en_US/edgex/arch_dark.png b/docs/en_US/edgex/arch_dark.png
new file mode 100644
index 0000000000..03a93af021
Binary files /dev/null and b/docs/en_US/edgex/arch_dark.png differ
diff --git a/docs/en_US/edgex/arch_light.png b/docs/en_US/edgex/arch_light.png
new file mode 100644
index 0000000000..79cd03a447
Binary files /dev/null and b/docs/en_US/edgex/arch_light.png differ
diff --git a/docs/en_US/edgex/bus_data.png b/docs/en_US/edgex/bus_data.png
new file mode 100644
index 0000000000..45bb3a18c0
Binary files /dev/null and b/docs/en_US/edgex/bus_data.png differ
diff --git a/docs/en_US/edgex/create_stream.png b/docs/en_US/edgex/create_stream.png
new file mode 100644
index 0000000000..7e64be20ac
Binary files /dev/null and b/docs/en_US/edgex/create_stream.png differ
diff --git a/docs/en_US/edgex/edgex_meta.md b/docs/en_US/edgex/edgex_meta.md
new file mode 100644
index 0000000000..6181eea31f
--- /dev/null
+++ b/docs/en_US/edgex/edgex_meta.md
@@ -0,0 +1,87 @@
+# How to use the meta function to extract additional data from the EdgeX message bus?
+
+When data are published into the EdgeX message bus, besides the actual device value, the event also has some additional values, such as the event created time and modified time. Sometimes these values are required for data analysis. This article describes how to use the functions provided by Kuiper to achieve this goal.
+
+## Events data model received in EdgeX message bus
+
+The data structure received from the EdgeX message bus is listed below. An ``Event`` structure encapsulates related metadata (ID, Pushed, Device, Created, Modified, Origin), along with the actual data (in the ``Readings`` field) collected from the device service.
+
+Similar to ``Event``, ``Reading`` also has some metadata (ID, Pushed... etc).
+
+- Event
+ - ID
+ - Pushed
+ - Device
+ - Created
+ - Modified
+ - Origin
+ - Readings
+ - reading [0]
+ - Id
+ - Pushed
+ - Created
+ - Origin
+ - Modified
+ - Device
+ - Name
+ - Value
+ - reading [1]
+ - ... // The same as in reading[0]
+ - ...
+ - reading [n] ...
+
+## EdgeX data model in Kuiper
+
+So how the EdgeX data are managed in Kuiper? Let's take an example.
+
+As in below - firstly, user creates an EdgeX stream named ``events`` with yellow color.
+
+
+
+Secondly, one message is published to message bus as in below.
+
+- The device name is ``demo`` with green color
+- Reading name ``temperature`` & ``Humidity`` with red color.
+- It has some ``metadata`` that is not necessarily "visible", but will probably be used during data analysis, such as the ``Created`` field in the ``Event`` structure. Kuiper saves these values into a message tuple named metadata, and users can get these values during analysis.
+
+
+
+Thirdly, a SQL is provided for data analysis. Please notice that,
+
+- The ``events`` in FROM clause is yellow color, which is a stream name defined in the 1st step.
+- The SELECT fields ``temperature`` & ``humidity`` are red color, which are the ``Name`` field of readings.
+- The WHERE clause ``meta(device)`` in green color, which is used for extracting the ``device`` field from the ``Events`` structure. The SQL statement will filter out data whose device name is not ``demo``.
+
+
+
+Below are some other samples that extract other metadata through ``meta`` function.
+
+1. ``meta(created)``: 000
+
+ Get 'created' metadata from Event structure
+
+2. ``meta(temperature -> created)``: 123
+
+ Get 'created' metadata from reading[0], key with 'temperature'
+
+3. ``meta(humidity -> created)``: 456
+
+ Get 'created' metadata from reading[1], key with 'humidity'
+
+Please notice that if you want to extract metadata from readings, you need to use the ``reading-name -> key`` operator to access the value. In the previous samples, ``temperature`` & ``humidity`` are ``reading-names``, and ``key`` is the field name in readings.
+
+However, if you want to get data from ``Events``, just need to specify the key directly. As the 1st sample in previous list.
+
+The ``meta`` function can also be used in ``SELECT`` clause, below is another example. Please notice that if multiple ``meta`` functions are used in ``SELECT`` clause, you should use ``AS`` to specify an alias name, otherwise, the data of previous fields will be overwritten.
+
+```sql
+SELECT temperature,humidity, meta(id) AS eid,meta(Created) AS ec, meta(temperature->pushed) AS tpush, meta(temperature->Created) AS tcreated, meta(temperature->Origin) AS torigin, meta(Humidity->Device) AS hdevice, meta(Humidity->Modified) AS hmodified FROM demo WHERE meta(device)="demo2"
+```
+
+## Summary
+
+``meta`` function can be used in Kuiper to access metadata values. Below lists all available keys for ``Events`` and ``Reading``.
+
+- Events: id, pushed, device, created, modified, origin, correlationid
+- Reading: id, created, modified, origin, pushed, device
+
diff --git a/docs/en_US/edgex/edgex_rule_engine_tutorial.md b/docs/en_US/edgex/edgex_rule_engine_tutorial.md
new file mode 100644
index 0000000000..3c48d7766c
--- /dev/null
+++ b/docs/en_US/edgex/edgex_rule_engine_tutorial.md
@@ -0,0 +1,257 @@
+# EdgeX rule engine tutorial
+
+## Overview
+
+In EdgeX Geneva, [EMQ X Kuiper - an SQL based rule engine](https://github.com/emqx/kuiper) is integrated with EdgeX. Before diving into this tutorial, let's spend a little time on learning basic knowledge of Kuiper. Kuiper is an edge lightweight IoT data analytics / streaming software implemented by Golang, and it can be run at all kinds of resource constrained edge devices. Kuiper rules are based on ``Source``, ``SQL`` and ``Sink``.
+
+- Source: The data source of streaming data, such as data from MQTT broker. In EdgeX scenario, the data source is EdgeX message bus, which could be ZeroMQ or MQTT broker.
+- SQL: SQL is where you specify the business logic of streaming data processing. Kuiper provides SQL-like statements to allow you to extract, filter & transform data.
+- Sink: Sink is used for sending analysis results to a specified target. For example, send the analysis result to another MQTT broker, or an HTTP rest address.
+
+![](../../resources/arch.png)
+
+Following three steps are required for using Kuiper.
+
+- Create a stream, where you specify the data source.
+- Write a rule.
+ - Write a SQL for data analysis
+ - Specify a sink target for saving analysis result
+- Deploy and run rule.
+
+The tutorial demonstrates how to use Kuiper to process the data from EdgeX message bus.
+
+## Kuiper EdgeX integration
+
+EdgeX uses [message bus](https://github.com/edgexfoundry/go-mod-messaging) to exchange information between different micro services. It contains the abstract message bus interface and an implementation for ZeroMQ & MQTT (NOTICE: **ONLY ZeroMQ** message bus is supported in Kuiper rule engine, MQTT will be supported in later versions). The integration work for Kuiper & EdgeX includes following 3 parts.
+
+- An EdgeX message bus source is extended to support consuming data from EdgeX message bus.
+
+- To analyze the data, Kuiper needs to know the data types that pass through it. Generally, it is better for the user to specify the data schema for the analysis data when a stream is created. As shown below, a ``demo`` stream has a field named ``temperature``. This is very similar to creating a table schema in a relational database system. After creating the stream definition, Kuiper can perform type checking during compilation or runtime, and invalid SQLs or data will be reported to the user.
+
+ ```shell
+ CREATE STREAM demo (temperature bigint) WITH (FORMAT="JSON"...)
+ ```
+
+ However, since data type definitions are already specified in the EdgeX ``Core contract Service``, and to improve the user experience, users are NOT required to specify data types when creating a stream. Kuiper source tries to load all of the ``value descriptors`` from the ``Core contract Service`` during initialization of a rule (so if you have any updated value descriptors, you will have to **restart the rule**); then any data sent from the message bus will be converted into [corresponding data types](../rules/sources/edgex.md).
+
+- An EdgeX message bus sink is extended to support sending analysis results back to the EdgeX message bus. Users can also choose to send analysis results to a REST API, which Kuiper already supports.
+
+![](arch_light.png)
+
+## Start to use
+
+### Pull Kuiper Docker and run
+
+It's recommended to use Docker, since related dependency libraries (such ZeroMQ lib) are already installed in Docker images.
+
+```shell
+docker pull emqx/kuiper:0.2.1
+```
+
+TODO: After the official release of EdgeX Geneva, the Kuiper docker image will be pulled automatically by the EdgeX docker compose files. The command will be updated by then.
+
+**Run Docker**
+
+```
+docker run -d --name kuiper emqx/kuiper:0.2.1
+```
+
+If the docker instance fails to start, please use ``docker logs kuiper`` to see the log files.
+
+### Create a device service
+
+In this tutorial, we use a very simple mock-up device service. Please follow the steps in [this doc](https://fuji-docs.edgexfoundry.org/Ch-GettingStartedSDK-Go.html) to develop and run the random number service.
+
+### Create a stream
+
+There are two approaches to manage stream, you can use your preferred approach.
+
+#### Option 1: Use Rest API
+
+The next step is to create a stream that can consume data from the EdgeX message bus. Please change ``127.0.0.1`` to your local Kuiper docker IP address.
+
+```shell
+curl -X POST \
+ http://127.0.0.1:9081/streams \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "sql": "create stream demo() WITH (FORMAT=\"JSON\", TYPE=\"edgex\")"
+}'
+```
+
+For other Rest APIs, please refer to [this doc](../restapi/overview.md).
+
+#### Option 2: Use Kuiper CLI
+
+Run following command to enter the running Kuiper docker instance.
+
+```shell
+docker exec -it kuiper /bin/sh
+```
+
+Use following command to create a stream named ``demo``.
+
+```shell
+bin/cli create stream demo'() WITH (FORMAT="JSON", TYPE="edgex")'
+```
+
+For other command line tools, please refer to [this doc](../cli/overview.md).
+
+------
+
+Now the stream is created. But you may be curious about how Kuiper knows the message bus IP address & port, because such information is not specified in the ``CREATE STREAM`` statement. Those configurations are managed in ``etc/sources/edgex.yaml``; you can type the ``cat etc/sources/edgex.yaml`` command to take a look at the contents of the file. If you have different server, port & service server configurations, please update them accordingly.
+
+```yaml
+#Global Edgex configurations
+default:
+ protocol: tcp
+ server: localhost
+ port: 5570
+ topic: events
+ serviceServer: http://localhost:10080
+.....
+```
+
+For more detailed information of configuration file, please refer to [this doc](../rules/sources/edgex.md).
+
+### Create a rule
+
+Let's create a rule that sends result data to an MQTT broker; for detailed information on the MQTT sink, please refer to [this link](../rules/sinks/mqtt.md). Similar to creating a stream, you can also choose REST or CLI to manage rules.
+
+So the below rule will filter all of ``randomnumber`` that is less than 31. The sink result will be published to topic ``result`` of public MQTT broker ``broker.emqx.io``.
+
+#### Option 1: Use Rest API
+
+```shell
+curl -X POST \
+ http://127.0.0.1:9081/rules \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "id": "rule1",
+ "sql": "SELECT * FROM demo WHERE randomnumber > 30",
+ "actions": [
+ {
+ "mqtt": {
+ "server": "tcp://broker.emqx.io:1883",
+ "topic": "result",
+ "clientId": "demo_001"
+ }
+ }
+ ]
+}'
+```
+
+#### Option 2: Use Kuiper CLI
+
+You can create a rule file with any text editor, and copy following contents into it. Let's say the file name is ``rule.txt``.
+
+```
+{
+ "sql": "SELECT * from demo where randomnumber > 30",
+ "actions": [
+ {
+ "mqtt": {
+ "server": "tcp://broker.emqx.io:1883",
+ "topic": "result",
+ "clientId": "demo_001"
+ }
+ }
+ ]
+}
+```
+
+In the running Kuiper instance, and execute following command.
+
+```shell
+# bin/cli create rule rule1 -f rule.txt
+Connecting to 127.0.0.1:20498...
+Creating a new rule from file rule.txt.
+Rule rule1 was created, please use 'cli getstatus rule $rule_name' command to get rule status.
+```
+
+------
+
+If you want to send analysis result to another sink, please refer to [other sinks](../rules/overview.md#actions) that supported in Kuiper.
+
+Now you can also take a look at the log file under ``log/stream.log``, see detailed info of rule.
+
+```
+time="2020-03-19T10:23:40+08:00" level=info msg="open source node 1 instances" rule=rule1
+time="2020-03-19T10:23:40+08:00" level=info msg="Connect to value descriptor service at: http://localhost:48080/api/v1/valuedescriptor \n"
+time="2020-03-19T10:23:40+08:00" level=info msg="Use configuration for edgex messagebus {{ 0 } {localhost 5563 tcp} zero map[]}\n"
+time="2020-03-19T10:23:40+08:00" level=info msg="Start source demo instance 0 successfully" rule=rule1
+time="2020-03-19T10:23:40+08:00" level=info msg="The connection to edgex messagebus is established successfully." rule=rule1
+time="2020-03-19T10:23:40+08:00" level=info msg="Successfully subscribed to edgex messagebus topic events." rule=rule1
+time="2020-03-19T10:23:40+08:00" level=info msg="The connection to server tcp://broker.emqx.io:1883 was established successfully" rule=rule1
+```
+
+### Monitor analysis result
+
+Since all of the analysis results are published to ``tcp://broker.emqx.io:1883``, you can just use the below ``mosquitto_sub`` command to monitor the result. You can also use other [MQTT client tools](https://www.emqx.io/blog/mqtt-client-tools).
+
+```shell
+# mosquitto_sub -h broker.emqx.io -t result
+[{"randomnumber":81}]
+[{"randomnumber":87}]
+[{"randomnumber":47}]
+[{"randomnumber":59}]
+[{"randomnumber":81}]
+...
+```
+
+You'll find that only those randomnumber larger than 30 will be published to ``result`` topic.
+
+You can also type the below command to look at the rule execution status. The corresponding REST API is also available for getting rule status, please check the [related document](../restapi/overview.md).
+
+```shell
+# bin/cli getstatus rule rule1
+Connecting to 127.0.0.1:20498...
+{
+ "source_demo_0_records_in_total": 29,
+ "source_demo_0_records_out_total": 29,
+ "source_demo_0_exceptions_total": 0,
+ "source_demo_0_process_latency_ms": 0,
+ "source_demo_0_buffer_length": 0,
+ "source_demo_0_last_invocation": "2020-03-19T10:30:09.294337",
+ "op_preprocessor_demo_0_records_in_total": 29,
+ "op_preprocessor_demo_0_records_out_total": 29,
+ "op_preprocessor_demo_0_exceptions_total": 0,
+ "op_preprocessor_demo_0_process_latency_ms": 0,
+ "op_preprocessor_demo_0_buffer_length": 0,
+ "op_preprocessor_demo_0_last_invocation": "2020-03-19T10:30:09.294355",
+ "op_filter_0_records_in_total": 29,
+ "op_filter_0_records_out_total": 21,
+ "op_filter_0_exceptions_total": 0,
+ "op_filter_0_process_latency_ms": 0,
+ "op_filter_0_buffer_length": 0,
+ "op_filter_0_last_invocation": "2020-03-19T10:30:09.294362",
+ "op_project_0_records_in_total": 21,
+ "op_project_0_records_out_total": 21,
+ "op_project_0_exceptions_total": 0,
+ "op_project_0_process_latency_ms": 0,
+ "op_project_0_buffer_length": 0,
+ "op_project_0_last_invocation": "2020-03-19T10:30:09.294382",
+ "sink_sink_mqtt_0_records_in_total": 21,
+ "sink_sink_mqtt_0_records_out_total": 21,
+ "sink_sink_mqtt_0_exceptions_total": 0,
+ "sink_sink_mqtt_0_process_latency_ms": 0,
+ "sink_sink_mqtt_0_buffer_length": 1,
+ "sink_sink_mqtt_0_last_invocation": "2020-03-19T10:30:09.294423"
+}
+```
+
+### Summary
+
+In this tutorial, we introduced a very simple use of the EdgeX Kuiper rule engine. If you have any issues regarding the use of the Kuiper rule engine, you can open issues in the EdgeX or Kuiper Github repositories.
+
+#### Extended Reading
+
+- Read [EdgeX source](../rules/sources/edgex.md) for more detailed information of configurations and data type conversion.
+- [How to use meta function to extract additional data from EdgeX message bus?](edgex_meta.md) There are some other information are sent along with device service, such as event created time, event id etc. If you want to use such metadata information in your SQL statements, please refer to this doc.
+- [EdgeX message bus sink doc](../rules/sinks/edgex.md). The document describes how to use EdgeX message bus sink. If you'd like to send the analysis result into message bus, you are probably interested in this article.
+
+ If you want to explore more features of EMQ X Kuiper, please refer to below resources.
+
+- [Kuiper Github code repository](https://github.com/emqx/kuiper/)
+- [Kuiper reference guide](https://github.com/emqx/kuiper/blob/edgex/docs/en_US/reference.md)
+
diff --git a/docs/en_US/edgex/sql.png b/docs/en_US/edgex/sql.png
new file mode 100644
index 0000000000..696ac40d7d
Binary files /dev/null and b/docs/en_US/edgex/sql.png differ
diff --git a/docs/en_US/restapi/overview.md b/docs/en_US/restapi/overview.md
index a9b44a4ceb..51e82b30d2 100644
--- a/docs/en_US/restapi/overview.md
+++ b/docs/en_US/restapi/overview.md
@@ -1,6 +1,6 @@
Kuiper provides a set of REST API for streams and rules management in addition to CLI.
-By default, the REST API are running in port 8080. You can change the port in `/etc/kuiper.yaml` for the `restPort` property.
+By default, the REST API are running in port 9081. You can change the port in `/etc/kuiper.yaml` for the `restPort` property.
- [Streams](streams.md)
- [Rules](rules.md)
diff --git a/docs/en_US/rules/overview.md b/docs/en_US/rules/overview.md
index 5a475b48ab..4760ce5cc5 100644
--- a/docs/en_US/rules/overview.md
+++ b/docs/en_US/rules/overview.md
@@ -39,13 +39,22 @@ The identification of the rule. The rule name cannot be duplicated in the same K
The sql query to run for the rule.
-- Kuiper provides embeded support MQTT source, see [MQTT source stream](sources/mqtt.md) for more detailed info.
+- Kuiper provides embeded following 2 sources,
+ - MQTT source, see [MQTT source stream](sources/mqtt.md) for more detailed info.
+ - EdgeX source by default is shipped in [docker images](https://hub.docker.com/r/emqx/kuiper), but NOT included in single download binary files, you use ``make pkg_with_edgex`` command to build a binary package that supports EdgeX source. Please see [EdgeX source stream](sources/edgex.md) for more detailed info.
- See [SQL](../sqls/overview.md) for more info of Kuiper SQL.
- Sources can be customized, see [extension](../extension/overview.md) for more detailed info.
-### actions
+### sinks/actions
-Currently, 3 kinds of actions are supported: [log](sinks/logs.md), [mqtt](sinks/mqtt.md) and [rest](sinks/rest.md). Each action can define its own properties. There are 3 common properties:
+Currently, 3 kinds of sinks/actions are supported:
+
+- [log](sinks/logs.md): Send the result to log file.
+- [mqtt](sinks/mqtt.md): Send the result to an MQTT broker.
+- [edgex](sinks/edgex.md): Send the result to EdgeX message bus.
+- [rest](sinks/rest.md): Send the result to a Rest HTTP server.
+
+Each action can define its own properties. There are 3 common properties:
| property name | Type & Default Value | Description |
| ------------- | -------- | ------------------------------------------------------------ |
diff --git a/docs/en_US/rules/sinks/edgex.md b/docs/en_US/rules/sinks/edgex.md
new file mode 100644
index 0000000000..edffec3ff8
--- /dev/null
+++ b/docs/en_US/rules/sinks/edgex.md
@@ -0,0 +1,25 @@
+# EdgeX Message Bus action
+
+The action is used for publish output message into EdgeX message bus.
+
+| Property name | Optional | Description |
+| ------------- | -------- | ------------------------------------------------------------ |
+| protocol | true | If it's not specified, then use default value ``tcp``. |
+| host | true | The host of message bus. If not specified, then use default value ``*``. |
+| port | true | The port of message bus. If not specified, then use default value ``5570``. |
+| topic | false | The topic to be published. The property must be specified. |
+| contentType | true | The content type of message to be published. If not specified, then use the default value ``application/json``. |
+
+Below is sample configuration for publish result message to ``applicaton`` topic of EdgeX Message Bus.
+```json
+ {
+ "edgex": {
+ "protocol": "tcp",
+ "host": "*",
+ "port": 5571,
+ "topic": "application",
+ "contentType": "application/json"
+ }
+ }
+```
+
diff --git a/docs/en_US/rules/sources/edgex.md b/docs/en_US/rules/sources/edgex.md
new file mode 100644
index 0000000000..6df2ec2922
--- /dev/null
+++ b/docs/en_US/rules/sources/edgex.md
@@ -0,0 +1,112 @@
+# EdgeX Source
+
+Kuiper provides built-in support for EdgeX source stream, which can subscribe the message from [EdgeX message bus](https://github.com/edgexfoundry/go-mod-messaging) and feed into the Kuiper streaming process pipeline.
+
+## Stream definition for EdgeX
+
+EdgeX already defines data types in [value descriptors](https://github.com/edgexfoundry/go-mod-core-contracts), so it's recommended to use a schema-less stream definition in the EdgeX source, as shown below.
+
+```shell
+# cd $kuiper_base
+# bin/cli CREATE STREAM demo'() with(format="json", datasource="demo", type="edgex")'
+```
+
+EdgeX source will try to get the data type of a field,
+
+- convert to the related data type if the type of a field can be found in the value descriptors service;
+- or keep the original value if the type of a field cannot be found in the value descriptors service;
+- or if it fails to convert the value, then the value will be **dropped**, and a warning message is printed in the log;
+
+The types defined in EdgeX value descriptors will be converted into related [data types](../../sqls/streams.md) that supported in Kuiper.
+
+### Boolean
+
+If ``Type`` value of ``ValueDescriptor`` is ``B``, ``Bool`` or ``Boolean``, then Kuiper tries to convert to ``boolean`` type. Following values will be converted into ``true``.
+
+- "1", "t", "T", "true", "TRUE", "True"
+
+Following will be converted into ``false``.
+
+- "0", "f", "F", "false", "FALSE", "False"
+
+### Bigint
+
+If ``Type`` value of ``ValueDescriptor`` is ``I``, ``INT``, ``INT8`` , ``INT16``, ``INT32``, ``INT64``,``UINT`` , ``UINT8`` , ``UINT16`` , ``UINT32`` , ``UINT64`` then Kuiper tries to convert to ``Bigint`` type.
+
+### Float
+
+If ``Type`` value of ``ValueDescriptor`` is ``F``, ``FLOAT``, ``FLOAT16``, ``FLOAT32``, ``FLOAT64``, then Kuiper tries to convert to ``Float`` type.
+
+### String
+
+If ``Type`` value of ``ValueDescriptor`` is ``S``, ``String``, then Kuiper tries to convert to ``String`` type.
+
+### Struct
+
+If ``Type`` value of ``ValueDescriptor`` is ``J``, ``Json``, then Kuiper tries to convert to ``Struct`` type.
+
+# Global configurations
+
+The configuration file of EdgeX source is at ``$kuiper/etc/sources/edgex.yaml``. Below is the file format.
+
+```yaml
+#Global Edgex configurations
+default:
+ protocol: tcp
+ server: localhost
+ port: 5570
+ topic: events
+ serviceServer: http://localhost:10080
+# optional:
+# ClientId: client1
+# Username: user1
+# Password: password
+```
+
+
+
+Users can specify the global EdgeX settings here. The configuration items specified in the ``default`` section will be taken as the default settings for all EdgeX sources.
+
+## protocol
+
+The protocol connect to EdgeX message bus, default value is ``tcp``.
+
+## server
+
+The server address of EdgeX message bus, default value is ``localhost``.
+
+## port
+
+The port of EdgeX message bus, default value is ``5570``.
+
+## topic
+
+The topic name of EdgeX message bus, default value is ``events``.
+
+## serviceServer
+
+The base service address for getting value descriptors, the value of ``serviceServer`` will be concatenated to ``/api/v1/valuedescriptor`` to get all of value descriptors of EdgeX server.
+
+## Override the default settings
+
+In some cases, you may want to consume messages from multiple topics or event buses. Kuiper supports specifying another configuration, and using the ``CONF_KEY`` to reference the newly created key when you create a stream.
+
+```yaml
+#Override the global configurations
+demo1: #Conf_key
+ protocol: tcp
+ server: 10.211.55.6
+ port: 5570
+ topic: events
+```
+
+If you have a specific connection that need to overwrite the default settings, you can create a customized section. In the previous sample, we create a specific setting named with ``demo1``. Then you can specify the configuration with option ``CONF_KEY`` when creating the stream definition (see [stream specs](../../sqls/streams.md) for more info).
+
+**Sample**
+
+```
+create stream demo1() WITH (FORMAT="JSON", type="edgex", CONF_KEY="demo1");
+```
+
+The configuration keys used for these specific settings are the same as in ``default`` settings, any values specified in specific settings will overwrite the values in ``default`` section.
+
diff --git a/docs/en_US/sqls/built-in_functions.md b/docs/en_US/sqls/built-in_functions.md
index dfac8247ad..be080f06cc 100644
--- a/docs/en_US/sqls/built-in_functions.md
+++ b/docs/en_US/sqls/built-in_functions.md
@@ -93,3 +93,4 @@ Aggregate functions perform a calculation on a set of values and return a single
| newuuid | newuuid() | Returns a random 16-byte UUID. |
| timestamp | timestamp() | Returns the current timestamp in milliseconds from 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970 |
| mqtt | mqtt(topic) | Returns the MQTT meta-data of specified key. The current supported keys
- topic: return the topic of message. If there are multiple stream source, then specify the source name in parameter. Such as ``mqtt(src1.topic)``
- messageid: return the message id of message. If there are multiple stream source, then specify the source name in parameter. Such as ``mqtt(src2.messageid)`` |
+| meta | meta(topic) | Returns the meta-data of specified key. The key could be:
- a standalone key if there is only one source in the from clause, such as ``meta(device)``
- A qualified key to specify the stream, such as ``meta(src1.device)``
- A key with arrow for multi level meta data, such as ``meta(src1.reading->device->name)`` This assumes reading is a map structure meta data.|
diff --git a/docs/en_US/sqls/data_types.md b/docs/en_US/sqls/data_types.md
index b3a85db99f..1546745a57 100644
--- a/docs/en_US/sqls/data_types.md
+++ b/docs/en_US/sqls/data_types.md
@@ -18,7 +18,25 @@ Below is the list of data types supported.
| 6 | array | The array type, can be any types from simple data or struct type (#1 - #5, and #7). |
| 7 | struct | The complex type. Set of name/value pairs. Values must be of supported data type. |
-
+## Compatibility of comparison and calculation
+
+There may be binary operations in each sql clause. In this example, `Select temperature * 2 from demo where temperature > 20`, a calculation operation is used in the select clause and a comparison operation is used in the where clause. In binary operations, if incompatible data types are used, a runtime error will occur and be sent to the sinks.
+
+Array and struct are not supported in any binary operations. The compatibility of the other data types is listed in the table below, where the row header is the left operand data type and the column header is the right operand data type. The value is the compatibility, in which Y stands for yes and N stands for no.
+
+ # | bigint | float | string | datetime | boolean |
+ ---- | ------ | ---- | ---- | ---- | --- |
+ bigint| Y | Y | N | N | N |
+ float | Y | Y | N | N | N |
+ string| N | N | Y | N | N |
+ datetime| Y | Y | Y if in the valid format | Y | N |
+ boolean| N | N | N | N | N |
+
+ The default format for datetime string is ``"2006-01-02T15:04:05.000Z07:00"``
+
+ For `nil` value, we follow the rules:
+ 1. Comparing with nil always returns false
+ 2. Calculating with nil always returns nil
## Type conversions
diff --git a/docs/en_US/sqls/streams.md b/docs/en_US/sqls/streams.md
index b2402e8905..31011123be 100644
--- a/docs/en_US/sqls/streams.md
+++ b/docs/en_US/sqls/streams.md
@@ -83,5 +83,15 @@ array: zero length array
struct: null value
```
+### Schema-less stream
+If the data type of the stream is unknown or varying, we can define it without the fields. This is called schema-less. It is defined by leaving the fields empty.
+```sql
+CREATE STREAM schemaless_stream
+ ()
+WITH ( datasource = "topic/temperature", FORMAT = "json", KEY = "id");
+```
+
+Schema-less stream field data types will be determined at runtime. If the field is used in an incompatible clause, a runtime error will be thrown and sent to the sink. For example, with ``where temperature > 30``, if temperature is not a number, an error will be sent to the sink.
+
See [Query languange element](query_language_elements.md) for more inforamtion of SQL language.
diff --git a/docs/en_US/sqls/windows.md b/docs/en_US/sqls/windows.md
index e5364db2bf..016ad59d50 100644
--- a/docs/en_US/sqls/windows.md
+++ b/docs/en_US/sqls/windows.md
@@ -26,13 +26,6 @@ Tumbling window functions are used to segment a data stream into distinct time s
![Tumbling Window](resources/tumblingWindow.png)
-TODO:
-
-- TIMESTAMP BY is required?
-- Count function is not supported.21
-
-
-
```sql
SELECT count(*) FROM demo GROUP BY ID, TUMBLINGWINDOW(ss, 10);
```
@@ -43,12 +36,6 @@ Hopping window functions hop forward in time by a fixed period. It may be easy t
![Hopping Window](resources/hoppingWindow.png)
-TODO:
-
-- TIMESTAMP BY is required?
-- Count function is not supported.
-
-
```sql
SELECT count(*) FROM demo GROUP BY ID, HOPPINGWINDOW(ss, 10, 5);
@@ -62,10 +49,6 @@ Sliding window functions, unlike Tumbling or Hopping windows, produce an output
![Sliding Window](resources/slidingWindow.png)
-TODO:
-
-- TIMESTAMP BY is required?
-- Count function is not supported.
```sql
SELECT count(*) FROM demo GROUP BY ID, SLIDINGWINDOW(mm, 1);
@@ -79,12 +62,6 @@ Session window functions group events that arrive at similar times, filtering ou
![Session Window](resources/sessionWindow.png)
-TODO:
-
-- TIMESTAMP BY is required?
-- Count function is not supported.
-
-
```sql
SELECT count(*) FROM demo GROUP BY ID, SESSIONWINDOW(mm, 2, 1);
@@ -94,4 +71,21 @@ SELECT count(*) FROM demo GROUP BY ID, SESSIONWINDOW(mm, 2, 1);
A session window begins when the first event occurs. If another event occurs within the specified timeout from the last ingested event, then the window extends to include the new event. Otherwise if no events occur within the timeout, then the window is closed at the timeout.
-If events keep occurring within the specified timeout, the session window will keep extending until maximum duration is reached. The maximum duration checking intervals are set to be the same size as the specified max duration. For example, if the max duration is 10, then the checks on if the window exceed maximum duration will happen at t = 0, 10, 20, 30, etc.
\ No newline at end of file
+If events keep occurring within the specified timeout, the session window will keep extending until maximum duration is reached. The maximum duration checking intervals are set to be the same size as the specified max duration. For example, if the max duration is 10, then the checks on if the window exceed maximum duration will happen at t = 0, 10, 20, 30, etc.
+
+## Timestamp Management
+
+Every event has a timestamp associated with it. The timestamp will be used to calculate the window. By default, a timestamp is added when an event is fed into the source, which is called `processing time`. We also support specifying a field as the timestamp, which is called `event time`. The timestamp field is specified in the stream definition. In the below definition, the field `ts` is specified as the timestamp field.
+
+```sql
+CREATE STREAM demo (
+ color STRING,
+ size BIGINT,
+ ts BIGINT
+ ) WITH (DATASOURCE="demo", FORMAT="json", KEY="ts", TIMESTAMP="ts");
+```
+
+In event time mode, the watermark algorithm is used to calculate a window.
+
+## Runtime error in window
+If the window receives an error (for example, the data type does not comply with the stream definition) from upstream, the error event will be forwarded immediately to the sink. The current window calculation will ignore the error event.
\ No newline at end of file
diff --git a/docs/zh_CN/README.md b/docs/zh_CN/README.md
deleted file mode 100644
index ef32fd901a..0000000000
--- a/docs/zh_CN/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# A lightweight IoT edge analytic software
-
-## Highlight
-
-A SQL based lightweight IoT analytics/streaming software running at resource constrained edge devices.
-- Native run with small overhead ( ~7MB package), support Linux/Windows/Mac OS
-- SQL based, easy to use
-- Built-in support for MQTT source
-- Extension - user can customize the rule engine
-- RESTful APIs for rules management
-
-## Document
-English
-- [Getting started](https://docs.emqx.io/kuiper/en/getting_started.html)
-- [Reference guide](https://docs.emqx.io/kuiper/en/reference.html)
-
-中文
-- [入门教程](./getting_started.md)
-- [参考指南](./index.md)
-
-## Build from source code
-
-#### Prepare
-
-+ Go version >= 1.11
-
-#### Build binary file
-
-+ Build binary file
-
- ```shell
- $ make
- ```
-
-+ Cross build binary file
-
- ```shell
- $ GOOS=linux GOARCH=arm make
- ```
-
-#### Get the compressed file
-
-+ Get the compressed files
-
- ```
- $ make pkg
- ```
-
-+ Get the cross-build compressed file
-
- ```
- $ GOOS=linux GOARCH=arm make pkg
- ```
-
diff --git a/etc/sources/edgex.yaml b/etc/sources/edgex.yaml
new file mode 100644
index 0000000000..3563d41561
--- /dev/null
+++ b/etc/sources/edgex.yaml
@@ -0,0 +1,17 @@
+#Global Edgex configurations
+default:
+ protocol: tcp
+ server: localhost
+ port: 5570
+ topic: events
+ serviceServer: http://localhost:10080
+# optional:
+# ClientId: client1
+# Username: user1
+# Password: password
+#Override the global configurations
+application_conf: #Conf_key
+ protocol: tcp
+ server: localhost
+ port: 5571
+ topic: application
\ No newline at end of file
diff --git a/fvt_scripts/README.md b/fvt_scripts/README.md
index 85a28dcd05..0ea2f818df 100644
--- a/fvt_scripts/README.md
+++ b/fvt_scripts/README.md
@@ -96,7 +96,7 @@ For most of scripts, you can just start JMeter by default way, such as ``bin/jme
- The processing SQL is ``SELECT * FROM demo WHERE temperature > 30``, so all of the data that with temperature less than 30 will be fitered. The script read data from file ``iot_data.txt``, totally 10 records.
- Another JMeter mock-up user subscribes MQTT result topic, and expected result are saved in file ``select_condition_iot_data.txt``. If the record cotent is not correct then JMeter response assertion will be failed. If record number is not correct, the script will not be stopped, until CI (continuous integration) pipeline kills it with timeout settings. If you run the script in local, you'll have to stop the test manually.
-- [Aggregation rule]()
+- [Aggregation rule](select_aggr_rule.jmx)
The script automated steps described in [this blog](https://www.emqx.io/blog/lightweight-edge-computing-emqx-kuiper-and-aws-iot-hub-integration-solution), except for the sink target changes to local EMQ broker (not AWS IoT Hub).
@@ -133,4 +133,64 @@ For most of scripts, you can just start JMeter by default way, such as ``bin/jme
- Another JMeter mock-up user subscribes MQTT result topic, and assert the order for device_id field is descending, and temperature is ascending.
+- [EdgeX source with condition](select_edgex_condition_rule.jmx)
+
+ The test script is used for testing [Kuiper EdgeX source](../docs/en_US/rules/sources/edgex.md). To run the script,
+
+ - A mockup EdgeX value descriptor service should be compiled and run before test.
+
+ ```shell
+ # go build -o fvt_scripts/edgex/valuedesc/vdmocker fvt_scripts/edgex/valuedesc/vd_server.go
+
+ # fvt_scripts/edgex/valuedesc/vdmocker > vdmocker.out 2>&1 &
+ ```
+
+ - An EdgeX message bus publish tool should be compiled and run during running test.
+
+ ```shell
+ # go build -o fvt_scripts/edgex/pub fvt_scripts/edgex/pub.go
+ ```
+
+ - Run the JMeter with following command, and specify the ``fvt`` property in the JMeter command line, the ``fvt`` is where you develop Kuiper, script will search ``fvt_scripts/edgex/pub`` from the location.
+
+ ```shell
+ bin/jmeter.sh -Dfvt="/Users/rockyjin/Downloads/workspace/edge/src/kuiper"
+ ```
+
+  - The processing SQL is ``SELECT * FROM demo WHERE temperature > 30``, so all of the data with temperature less than 30 will be filtered.
+
+ - Another JMeter mock-up user subscribes MQTT result topic, and assert message number and contents.
+
+- [Multiple EdgeX source configurations](select_edgex_another_bus_rule.jmx)
+
+ The test script is used for testing specifying another EdgeX source configurations in Kuiper.
+
+ - In the ``edgex.yaml`` configuration file, below additional configurations are specified.
+
+ ```yaml
+ application_conf: #Conf_key
+ protocol: tcp
+ server: localhost
+ port: 5571
+ topic: application
+ ```
+
+  - In the create stream statement, the test script uses the ``CONF_KEY`` keyword to use the overridden configuration value specified in ``edgex.yaml``.
+
+ ```sql
+ CREATE STREAM application () WITH (FORMAT="JSON", TYPE="edgex", CONF_KEY = "application_conf")
+ ```
+
+  - As with the steps required in ``select_edgex_condition_rule.jmx``, the EdgeX value descriptor service & message bus publish tool should be ready.
+
+- [EdgeX message bus sink](edgex_sink_rule.jmx)
+
+ The test script verifies EdgeX message bus sink. Only one message meet the condition of created rule, and it will be sent to EdgeX message bus sink.
+
+  As with the previous 2 test cases, besides preparing the ``vdmocker`` & ``pub`` applications, another ``sub`` application should also be prepared.
+
+ ```shell
+ # go build -o fvt_scripts/edgex/sub/sub fvt_scripts/edgex/sub/sub.go
+ ```
+
\ No newline at end of file
diff --git a/fvt_scripts/edgex/pub.go b/fvt_scripts/edgex/pub.go
new file mode 100644
index 0000000000..e7e758b2b3
--- /dev/null
+++ b/fvt_scripts/edgex/pub.go
@@ -0,0 +1,166 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "github.com/edgexfoundry/go-mod-core-contracts/clients/coredata"
+ "github.com/edgexfoundry/go-mod-core-contracts/clients/urlclient/local"
+ "github.com/edgexfoundry/go-mod-core-contracts/models"
+ "github.com/edgexfoundry/go-mod-messaging/messaging"
+ "github.com/edgexfoundry/go-mod-messaging/pkg/types"
+ "log"
+ "os"
+ "time"
+)
+
+var msgConfig1 = types.MessageBusConfig{
+ PublishHost: types.HostInfo{
+ Host: "*",
+ Port: 5570,
+ Protocol: "tcp",
+ },
+ Type:messaging.ZeroMQ,
+}
+
+func pubEventClientZeroMq() {
+ if msgClient, err := messaging.NewMessageClient(msgConfig1); err != nil {
+ log.Fatal(err)
+ } else {
+ if ec := msgClient.Connect(); ec != nil {
+ log.Fatal(ec)
+ } else {
+ client := coredata.NewEventClient(local.New("test"))
+ //r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for i := 0; i < 10; i++ {
+ //temp := r.Intn(100)
+ //humd := r.Intn(100)
+
+ var testEvent = models.Event{Device: "demo", Created: 123, Modified: 123, Origin: 123}
+ var r1 = models.Reading{Pushed: 123, Created: 123, Origin: 123, Modified: 123, Device: "test device name", Name: "Temperature", Value: fmt.Sprintf("%d", i*8)}
+ var r2 = models.Reading{Pushed: 123, Created: 123, Origin: 123, Modified: 123, Device: "test device name", Name: "Humidity", Value: fmt.Sprintf("%d", i*9)}
+
+ var r3 = models.Reading{Name:"b1"}
+ if i % 2 == 0 {
+ r3.Value = "true"
+ } else {
+ r3.Value = "false"
+ }
+
+ r4 := models.Reading{Name:"i1", Value:fmt.Sprintf("%d", i)}
+ r5 := models.Reading{Name:"f1", Value:fmt.Sprintf("%.2f", float64(i)/2.0)}
+
+ testEvent.Readings = append(testEvent.Readings, r1, r2, r3, r4, r5)
+
+ data, err := client.MarshalEvent(testEvent)
+ if err != nil {
+ fmt.Errorf("unexpected error MarshalEvent %v", err)
+ } else {
+ fmt.Println(string(data))
+ }
+
+ env := types.NewMessageEnvelope([]byte(data), context.Background())
+ env.ContentType = "application/json"
+
+ if e := msgClient.Publish(env, "events"); e != nil {
+ log.Fatal(e)
+ } else {
+ fmt.Printf("Pub successful: %s\n", data)
+ }
+ time.Sleep(1500 * time.Millisecond)
+ }
+ }
+ }
+}
+
+func pubToAnother() {
+ var msgConfig2 = types.MessageBusConfig{
+ PublishHost: types.HostInfo{
+ Host: "*",
+ Port: 5571,
+ Protocol: "tcp",
+ },
+ Type:messaging.ZeroMQ,
+ }
+ if msgClient, err := messaging.NewMessageClient(msgConfig2); err != nil {
+ log.Fatal(err)
+ } else {
+ if ec := msgClient.Connect(); ec != nil {
+ log.Fatal(ec)
+ }
+ client := coredata.NewEventClient(local.New("test1"))
+ var testEvent = models.Event{Device: "demo1", Created: 123, Modified: 123, Origin: 123}
+ var r1 = models.Reading{Pushed: 123, Created: 123, Origin: 123, Modified: 123, Device: "test device name", Name: "Temperature", Value: "20"}
+ var r2 = models.Reading{Pushed: 123, Created: 123, Origin: 123, Modified: 123, Device: "test device name", Name: "Humidity", Value: "30"}
+
+ testEvent.Readings = append(testEvent.Readings, r1, r2)
+
+ data, err := client.MarshalEvent(testEvent)
+ if err != nil {
+ fmt.Errorf("unexpected error MarshalEvent %v", err)
+ } else {
+ fmt.Println(string(data))
+ }
+
+ env := types.NewMessageEnvelope([]byte(data), context.Background())
+ env.ContentType = "application/json"
+
+ if e := msgClient.Publish(env, "application"); e != nil {
+ log.Fatal(e)
+ } else {
+ fmt.Printf("pubToAnother successful: %s\n", data)
+ }
+ time.Sleep(1500 * time.Millisecond)
+ }
+}
+
+func pubMetaSource() {
+ if msgClient, err := messaging.NewMessageClient(msgConfig1); err != nil {
+ log.Fatal(err)
+ } else {
+ if ec := msgClient.Connect(); ec != nil {
+ log.Fatal(ec)
+ } else {
+ client := coredata.NewEventClient(local.New("test"))
+
+ evtDevice := []string{"demo1", "demo2"}
+ for i, device := range evtDevice {
+ j := int64(i) + 1
+ testEvent := models.Event{Device: device, Created: 11*j, Modified: 12*j, Origin: 13*j}
+ r1 := models.Reading{Pushed: 22*j, Created: 23*j, Origin: 24*j, Modified: 25*j, Device: "Temperature sensor", Name: "Temperature", Value: fmt.Sprintf("%d", j*8)}
+ r2 := models.Reading{Pushed: 32*j, Created: 33*j, Origin: 34*j, Modified: 35*j, Device: "Humidity sensor", Name: "Humidity", Value: fmt.Sprintf("%d", j*8)}
+
+ testEvent.Readings = append(testEvent.Readings, r1, r2)
+ data, err := client.MarshalEvent(testEvent)
+ if err != nil {
+ fmt.Errorf("unexpected error MarshalEvent %v", err)
+ } else {
+ fmt.Println(string(data))
+ }
+
+ env := types.NewMessageEnvelope([]byte(data), context.Background())
+ env.ContentType = "application/json"
+
+ if e := msgClient.Publish(env, "events"); e != nil {
+ log.Fatal(e)
+ } else {
+ fmt.Printf("Pub successful: %s\n", data)
+ }
+ time.Sleep(1500 * time.Millisecond)
+ }
+
+ }
+ }
+}
+
+func main() {
+ if len(os.Args) == 1 {
+ pubEventClientZeroMq()
+ } else if len(os.Args) == 2 {
+ if v := os.Args[1]; v == "another" {
+ pubToAnother()
+ } else if v == "meta" {
+ pubMetaSource()
+ }
+ }
+}
+
diff --git a/fvt_scripts/edgex/sub/sub.go b/fvt_scripts/edgex/sub/sub.go
new file mode 100644
index 0000000000..dc0c81e7bb
--- /dev/null
+++ b/fvt_scripts/edgex/sub/sub.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+ "fmt"
+ "github.com/edgexfoundry/go-mod-messaging/messaging"
+ "github.com/edgexfoundry/go-mod-messaging/pkg/types"
+ "github.com/emqx/kuiper/common"
+)
+
+func main() {
+ var msgConfig1 = types.MessageBusConfig{
+ SubscribeHost: types.HostInfo{
+ Host: "localhost",
+ Port: 5571,
+ Protocol: "tcp",
+ },
+ Type:messaging.ZeroMQ,
+ }
+
+ if msgClient, err := messaging.NewMessageClient(msgConfig1); err != nil {
+ common.Log.Fatal(err)
+ } else {
+ if ec := msgClient.Connect(); ec != nil {
+ common.Log.Fatal(ec)
+ } else {
+ if err := msgClient.Connect(); err != nil {
+ common.Log.Fatal(err)
+ }
+ //log.Infof("The connection to edgex messagebus is established successfully.")
+ messages := make(chan types.MessageEnvelope)
+ topics := []types.TopicChannel{{Topic: "", Messages: messages}}
+ err := make(chan error)
+ if e := msgClient.Subscribe(topics, err); e != nil {
+ //log.Errorf("Failed to subscribe to edgex messagebus topic %s.\n", e)
+ common.Log.Fatal(e)
+ } else {
+ var count int = 0
+ for {
+ select {
+ case e1 := <-err:
+ common.Log.Errorf("%s\n", e1)
+ return
+ case env := <-messages:
+ count ++
+ fmt.Printf("%s\n", env.Payload)
+ if count == 1 {
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/fvt_scripts/edgex/valuedesc/vd_server.go b/fvt_scripts/edgex/valuedesc/vd_server.go
new file mode 100644
index 0000000000..43349f2be4
--- /dev/null
+++ b/fvt_scripts/edgex/valuedesc/vd_server.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/edgexfoundry/go-mod-core-contracts/clients"
+ "github.com/edgexfoundry/go-mod-core-contracts/models"
+ "log"
+ "net/http"
+)
+
+const (
+ desc1 = "Temperature descriptor1"
+ desc2 = "Humidity descriptor2"
+ desc3 = "Boolean descriptor"
+ desc4 = "Int descriptor"
+ desc5 = "Float descriptor"
+ desc6 = "String descriptor"
+)
+
+var vd1 = models.ValueDescriptor{Id: "Temperature", Created: 123, Modified: 123, Origin: 123, Name: "Temperature",
+ Description: "test description", Min: -70, Max: 140, DefaultValue: 32, Formatting: "%f", Type:"Float32",
+ Labels: []string{"temp", "room temp"}, UomLabel: "F", MediaType: clients.ContentTypeJSON, FloatEncoding: "eNotation"}
+
+var vd2 = models.ValueDescriptor{Id: "Humidity", Created: 123, Modified: 123, Origin: 123, Name: "Humidity",
+ Description: "test description", Min: -70, Max: 140, DefaultValue: 32, Formatting: "%d", Type:"Uint64",
+ Labels: []string{"humi", "room humidity"}, UomLabel: "F", MediaType: clients.ContentTypeJSON, FloatEncoding: "eNotation"}
+
+var vd3 = models.ValueDescriptor{Id: "b1", Name: "b1", Formatting: "%t", Type:"Bool", MediaType: clients.ContentTypeJSON}
+var vd4 = models.ValueDescriptor{Id: "i1", Name: "i1", Formatting: "%d", Type:"UINT8", MediaType: clients.ContentTypeJSON}
+var vd5 = models.ValueDescriptor{Id: "f1", Name: "f1", Formatting: "%f", Type:"FLOAT64", MediaType: clients.ContentTypeJSON}
+var vd6 = models.ValueDescriptor{Id: "s1", Name: "s1", Formatting: "%s", Type:"String", MediaType: clients.ContentTypeJSON}
+
+func main() {
+ http.HandleFunc(clients.ApiValueDescriptorRoute, Hello)
+ if e := http.ListenAndServe(":10080", nil); e != nil {
+ log.Fatal(e)
+ }
+}
+
+func Hello(w http.ResponseWriter, req *http.Request) {
+ descriptor1 := vd1
+ descriptor1.Description = desc1
+
+ descriptor2 := vd2
+ descriptor2.Description = desc2
+
+ descriptor3 := vd3
+ descriptor3.Description = desc3
+
+ descriptor4 := vd4
+ descriptor4.Description = desc4
+
+ descriptor5 := vd5
+ descriptor5.Description = desc5
+
+ descriptor6 := vd6
+ descriptor6.Description = desc6
+
+ descriptors := []models.ValueDescriptor{descriptor1, descriptor2, descriptor3, descriptor4, descriptor5, descriptor6}
+
+ data, err := json.Marshal(descriptors)
+ if err != nil {
+ fmt.Errorf("marshaling error: %s", err.Error())
+ }
+ if _, err := fmt.Fprintf(w, "%s", data); err != nil {
+ log.Fatal(err)
+ }
+ //_, _ = w.Write(data)
+}
+
+
diff --git a/fvt_scripts/edgex_sink_rule.jmx b/fvt_scripts/edgex_sink_rule.jmx
new file mode 100644
index 0000000000..d850057571
--- /dev/null
+++ b/fvt_scripts/edgex_sink_rule.jmx
@@ -0,0 +1,441 @@
+
+
+
+
+
+ false
+ true
+ false
+
+
+
+
+
+
+
+
+
+ srv
+ 127.0.0.1
+ =
+
+
+ rest_port
+ 9081
+ =
+
+
+ fvt
+ ${__property(fvt,,)}
+ =
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+ false
+ false
+
+
+
+ true
+
+
+
+ false
+ {
+"sql" : "create stream demo () WITH (FORMAT=\"JSON\", TYPE=\"edgex\")"
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream demo is created.
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+ {
+ "id": "rule1",
+ "sql": "SELECT * FROM demo WHERE temperature = 72",
+ "actions": [
+ {
+ "edgex": {
+ "protocol": "tcp",
+ "host": "*",
+ "port": 5571,
+ "topic": "application",
+ "contentType": "application/json"
+ }
+ }
+ ]
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 was created
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_demo_0_records_in_total
+ 0
+ true
+ false
+ false
+ false
+
+
+
+
+ false
+ 0
+ fvt_scripts/edgex/pub
+
+
+
+
+
+
+ ${__property(fvt,,)}
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_demo_0_records_in_total
+ 10
+ true
+ false
+ false
+ false
+
+
+
+ $.sink_sink_mqtt_0_records_in_total
+ 6
+ true
+ false
+ false
+ false
+
+
+
+ 5000
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams/demo
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream demo is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ 500
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+ true
+ 0
+ fvt_scripts/edgex/sub/sub
+
+
+
+
+
+
+ ${__property(fvt,,)}
+
+
+
+ $.[0].temperature
+ 72
+ true
+ false
+ false
+ false
+
+
+
+ $.[0].humidity
+ 81
+ true
+ false
+ false
+ false
+
+
+
+
+
+
+
diff --git a/fvt_scripts/run_jmeter.sh b/fvt_scripts/run_jmeter.sh
index d00dd4d71a..606d644702 100755
--- a/fvt_scripts/run_jmeter.sh
+++ b/fvt_scripts/run_jmeter.sh
@@ -1,4 +1,24 @@
#!/bin/bash
+# This script accepts the following parameters:
+#
+# * with_edgex
+#
+# Example:
+#
+# ./fvt_scripts/run_jmeter.sh with_edgex=true
+#
+# or
+#
+# ./fvt_scripts/run_jmeter.sh with_edgex=false
+#
+
+set -e
+
+CONFIG=$@
+
+for line in $CONFIG; do
+ eval "$line"
+done
function downloadjar
{
@@ -23,26 +43,40 @@ fvt_dir=`pwd`
rm -rf jmeter_logs
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/streams_test.jmx -Dbase="$base_dir" -l jmeter_logs/stream_test.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/streams_test.jmx -Dbase="$base_dir" -l jmeter_logs/stream_test.jtl -j jmeter_logs/stream_test.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/rule_test.jmx -Dbase="$base_dir" -Dfvt="$fvt_dir" -l jmeter_logs/rule_test.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/rule_test.jmx -Dbase="$base_dir" -Dfvt="$fvt_dir" -l jmeter_logs/rule_test.jtl -j jmeter_logs/rule_test.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_all_rule.jmx -l jmeter_logs/select_all_rule.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_all_rule.jmx -l jmeter_logs/select_all_rule.jtl -j jmeter_logs/select_all_rule.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_condition_rule.jmx -l jmeter_logs/select_condition_rule.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_condition_rule.jmx -l jmeter_logs/select_condition_rule.jtl -j jmeter_logs/select_condition_rule.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_aggr_rule.jmx -l jmeter_logs/select_aggr_rule.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_aggr_rule.jmx -l jmeter_logs/select_aggr_rule.jtl -j jmeter_logs/select_aggr_rule.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/change_rule_status.jmx -l jmeter_logs/change_rule_status.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/change_rule_status.jmx -l jmeter_logs/change_rule_status.jtl -j jmeter_logs/change_rule_status.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/change_stream_rule.jmx -l jmeter_logs/change_stream_rule.jtl
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/change_stream_rule.jmx -l jmeter_logs/change_stream_rule.jtl -j jmeter_logs/change_stream_rule.log
echo -e "---------------------------------------------\n"
-/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_aggr_rule_order.jmx -l jmeter_logs/select_aggr_rule_order.jtl
-echo -e "---------------------------------------------\n"
\ No newline at end of file
+/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_aggr_rule_order.jmx -l jmeter_logs/select_aggr_rule_order.jtl -j jmeter_logs/select_aggr_rule_order.log
+echo -e "---------------------------------------------\n"
+
+if test $with_edgex = true; then
+ /opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_edgex_condition_rule.jmx -Dbase="$base_dir" -Dfvt="$fvt_dir" -l jmeter_logs/select_edgex_condition_rule.jtl -j jmeter_logs/select_edgex_condition_rule.log
+ echo -e "---------------------------------------------\n"
+
+ /opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_edgex_another_bus_rule.jmx -Dfvt="$fvt_dir" -l jmeter_logs/select_edgex_another_bus_rule.jtl -j jmeter_logs/select_edgex_another_bus_rule.log
+ echo -e "---------------------------------------------\n"
+
+ /opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/edgex_sink_rule.jmx -Dfvt="$fvt_dir" -l jmeter_logs/edgex_sink_rule.jtl -j jmeter_logs/edgex_sink_rule.log
+ echo -e "---------------------------------------------\n"
+
+ /opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t fvt_scripts/select_edgex_meta_rule.jmx -Dfvt="$fvt_dir" -l jmeter_logs/select_edgex_meta_rule.jtl -j jmeter_logs/select_edgex_meta_rule.log
+ echo -e "---------------------------------------------\n"
+fi
\ No newline at end of file
diff --git a/fvt_scripts/select_aggr_rule.jmx b/fvt_scripts/select_aggr_rule.jmx
index 2ebac5ec70..ae667aadd9 100644
--- a/fvt_scripts/select_aggr_rule.jmx
+++ b/fvt_scripts/select_aggr_rule.jmx
@@ -16,7 +16,7 @@
srv
- 127.0.0.1
+ ${__property(srv,,127.0.0.1)}
=
@@ -26,7 +26,7 @@
mqtt_srv
- 127.0.0.1
+ ${__property(mqtt_srv,,127.0.0.1)}
=
diff --git a/fvt_scripts/select_edgex_another_bus_rule.jmx b/fvt_scripts/select_edgex_another_bus_rule.jmx
new file mode 100644
index 0000000000..c4de777ce0
--- /dev/null
+++ b/fvt_scripts/select_edgex_another_bus_rule.jmx
@@ -0,0 +1,494 @@
+
+
+
+
+
+ false
+ true
+ false
+
+
+
+
+
+
+
+
+
+ srv
+ 127.0.0.1
+ =
+
+
+ rest_port
+ 9081
+ =
+
+
+ fvt
+ ${__property(fvt,,)}
+ =
+
+
+ mqtt_srv
+ 127.0.0.1
+ =
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+ false
+ false
+
+
+
+ true
+
+
+
+ false
+ {
+"sql" : "create stream application () WITH (FORMAT=\"JSON\", TYPE=\"edgex\", CONF_KEY=\"application_conf\")"
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream application is created.
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+ {
+ "id": "rule1",
+ "sql": "SELECT * FROM application",
+ "actions": [
+ {
+ "mqtt": {
+ "server": "tcp://${mqtt_srv}:1883",
+ "topic": "devices/result",
+ "qos": 1,
+ "clientId": "demo_001"
+ }
+ }
+ ]
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 was created
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_application_0_records_in_total
+ 0
+ true
+ false
+ false
+ false
+
+
+
+
+ false
+ 0
+ fvt_scripts/edgex/pub
+
+
+
+
+ another
+ =
+
+
+
+
+
+
+ ${__property(fvt,,)}
+
+
+
+
+ pubToAnother successful
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+ 1000
+
+
+
+ false
+
+
+ System.out.println(prev.getResponseDataAsString());
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_application_0_records_in_total
+ 1
+ true
+ false
+ false
+ false
+
+
+
+ $.sink_sink_mqtt_0_records_in_total
+ 6
+ true
+ false
+ false
+ false
+
+
+
+ 3000
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams/application
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream application is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ 1000
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+ ${mqtt_srv}
+ 1883
+ 3.1
+ 10
+ false
+ 10
+ TCP
+ false
+
+
+
+
+
+
+ conn_
+ true
+ 300
+ 0
+ 0
+
+
+
+
+ devices/result
+ 0
+ false
+ true
+ number of received messages
+ 1
+
+
+
+ $[0].temperature
+
+ false
+ false
+ false
+ false
+
+
+
+ $[0].humidity
+
+ false
+ false
+ false
+ false
+
+
+
+
+
+
+
diff --git a/fvt_scripts/select_edgex_condition_rule.jmx b/fvt_scripts/select_edgex_condition_rule.jmx
new file mode 100644
index 0000000000..1bb6d2f342
--- /dev/null
+++ b/fvt_scripts/select_edgex_condition_rule.jmx
@@ -0,0 +1,483 @@
+
+
+
+
+
+ false
+ true
+ false
+
+
+
+
+
+
+
+
+
+ srv
+ 127.0.0.1
+ =
+
+
+ rest_port
+ 9081
+ =
+
+
+ fvt
+ ${__property(fvt,,)}
+ =
+
+
+ mqtt_srv
+ 127.0.0.1
+ =
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+ false
+ false
+
+
+
+ true
+
+
+
+ false
+ {
+"sql" : "create stream demo () WITH (FORMAT=\"JSON\", TYPE=\"edgex\")"
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream demo is created.
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+ {
+ "id": "rule1",
+ "sql": "SELECT * FROM demo WHERE temperature > 30",
+ "actions": [
+ {
+ "mqtt": {
+ "server": "tcp://${mqtt_srv}:1883",
+ "topic": "devices/result",
+ "qos": 1,
+ "clientId": "demo_001"
+ }
+ }
+ ]
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 was created
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_demo_0_records_in_total
+ 0
+ true
+ false
+ false
+ false
+
+
+
+
+ false
+ 0
+ fvt_scripts/edgex/pub
+
+
+
+
+
+
+ ${__property(fvt,,)}
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_demo_0_records_in_total
+ 10
+ true
+ false
+ false
+ false
+
+
+
+ $.sink_sink_mqtt_0_records_in_total
+ 6
+ true
+ false
+ false
+ false
+
+
+
+ 5000
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams/demo
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream demo is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ 500
+
+
+
+
+
+ continue
+
+ false
+ 6
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+ ${mqtt_srv}
+ 1883
+ 3.1
+ 10
+ false
+ 10
+ TCP
+ false
+
+
+
+
+
+
+ conn_
+ true
+ 300
+ 0
+ 0
+
+
+
+
+ devices/result
+ 0
+ false
+ true
+ number of received messages
+ 1
+
+
+
+ $[0].temperature
+
+ false
+ false
+ false
+ false
+
+
+
+ $[0].humidity
+
+ false
+ false
+ false
+ false
+
+
+
+ import net.sf.json.JSONArray;
+import net.sf.json.JSONObject;
+
+String response = SampleResult.getResponseDataAsString();
+JSONArray arr = JSONArray.fromObject(response);
+JSONObject json = arr.getJSONObject(0);
+int temp = json.getInt("temperature");
+if(temp <= 30) {
+ Failure = true;
+ FailureMessage = "The temperature result should not be less than 30!";
+}
+
+
+ false
+
+
+
+
+
+
+
diff --git a/fvt_scripts/select_edgex_meta_rule.jmx b/fvt_scripts/select_edgex_meta_rule.jmx
new file mode 100644
index 0000000000..5f5a3c2280
--- /dev/null
+++ b/fvt_scripts/select_edgex_meta_rule.jmx
@@ -0,0 +1,508 @@
+
+
+
+
+
+ false
+ true
+ false
+
+
+
+
+
+
+
+
+
+ srv
+ 127.0.0.1
+ =
+
+
+ rest_port
+ 9081
+ =
+
+
+ fvt
+ ${__property(fvt,,)}
+ =
+
+
+ mqtt_srv
+ 127.0.0.1
+ =
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+ false
+ false
+
+
+
+ true
+
+
+
+ false
+ {
+"sql" : "create stream demo () WITH (FORMAT=\"JSON\", TYPE=\"edgex\")"
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream demo is created.
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+ {
+ "id": "rule1",
+ "sql": "SELECT temperature,humidity, meta(id) AS eid,meta(Created) AS ec, meta(temperature->pushed) AS tpush, meta(temperature->Created) AS tcreated, meta(temperature->Origin) AS torigin, meta(Humidity->Device) AS hdevice, meta(Humidity->Modified) AS hmodified FROM demo WHERE meta(device)=\"demo2\"",
+ "actions": [
+ {
+ "mqtt": {
+ "server": "tcp://${mqtt_srv}:1883",
+ "topic": "devices/result",
+ "qos": 1,
+ "clientId": "demo_001"
+ }
+ }
+ ]
+}
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules
+ POST
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 was created
+
+
+ Assertion.response_data
+ true
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_demo_0_records_in_total
+ 0
+ true
+ false
+ false
+ false
+
+
+
+
+ false
+ 0
+ fvt_scripts/edgex/pub
+
+
+
+
+ meta
+ =
+
+
+
+
+
+
+ ${__property(fvt,,)}
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1/status
+ GET
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+ $.source_demo_0_records_in_total
+ 2
+ true
+ false
+ false
+ false
+
+
+
+ $.sink_sink_mqtt_0_records_in_total
+ 6
+ true
+ false
+ false
+ false
+
+
+
+ 2000
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /rules/rule1
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Rule rule1 is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ true
+
+
+
+ false
+
+ =
+
+
+
+ ${srv}
+ ${rest_port}
+
+
+ /streams/demo
+ DELETE
+ true
+ false
+ true
+ false
+
+
+
+
+
+
+
+ Stream demo is dropped.
+
+
+ Assertion.response_data
+ false
+ 16
+
+
+
+
+ 500
+
+
+
+
+
+ continue
+
+ false
+ 1
+
+ 1
+ 1
+ false
+
+
+
+
+
+ false
+
+ saveConfig
+
+
+ true
+ true
+ true
+
+ true
+ true
+ true
+ true
+ false
+ true
+ true
+ false
+ false
+ false
+ true
+ false
+ false
+ false
+ true
+ 0
+ true
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+ ${mqtt_srv}
+ 1883
+ 3.1
+ 10
+ false
+ 10
+ TCP
+ false
+
+
+
+
+
+
+ conn_
+ true
+ 300
+ 0
+ 0
+
+
+
+
+ devices/result
+ 0
+ false
+ true
+ number of received messages
+ 1
+
+
+
+ $[0].temperature
+ 16
+ true
+ false
+ false
+ false
+
+
+
+ $[0].humidity
+ 16
+ true
+ false
+ false
+ false
+
+
+
+ $[0].ec
+ 22
+ true
+ false
+ false
+ false
+
+
+
+ $[0].hdevice
+ Humidity sensor
+ true
+ false
+ false
+ false
+
+
+
+ $[0].hmodified
+ 70
+ true
+ false
+ false
+ false
+
+
+
+ $[0].torigin
+ 48
+ true
+ false
+ false
+ false
+
+
+
+
+
+
+
diff --git a/fvt_scripts/setup_env.sh b/fvt_scripts/setup_env.sh
index 4908f148eb..2015e00f54 100755
--- a/fvt_scripts/setup_env.sh
+++ b/fvt_scripts/setup_env.sh
@@ -1,10 +1,11 @@
#!/bin/bash
+set -e
emqx_ids=`ps aux|grep "emqx" | grep "/usr/bin"|awk '{printf $2 " "}'`
if [ "$emqx_ids" = "" ] ; then
echo "No emqx broker was started"
- echo "starting emqx..."
emqx start
+ echo "Success started emqx "
else
echo "emqx has already started"
#for pid in $emqx_ids ; do
@@ -23,4 +24,16 @@ else
done
fi
-fvt_scripts/start_kuiper.sh
\ No newline at end of file
+fvt_scripts/start_kuiper.sh
+
+pids=`ps aux | grep vdmocker | grep "fvt_scripts" | awk '{printf $2 " "}'`
+if [ "$pids" = "" ] ; then
+ echo "No value descriptor mockup server was started"
+else
+ for pid in $pids ; do
+ echo "kill value descriptor mockup server " $pid
+ kill -9 $pid
+ done
+fi
+
+fvt_scripts/start_vdmock.sh
\ No newline at end of file
diff --git a/fvt_scripts/start_kuiper.sh b/fvt_scripts/start_kuiper.sh
index 3e15a17133..a0fcb5fe39 100755
--- a/fvt_scripts/start_kuiper.sh
+++ b/fvt_scripts/start_kuiper.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+set -e
ver=`git describe --tags --always`
os=`uname -s | tr "[A-Z]" "[a-z]"`
@@ -7,9 +8,9 @@ base_dir=_build/kuiper-"$ver"-"$os"-x86_64
rm -rf $base_dir/data/*
ls -l $base_dir/bin/server
-echo "starting kuiper at " $base_dir
cd $base_dir/
touch log/kuiper.out
export BUILD_ID=dontKillMe
nohup bin/server > log/kuiper.out 2>&1 &
+echo "starting kuiper at " $base_dir
diff --git a/fvt_scripts/start_vdmock.sh b/fvt_scripts/start_vdmock.sh
new file mode 100755
index 0000000000..6389566b21
--- /dev/null
+++ b/fvt_scripts/start_vdmock.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -e
+echo "starting edgex value descriptor mockup server."
+
+rm -rf fvt_scripts/edgex/valuedesc/vdmocker
+rm -rf fvt_scripts/edgex/pub
+rm -rf fvt_scripts/edgex/sub/sub
+
+go build -o fvt_scripts/edgex/valuedesc/vdmocker fvt_scripts/edgex/valuedesc/vd_server.go
+go build -o fvt_scripts/edgex/pub fvt_scripts/edgex/pub.go
+go build -o fvt_scripts/edgex/sub/sub fvt_scripts/edgex/sub/sub.go
+
+chmod +x fvt_scripts/edgex/valuedesc/vdmocker
+chmod +x fvt_scripts/edgex/pub
+chmod +x fvt_scripts/edgex/sub/sub
+
+export BUILD_ID=dontKillMe
+nohup fvt_scripts/edgex/valuedesc/vdmocker > vdmocker.out 2>&1 &
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 2b90d4ae66..8bda24bf0d 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,8 @@ require (
github.com/benbjohnson/clock v1.0.0
github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e
github.com/eclipse/paho.mqtt.golang v1.2.0
+ github.com/edgexfoundry/go-mod-core-contracts v0.1.48
+ github.com/edgexfoundry/go-mod-messaging v0.1.14
github.com/go-yaml/yaml v2.1.0+incompatible
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/uuid v1.1.1
@@ -14,7 +16,6 @@ require (
github.com/prometheus/common v0.7.0
github.com/sirupsen/logrus v1.4.2
github.com/urfave/cli v1.22.0
- golang.org/x/net v0.0.0-20190909003024-a7b16738d86b // indirect
)
go 1.13
diff --git a/xsql/ast.go b/xsql/ast.go
index eb266319a9..778af6fc8e 100644
--- a/xsql/ast.go
+++ b/xsql/ast.go
@@ -6,6 +6,7 @@ import (
"github.com/emqx/kuiper/common/plugin_manager"
"github.com/emqx/kuiper/xstream/api"
"math"
+ "reflect"
"sort"
"strings"
"time"
@@ -270,6 +271,14 @@ type FieldRef struct {
func (fr *FieldRef) expr() {}
func (fr *FieldRef) node() {}
+type MetaRef struct {
+ StreamName StreamName
+ Name string
+}
+
+func (fr *MetaRef) expr() {}
+func (fr *MetaRef) node() {}
+
// The stream AST tree
type Options map[string]string
@@ -287,6 +296,9 @@ type StreamStmt struct {
func (ss *StreamStmt) node() {}
func (ss *StreamStmt) Stmt() {}
+func (ss *StreamStmt) isSchemaless() bool {
+ return ss.StreamFields == nil
+}
type FieldType interface {
fieldType()
@@ -456,6 +468,7 @@ func (fn walkFuncVisitor) Visit(n Node) Visitor { fn(n); return fn }
type Valuer interface {
// Value returns the value and existence flag for a given key.
Value(key string) (interface{}, bool)
+ Meta(key string) (interface{}, bool)
}
// CallValuer implements the Call method for evaluating function calls.
@@ -499,6 +512,10 @@ func (wv *WildcardValuer) Value(key string) (interface{}, bool) {
}
}
+func (wv *WildcardValuer) Meta(key string) (interface{}, bool) {
+ return nil, false
+}
+
/**********************************
** Various Data Types for SQL transformation
*/
@@ -524,12 +541,26 @@ func (m Message) Value(key string) (interface{}, bool) {
return nil, false
}
+func (m Message) Meta(key string) (interface{}, bool) {
+ return m.Value(key)
+}
+
type Event interface {
GetTimestamp() int64
IsWatermark() bool
}
-type Metadata map[string]interface{}
+type Metadata Message
+
+func (m Metadata) Value(key string) (interface{}, bool) {
+ msg := Message(m)
+ return msg.Value(key)
+}
+
+func (m Metadata) Meta(key string) (interface{}, bool) {
+ msg := Message(m)
+ return msg.Meta(key)
+}
type Tuple struct {
Emitter string
@@ -538,26 +569,12 @@ type Tuple struct {
Metadata Metadata
}
-// Value returns the value for a key in the Message.
-func (m Metadata) Value(key string) (interface{}, bool) {
- key = strings.ToLower(key)
- if keys := strings.Split(key, "."); len(keys) == 1 {
- v, ok := m[key]
- return v, ok
- } else if len(keys) == 2 {
- v, ok := m[keys[1]]
- return v, ok
- }
- common.Log.Println("Invalid key: " + key + ", expect source.field or field.")
- return nil, false
+func (t *Tuple) Value(key string) (interface{}, bool) {
+ return t.Message.Value(key)
}
-func (t *Tuple) Value(key string) (interface{}, bool) {
- if v, ok := t.Message.Value(key); ok {
- return v, ok
- } else {
- return t.Metadata.Value(key)
- }
+func (t *Tuple) Meta(key string) (interface{}, bool) {
+ return t.Metadata.Value(key)
}
func (t *Tuple) All(stream string) (interface{}, bool) {
@@ -671,14 +688,26 @@ func (jt *JoinTuple) AddTuples(tuples []Tuple) {
}
}
-func (jt *JoinTuple) Value(key string) (interface{}, bool) {
+func getTupleValue(tuple Tuple, t string, key string) (interface{}, bool) {
+ switch t {
+ case "value":
+ return tuple.Value(key)
+ case "meta":
+ return tuple.Meta(key)
+ default:
+ common.Log.Errorf("cannot get tuple for type %s", t)
+ return nil, false
+ }
+}
+
+func (jt *JoinTuple) doGetValue(t string, key string) (interface{}, bool) {
keys := strings.Split(key, ".")
tuples := jt.Tuples
switch len(keys) {
case 1:
if len(tuples) > 1 {
for _, tuple := range tuples { //TODO support key without modifier?
- v, ok := tuple.Message[key]
+ v, ok := getTupleValue(tuple, t, key)
if ok {
return v, ok
}
@@ -686,16 +715,14 @@ func (jt *JoinTuple) Value(key string) (interface{}, bool) {
common.Log.Infoln("Wrong key: ", key, ", not found")
return nil, false
} else {
- v, ok := tuples[0].Message[key]
- return v, ok
+ return getTupleValue(tuples[0], t, key)
}
case 2:
emitter, key := keys[0], keys[1]
//TODO should use hash here
for _, tuple := range tuples {
if tuple.Emitter == emitter {
- v, ok := tuple.Message[key]
- return v, ok
+ return getTupleValue(tuple, t, key)
}
}
return nil, false
@@ -705,6 +732,14 @@ func (jt *JoinTuple) Value(key string) (interface{}, bool) {
}
}
+func (jt *JoinTuple) Value(key string) (interface{}, bool) {
+ return jt.doGetValue("value", key)
+}
+
+func (jt *JoinTuple) Meta(key string) (interface{}, bool) {
+ return jt.doGetValue("meta", key)
+}
+
func (jt *JoinTuple) All(stream string) (interface{}, bool) {
if stream != "" {
for _, t := range jt.Tuples {
@@ -766,6 +801,7 @@ type SortingData interface {
type MultiSorter struct {
SortingData
fields SortFields
+ values []map[string]interface{}
}
// OrderedBy returns a Sorter that sorts using the less functions, in order.
@@ -783,31 +819,104 @@ func OrderedBy(fields SortFields) *MultiSorter {
// -1, 0, 1 and reduce the number of calls for greater efficiency: an
// exercise for the reader.
func (ms *MultiSorter) Less(i, j int) bool {
- p, q := ms.SortingData.Index(i), ms.SortingData.Index(j)
- vep, veq := &ValuerEval{Valuer: MultiValuer(p, &FunctionValuer{})}, &ValuerEval{Valuer: MultiValuer(q, &FunctionValuer{})}
+ p, q := ms.values[i], ms.values[j]
+ v := &ValuerEval{Valuer: MultiValuer(&FunctionValuer{})}
for _, field := range ms.fields {
- vp, ok := vep.Valuer.Value(field.Name)
- if !ok {
- return !field.Ascending
- }
- vq, ok := veq.Valuer.Value(field.Name)
- if !ok {
- return !field.Ascending
+ n := field.Name
+ vp, _ := p[n]
+ vq, _ := q[n]
+ if vp == nil && vq != nil {
+ return false
+ } else if vp != nil && vq == nil {
+ ms.valueSwap(true, i, j)
+ return true
+ } else if vp == nil && vq == nil {
+ return false
}
switch {
- case vep.simpleDataEval(vp, vq, LT):
+ case v.simpleDataEval(vp, vq, LT):
+ ms.valueSwap(field.Ascending, i, j)
return field.Ascending
- case veq.simpleDataEval(vq, vp, LT):
+ case v.simpleDataEval(vq, vp, LT):
+ ms.valueSwap(!field.Ascending, i, j)
return !field.Ascending
}
}
return false
}
+func (ms *MultiSorter) valueSwap(s bool, i, j int) {
+ if s {
+ ms.values[i], ms.values[j] = ms.values[j], ms.values[i]
+ }
+}
+
// Sort sorts the argument slice according to the less functions passed to OrderedBy.
-func (ms *MultiSorter) Sort(data SortingData) {
+func (ms *MultiSorter) Sort(data SortingData) error {
ms.SortingData = data
+ types := make([]string, len(ms.fields))
+ ms.values = make([]map[string]interface{}, data.Len())
+ //load and validate data
+ for i := 0; i < data.Len(); i++ {
+ ms.values[i] = make(map[string]interface{})
+ p := data.Index(i)
+ vep := &ValuerEval{Valuer: MultiValuer(p, &FunctionValuer{})}
+ for j, field := range ms.fields {
+ n := field.Name
+ vp, _ := vep.Valuer.Value(n)
+ if err, ok := vp.(error); ok {
+ return err
+ } else {
+ if types[j] == "" && vp != nil {
+ types[j] = fmt.Sprintf("%T", vp)
+ }
+ if err := validate(types[j], vp); err != nil {
+ return err
+ } else {
+ ms.values[i][n] = vp
+ }
+ }
+ }
+ }
sort.Sort(ms)
+ return nil
+}
+
+func validate(t string, v interface{}) error {
+ if v == nil || t == "" {
+ return nil
+ }
+ vt := fmt.Sprintf("%T", v)
+ switch t {
+ case "int", "int64", "float64", "uint64":
+ if vt == "int" || vt == "int64" || vt == "float64" || vt == "uint64" {
+ return nil
+ } else {
+ return fmt.Errorf("incompatible types for comparison: %s and %s", t, vt)
+ }
+ case "bool":
+ if vt == "bool" {
+ return nil
+ } else {
+ return fmt.Errorf("incompatible types for comparison: %s and %s", t, vt)
+ }
+ case "string":
+ if vt == "string" {
+ return nil
+ } else {
+ return fmt.Errorf("incompatible types for comparison: %s and %s", t, vt)
+ }
+ case "time.Time":
+ _, err := common.InterfaceToTime(v, "")
+ if err != nil {
+ return fmt.Errorf("incompatible types for comparison: %s and %s", t, vt)
+ } else {
+ return nil
+ }
+ default:
+ return fmt.Errorf("incompatible types for comparison: %s and %s", t, vt)
+ }
+ return nil
}
type EvalResultMessage struct {
@@ -850,13 +959,22 @@ func (a multiValuer) Value(key string) (interface{}, bool) {
return nil, false
}
+func (a multiValuer) Meta(key string) (interface{}, bool) {
+ for _, valuer := range a {
+ if v, ok := valuer.Meta(key); ok {
+ return v, true
+ }
+ }
+ return nil, false
+}
+
func (a multiValuer) Call(name string, args []interface{}) (interface{}, bool) {
for _, valuer := range a {
if valuer, ok := valuer.(CallValuer); ok {
if v, ok := valuer.Call(name, args); ok {
return v, true
} else {
- common.Log.Println(fmt.Sprintf("Found error \"%s\" when call func %s.\n", v, name))
+ return fmt.Errorf("call func %s error: %v", name, v), false
}
}
}
@@ -864,33 +982,26 @@ func (a multiValuer) Call(name string, args []interface{}) (interface{}, bool) {
}
type multiAggregateValuer struct {
- data AggregateData
- valuers []Valuer
+ data AggregateData
+ multiValuer
}
func MultiAggregateValuer(data AggregateData, valuers ...Valuer) Valuer {
return &multiAggregateValuer{
- data: data,
- valuers: valuers,
+ data: data,
+ multiValuer: valuers,
}
}
-func (a *multiAggregateValuer) Value(key string) (interface{}, bool) {
- for _, valuer := range a.valuers {
- if v, ok := valuer.Value(key); ok {
- return v, true
- }
- }
- return nil, false
-}
-
//The args is [][] for aggregation
func (a *multiAggregateValuer) Call(name string, args []interface{}) (interface{}, bool) {
var singleArgs []interface{} = nil
- for _, valuer := range a.valuers {
+ for _, valuer := range a.multiValuer {
if a, ok := valuer.(AggregateCallValuer); ok {
if v, ok := a.Call(name, args); ok {
return v, true
+ } else {
+ return fmt.Errorf("call func %s error: %v", name, v), false
}
} else if c, ok := valuer.(CallValuer); ok {
if singleArgs == nil {
@@ -960,6 +1071,9 @@ func (v *ValuerEval) Eval(expr Expr) interface{} {
} else {
for i := range expr.Args {
args[i] = v.Eval(expr.Args[i])
+ if _, ok := args[i].(error); ok {
+ return args[i]
+ }
}
}
}
@@ -976,6 +1090,15 @@ func (v *ValuerEval) Eval(expr Expr) interface{} {
val, _ := v.Valuer.Value(string(expr.StreamName) + "." + expr.Name)
return val
}
+ case *MetaRef:
+ if expr.StreamName == "" {
+ val, _ := v.Valuer.Meta(expr.Name)
+ return val
+ } else {
+ //The field specified with stream source
+ val, _ := v.Valuer.Meta(string(expr.StreamName) + "." + expr.Name)
+ return val
+ }
case *Wildcard:
val, _ := v.Valuer.Value("")
return val
@@ -989,100 +1112,113 @@ func (v *ValuerEval) evalBinaryExpr(expr *BinaryExpr) interface{} {
switch val := lhs.(type) {
case map[string]interface{}:
return v.evalJsonExpr(val, expr.OP, expr.RHS)
- case []interface{}:
+ case []interface{}, []map[string]interface{}:
return v.evalJsonExpr(val, expr.OP, expr.RHS)
+ case error:
+ return val
}
rhs := v.Eval(expr.RHS)
- if lhs == nil && rhs != nil {
- // When the LHS is nil and the RHS is a boolean, implicitly cast the
- // nil to false.
- if _, ok := rhs.(bool); ok {
- lhs = false
- }
- } else if lhs != nil && rhs == nil {
- // Implicit cast of the RHS nil to false when the LHS is a boolean.
- if _, ok := lhs.(bool); ok {
- rhs = false
- }
+ if _, ok := rhs.(error); ok {
+ return rhs
}
return v.simpleDataEval(lhs, rhs, expr.OP)
}
func (v *ValuerEval) evalJsonExpr(result interface{}, op Token, expr Expr) interface{} {
- if val, ok := result.(map[string]interface{}); ok {
+ switch val := result.(type) {
+ case map[string]interface{}:
switch op {
case ARROW:
- if exp, ok := expr.(*FieldRef); ok {
+ switch e := expr.(type) {
+ case *FieldRef, *MetaRef:
ve := &ValuerEval{Valuer: Message(val)}
- return ve.Eval(exp)
- } else {
- fmt.Printf("The right expression is not a field reference node.\n")
- return nil
+ return ve.Eval(e)
+ default:
+ return fmt.Errorf("the right expression is not a field reference node")
}
default:
- fmt.Printf("%v is an invalid operation.\n", op)
- return nil
+ return fmt.Errorf("%v is an invalid operation for %T", op, val)
}
- }
-
- if val, ok := result.([]interface{}); ok {
+ case []interface{}, []map[string]interface{}:
switch op {
case SUBSET:
+ val := reflect.ValueOf(result)
ber := v.Eval(expr)
if berVal, ok1 := ber.(*BracketEvalResult); ok1 {
if berVal.isIndex() {
- if berVal.Start >= len(val) {
- fmt.Printf("Out of index: %d of %d.\n", berVal.Start, len(val))
- return nil
+ if berVal.Start >= val.Len() {
+ return fmt.Errorf("out of index: %d of %d", berVal.Start, val.Len())
}
- return val[berVal.Start]
+ return val.Index(berVal.Start).Interface()
} else {
- if berVal.Start >= len(val) {
- fmt.Printf("Start value is out of index: %d of %d.\n", berVal.Start, len(val))
- return nil
+ if berVal.Start >= val.Len() {
+ return fmt.Errorf("start value is out of index: %d of %d", berVal.Start, val.Len())
}
- if berVal.End >= len(val) {
- fmt.Printf("End value is out of index: %d of %d.\n", berVal.End, len(val))
- return nil
+ if berVal.End >= val.Len() {
+ return fmt.Errorf("end value is out of index: %d of %d", berVal.End, val.Len())
}
- return val[berVal.Start:berVal.End]
+ return val.Slice(berVal.Start, berVal.End).Interface()
}
} else {
- fmt.Printf("Invalid evaluation result - %v.\n", berVal)
- return nil
+ return fmt.Errorf("invalid evaluation result - %v", berVal)
}
default:
- fmt.Printf("%v is an invalid operation.\n", op)
- return nil
+ return fmt.Errorf("%v is an invalid operation for %T", op, val)
}
}
return nil
}
+//lhs and rhs are non-nil
func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{} {
+ if lhs == nil || rhs == nil {
+ switch op {
+ case EQ, LTE, GTE:
+ if lhs == nil && rhs == nil {
+ return true
+ } else {
+ return false
+ }
+ case NEQ:
+ if lhs == nil && rhs == nil {
+ return false
+ } else {
+ return true
+ }
+ case LT, GT:
+ return false
+ default:
+ return nil
+ }
+ }
lhs = convertNum(lhs)
rhs = convertNum(rhs)
// Evaluate if both sides are simple types.
switch lhs := lhs.(type) {
case bool:
rhs, ok := rhs.(bool)
+ if !ok {
+ return invalidOpError(lhs, op, rhs)
+ }
switch op {
case AND:
- return ok && (lhs && rhs)
+ return lhs && rhs
case OR:
- return ok && (lhs || rhs)
+ return lhs || rhs
case BITWISE_AND:
- return ok && (lhs && rhs)
+ return lhs && rhs
case BITWISE_OR:
- return ok && (lhs || rhs)
+ return lhs || rhs
case BITWISE_XOR:
- return ok && (lhs != rhs)
+ return lhs != rhs
case EQ:
- return ok && (lhs == rhs)
+ return lhs == rhs
case NEQ:
- return ok && (lhs != rhs)
+ return lhs != rhs
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case float64:
// Try the rhs as a float64, int64, or uint64
@@ -1095,48 +1231,41 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
rhsf, ok = float64(val), true
}
}
-
+ if !ok {
+ return invalidOpError(lhs, op, rhs)
+ }
rhs := rhsf
switch op {
case EQ:
- return ok && (lhs == rhs)
+ return lhs == rhs
case NEQ:
- return ok && (lhs != rhs)
+ return lhs != rhs
case LT:
- return ok && (lhs < rhs)
+ return lhs < rhs
case LTE:
- return ok && (lhs <= rhs)
+ return lhs <= rhs
case GT:
- return ok && (lhs > rhs)
+ return lhs > rhs
case GTE:
- return ok && (lhs >= rhs)
+ return lhs >= rhs
case ADD:
- if !ok {
- return nil
- }
return lhs + rhs
case SUB:
- if !ok {
- return nil
- }
return lhs - rhs
case MUL:
- if !ok {
- return nil
- }
return lhs * rhs
case DIV:
- if !ok {
- return nil
- } else if rhs == 0 {
- return float64(0)
+ if rhs == 0 {
+ return fmt.Errorf("divided by zero")
}
return lhs / rhs
case MOD:
- if !ok {
- return nil
+ if rhs == 0 {
+ return fmt.Errorf("divided by zero")
}
return math.Mod(lhs, rhs)
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case int64:
// Try as a float64 to see if a float cast is required.
@@ -1164,11 +1293,16 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs * rhs
case DIV:
if rhs == 0 {
- return float64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs / rhs
case MOD:
+ if rhs == 0 {
+ return fmt.Errorf("divided by zero")
+ }
return math.Mod(lhs, rhs)
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case int64:
switch op {
@@ -1193,18 +1327,18 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
case DIV:
if v.IntegerFloatDivision {
if rhs == 0 {
- return float64(0)
+ return fmt.Errorf("divided by zero")
}
return float64(lhs) / float64(rhs)
}
if rhs == 0 {
- return int64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs / rhs
case MOD:
if rhs == 0 {
- return int64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs % rhs
case BITWISE_AND:
@@ -1213,6 +1347,8 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs | rhs
case BITWISE_XOR:
return lhs ^ rhs
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case uint64:
switch op {
@@ -1248,12 +1384,12 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return uint64(lhs) * rhs
case DIV:
if rhs == 0 {
- return uint64(0)
+ return fmt.Errorf("divided by zero")
}
return uint64(lhs) / rhs
case MOD:
if rhs == 0 {
- return uint64(0)
+ return fmt.Errorf("divided by zero")
}
return uint64(lhs) % rhs
case BITWISE_AND:
@@ -1262,7 +1398,11 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return uint64(lhs) | rhs
case BITWISE_XOR:
return uint64(lhs) ^ rhs
+ default:
+ return invalidOpError(lhs, op, rhs)
}
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case uint64:
// Try as a float64 to see if a float cast is required.
@@ -1290,11 +1430,16 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs * rhs
case DIV:
if rhs == 0 {
- return float64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs / rhs
case MOD:
+ if rhs == 0 {
+ return fmt.Errorf("divided by zero")
+ }
return math.Mod(lhs, rhs)
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case int64:
switch op {
@@ -1330,12 +1475,12 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs * uint64(rhs)
case DIV:
if rhs == 0 {
- return uint64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs / uint64(rhs)
case MOD:
if rhs == 0 {
- return uint64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs % uint64(rhs)
case BITWISE_AND:
@@ -1344,6 +1489,8 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs | uint64(rhs)
case BITWISE_XOR:
return lhs ^ uint64(rhs)
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case uint64:
switch op {
@@ -1367,12 +1514,12 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs * rhs
case DIV:
if rhs == 0 {
- return uint64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs / rhs
case MOD:
if rhs == 0 {
- return uint64(0)
+ return fmt.Errorf("divided by zero")
}
return lhs % rhs
case BITWISE_AND:
@@ -1381,51 +1528,37 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs | rhs
case BITWISE_XOR:
return lhs ^ rhs
+ default:
+ return invalidOpError(lhs, op, rhs)
}
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case string:
+ rhss, ok := rhs.(string)
+ if !ok {
+ return invalidOpError(lhs, op, rhs)
+ }
switch op {
case EQ:
- rhs, ok := rhs.(string)
- if !ok {
- return false
- }
- return lhs == rhs
+ return lhs == rhss
case NEQ:
- rhs, ok := rhs.(string)
- if !ok {
- return false
- }
- return lhs != rhs
+ return lhs != rhss
case LT:
- rhs, ok := rhs.(string)
- if !ok {
- return false
- }
- return lhs < rhs
+ return lhs < rhss
case LTE:
- rhs, ok := rhs.(string)
- if !ok {
- return false
- }
- return lhs <= rhs
+ return lhs <= rhss
case GT:
- rhs, ok := rhs.(string)
- if !ok {
- return false
- }
- return lhs > rhs
+ return lhs > rhss
case GTE:
- rhs, ok := rhs.(string)
- if !ok {
- return false
- }
- return lhs >= rhs
+ return lhs >= rhss
+ default:
+ return invalidOpError(lhs, op, rhs)
}
case time.Time:
rt, err := common.InterfaceToTime(rhs, "")
if err != nil {
- return false
+ return invalidOpError(lhs, op, rhs)
}
switch op {
case EQ:
@@ -1440,16 +1573,18 @@ func (v *ValuerEval) simpleDataEval(lhs, rhs interface{}, op Token) interface{}
return lhs.After(rt)
case GTE:
return lhs.After(rt) || lhs.Equal(rt)
+ default:
+ return invalidOpError(lhs, op, rhs)
}
+ default:
+ return invalidOpError(lhs, op, rhs)
}
- // The types were not comparable. If our operation was an equality operation,
- // return false instead of true.
- switch op {
- case EQ, NEQ, LT, LTE, GT, GTE:
- return false
- }
- return nil
+ return invalidOpError(lhs, op, rhs)
+}
+
+func invalidOpError(lhs interface{}, op Token, rhs interface{}) error {
+ return fmt.Errorf("invalid operation %[1]T(%[1]v) %s %[3]T(%[3]v)", lhs, tokens[op], rhs)
}
func convertNum(para interface{}) interface{} {
diff --git a/xsql/funcs_aggregate.go b/xsql/funcs_aggregate.go
index d79a9daae9..547b5c8f05 100644
--- a/xsql/funcs_aggregate.go
+++ b/xsql/funcs_aggregate.go
@@ -16,6 +16,10 @@ func (v AggregateFunctionValuer) Value(key string) (interface{}, bool) {
return nil, false
}
+func (v AggregateFunctionValuer) Meta(key string) (interface{}, bool) {
+ return nil, false
+}
+
func (v AggregateFunctionValuer) Call(name string, args []interface{}) (interface{}, bool) {
lowerName := strings.ToLower(name)
switch lowerName {
@@ -24,14 +28,20 @@ func (v AggregateFunctionValuer) Call(name string, args []interface{}) (interfac
if len(arg0) > 0 {
v := getFirstValidArg(arg0)
switch v.(type) {
- case int:
- return sliceIntTotal(arg0) / len(arg0), true
- case int64:
- return sliceIntTotal(arg0) / len(arg0), true
+ case int, int64:
+ if r, err := sliceIntTotal(arg0); err != nil {
+ return err, false
+ } else {
+ return r / len(arg0), true
+ }
case float64:
- return sliceFloatTotal(arg0) / float64(len(arg0)), true
+ if r, err := sliceFloatTotal(arg0); err != nil {
+ return err, false
+ } else {
+ return r / float64(len(arg0)), true
+ }
default:
- return fmt.Errorf("invalid data type for avg function"), false
+ return fmt.Errorf("run avg function error: found invalid arg %[1]T(%[1]v)", v), false
}
}
return 0, true
@@ -44,49 +54,87 @@ func (v AggregateFunctionValuer) Call(name string, args []interface{}) (interfac
v := getFirstValidArg(arg0)
switch t := v.(type) {
case int:
- return sliceIntMax(arg0, t), true
+ if r, err := sliceIntMax(arg0, t); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case int64:
- return sliceIntMax(arg0, int(t)), true
+ if r, err := sliceIntMax(arg0, int(t)); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case float64:
- return sliceFloatMax(arg0, t), true
+ if r, err := sliceFloatMax(arg0, t); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case string:
- return sliceStringMax(arg0, t), true
+ if r, err := sliceStringMax(arg0, t); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
default:
- return fmt.Errorf("unsupported data type for avg function"), false
+ return fmt.Errorf("run max function error: found invalid arg %[1]T(%[1]v)", v), false
}
}
- return fmt.Errorf("empty data for max function"), false
+ return fmt.Errorf("run max function error: empty data"), false
case "min":
arg0 := args[0].([]interface{})
if len(arg0) > 0 {
v := getFirstValidArg(arg0)
switch t := v.(type) {
case int:
- return sliceIntMin(arg0, t), true
+ if r, err := sliceIntMin(arg0, t); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case int64:
- return sliceIntMin(arg0, int(t)), true
+ if r, err := sliceIntMin(arg0, int(t)); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case float64:
- return sliceFloatMin(arg0, t), true
+ if r, err := sliceFloatMin(arg0, t); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case string:
- return sliceStringMin(arg0, t), true
+ if r, err := sliceStringMin(arg0, t); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
default:
- return fmt.Errorf("unsupported data type for avg function"), false
+ return fmt.Errorf("run min function error: found invalid arg %[1]T(%[1]v)", v), false
}
}
- return fmt.Errorf("empty data for max function"), false
+ return fmt.Errorf("run min function error: empty data"), false
case "sum":
arg0 := args[0].([]interface{})
if len(arg0) > 0 {
v := getFirstValidArg(arg0)
switch v.(type) {
- case int:
- return sliceIntTotal(arg0), true
- case int64:
- return sliceIntTotal(arg0), true
+ case int, int64:
+ if r, err := sliceIntTotal(arg0); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
case float64:
- return sliceFloatTotal(arg0), true
+ if r, err := sliceFloatTotal(arg0); err != nil {
+ return err, false
+ } else {
+ return r, true
+ }
default:
- return fmt.Errorf("invalid data type for sum function"), false
+ return fmt.Errorf("run sum function error: found invalid arg %[1]T(%[1]v)", v), false
}
}
return 0, true
@@ -122,84 +170,100 @@ func getFirstValidArg(s []interface{}) interface{} {
return nil
}
-func sliceIntTotal(s []interface{}) int {
+func sliceIntTotal(s []interface{}) (int, error) {
var total int
for _, v := range s {
- if v, ok := v.(int); ok {
- total += v
+ if vi, ok := v.(int); ok {
+ total += vi
+ } else {
+ return 0, fmt.Errorf("requires int but found %[1]T(%[1]v)", v)
}
}
- return total
+ return total, nil
}
-func sliceFloatTotal(s []interface{}) float64 {
+func sliceFloatTotal(s []interface{}) (float64, error) {
var total float64
for _, v := range s {
- if v, ok := v.(float64); ok {
- total += v
+ if vf, ok := v.(float64); ok {
+ total += vf
+ } else {
+ return 0, fmt.Errorf("requires float64 but found %[1]T(%[1]v)", v)
}
}
- return total
+ return total, nil
}
-func sliceIntMax(s []interface{}, max int) int {
+func sliceIntMax(s []interface{}, max int) (int, error) {
for _, v := range s {
- if v, ok := v.(int); ok {
- if max < v {
- max = v
+ if vi, ok := v.(int); ok {
+ if max < vi {
+ max = vi
}
+ } else {
+ return 0, fmt.Errorf("requires int but found %[1]T(%[1]v)", v)
}
}
- return max
+ return max, nil
}
-func sliceFloatMax(s []interface{}, max float64) float64 {
+func sliceFloatMax(s []interface{}, max float64) (float64, error) {
for _, v := range s {
- if v, ok := v.(float64); ok {
- if max < v {
- max = v
+ if vf, ok := v.(float64); ok {
+ if max < vf {
+ max = vf
}
+ } else {
+ return 0, fmt.Errorf("requires float64 but found %[1]T(%[1]v)", v)
}
}
- return max
+ return max, nil
}
-func sliceStringMax(s []interface{}, max string) string {
+func sliceStringMax(s []interface{}, max string) (string, error) {
for _, v := range s {
- if v, ok := v.(string); ok {
- if max < v {
- max = v
+ if vs, ok := v.(string); ok {
+ if max < vs {
+ max = vs
}
+ } else {
+ return "", fmt.Errorf("requires string but found %[1]T(%[1]v)", v)
}
}
- return max
+ return max, nil
}
-func sliceIntMin(s []interface{}, min int) int {
+func sliceIntMin(s []interface{}, min int) (int, error) {
for _, v := range s {
- if v, ok := v.(int); ok {
- if min > v {
- min = v
+ if vi, ok := v.(int); ok {
+ if min > vi {
+ min = vi
}
+ } else {
+ return 0, fmt.Errorf("requires int but found %[1]T(%[1]v)", v)
}
}
- return min
+ return min, nil
}
-func sliceFloatMin(s []interface{}, min float64) float64 {
+func sliceFloatMin(s []interface{}, min float64) (float64, error) {
for _, v := range s {
- if v, ok := v.(float64); ok {
- if min > v {
- min = v
+ if vf, ok := v.(float64); ok {
+ if min > vf {
+ min = vf
}
+ } else {
+ return 0, fmt.Errorf("requires float64 but found %[1]T(%[1]v)", v)
}
}
- return min
+ return min, nil
}
-func sliceStringMin(s []interface{}, min string) string {
+func sliceStringMin(s []interface{}, min string) (string, error) {
for _, v := range s {
- if v, ok := v.(string); ok {
- if min < v {
- min = v
+ if vs, ok := v.(string); ok {
+ if min < vs {
+ min = vs
}
+ } else {
+ return "", fmt.Errorf("requires string but found %[1]T(%[1]v)", v)
}
}
- return min
+ return min, nil
}
diff --git a/xsql/funcs_ast_validator.go b/xsql/funcs_ast_validator.go
index c8a8deb579..0776e08bed 100644
--- a/xsql/funcs_ast_validator.go
+++ b/xsql/funcs_ast_validator.go
@@ -294,13 +294,33 @@ func validateOtherFunc(name string, args []Expr) error {
return err
}
if isIntegerArg(args[0]) || isTimeArg(args[0]) || isBooleanArg(args[0]) || isStringArg(args[0]) || isFloatArg(args[0]) {
- return produceErrInfo(name, 0, "field reference")
+ return produceErrInfo(name, 0, "meta reference")
}
- if p, ok := args[0].(*FieldRef); ok {
- if _, ok := SpecialKeyMapper[p.Name]; !ok {
+ if p, ok := args[0].(*MetaRef); ok {
+ name := strings.ToLower(p.Name)
+ if name != "topic" && name != "messageid" {
return fmt.Errorf("Parameter of mqtt function can be only topic or messageid.")
}
}
+ case "meta":
+ if err := validateLen(name, 1, len); err != nil {
+ return err
+ }
+ if _, ok := args[0].(*MetaRef); ok {
+ return nil
+ }
+ expr := args[0]
+ for {
+ if be, ok := expr.(*BinaryExpr); ok {
+ if _, ok := be.LHS.(*MetaRef); ok && be.OP == ARROW {
+ return nil
+ }
+ expr = be.LHS
+ } else {
+ break
+ }
+ }
+ return produceErrInfo(name, 0, "meta reference")
}
return nil
}
diff --git a/xsql/funcs_ast_validator_test.go b/xsql/funcs_ast_validator_test.go
index 4eae54b2e9..1fe15bfa98 100644
--- a/xsql/funcs_ast_validator_test.go
+++ b/xsql/funcs_ast_validator_test.go
@@ -389,7 +389,7 @@ func TestFuncValidator(t *testing.T) {
{
s: `SELECT mqtt("topic") FROM tbl`,
stmt: nil,
- err: "Expect field reference type for 1 parameter of function mqtt.",
+ err: "Expect meta reference type for 1 parameter of function mqtt.",
},
{
@@ -409,12 +409,41 @@ func TestFuncValidator(t *testing.T) {
stmt: nil,
err: "Expect string type for 2 parameter of function split_value.",
},
-
{
s: `SELECT split_value(topic1, "hello", -1) FROM tbl`,
stmt: nil,
err: "The index should not be a nagtive integer.",
},
+ {
+ s: `SELECT meta(tbl, "timestamp", 1) FROM tbl`,
+ stmt: nil,
+ err: "The arguments for meta should be 1.",
+ },
+ {
+ s: `SELECT meta("src1.device") FROM tbl`,
+ stmt: nil,
+ err: "Expect meta reference type for 1 parameter of function meta.",
+ },
+ {
+ s: `SELECT meta(device) FROM tbl`,
+ stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "meta", Expr: &Call{Name: "meta", Args: []Expr{&MetaRef{Name: "device"}}}}}, Sources: []Source{&Table{Name: "tbl"}}},
+ },
+ {
+ s: `SELECT meta(tbl.device) FROM tbl`,
+ stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "meta", Expr: &Call{Name: "meta", Args: []Expr{&MetaRef{StreamName: "tbl", Name: "device"}}}}}, Sources: []Source{&Table{Name: "tbl"}}},
+ },
+ {
+ s: `SELECT meta(device->reading->topic) FROM tbl`,
+ stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "meta", Expr: &Call{Name: "meta", Args: []Expr{&BinaryExpr{
+ OP: ARROW,
+ LHS: &BinaryExpr{
+ OP: ARROW,
+ LHS: &MetaRef{Name: "device"},
+ RHS: &MetaRef{Name: "reading"},
+ },
+ RHS: &MetaRef{Name: "topic"},
+ }}}}}, Sources: []Source{&Table{Name: "tbl"}}},
+ },
}
fmt.Printf("The test bucket size is %d.\n\n", len(tests))
diff --git a/xsql/funcs_misc.go b/xsql/funcs_misc.go
index b6aaa59d50..25224ce5e7 100644
--- a/xsql/funcs_misc.go
+++ b/xsql/funcs_misc.go
@@ -206,6 +206,8 @@ func otherCall(name string, args []interface{}) (interface{}, bool) {
return v, true
}
return nil, false
+ case "meta":
+ return args[0], true
default:
return fmt.Errorf("unknown function name %s", name), false
}
diff --git a/xsql/functions.go b/xsql/functions.go
index fa2ba4716a..4980753233 100644
--- a/xsql/functions.go
+++ b/xsql/functions.go
@@ -13,6 +13,10 @@ func (*FunctionValuer) Value(key string) (interface{}, bool) {
return nil, false
}
+func (*FunctionValuer) Meta(key string) (interface{}, bool) {
+ return nil, false
+}
+
var aggFuncMap = map[string]string{"avg": "",
"count": "",
"max": "", "min": "",
@@ -53,7 +57,7 @@ var hashFuncMap = map[string]string{"md5": "",
}
var otherFuncMap = map[string]string{"isNull": "",
- "newuuid": "", "timestamp": "", "mqtt": "",
+ "newuuid": "", "timestamp": "", "mqtt": "", "meta": "",
}
func (*FunctionValuer) Call(name string, args []interface{}) (interface{}, bool) {
diff --git a/xsql/lexical.go b/xsql/lexical.go
index 066d6dc190..1beef7d46d 100644
--- a/xsql/lexical.go
+++ b/xsql/lexical.go
@@ -143,7 +143,8 @@ var tokens = []string{
GT: ">",
GTE: ">=",
- ARROW: "->",
+ SUBSET: "[]",
+ ARROW: "->",
ASTERISK: "*",
COMMA: ",",
diff --git a/xsql/metadata_util.go b/xsql/metadata_util.go
deleted file mode 100644
index 501a8484c5..0000000000
--- a/xsql/metadata_util.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package xsql
-
-import "strings"
-
-const INTERNAL_MQTT_TOPIC_KEY string = "internal_mqtt_topic_key_$$"
-const INTERNAL_MQTT_MSG_ID_KEY string = "internal_mqtt_msg_id_key_$$"
-
-//For functions such as mqtt(topic). If the field definitions also has a field named "topic", then it need to
-//have an internal key for "topic" to avoid key conflicts.
-var SpecialKeyMapper = map[string]string{"topic": INTERNAL_MQTT_TOPIC_KEY, "messageid": INTERNAL_MQTT_MSG_ID_KEY}
-
-func AddSpecialKeyMap(left, right string) {
- SpecialKeyMapper[left] = right
-}
-
-/**
-The function is used for re-write the parameter names.
-For example, for mqtt function, the arguments could be 'topic' or 'messageid'.
-If the field name defined in stream happens to be 'topic' or 'messageid', it will have conflicts.
-*/
-func (c Call) rewrite_func() *Call {
- if strings.ToLower(c.Name) == "mqtt" {
- if f, ok := c.Args[0].(*FieldRef); ok {
- if n, ok1 := SpecialKeyMapper[f.Name]; ok1 {
- f.Name = n
- c.Args[0] = f
- }
- }
- }
- return &c
-}
diff --git a/xsql/parser.go b/xsql/parser.go
index bbeaf8b435..a12736e000 100644
--- a/xsql/parser.go
+++ b/xsql/parser.go
@@ -17,6 +17,7 @@ type Parser struct {
tok Token
lit string
}
+ inmeta bool
}
func (p *Parser) parseCondition() (Expr, error) {
@@ -505,10 +506,17 @@ func (p *Parser) parseUnaryExpr() (Expr, error) {
if n, err := p.parseFieldNameSections(); err != nil {
return nil, err
} else {
- if len(n) == 2 {
- return &FieldRef{StreamName: StreamName(n[0]), Name: n[1]}, nil
+ if p.inmeta {
+ if len(n) == 2 {
+ return &MetaRef{StreamName: StreamName(n[0]), Name: n[1]}, nil
+ }
+ return &MetaRef{StreamName: "", Name: n[0]}, nil
+ } else {
+ if len(n) == 2 {
+ return &FieldRef{StreamName: StreamName(n[0]), Name: n[1]}, nil
+ }
+ return &FieldRef{StreamName: "", Name: n[0]}, nil
}
- return &FieldRef{StreamName: StreamName(""), Name: n[0]}, nil
}
} else if tok == STRING {
return &StringLiteral{Val: lit}, nil
@@ -587,16 +595,22 @@ func (p *Parser) parseAs(f *Field) (*Field, error) {
}
func (p *Parser) parseCall(name string) (Expr, error) {
+ if strings.ToLower(name) == "meta" || strings.ToLower(name) == "mqtt" {
+ p.inmeta = true
+ defer func() {
+ p.inmeta = false
+ }()
+ }
var args []Expr
for {
if tok, _ := p.scanIgnoreWhitespace(); tok == RPAREN {
- return Call{Name: name, Args: args}.rewrite_func(), nil
+ return &Call{Name: name, Args: args}, nil
} else if tok == ASTERISK {
if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 != RPAREN {
return nil, fmt.Errorf("found %q, expected right paren.", lit2)
} else {
args = append(args, &StringLiteral{Val: "*"})
- return Call{Name: name, Args: args}.rewrite_func(), nil
+ return &Call{Name: name, Args: args}, nil
}
} else {
p.unscan()
@@ -621,7 +635,7 @@ func (p *Parser) parseCall(name string) (Expr, error) {
if valErr := validateFuncs(name, args); valErr != nil {
return nil, valErr
}
- return Call{Name: name, Args: args}.rewrite_func(), nil
+ return &Call{Name: name, Args: args}, nil
} else {
if error != nil {
return nil, error
@@ -822,6 +836,17 @@ func (p *Parser) parseStreamFields() (StreamFields, error) {
if tok, lit := p.scanIgnoreWhitespace(); tok == LPAREN {
lStack.Push(lit)
for {
+ //For the schemaless streams
+ //create stream demo () WITH (FORMAT="JSON", DATASOURCE="demo" TYPE="edgex")
+ if tok1, _ := p.scanIgnoreWhitespace(); tok1 == RPAREN {
+ lStack.Pop()
+ if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 != WITH {
+ return nil, fmt.Errorf("found %q, expected is with.", lit2)
+ }
+ return fields, nil
+ } else {
+ p.unscan()
+ }
if f, err := p.parseStreamField(); err != nil {
return nil, err
} else {
diff --git a/xsql/plans/aggregate_operator.go b/xsql/plans/aggregate_operator.go
index ab9ba88826..3ed41fc8d6 100644
--- a/xsql/plans/aggregate_operator.go
+++ b/xsql/plans/aggregate_operator.go
@@ -19,12 +19,13 @@ func (p *AggregatePlan) Apply(ctx api.StreamContext, data interface{}) interface
log.Debugf("aggregate plan receive %s", data)
var ms []xsql.DataValuer
switch input := data.(type) {
+ case error:
+ return input
case xsql.DataValuer:
ms = append(ms, input)
case xsql.WindowTuplesSet:
if len(input) != 1 {
- log.Infof("WindowTuplesSet with multiple tuples cannot be evaluated")
- return nil
+ return fmt.Errorf("run Group By error: the input WindowTuplesSet with multiple tuples cannot be evaluated")
}
ms = make([]xsql.DataValuer, len(input[0].Tuples))
for i, m := range input[0].Tuples {
@@ -39,8 +40,7 @@ func (p *AggregatePlan) Apply(ctx api.StreamContext, data interface{}) interface
ms[i] = &t
}
default:
- log.Errorf("Expect xsql.Valuer or its array type.")
- return nil
+ return fmt.Errorf("run Group By error: invalid input %[1]T(%[1]v)", input)
}
result := make(map[string]xsql.GroupedTuples)
@@ -48,7 +48,12 @@ func (p *AggregatePlan) Apply(ctx api.StreamContext, data interface{}) interface
var name string
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(m, &xsql.FunctionValuer{})}
for _, d := range p.Dimensions {
- name += fmt.Sprintf("%v,", ve.Eval(d.Expr))
+ r := ve.Eval(d.Expr)
+ if _, ok := r.(error); ok {
+ return fmt.Errorf("run Group By error: %s", r)
+ } else {
+ name += fmt.Sprintf("%v,", r)
+ }
}
if ts, ok := result[name]; !ok {
result[name] = xsql.GroupedTuples{m}
diff --git a/xsql/plans/aggregate_test.go b/xsql/plans/aggregate_test.go
index b4386a17ac..ff1e58381d 100644
--- a/xsql/plans/aggregate_test.go
+++ b/xsql/plans/aggregate_test.go
@@ -1,6 +1,7 @@
package plans
import (
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -116,7 +117,50 @@ func TestAggregatePlan_Apply(t *testing.T) {
},
},
},
-
+ {
+ sql: "SELECT abc FROM src1 GROUP BY meta(topic), TUMBLINGWINDOW(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ Metadata: xsql.Metadata{"topic": "topic1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ Metadata: xsql.Metadata{"topic": "topic2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ Metadata: xsql.Metadata{"topic": "topic1"},
+ },
+ },
+ },
+ },
+ result: xsql.GroupedTuplesSet{
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ Metadata: xsql.Metadata{"topic": "topic1"},
+ },
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ Metadata: xsql.Metadata{"topic": "topic1"},
+ },
+ },
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ Metadata: xsql.Metadata{"topic": "topic2"},
+ },
+ },
+ },
+ },
{
sql: "SELECT id1 FROM src1 left join src2 on src1.id1 = src2.id2 GROUP BY src2.f2, TUMBLINGWINDOW(ss, 10)",
data: xsql.JoinTupleSets{
@@ -289,3 +333,56 @@ func TestAggregatePlan_Apply(t *testing.T) {
}
}
}
+
+func TestAggregatePlanError(t *testing.T) {
+ tests := []struct {
+ sql string
+ data interface{}
+ result error
+ }{
+ {
+ sql: "SELECT abc FROM tbl group by abc",
+ data: errors.New("an error from upstream"),
+ result: errors.New("an error from upstream"),
+ },
+
+ {
+ sql: "SELECT abc FROM src1 GROUP BY TUMBLINGWINDOW(ss, 10), f1 * 2",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: errors.New("run Group By error: invalid operation string(v1) * int64(2)"),
+ },
+ }
+
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ contextLogger := common.Log.WithField("rule", "TestFilterPlanError")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+ stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+ if err != nil {
+ t.Errorf("statement parse error %s", err)
+ break
+ }
+
+ pp := &AggregatePlan{Dimensions: stmt.Dimensions.GetGroups()}
+ result := pp.Apply(ctx, tt.data)
+ if !reflect.DeepEqual(tt.result, result) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, result)
+ }
+ }
+}
diff --git a/xsql/plans/filter_operator.go b/xsql/plans/filter_operator.go
index 1c7002898e..05f4b20bcc 100644
--- a/xsql/plans/filter_operator.go
+++ b/xsql/plans/filter_operator.go
@@ -1,6 +1,7 @@
package plans
import (
+ "fmt"
"github.com/emqx/kuiper/xsql"
"github.com/emqx/kuiper/xstream/api"
)
@@ -17,33 +18,39 @@ func (p *FilterPlan) Apply(ctx api.StreamContext, data interface{}) interface{}
log := ctx.GetLogger()
log.Debugf("filter plan receive %s", data)
switch input := data.(type) {
+ case error:
+ return input
case xsql.Valuer:
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(input, &xsql.FunctionValuer{})}
- result, ok := ve.Eval(p.Condition).(bool)
- if ok {
- if result {
+ result := ve.Eval(p.Condition)
+ switch r := result.(type) {
+ case error:
+ return fmt.Errorf("run Where error: %s", r)
+ case bool:
+ if r {
return input
}
- } else {
- log.Errorf("invalid condition that returns non-bool value")
+ default:
+ return fmt.Errorf("run Where error: invalid condition that returns non-bool value %[1]T(%[1]v)", r)
}
case xsql.WindowTuplesSet:
if len(input) != 1 {
- log.Infof("WindowTuplesSet with multiple tuples cannot be evaluated")
- return nil
+ return fmt.Errorf("run Where error: the input WindowTuplesSet with multiple tuples cannot be evaluated")
}
ms := input[0].Tuples
r := ms[:0]
for _, v := range ms {
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&v, &xsql.FunctionValuer{})}
- result, ok := ve.Eval(p.Condition).(bool)
- if ok {
- if result {
+ result := ve.Eval(p.Condition)
+ switch val := result.(type) {
+ case error:
+ return fmt.Errorf("run Where error: %s", val)
+ case bool:
+ if val {
r = append(r, v)
}
- } else {
- log.Errorf("invalid condition that returns non-bool value")
- return nil
+ default:
+ return fmt.Errorf("run Where error: invalid condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
if len(r) > 0 {
@@ -55,22 +62,23 @@ func (p *FilterPlan) Apply(ctx api.StreamContext, data interface{}) interface{}
r := ms[:0]
for _, v := range ms {
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&v, &xsql.FunctionValuer{})}
- result, ok := ve.Eval(p.Condition).(bool)
- if ok {
- if result {
+ result := ve.Eval(p.Condition)
+ switch val := result.(type) {
+ case error:
+ return fmt.Errorf("run Where error: %s", val)
+ case bool:
+ if val {
r = append(r, v)
}
- } else {
- log.Errorf("invalid condition that returns non-bool value")
- return nil
+ default:
+ return fmt.Errorf("run Where error: invalid condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
if len(r) > 0 {
return r
}
default:
- log.Errorf("Expect xsql.Valuer or its array type.")
- return nil
+ return fmt.Errorf("run Where error: invalid input %[1]T(%[1]v)", input)
}
return nil
}
diff --git a/xsql/plans/filter_test.go b/xsql/plans/filter_test.go
index ad858c02a3..67db0c86a3 100644
--- a/xsql/plans/filter_test.go
+++ b/xsql/plans/filter_test.go
@@ -1,6 +1,7 @@
package plans
import (
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -26,7 +27,22 @@ func TestFilterPlan_Apply(t *testing.T) {
},
result: nil,
},
-
+ // nil equals nil?
+ {
+ sql: "SELECT a FROM tbl WHERE def = ghi",
+ data: &xsql.Tuple{
+ Emitter: "tbl",
+ Message: xsql.Message{
+ "a": int64(6),
+ },
+ },
+ result: &xsql.Tuple{
+ Emitter: "tbl",
+ Message: xsql.Message{
+ "a": int64(6),
+ },
+ },
+ },
{
sql: "SELECT * FROM tbl WHERE abc > def and abc <= ghi",
data: &xsql.Tuple{
@@ -215,6 +231,150 @@ func TestFilterPlan_Apply(t *testing.T) {
},
result: nil,
},
+ {
+ sql: "SELECT abc FROM tbl WHERE meta(topic) = \"topic1\" ",
+ data: &xsql.Tuple{
+ Emitter: "tbl",
+ Message: xsql.Message{
+ "a": int64(6),
+ },
+ Metadata: xsql.Metadata{
+ "topic": "topic1",
+ },
+ },
+ result: &xsql.Tuple{
+ Emitter: "tbl",
+ Message: xsql.Message{
+ "a": int64(6),
+ },
+ Metadata: xsql.Metadata{
+ "topic": "topic1",
+ },
+ },
+ },
+ }
+
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ contextLogger := common.Log.WithField("rule", "TestAggregatePlan_Apply")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+ stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+ if err != nil {
+ t.Errorf("statement parse error %s", err)
+ break
+ }
+
+ pp := &FilterPlan{Condition: stmt.Condition}
+ result := pp.Apply(ctx, tt.data)
+ if !reflect.DeepEqual(tt.result, result) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, result)
+ }
+ }
+}
+
+func TestFilterPlanError(t *testing.T) {
+ tests := []struct {
+ sql string
+ data interface{}
+ result interface{}
+ }{
+ {
+ sql: "SELECT a FROM tbl WHERE a = b",
+ data: &xsql.Tuple{
+ Emitter: "tbl",
+ Message: xsql.Message{
+ "a": int64(6),
+ "b": "astring",
+ },
+ },
+ result: errors.New("run Where error: invalid operation int64(6) = string(astring)"),
+ },
+ {
+ sql: "SELECT a FROM tbl WHERE def = ghi",
+ data: errors.New("an error from upstream"),
+ result: errors.New("an error from upstream"),
+ },
+ {
+ sql: "SELECT abc FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ xsql.WindowTuples{
+ Emitter: "src2",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: errors.New("run Where error: the input WindowTuplesSet with multiple tuples cannot be evaluated"),
+ },
+
+ {
+ sql: "SELECT abc FROM src1 WHERE f1 = \"v8\" GROUP BY TUMBLINGWINDOW(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": 3},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: errors.New("run Where error: invalid operation int64(3) = string(v8)"),
+ },
+ {
+ sql: "SELECT id1 FROM src1 left join src2 on src1.id1 = src2.id2 WHERE src1.f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
+ data: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": 50}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 2, "f2": "w2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 2, "f1": "v2"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 4, "f2": "w3"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 3, "f1": "v1"}},
+ },
+ },
+ },
+ result: errors.New("run Where error: invalid operation int64(50) = string(v1)"),
+ },
}
fmt.Printf("The test bucket size is %d.\n\n", len(tests))
diff --git a/xsql/plans/having_operator.go b/xsql/plans/having_operator.go
index 2377ca4eda..dc67dfc2d1 100644
--- a/xsql/plans/having_operator.go
+++ b/xsql/plans/having_operator.go
@@ -1,6 +1,7 @@
package plans
import (
+ "fmt"
"github.com/emqx/kuiper/xsql"
"github.com/emqx/kuiper/xstream/api"
)
@@ -13,42 +14,45 @@ func (p *HavingPlan) Apply(ctx api.StreamContext, data interface{}) interface{}
log := ctx.GetLogger()
log.Debugf("having plan receive %s", data)
switch input := data.(type) {
+ case error:
+ return input
case xsql.GroupedTuplesSet:
r := xsql.GroupedTuplesSet{}
for _, v := range input {
- ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(v, &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: v})}
- result, ok := ve.Eval(p.Condition).(bool)
- if ok {
- if result {
+ ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(v, v[0], &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: v}, &xsql.WildcardValuer{Data: v[0]})}
+ result := ve.Eval(p.Condition)
+ switch val := result.(type) {
+ case error:
+ return fmt.Errorf("run Having error: %s", val)
+ case bool:
+ if val {
r = append(r, v)
}
- } else {
- log.Errorf("invalid condition that returns non-bool value")
- return nil
+ default:
+ return fmt.Errorf("run Having error: invalid condition that returns non-bool value %[1]T(%[1]v)", val)
}
-
}
if len(r) > 0 {
return r
}
case xsql.WindowTuplesSet:
if len(input) != 1 {
- log.Infof("WindowTuplesSet with multiple tuples cannot be evaluated")
- return nil
+ return fmt.Errorf("run Having error: input WindowTuplesSet with multiple tuples cannot be evaluated")
}
ms := input[0].Tuples
r := ms[:0]
for _, v := range ms {
- //ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&v, &xsql.FunctionValuer{})}
ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(input, &v, &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: input}, &xsql.WildcardValuer{Data: &v})}
- result, ok := ve.Eval(p.Condition).(bool)
- if ok {
- if result {
+ result := ve.Eval(p.Condition)
+ switch val := result.(type) {
+ case error:
+ return fmt.Errorf("run Having error: %s", val)
+ case bool:
+ if val {
r = append(r, v)
}
- } else {
- log.Errorf("invalid condition that returns non-bool value")
- return nil
+ default:
+ return fmt.Errorf("run Having error: invalid condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
if len(r) > 0 {
@@ -59,24 +63,24 @@ func (p *HavingPlan) Apply(ctx api.StreamContext, data interface{}) interface{}
ms := input
r := ms[:0]
for _, v := range ms {
- //ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&v, &xsql.FunctionValuer{})}
ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(input, &v, &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: input}, &xsql.WildcardValuer{Data: &v})}
- result, ok := ve.Eval(p.Condition).(bool)
- if ok {
- if result {
+ result := ve.Eval(p.Condition)
+ switch val := result.(type) {
+ case error:
+ return fmt.Errorf("run Having error: %s", val)
+ case bool:
+ if val {
r = append(r, v)
}
- } else {
- log.Errorf("invalid condition that returns non-bool value")
- return nil
+ default:
+ return fmt.Errorf("run Having error: invalid condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
if len(r) > 0 {
return r
}
default:
- log.Errorf("Expect xsql.Valuer or its array type.")
- return nil
+ return fmt.Errorf("run Having error: invalid input %[1]T(%[1]v)", input)
}
return nil
}
diff --git a/xsql/plans/having_test.go b/xsql/plans/having_test.go
index 40cec5bafe..cf3a9f7086 100644
--- a/xsql/plans/having_test.go
+++ b/xsql/plans/having_test.go
@@ -1,6 +1,7 @@
package plans
import (
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -111,7 +112,6 @@ func TestHavingPlan_Apply(t *testing.T) {
},
result: nil,
},
-
{
sql: `SELECT id1 FROM src1 HAVING max(id1) = 1`,
data: xsql.WindowTuplesSet{
@@ -136,6 +136,191 @@ func TestHavingPlan_Apply(t *testing.T) {
},
},
},
+ }, {
+ sql: "SELECT id1 FROM src1 GROUP BY TUMBLINGWINDOW(ss, 10), f1 having f1 = \"v2\"",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ },
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ },
+ },
+ },
+ result: xsql.GroupedTuplesSet{
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ },
+ },
+ },
+ }, {
+ sql: "SELECT count(*) as c, round(a) as r FROM test Inner Join test1 on test.id = test1.id GROUP BY TumblingWindow(ss, 10), test1.color having a > 100",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 122.33}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 5, "a": 177.51}},
+ {Emitter: "src2", Message: xsql.Message{"id": 5, "color": "w2"}},
+ },
+ },
+ },
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 2, "a": 89.03}},
+ {Emitter: "src2", Message: xsql.Message{"id": 2, "color": "w1"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 4, "a": 14.6}},
+ {Emitter: "src2", Message: xsql.Message{"id": 4, "color": "w1"}},
+ },
+ },
+ },
+ },
+ result: xsql.GroupedTuplesSet{
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 122.33}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 5, "a": 177.51}},
+ {Emitter: "src2", Message: xsql.Message{"id": 5, "color": "w2"}},
+ },
+ },
+ },
+ },
+ }, {
+ sql: "SELECT * FROM test Inner Join test1 on test.id = test1.id GROUP BY TumblingWindow(ss, 10) having a > 100",
+ data: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 122.33}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 68.55}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 5, "a": 177.51}},
+ {Emitter: "src2", Message: xsql.Message{"id": 5, "color": "w2"}},
+ },
+ },
+ },
+
+ result: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 122.33}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 5, "a": 177.51}},
+ {Emitter: "src2", Message: xsql.Message{"id": 5, "color": "w2"}},
+ },
+ },
+ },
+ },
+ }
+
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ contextLogger := common.Log.WithField("rule", "TestHavingPlan_Apply")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+ stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+ if err != nil {
+ t.Errorf("statement parse error %s", err)
+ break
+ }
+
+ pp := &HavingPlan{Condition: stmt.Having}
+ result := pp.Apply(ctx, tt.data)
+ if !reflect.DeepEqual(tt.result, result) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, result)
+ }
+ }
+}
+
+func TestHavingPlanError(t *testing.T) {
+ var tests = []struct {
+ sql string
+ data interface{}
+ result interface{}
+ }{
+ {
+ sql: `SELECT id1 FROM src1 HAVING avg(id1) > "str"`,
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 5, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: errors.New("run Having error: invalid operation int64(2) > string(str)"),
+ }, {
+ sql: `SELECT id1 FROM src1 HAVING avg(id1) > "str"`,
+ data: errors.New("an error from upstream"),
+ result: errors.New("an error from upstream"),
+ }, {
+ sql: "SELECT id1 FROM src1 GROUP BY TUMBLINGWINDOW(ss, 10), f1 having f1 = \"v2\"",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": 3},
+ },
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": 3},
+ },
+ },
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ },
+ },
+ },
+ result: errors.New("run Having error: invalid operation int64(3) = string(v2)"),
},
}
diff --git a/xsql/plans/join_operator.go b/xsql/plans/join_operator.go
index e59af1954d..fef8f6ce0e 100644
--- a/xsql/plans/join_operator.go
+++ b/xsql/plans/join_operator.go
@@ -2,7 +2,6 @@ package plans
import (
"fmt"
- "github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
"github.com/emqx/kuiper/xstream/api"
)
@@ -18,26 +17,28 @@ type JoinPlan struct {
func (jp *JoinPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
log := ctx.GetLogger()
var input xsql.WindowTuplesSet
- if d, ok := data.(xsql.WindowTuplesSet); !ok {
- log.Errorf("Expect WindowTuplesSet type.\n")
- return nil
- } else {
- log.Debugf("join plan receive %v", d)
- input = d
+ switch v := data.(type) {
+ case error:
+ return v
+ case xsql.WindowTuplesSet:
+ input = v
+ log.Debugf("join plan receive %v", data)
+ default:
+ return fmt.Errorf("run Join error: join is only supported in window")
}
-
result := xsql.JoinTupleSets{}
-
for i, join := range jp.Joins {
if i == 0 {
v, err := jp.evalSet(input, join)
if err != nil {
- fmt.Println(err)
- return nil
+ return fmt.Errorf("run Join error: %s", err)
}
result = v
} else {
- r1, _ := jp.evalJoinSets(&result, input, join)
+ r1, err := jp.evalJoinSets(&result, input, join)
+ if err != nil {
+ return fmt.Errorf("run Join error: %s", err)
+ }
if v1, ok := r1.(xsql.JoinTupleSets); ok {
result = v1
}
@@ -113,8 +114,12 @@ func (jp *JoinPlan) evalSet(input xsql.WindowTuplesSet, join xsql.Join) (xsql.Jo
temp.AddTuple(left)
temp.AddTuple(right)
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(temp, &xsql.FunctionValuer{})}
- if r, ok := ve.Eval(join.Expr).(bool); ok {
- if r {
+ result := ve.Eval(join.Expr)
+ switch val := result.(type) {
+ case error:
+ return nil, val
+ case bool:
+ if val {
if join.JoinType == xsql.INNER_JOIN {
merged.AddTuple(left)
merged.AddTuple(right)
@@ -124,8 +129,8 @@ func (jp *JoinPlan) evalSet(input xsql.WindowTuplesSet, join xsql.Join) (xsql.Jo
merged.AddTuple(right)
}
}
- } else {
- common.Log.Infoln("Evaluation error for set.")
+ default:
+ return nil, fmt.Errorf("invalid join condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
}
@@ -135,10 +140,14 @@ func (jp *JoinPlan) evalSet(input xsql.WindowTuplesSet, join xsql.Join) (xsql.Jo
}
if join.JoinType == xsql.FULL_JOIN {
- if rightJoinSet, err := jp.evalSetWithRightJoin(input, join, true); err == nil && len(rightJoinSet) > 0 {
- for _, jt := range rightJoinSet {
- sets = append(sets, jt)
+ if rightJoinSet, err := jp.evalSetWithRightJoin(input, join, true); err == nil {
+ if len(rightJoinSet) > 0 {
+ for _, jt := range rightJoinSet {
+ sets = append(sets, jt)
+ }
}
+ } else {
+ return nil, err
}
}
return sets, nil
@@ -168,13 +177,17 @@ func (jp *JoinPlan) evalSetWithRightJoin(input xsql.WindowTuplesSet, join xsql.J
temp.AddTuple(right)
temp.AddTuple(left)
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(temp, &xsql.FunctionValuer{})}
- if r, ok := ve.Eval(join.Expr).(bool); ok {
- if r {
+ result := ve.Eval(join.Expr)
+ switch val := result.(type) {
+ case error:
+ return nil, val
+ case bool:
+ if val {
merged.AddTuple(left)
isJoint = true
}
- } else {
- common.Log.Infoln("Evaluation error for set.")
+ default:
+ return nil, fmt.Errorf("invalid join condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
if excludeJoint {
@@ -215,14 +228,20 @@ func (jp *JoinPlan) evalJoinSets(set *xsql.JoinTupleSets, input xsql.WindowTuple
merged.AddTuple(right)
} else {
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&left, &right, &xsql.FunctionValuer{})}
- if r, ok := ve.Eval(join.Expr).(bool); ok {
- if r {
+ result := ve.Eval(join.Expr)
+ switch val := result.(type) {
+ case error:
+ return nil, val
+ case bool:
+ if val {
if join.JoinType == xsql.INNER_JOIN && !innerAppend {
merged.AddTuples(left.Tuples)
innerAppend = true
}
merged.AddTuple(right)
}
+ default:
+ return nil, fmt.Errorf("invalid join condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
}
@@ -260,11 +279,17 @@ func (jp *JoinPlan) evalRightJoinSets(set *xsql.JoinTupleSets, input xsql.Window
isJoint := false
for _, left := range *set {
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&right, &left, &xsql.FunctionValuer{})}
- if r, ok := ve.Eval(join.Expr).(bool); ok {
- if r {
+ result := ve.Eval(join.Expr)
+ switch val := result.(type) {
+ case error:
+ return nil, val
+ case bool:
+ if val {
isJoint = true
merged.AddTuples(left.Tuples)
}
+ default:
+ return nil, fmt.Errorf("invalid join condition that returns non-bool value %[1]T(%[1]v)", val)
}
}
diff --git a/xsql/plans/join_test.go b/xsql/plans/join_test.go
index c6cc8b37aa..63c1e5f1dc 100644
--- a/xsql/plans/join_test.go
+++ b/xsql/plans/join_test.go
@@ -2,6 +2,7 @@ package plans
import (
"encoding/json"
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -72,6 +73,60 @@ func TestLeftJoinPlan_Apply(t *testing.T) {
},
},
},
+ {
+ sql: "SELECT id1 FROM src1 left join src2 on src1.id1 = src2.id2",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v3"},
+ },
+ },
+ },
+
+ xsql.WindowTuples{
+ Emitter: "src2",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 1, "f2": "w1"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"f2": "w2"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 4, "f2": "w3"},
+ },
+ },
+ },
+ },
+ result: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 1, "f2": "w1"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 2, "f1": "v2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 3, "f1": "v3"}},
+ },
+ },
+ },
+ },
{
sql: "SELECT id1 FROM src1 left join src2 on src1.ts = src2.ts",
data: xsql.WindowTuplesSet{
@@ -565,8 +620,9 @@ func TestLeftJoinPlan_Apply(t *testing.T) {
Emitter: "src1",
Tuples: []xsql.Tuple{
{
- Emitter: "src1",
- Message: xsql.Message{"id1": 1, "f1": "v1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type1/device001"},
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ Metadata: xsql.Metadata{"topic": "devices/type1/device001"},
},
},
},
@@ -575,8 +631,9 @@ func TestLeftJoinPlan_Apply(t *testing.T) {
Emitter: "src2",
Tuples: []xsql.Tuple{
{
- Emitter: "src2",
- Message: xsql.Message{"id2": 1, "f2": "w1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type2/device001"},
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 1, "f2": "w1"},
+ Metadata: xsql.Metadata{"topic": "devices/type2/device001"},
},
},
},
@@ -584,8 +641,8 @@ func TestLeftJoinPlan_Apply(t *testing.T) {
result: xsql.JoinTupleSets{
xsql.JoinTuple{
Tuples: []xsql.Tuple{
- {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type1/device001"}},
- {Emitter: "src2", Message: xsql.Message{"id2": 1, "f2": "w1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type2/device001"}},
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}, Metadata: xsql.Metadata{"topic": "devices/type1/device001"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 1, "f2": "w1"}, Metadata: xsql.Metadata{"topic": "devices/type2/device001"}},
},
},
},
@@ -670,7 +727,50 @@ func TestInnerJoinPlan_Apply(t *testing.T) {
},
},
},
+ {
+ sql: "SELECT id1 FROM src1 inner join src2 on src1.id1 = src2.id2",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v3"},
+ },
+ },
+ },
+ xsql.WindowTuples{
+ Emitter: "src2",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 1, "f2": "w1"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"f2": "w2"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 4, "f2": "w3"},
+ },
+ },
+ },
+ },
+ result: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 1, "f2": "w1"}},
+ },
+ },
+ },
+ },
{
sql: "SELECT id1 FROM src1 As s1 inner join src2 as s2 on s1.id1 = s2.id2",
data: xsql.WindowTuplesSet{
@@ -1099,7 +1199,62 @@ func TestRightJoinPlan_Apply(t *testing.T) {
},
},
},
+ {
+ sql: "SELECT id1 FROM src1 right join src2 on src1.id1 = src2.id2",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v3"},
+ },
+ },
+ },
+
+ xsql.WindowTuples{
+ Emitter: "src2",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 1, "f2": "w1"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"f2": "w2"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 4, "f2": "w3"},
+ },
+ },
+ },
+ },
+ result: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src2", Message: xsql.Message{"id2": 1, "f2": "w1"}},
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}},
+ },
+ },
+
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src2", Message: xsql.Message{"f2": "w2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src2", Message: xsql.Message{"id2": 4, "f2": "w3"}},
+ },
+ },
+ },
+ },
{
sql: "SELECT id1 FROM src1 right join src2 on src1.id1 = src2.id2",
data: xsql.WindowTuplesSet{
@@ -1544,6 +1699,79 @@ func TestCrossJoinPlan_Apply(t *testing.T) {
}
}
+func TestCrossJoinPlanError(t *testing.T) {
+ var tests = []struct {
+ sql string
+ data interface{}
+ result interface{}
+ }{
+ {
+ sql: "SELECT id1 FROM src1 cross join src2",
+ data: errors.New("an error from upstream"),
+ result: errors.New("an error from upstream"),
+ }, {
+ sql: "SELECT id1 FROM src1 full join src2 on src1.id1 = src2.id2",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v3"},
+ },
+ },
+ },
+
+ xsql.WindowTuples{
+ Emitter: "src2",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 1, "f2": "w1"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": "3", "f2": "w2"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 4, "f2": "w3"},
+ }, {
+ Emitter: "src2",
+ Message: xsql.Message{"id2": 2, "f2": "w4"},
+ },
+ },
+ },
+ },
+ result: errors.New("run Join error: invalid operation int64(1) = string(3)"),
+ },
+ }
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ contextLogger := common.Log.WithField("rule", "TestCrossJoinPlanError")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+ stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+ if err != nil {
+ t.Errorf("statement parse error %s", err)
+ break
+ }
+
+ if table, ok := stmt.Sources[0].(*xsql.Table); !ok {
+ t.Errorf("statement source is not a table")
+ } else {
+ pp := &JoinPlan{Joins: stmt.Joins, From: table}
+ result := pp.Apply(ctx, tt.data)
+ if !reflect.DeepEqual(tt.result, result) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, result)
+ }
+ }
+ }
+}
+
func str2Map(s string) map[string]interface{} {
var input map[string]interface{}
if err := json.Unmarshal([]byte(s), &input); err != nil {
diff --git a/xsql/plans/misc_func_test.go b/xsql/plans/misc_func_test.go
index 72dc6ae6fb..3ed6a05b12 100644
--- a/xsql/plans/misc_func_test.go
+++ b/xsql/plans/misc_func_test.go
@@ -92,8 +92,9 @@ func TestHashFunc_Apply1(t *testing.T) {
sql: "SELECT mqtt(topic) AS a FROM test",
data: &xsql.Tuple{
Emitter: "test",
- Message: xsql.Message{
- xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/device_001/message",
+ Message: xsql.Message{},
+ Metadata: xsql.Metadata{
+ "topic": "devices/device_001/message",
},
},
result: []map[string]interface{}{{
@@ -105,8 +106,9 @@ func TestHashFunc_Apply1(t *testing.T) {
sql: "SELECT mqtt(topic) AS a FROM test",
data: &xsql.Tuple{
Emitter: "test",
- Message: xsql.Message{
- xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/device_001/message",
+ Message: xsql.Message{},
+ Metadata: xsql.Metadata{
+ "topic": "devices/device_001/message",
},
},
result: []map[string]interface{}{{
@@ -119,8 +121,10 @@ func TestHashFunc_Apply1(t *testing.T) {
data: &xsql.Tuple{
Emitter: "test",
Message: xsql.Message{
- "topic": "fff",
- xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/device_001/message",
+ "topic": "fff",
+ },
+ Metadata: xsql.Metadata{
+ "topic": "devices/device_001/message",
},
},
result: []map[string]interface{}{{
@@ -169,8 +173,8 @@ func TestMqttFunc_Apply2(t *testing.T) {
data: xsql.JoinTupleSets{
xsql.JoinTuple{
Tuples: []xsql.Tuple{
- {Emitter: "src1", Message: xsql.Message{"id1": "1", "f1": "v1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type1/device001"}},
- {Emitter: "src2", Message: xsql.Message{"id2": "1", "f2": "w1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type2/device001"}},
+ {Emitter: "src1", Message: xsql.Message{"id1": "1", "f1": "v1"}, Metadata: xsql.Metadata{"topic": "devices/type1/device001"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": "1", "f2": "w1"}, Metadata: xsql.Metadata{"topic": "devices/type2/device001"}},
},
},
},
@@ -210,3 +214,76 @@ func TestMqttFunc_Apply2(t *testing.T) {
}
}
}
+
+func TestMetaFunc_Apply1(t *testing.T) {
+ var tests = []struct {
+ sql string
+ data interface{}
+ result interface{}
+ }{
+ {
+ sql: "SELECT topic, meta(topic) AS a FROM test",
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "topic": "fff",
+ },
+ Metadata: xsql.Metadata{
+ "topic": "devices/device_001/message",
+ },
+ },
+ result: []map[string]interface{}{{
+ "topic": "fff",
+ "a": "devices/device_001/message",
+ }},
+ },
+ {
+ sql: "SELECT meta(device) as d, meta(temperature->device) as r FROM test",
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "temperature": 43.2,
+ },
+ Metadata: xsql.Metadata{
+ "temperature": map[string]interface{}{
+ "id": "dfadfasfas",
+ "device": "device2",
+ },
+ "device": "gateway",
+ },
+ },
+ result: []map[string]interface{}{{
+ "d": "gateway",
+ "r": "device2",
+ }},
+ },
+ }
+
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ contextLogger := common.Log.WithField("rule", "TestMetaFunc_Apply1")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+ stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+ if err != nil || stmt == nil {
+ t.Errorf("parse sql %s error %v", tt.sql, err)
+ }
+ pp := &ProjectPlan{Fields: stmt.Fields}
+ pp.isTest = true
+ result := pp.Apply(ctx, tt.data)
+ var mapRes []map[string]interface{}
+ if v, ok := result.([]byte); ok {
+ err := json.Unmarshal(v, &mapRes)
+ if err != nil {
+ t.Errorf("Failed to parse the input into map.\n")
+ continue
+ }
+ //fmt.Printf("%t\n", mapRes["rengine_field_0"])
+
+ if !reflect.DeepEqual(tt.result, mapRes) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, mapRes)
+ }
+ } else {
+ t.Errorf("The returned result is not type of []byte\n")
+ }
+ }
+}
diff --git a/xsql/plans/order_operator.go b/xsql/plans/order_operator.go
index 846a754bc2..a41f3db66d 100644
--- a/xsql/plans/order_operator.go
+++ b/xsql/plans/order_operator.go
@@ -1,6 +1,7 @@
package plans
import (
+ "fmt"
"github.com/emqx/kuiper/xsql"
"github.com/emqx/kuiper/xstream/api"
)
@@ -18,13 +19,16 @@ func (p *OrderPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
log.Debugf("order plan receive %s", data)
sorter := xsql.OrderedBy(p.SortFields)
switch input := data.(type) {
+ case error:
+ return input
case xsql.Valuer:
return input
case xsql.SortingData:
- sorter.Sort(input)
+ if err := sorter.Sort(input); err != nil {
+ return fmt.Errorf("run Order By error: %s", err)
+ }
return input
default:
- log.Errorf("Expect xsql.Valuer or its array type.")
- return nil
+ return fmt.Errorf("run Order By error: expect xsql.Valuer or its array type")
}
}
diff --git a/xsql/plans/order_test.go b/xsql/plans/order_test.go
index e973c3c73b..0e6b9fd1f7 100644
--- a/xsql/plans/order_test.go
+++ b/xsql/plans/order_test.go
@@ -1,6 +1,7 @@
package plans
import (
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -87,7 +88,64 @@ func TestOrderPlan_Apply(t *testing.T) {
},
},
},
-
+ {
+ sql: "SELECT id1 FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10) ORDER BY id1 DESC",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"f1": "v2"},
+ },
+ },
+ },
+ },
+ },
+ {
+ sql: "SELECT id1 FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10) ORDER BY id1 DESC",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": "2string", "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: errors.New("run Order By error: incompatible types for comparison: int and string"),
+ },
{
sql: "SELECT * FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10) ORDER BY f1, id1 DESC",
data: xsql.WindowTuplesSet{
@@ -341,23 +399,23 @@ func TestOrderPlan_Apply(t *testing.T) {
{
&xsql.JoinTuple{
Tuples: []xsql.Tuple{
- {Emitter: "src1", Message: xsql.Message{"id1": 3, "f1": "v1"}},
+ {Emitter: "src1", Message: xsql.Message{"id1": 2, "f1": "v2"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 4, "f2": "w3"}},
},
},
},
{
&xsql.JoinTuple{
Tuples: []xsql.Tuple{
- {Emitter: "src1", Message: xsql.Message{"id1": 2, "f1": "v2"}},
- {Emitter: "src2", Message: xsql.Message{"id2": 4, "f2": "w3"}},
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 2, "f2": "w2"}},
},
},
},
{
&xsql.JoinTuple{
Tuples: []xsql.Tuple{
- {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}},
- {Emitter: "src2", Message: xsql.Message{"id2": 2, "f2": "w2"}},
+ {Emitter: "src1", Message: xsql.Message{"id1": 3, "f1": "v1"}},
},
},
},
diff --git a/xsql/plans/preprocessor.go b/xsql/plans/preprocessor.go
index 4a08a6c99c..4d5df09fd9 100644
--- a/xsql/plans/preprocessor.go
+++ b/xsql/plans/preprocessor.go
@@ -44,19 +44,21 @@ func (p *Preprocessor) Apply(ctx api.StreamContext, data interface{}) interface{
log := ctx.GetLogger()
tuple, ok := data.(*xsql.Tuple)
if !ok {
- log.Errorf("Expect tuple data type")
- return nil
+ return fmt.Errorf("expect tuple data type")
}
log.Debugf("preprocessor receive %s", tuple.Message)
result := make(map[string]interface{})
- for _, f := range p.streamStmt.StreamFields {
- fname := strings.ToLower(f.Name)
- if e := p.addRecField(f.FieldType, result, tuple.Message, fname); e != nil {
- log.Errorf("error in preprocessor: %s", e)
- return nil
+ if p.streamStmt.StreamFields != nil {
+ for _, f := range p.streamStmt.StreamFields {
+ fname := strings.ToLower(f.Name)
+ if e := p.addRecField(f.FieldType, result, tuple.Message, fname); e != nil {
+ return fmt.Errorf("error in preprocessor: %s", e)
+ }
}
+ } else {
+ result = tuple.Message
}
//If the field has alias name, then evaluate the alias field before transfer it to proceeding operators, and put it into result.
@@ -64,7 +66,10 @@ func (p *Preprocessor) Apply(ctx api.StreamContext, data interface{}) interface{
for _, f := range p.fields {
if f.AName != "" && (!xsql.HasAggFuncs(f.Expr)) {
ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(tuple, &xsql.FunctionValuer{})}
- if v := ve.Eval(f.Expr); v != nil {
+ v := ve.Eval(f.Expr)
+ if _, ok := v.(error); ok {
+ return v
+ } else {
result[strings.ToLower(f.AName)] = v
}
}
@@ -74,15 +79,13 @@ func (p *Preprocessor) Apply(ctx api.StreamContext, data interface{}) interface{
if p.isEventTime {
if t, ok := result[p.timestampField]; ok {
if ts, err := common.InterfaceToUnixMilli(t, p.timestampFormat); err != nil {
- log.Errorf("cannot convert timestamp field %s to timestamp with error %v", p.timestampField, err)
- return nil
+ return fmt.Errorf("cannot convert timestamp field %s to timestamp with error %v", p.timestampField, err)
} else {
tuple.Timestamp = ts
log.Debugf("preprocessor calculate timstamp %d", tuple.Timestamp)
}
} else {
- log.Errorf("cannot find timestamp field %s in tuple %v", p.timestampField, result)
- return nil
+ return fmt.Errorf("cannot find timestamp field %s in tuple %v", p.timestampField, result)
}
}
return tuple
@@ -112,30 +115,30 @@ func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{},
r[n] = int(t.(float64))
} else if jtype == reflect.String {
if i, err := strconv.Atoi(t.(string)); err != nil {
- return fmt.Errorf("invalid data type for %s, expect bigint but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect bigint but found %[2]T(%[2]v)", n, t)
} else {
r[n] = i
}
} else {
- return fmt.Errorf("invalid data type for %s, expect bigint but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect bigint but found %[2]T(%[2]v)", n, t)
}
case xsql.FLOAT:
if jtype == reflect.Float64 {
r[n] = t.(float64)
} else if jtype == reflect.String {
if f, err := strconv.ParseFloat(t.(string), 64); err != nil {
- return fmt.Errorf("invalid data type for %s, expect float but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect float but found %[2]T(%[2]v)", n, t)
} else {
r[n] = f
}
} else {
- return fmt.Errorf("invalid data type for %s, expect float but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect float but found %[2]T(%[2]v)", n, t)
}
case xsql.STRINGS:
if jtype == reflect.String {
r[n] = t.(string)
} else {
- return fmt.Errorf("invalid data type for %s, expect string but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect string but found %[2]T(%[2]v)", n, t)
}
case xsql.DATETIME:
switch jtype {
@@ -152,19 +155,19 @@ func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{},
r[n] = t
}
default:
- return fmt.Errorf("invalid data type for %s, expect datatime but find %v", n, t)
+ return fmt.Errorf("invalid data type for %s, expect datatime but find %[2]T(%[2]v)", n, t)
}
case xsql.BOOLEAN:
if jtype == reflect.Bool {
r[n] = t.(bool)
} else if jtype == reflect.String {
if i, err := strconv.ParseBool(t.(string)); err != nil {
- return fmt.Errorf("invalid data type for %s, expect boolean but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect boolean but found %[2]T(%[2]v)", n, t)
} else {
r[n] = i
}
} else {
- return fmt.Errorf("invalid data type for %s, expect boolean but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect boolean but found %[2]T(%[2]v)", n, t)
}
default:
return fmt.Errorf("invalid data type for %s, it is not supported yet", st)
@@ -176,10 +179,10 @@ func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{},
} else if jtype == reflect.String {
err := json.Unmarshal([]byte(t.(string)), &s)
if err != nil {
- return fmt.Errorf("invalid data type for %s, expect array but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect array but found %[2]T(%[2]v)", n, t)
}
} else {
- return fmt.Errorf("invalid data type for %s, expect array but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect array but found %[2]T(%[2]v)", n, t)
}
if tempArr, err := p.addArrayField(st, s); err != nil {
@@ -192,15 +195,15 @@ func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{},
if jtype == reflect.Map {
nextJ, ok = t.(map[string]interface{})
if !ok {
- return fmt.Errorf("invalid data type for %s, expect map but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
}
} else if jtype == reflect.String {
err := json.Unmarshal([]byte(t.(string)), &nextJ)
if err != nil {
- return fmt.Errorf("invalid data type for %s, expect map but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
}
} else {
- return fmt.Errorf("invalid data type for %s, expect struct but found %s", n, t)
+ return fmt.Errorf("invalid data type for %s, expect struct but found %[2]T(%[2]v)", n, t)
}
nextR := make(map[string]interface{})
for _, nextF := range st.StreamFields {
@@ -234,10 +237,10 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
} else if jtype == reflect.String {
err := json.Unmarshal([]byte(t.(string)), &s)
if err != nil {
- return nil, fmt.Errorf("invalid data type for [%d], expect array but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect array but found %[2]T(%[2]v)", i, t)
}
} else {
- return nil, fmt.Errorf("invalid data type for [%d], expect array but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect array but found %[2]T(%[2]v)", i, t)
}
if tempArr, err := p.addArrayField(st, s); err != nil {
return nil, err
@@ -255,16 +258,16 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
if jtype == reflect.Map {
j, ok = t.(map[string]interface{})
if !ok {
- return nil, fmt.Errorf("invalid data type for [%d], expect map but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
}
} else if jtype == reflect.String {
err := json.Unmarshal([]byte(t.(string)), &j)
if err != nil {
- return nil, fmt.Errorf("invalid data type for [%d], expect map but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
}
} else {
- return nil, fmt.Errorf("invalid data type for [%d], expect map but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
}
r := make(map[string]interface{})
for _, f := range st.StreamFields {
@@ -291,12 +294,12 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
tempSlice = append(tempSlice, int(t.(float64)))
} else if jtype == reflect.String {
if v, err := strconv.Atoi(t.(string)); err != nil {
- return nil, fmt.Errorf("invalid data type for [%d], expect float but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
} else {
tempSlice = append(tempSlice, v)
}
} else {
- return nil, fmt.Errorf("invalid data type for [%d], expect float but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
}
}
return tempSlice, nil
@@ -308,12 +311,12 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
tempSlice = append(tempSlice, t.(float64))
} else if jtype == reflect.String {
if f, err := strconv.ParseFloat(t.(string), 64); err != nil {
- return nil, fmt.Errorf("invalid data type for [%d], expect float but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
} else {
tempSlice = append(tempSlice, f)
}
} else {
- return nil, fmt.Errorf("invalid data type for [%d], expect float but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
}
}
return tempSlice, nil
@@ -323,7 +326,7 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
if reflect.ValueOf(t).Kind() == reflect.String {
tempSlice = append(tempSlice, t.(string))
} else {
- return nil, fmt.Errorf("invalid data type for [%d], expect string but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect string but found %[2]T(%[2]v)", i, t)
}
}
return tempSlice, nil
@@ -340,12 +343,12 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
tempSlice = append(tempSlice, common.TimeFromUnixMilli(ai))
case reflect.String:
if ai, err := p.parseTime(t.(string)); err != nil {
- return nil, fmt.Errorf("invalid data type for %s, cannot convert to datetime: %s", t, err)
+ return nil, fmt.Errorf("invalid data type for %s, cannot convert to datetime: %[2]T(%[2]v)", t, err)
} else {
tempSlice = append(tempSlice, ai)
}
default:
- return nil, fmt.Errorf("invalid data type for [%d], expect datetime but found %v", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect datetime but found %[2]T(%[2]v)", i, t)
}
}
return tempSlice, nil
@@ -357,17 +360,17 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
tempSlice = append(tempSlice, t.(bool))
} else if jtype == reflect.String {
if v, err := strconv.ParseBool(t.(string)); err != nil {
- return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %[2]T(%[2]v)", i, t)
} else {
tempSlice = append(tempSlice, v)
}
} else {
- return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %s", i, t)
+ return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %[2]T(%[2]v)", i, t)
}
}
return tempSlice, nil
default:
- return nil, fmt.Errorf("invalid data type for %T, datetime type is not supported yet", ft.Type)
+ return nil, fmt.Errorf("invalid data type for %T", ft.Type)
}
}
}
diff --git a/xsql/plans/preprocessor_test.go b/xsql/plans/preprocessor_test.go
index 131725cebd..7bd72575c0 100644
--- a/xsql/plans/preprocessor_test.go
+++ b/xsql/plans/preprocessor_test.go
@@ -2,6 +2,7 @@ package plans
import (
"encoding/json"
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -28,9 +29,19 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
data: []byte(`{"a": 6}`),
- result: nil,
+ result: errors.New("error in preprocessor: invalid data map[a:%!s(float64=6)], field abc not found"),
+ },
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"a": 6}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "a": float64(6),
+ },
+ },
},
-
{
stmt: &xsql.StreamStmt{
Name: xsql.StreamName("demo"),
@@ -40,7 +51,18 @@ func TestPreprocessor_Apply(t *testing.T) {
},
data: []byte(`{"abc": 6}`),
result: &xsql.Tuple{Message: xsql.Message{
- "abc": int(6),
+ "abc": 6,
+ },
+ },
+ },
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"abc": 6}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "abc": float64(6),
},
},
},
@@ -59,6 +81,19 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"abc": 34, "def" : "hello", "ghi": 50}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "abc": float64(34),
+ "def": "hello",
+ "ghi": float64(50),
+ },
+ },
+ },
{
stmt: &xsql.StreamStmt{
Name: xsql.StreamName("demo"),
@@ -83,7 +118,7 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
data: []byte(`{"abc": 77, "def" : "hello"}`),
- result: nil,
+ result: errors.New("error in preprocessor: invalid data type for def, expect boolean but found string(hello)"),
},
{
stmt: &xsql.StreamStmt{
@@ -94,7 +129,20 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
data: []byte(`{"a": {"b" : "hello"}}`),
- result: nil,
+ result: errors.New("error in preprocessor: invalid data map[a:map[b:hello]], field abc not found"),
+ },
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"a": {"b" : "hello"}}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "a": map[string]interface{}{
+ "b": "hello",
+ },
+ },
+ },
},
//Rec type
{
@@ -136,6 +184,19 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"a": {"b" : "32"}}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "a": map[string]interface{}{
+ "b": "32",
+ },
+ },
+ },
+ },
//Array of complex type
{
stmt: &xsql.StreamStmt{
@@ -160,6 +221,20 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"a": [{"b" : "hello1"}, {"b" : "hello2"}]}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "a": []interface{}{
+ map[string]interface{}{"b": "hello1"},
+ map[string]interface{}{"b": "hello2"},
+ },
+ },
+ },
+ },
{
stmt: &xsql.StreamStmt{
Name: xsql.StreamName("demo"),
@@ -178,6 +253,20 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"a": [55, 77]}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "a": []interface{}{
+ float64(55),
+ float64(77),
+ },
+ },
+ },
+ },
//Rec of complex type
{
stmt: &xsql.StreamStmt{
@@ -206,6 +295,22 @@ func TestPreprocessor_Apply(t *testing.T) {
},
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"a": {"b" : "hello", "c": {"d": 35.2}}}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "a": map[string]interface{}{
+ "b": "hello",
+ "c": map[string]interface{}{
+ "d": 35.2,
+ },
+ },
+ },
+ },
+ },
}
fmt.Printf("The test bucket size is %d.\n\n", len(tests))
@@ -253,6 +358,18 @@ func TestPreprocessorTime_Apply(t *testing.T) {
},
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ },
+ data: []byte(`{"abc": "2019-09-19T00:55:15.000Z", "def" : 1568854573431}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "abc": "2019-09-19T00:55:15.000Z",
+ "def": float64(1568854573431),
+ },
+ },
+ },
{
stmt: &xsql.StreamStmt{
Name: xsql.StreamName("demo"),
@@ -262,7 +379,7 @@ func TestPreprocessorTime_Apply(t *testing.T) {
},
},
data: []byte(`{"abc": "2019-09-19T00:55:1dd5Z", "def" : 111568854573431}`),
- result: nil,
+ result: errors.New("error in preprocessor: invalid data type for abc, cannot convert to datetime: parsing time \"2019-09-19T00:55:1dd5Z\" as \"2006-01-02T15:04:05.000Z07:00\": cannot parse \"1dd5Z\" as \"05\""),
},
{
stmt: &xsql.StreamStmt{
@@ -389,6 +506,26 @@ func TestPreprocessorEventtime_Apply(t *testing.T) {
}, Timestamp: 1568854515000,
},
},
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: nil,
+ Options: map[string]string{
+ "DATASOURCE": "users",
+ "FORMAT": "AVRO",
+ "KEY": "USERID",
+ "CONF_KEY": "srv1",
+ "TYPE": "MQTT",
+ "TIMESTAMP": "abc",
+ "TIMESTAMP_FORMAT": "yyyy-MM-dd''T''HH:mm:ssX'",
+ },
+ },
+ data: []byte(`{"abc": 1568854515000}`),
+ result: &xsql.Tuple{Message: xsql.Message{
+ "abc": float64(1568854515000),
+ }, Timestamp: 1568854515000,
+ },
+ },
{
stmt: &xsql.StreamStmt{
Name: xsql.StreamName("demo"),
@@ -401,7 +538,7 @@ func TestPreprocessorEventtime_Apply(t *testing.T) {
},
},
data: []byte(`{"abc": true}`),
- result: nil,
+ result: errors.New("cannot convert timestamp field abc to timestamp with error unsupported type to convert to timestamp true"),
},
{
stmt: &xsql.StreamStmt{
@@ -475,7 +612,7 @@ func TestPreprocessorEventtime_Apply(t *testing.T) {
},
},
data: []byte(`{"abc": 34, "def" : "2019-09-23AT02:47:29", "ghi": 50}`),
- result: nil,
+ result: errors.New("cannot convert timestamp field def to timestamp with error parsing time \"2019-09-23AT02:47:29\" as \"2006-01-02PM15:04:05\": cannot parse \"02:47:29\" as \"PM\""),
},
}
@@ -511,3 +648,75 @@ func TestPreprocessorEventtime_Apply(t *testing.T) {
}
}
+
+func TestPreprocessorError(t *testing.T) {
+ tests := []struct {
+ stmt *xsql.StreamStmt
+ data []byte
+ result interface{}
+ }{
+ {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: []xsql.StreamField{
+ {Name: "abc", FieldType: &xsql.BasicType{Type: xsql.BIGINT}},
+ },
+ },
+ data: []byte(`{"abc": "dafsad"}`),
+ result: errors.New("error in preprocessor: invalid data type for abc, expect bigint but found string(dafsad)"),
+ }, {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: []xsql.StreamField{
+ {Name: "a", FieldType: &xsql.RecType{
+ StreamFields: []xsql.StreamField{
+ {Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+ },
+ }},
+ },
+ },
+ data: []byte(`{"a": {"d" : "hello"}}`),
+ result: errors.New("error in preprocessor: invalid data map[d:hello], field b not found"),
+ }, {
+ stmt: &xsql.StreamStmt{
+ Name: xsql.StreamName("demo"),
+ StreamFields: []xsql.StreamField{
+ {Name: "abc", FieldType: &xsql.BasicType{Type: xsql.BIGINT}},
+ },
+ Options: map[string]string{
+ "DATASOURCE": "users",
+ "FORMAT": "AVRO",
+ "KEY": "USERID",
+ "CONF_KEY": "srv1",
+ "TYPE": "MQTT",
+ "TIMESTAMP": "abc",
+ "TIMESTAMP_FORMAT": "yyyy-MM-dd''T''HH:mm:ssX'",
+ },
+ },
+ data: []byte(`{"abc": "not a time"}`),
+ result: errors.New("error in preprocessor: invalid data type for abc, expect bigint but found string(not a time)"),
+ },
+ }
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+
+ defer common.CloseLogger()
+ contextLogger := common.Log.WithField("rule", "TestPreprocessorError")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+
+ pp := &Preprocessor{streamStmt: tt.stmt}
+
+ dm := make(map[string]interface{})
+ if e := json.Unmarshal(tt.data, &dm); e != nil {
+ log.Fatal(e)
+ return
+ } else {
+ tuple := &xsql.Tuple{Message: dm}
+ result := pp.Apply(ctx, tuple)
+ if !reflect.DeepEqual(tt.result, result) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tuple, tt.result, result)
+ }
+ }
+
+ }
+}
diff --git a/xsql/plans/project_operator.go b/xsql/plans/project_operator.go
index cf182dfcad..bf9c586e61 100644
--- a/xsql/plans/project_operator.go
+++ b/xsql/plans/project_operator.go
@@ -25,18 +25,27 @@ func (pp *ProjectPlan) Apply(ctx api.StreamContext, data interface{}) interface{
log.Debugf("project plan receive %s", data)
var results []map[string]interface{}
switch input := data.(type) {
+ case error:
+ return input
case *xsql.Tuple:
ve := pp.getVE(input, input)
- results = append(results, project(pp.Fields, ve, pp.isTest))
+ if r, err := project(pp.Fields, ve, pp.isTest); err != nil {
+ return fmt.Errorf("run Select error: %s", err)
+ } else {
+ results = append(results, r)
+ }
case xsql.WindowTuplesSet:
if len(input) != 1 {
- log.Infof("WindowTuplesSet with multiple tuples cannot be evaluated")
- return nil
+ return fmt.Errorf("run Select error: the input WindowTuplesSet with multiple tuples cannot be evaluated)")
}
ms := input[0].Tuples
for _, v := range ms {
ve := pp.getVE(&v, input)
- results = append(results, project(pp.Fields, ve, pp.isTest))
+ if r, err := project(pp.Fields, ve, pp.isTest); err != nil {
+ return fmt.Errorf("run Select error: %s", err)
+ } else {
+ results = append(results, r)
+ }
if pp.IsAggregate {
break
}
@@ -45,7 +54,11 @@ func (pp *ProjectPlan) Apply(ctx api.StreamContext, data interface{}) interface{
ms := input
for _, v := range ms {
ve := pp.getVE(&v, input)
- results = append(results, project(pp.Fields, ve, pp.isTest))
+ if r, err := project(pp.Fields, ve, pp.isTest); err != nil {
+ return err
+ } else {
+ results = append(results, r)
+ }
if pp.IsAggregate {
break
}
@@ -53,18 +66,20 @@ func (pp *ProjectPlan) Apply(ctx api.StreamContext, data interface{}) interface{
case xsql.GroupedTuplesSet:
for _, v := range input {
ve := pp.getVE(v[0], v)
- results = append(results, project(pp.Fields, ve, pp.isTest))
+ if r, err := project(pp.Fields, ve, pp.isTest); err != nil {
+ return fmt.Errorf("run Select error: %s", err)
+ } else {
+ results = append(results, r)
+ }
}
default:
- log.Errorf("Expect xsql.Valuer or its array type")
- return nil
+ return fmt.Errorf("run Select error: invalid input %[1]T(%[1]v)", input)
}
if ret, err := json.Marshal(results); err == nil {
return ret
} else {
- fmt.Printf("Found error: %v", err)
- return nil
+ return fmt.Errorf("run Select error: %v", err)
}
}
@@ -76,16 +91,22 @@ func (pp *ProjectPlan) getVE(tuple xsql.DataValuer, agg xsql.AggregateData) *xsq
}
}
-func project(fs xsql.Fields, ve *xsql.ValuerEval, isTest bool) map[string]interface{} {
+func project(fs xsql.Fields, ve *xsql.ValuerEval, isTest bool) (map[string]interface{}, error) {
result := make(map[string]interface{})
for _, f := range fs {
//Avoid to re-evaluate for non-agg field has alias name, which was already evaluated in pre-processor operator.
if f.AName != "" && (!xsql.HasAggFuncs(f.Expr)) && !isTest {
fr := &xsql.FieldRef{StreamName: "", Name: f.AName}
v := ve.Eval(fr)
+ if e, ok := v.(error); ok {
+ return nil, e
+ }
result[f.AName] = v
} else {
v := ve.Eval(f.Expr)
+ if e, ok := v.(error); ok {
+ return nil, e
+ }
if _, ok := f.Expr.(*xsql.Wildcard); ok || f.Name == "*" {
switch val := v.(type) {
case map[string]interface{}:
@@ -101,7 +122,7 @@ func project(fs xsql.Fields, ve *xsql.ValuerEval, isTest bool) map[string]interf
}
}
default:
- fmt.Printf("Wildcarder does not return map")
+ return nil, fmt.Errorf("wildcarder does not return map")
}
} else {
if v != nil {
@@ -113,7 +134,7 @@ func project(fs xsql.Fields, ve *xsql.ValuerEval, isTest bool) map[string]interf
}
}
}
- return result
+ return result, nil
}
const DEFAULT_FIELD_NAME_PREFIX string = "rengine_field_"
diff --git a/xsql/plans/project_test.go b/xsql/plans/project_test.go
index 92cfb72db5..19078f9df3 100644
--- a/xsql/plans/project_test.go
+++ b/xsql/plans/project_test.go
@@ -2,6 +2,7 @@ package plans
import (
"encoding/json"
+ "errors"
"fmt"
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
@@ -29,6 +30,16 @@ func TestProjectPlan_Apply1(t *testing.T) {
"a": "val_a",
}},
},
+ {
+ sql: "SELECT b FROM test",
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": "val_a",
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
{
sql: "SELECT ts FROM test",
data: &xsql.Tuple{
@@ -42,6 +53,18 @@ func TestProjectPlan_Apply1(t *testing.T) {
"ts": "2019-09-19T00:56:13.431Z",
}},
},
+ //Schemaless may return a message without selecting column
+ {
+ sql: "SELECT ts FROM test",
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": "val_a",
+ "ts2": common.TimeFromUnixMilli(1568854573431),
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
{
sql: "SELECT A FROM test",
data: &xsql.Tuple{
@@ -130,7 +153,26 @@ func TestProjectPlan_Apply1(t *testing.T) {
"ab": "hello",
}},
},
-
+ {
+ sql: `SELECT a->b AS ab FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "name": "name",
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
+ {
+ sql: `SELECT a->b AS ab FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": "commonstring",
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
{
sql: `SELECT a[0]->b AS ab FROM test`,
data: &xsql.Tuple{
@@ -146,7 +188,42 @@ func TestProjectPlan_Apply1(t *testing.T) {
"ab": "hello1",
}},
},
-
+ {
+ sql: `SELECT a[0]->b AS ab FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": []map[string]interface{}{
+ {"b": "hello1"},
+ {"b": "hello2"},
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "ab": "hello1",
+ }},
+ },
+ {
+ sql: `SELECT a[2:4] AS ab FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": []map[string]interface{}{
+ {"b": "hello1"},
+ {"b": "hello2"},
+ {"b": "hello3"},
+ {"b": "hello4"},
+ {"b": "hello5"},
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "ab": []interface{}{
+ map[string]interface{}{"b": "hello3"},
+ map[string]interface{}{"b": "hello4"},
+ },
+ }},
+ },
{
sql: `SELECT a->c->d AS f1 FROM test`,
data: &xsql.Tuple{
@@ -164,6 +241,33 @@ func TestProjectPlan_Apply1(t *testing.T) {
"f1": 35.2,
}},
},
+ {
+ sql: `SELECT a->c->d AS f1 FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": map[string]interface{}{
+ "b": "hello",
+ "c": map[string]interface{}{
+ "e": 35.2,
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
+ {
+ sql: `SELECT a->c->d AS f1 FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": map[string]interface{}{
+ "b": "hello",
+ },
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
//The int type is not supported yet, the json parser returns float64 for int values
{
@@ -289,7 +393,7 @@ func TestProjectPlan_Apply1(t *testing.T) {
t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, mapRes)
}
} else {
- t.Errorf("The returned result is not type of []byte\n")
+ t.Errorf("%d. The returned result is not type of []byte\n", i)
}
}
}
@@ -354,6 +458,31 @@ func TestProjectPlan_MultiInput(t *testing.T) {
"id1": float64(3),
}},
},
+ {
+ sql: "SELECT id1 FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id2": 2, "f1": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "id1": float64(1),
+ }, {}, {
+ "id1": float64(3),
+ }},
+ },
{
sql: "SELECT * FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
data: xsql.WindowTuplesSet{
@@ -384,6 +513,36 @@ func TestProjectPlan_MultiInput(t *testing.T) {
"f1": "v1",
}},
},
+ {
+ sql: "SELECT * FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "src1",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id2": 2, "f2": "v2"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "id1": float64(1),
+ "f1": "v1",
+ }, {
+ "id2": float64(2),
+ "f2": "v2",
+ }, {
+ "id1": float64(3),
+ "f1": "v1",
+ }},
+ },
{
sql: "SELECT src1.* FROM src1 WHERE f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
data: xsql.WindowTuplesSet{
@@ -443,7 +602,33 @@ func TestProjectPlan_MultiInput(t *testing.T) {
"id1": float64(3),
}},
},
-
+ {
+ sql: "SELECT id1 FROM src1 left join src2 on src1.id1 = src2.id2 WHERE src1.f1 = \"v1\" GROUP BY TUMBLINGWINDOW(ss, 10)",
+ data: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 1, "f1": "v1"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 2, "f2": "w2"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id1": 2, "f1": "v2"}},
+ {Emitter: "src2", Message: xsql.Message{"id2": 4, "f2": "w3"}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "src1", Message: xsql.Message{"id2": 3, "f1": "v1"}},
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "id1": float64(1),
+ }, {
+ "id1": float64(2),
+ }, {}},
+ },
{
sql: "SELECT abc FROM tbl group by abc",
data: xsql.GroupedTuplesSet{
@@ -461,6 +646,20 @@ func TestProjectPlan_MultiInput(t *testing.T) {
"abc": float64(6),
}},
},
+ {
+ sql: "SELECT abc FROM tbl group by abc",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.Tuple{
+ Emitter: "tbl",
+ Message: xsql.Message{
+ "def": "hello",
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{}},
+ },
{
sql: "SELECT id1 FROM src1 GROUP BY TUMBLINGWINDOW(ss, 10), f1",
data: xsql.GroupedTuplesSet{
@@ -487,6 +686,30 @@ func TestProjectPlan_MultiInput(t *testing.T) {
"id1": float64(2),
}},
},
+ {
+ sql: "SELECT id1 FROM src1 GROUP BY TUMBLINGWINDOW(ss, 10), f1",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 1, "f1": "v1"},
+ },
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id1": 3, "f1": "v1"},
+ },
+ },
+ {
+ &xsql.Tuple{
+ Emitter: "src1",
+ Message: xsql.Message{"id2": 2, "f1": "v2"},
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "id1": float64(1),
+ }, {}},
+ },
{
sql: "SELECT src2.id2 FROM src1 left join src2 on src1.id1 = src2.id2 GROUP BY src2.f2, TUMBLINGWINDOW(ss, 10)",
data: xsql.GroupedTuplesSet{
@@ -769,6 +992,32 @@ func TestProjectPlan_Funcs(t *testing.T) {
}, {
"r": float64(123124),
}},
+ }, {
+ sql: "SELECT round(a) as r FROM test GROUP BY TumblingWindow(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "test",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 53.1},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 27.4},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 123123.7},
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "r": float64(53),
+ }, {
+ "r": float64(27),
+ }, {
+ "r": float64(123124),
+ }},
}, {
sql: "SELECT round(a) as r FROM test Inner Join test1 on test.id = test1.id GROUP BY TumblingWindow(ss, 10)",
data: xsql.JoinTupleSets{
@@ -827,6 +1076,46 @@ func TestProjectPlan_Funcs(t *testing.T) {
}, {
"concat": "388.886",
}},
+ }, {
+ sql: "SELECT count(a) as r FROM test",
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": 47.5,
+ },
+ },
+ result: []map[string]interface{}{{
+ "r": float64(1),
+ }},
+ }, {
+ sql: "SELECT meta(test.device) as d FROM test Inner Join test1 on test.id = test1.id GROUP BY TumblingWindow(ss, 10)",
+ data: xsql.JoinTupleSets{
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 65.55}, Metadata: xsql.Metadata{"device": "devicea"}},
+ {Emitter: "test1", Message: xsql.Message{"id": 1, "b": 12}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 2, "a": 73.499}, Metadata: xsql.Metadata{"device": "deviceb"}},
+ {Emitter: "test1", Message: xsql.Message{"id": 2, "b": 34}},
+ },
+ },
+ xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 3, "a": 88.88}, Metadata: xsql.Metadata{"device": "devicec"}},
+ {Emitter: "test1", Message: xsql.Message{"id": 3, "b": 6}},
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "d": "devicea",
+ }, {
+ "d": "deviceb",
+ }, {
+ "d": "devicec",
+ }},
},
}
@@ -838,7 +1127,7 @@ func TestProjectPlan_Funcs(t *testing.T) {
if err != nil {
t.Error(err)
}
- pp := &ProjectPlan{Fields: stmt.Fields}
+ pp := &ProjectPlan{Fields: stmt.Fields, IsAggregate: xsql.IsAggStatement(stmt)}
pp.isTest = true
result := pp.Apply(ctx, tt.data)
var mapRes []map[string]interface{}
@@ -855,7 +1144,7 @@ func TestProjectPlan_Funcs(t *testing.T) {
t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, mapRes)
}
} else {
- t.Errorf("The returned result is not type of []byte\n")
+ t.Errorf("%d. The returned result is not type of []byte\n", i)
}
}
}
@@ -1048,6 +1337,68 @@ func TestProjectPlan_AggFuncs(t *testing.T) {
result: []map[string]interface{}{{
"sum": float64(123203),
}},
+ }, {
+ sql: "SELECT sum(a) as sum FROM test GROUP BY TumblingWindow(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "test",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 53},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 27},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 123123},
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "sum": float64(123203),
+ }},
+ },
+ {
+ sql: "SELECT count(*) as c, meta(test1.device) as d FROM test Inner Join test1 on test.id = test1.id GROUP BY TumblingWindow(ss, 10), test1.color",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 122.33}},
+ {Emitter: "test1", Message: xsql.Message{"id": 1, "color": "w2"}, Metadata: xsql.Metadata{"device": "devicea"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 5, "a": 177.51}},
+ {Emitter: "test1", Message: xsql.Message{"id": 5, "color": "w2"}, Metadata: xsql.Metadata{"device": "deviceb"}},
+ },
+ },
+ },
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 2, "a": 89.03}},
+ {Emitter: "test1", Message: xsql.Message{"id": 2, "color": "w1"}, Metadata: xsql.Metadata{"device": "devicec"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 4, "a": 14.6}},
+ {Emitter: "test1", Message: xsql.Message{"id": 4, "color": "w1"}, Metadata: xsql.Metadata{"device": "deviced"}},
+ },
+ },
+ },
+ },
+ result: []map[string]interface{}{{
+ "c": float64(2),
+ "d": "devicea",
+ }, {
+ "c": float64(2),
+ "d": "devicec",
+ }},
},
}
@@ -1076,7 +1427,135 @@ func TestProjectPlan_AggFuncs(t *testing.T) {
t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, mapRes)
}
} else {
- t.Errorf("The returned result is not type of []byte\n")
+ t.Errorf("%d. %q\n\nThe returned result is not type of []byte: %#v\n", i, tt.sql, result)
+ }
+ }
+}
+
+func TestProjectPlanError(t *testing.T) {
+ var tests = []struct {
+ sql string
+ data interface{}
+ result interface{}
+ }{
+ {
+ sql: "SELECT a FROM test",
+ data: errors.New("an error from upstream"),
+ result: errors.New("an error from upstream"),
+ }, {
+ sql: "SELECT a * 5 FROM test",
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": "val_a",
+ },
+ },
+ result: errors.New("run Select error: invalid operation string(val_a) * int64(5)"),
+ }, {
+ sql: `SELECT a[0]->b AS ab FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": "common string",
+ },
+ },
+ result: errors.New("run Select error: invalid operation string(common string) [] *xsql.BracketEvalResult(&{0 0})"),
+ }, {
+ sql: `SELECT round(a) as r FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "a": "common string",
+ },
+ },
+ result: errors.New("run Select error: call func round error: only float64 & int type are supported"),
+ }, {
+ sql: `SELECT round(a) as r FROM test`,
+ data: &xsql.Tuple{
+ Emitter: "test",
+ Message: xsql.Message{
+ "abc": "common string",
+ },
+ },
+ result: errors.New("run Select error: call func round error: only float64 & int type are supported"),
+ }, {
+ sql: "SELECT avg(a) as avg FROM test Inner Join test1 on test.id = test1.id GROUP BY TumblingWindow(ss, 10), test1.color",
+ data: xsql.GroupedTuplesSet{
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 122.33}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 1, "a": 68.54}},
+ {Emitter: "src2", Message: xsql.Message{"id": 1, "color": "w2"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 4, "a": "dde"}},
+ {Emitter: "src2", Message: xsql.Message{"id": 4, "color": "w2"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 5, "a": 177.54}},
+ {Emitter: "src2", Message: xsql.Message{"id": 5, "color": "w2"}},
+ },
+ },
+ },
+ {
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 2, "a": 89.03}},
+ {Emitter: "src2", Message: xsql.Message{"id": 2, "color": "w1"}},
+ },
+ },
+ &xsql.JoinTuple{
+ Tuples: []xsql.Tuple{
+ {Emitter: "test", Message: xsql.Message{"id": 4, "a": 14.6}},
+ {Emitter: "src2", Message: xsql.Message{"id": 4, "color": "w1"}},
+ },
+ },
+ },
+ },
+ result: errors.New("run Select error: call func avg error: requires float64 but found string(dde)"),
+ }, {
+ sql: "SELECT sum(a) as sum FROM test GROUP BY TumblingWindow(ss, 10)",
+ data: xsql.WindowTuplesSet{
+ xsql.WindowTuples{
+ Emitter: "test",
+ Tuples: []xsql.Tuple{
+ {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 53},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"a": "ddd"},
+ }, {
+ Emitter: "src1",
+ Message: xsql.Message{"a": 123123},
+ },
+ },
+ },
+ },
+ result: errors.New("run Select error: call func sum error: requires int but found string(ddd)"),
+ },
+ }
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ contextLogger := common.Log.WithField("rule", "TestProjectPlanError")
+ ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+ for i, tt := range tests {
+ stmt, _ := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+
+ pp := &ProjectPlan{Fields: stmt.Fields, IsAggregate: xsql.IsAggStatement(stmt)}
+ pp.isTest = true
+ result := pp.Apply(ctx, tt.data)
+ if !reflect.DeepEqual(tt.result, result) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, result)
}
}
}
diff --git a/xsql/plans/str_func_test.go b/xsql/plans/str_func_test.go
index 3111b3ed87..5cbf694756 100644
--- a/xsql/plans/str_func_test.go
+++ b/xsql/plans/str_func_test.go
@@ -407,17 +407,6 @@ func TestStrFunc_Apply1(t *testing.T) {
"b": "message",
}},
},
-
- {
- sql: `SELECT split_value(a,"/",3) AS a FROM test1`,
- data: &xsql.Tuple{
- Emitter: "test",
- Message: xsql.Message{
- "a": "test/device001/message",
- },
- },
- result: []map[string]interface{}{map[string]interface{}{}},
- },
}
fmt.Printf("The test bucket size is %d.\n\n", len(tests))
@@ -444,7 +433,7 @@ func TestStrFunc_Apply1(t *testing.T) {
t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, mapRes)
}
} else {
- t.Errorf("The returned result is not type of []byte\n")
+ t.Errorf("%d. The returned result is not type of []byte\n", i)
}
}
}
diff --git a/xsql/processors/xsql_processor_test.go b/xsql/processors/xsql_processor_test.go
index 7d0157d662..467fb697cc 100644
--- a/xsql/processors/xsql_processor_test.go
+++ b/xsql/processors/xsql_processor_test.go
@@ -112,6 +112,15 @@ func createStreams(t *testing.T) {
if err != nil {
t.Log(err)
}
+ demoE := `CREATE STREAM demoE (
+ color STRING,
+ size BIGINT,
+ ts BIGINT
+ ) WITH (DATASOURCE="demoE", FORMAT="json", KEY="ts");`
+ _, err = p.ExecStmt(demoE)
+ if err != nil {
+ t.Log(err)
+ }
demo1 := `CREATE STREAM demo1 (
temp FLOAT,
hum BIGINT,
@@ -139,6 +148,11 @@ func dropStreams(t *testing.T) {
if err != nil {
t.Log(err)
}
+ demoE := `DROP STREAM demoE`
+ _, err = p.ExecStmt(demoE)
+ if err != nil {
+ t.Log(err)
+ }
demo1 := `DROP STREAM demo1`
_, err = p.ExecStmt(demo1)
if err != nil {
@@ -151,6 +165,47 @@ func dropStreams(t *testing.T) {
}
}
+func createSchemalessStreams(t *testing.T) {
+ p := NewStreamProcessor(path.Join(DbDir, "stream"))
+ demo := `CREATE STREAM ldemo (
+ ) WITH (DATASOURCE="ldemo", FORMAT="json");`
+ _, err := p.ExecStmt(demo)
+ if err != nil {
+ t.Log(err)
+ }
+ demo1 := `CREATE STREAM ldemo1 (
+ ) WITH (DATASOURCE="ldemo1", FORMAT="json");`
+ _, err = p.ExecStmt(demo1)
+ if err != nil {
+ t.Log(err)
+ }
+ sessionDemo := `CREATE STREAM lsessionDemo (
+ ) WITH (DATASOURCE="lsessionDemo", FORMAT="json");`
+ _, err = p.ExecStmt(sessionDemo)
+ if err != nil {
+ t.Log(err)
+ }
+}
+
+func dropSchemalessStreams(t *testing.T) {
+ p := NewStreamProcessor(path.Join(DbDir, "stream"))
+ demo := `DROP STREAM ldemo`
+ _, err := p.ExecStmt(demo)
+ if err != nil {
+ t.Log(err)
+ }
+ demo1 := `DROP STREAM ldemo1`
+ _, err = p.ExecStmt(demo1)
+ if err != nil {
+ t.Log(err)
+ }
+ sessionDemo := `DROP STREAM lsessionDemo`
+ _, err = p.ExecStmt(sessionDemo)
+ if err != nil {
+ t.Log(err)
+ }
+}
+
func getMockSource(name string, done <-chan int, size int) *nodes.SourceNode {
var data []*xsql.Tuple
switch name {
@@ -202,6 +257,54 @@ func getMockSource(name string, done <-chan int, size int) *nodes.SourceNode {
Timestamp: 1541152489252,
},
}
+ case "demoE":
+ data = []*xsql.Tuple{
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": 3,
+ "size": "red",
+ "ts": 1541152486013,
+ },
+ Timestamp: 1541152486013,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "blue",
+ "size": 6,
+ "ts": "1541152486822",
+ },
+ Timestamp: 1541152486822,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "blue",
+ "size": 2,
+ "ts": 1541152487632,
+ },
+ Timestamp: 1541152487632,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": 7,
+ "size": 4,
+ "ts": 1541152488442,
+ },
+ Timestamp: 1541152488442,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "red",
+ "size": "blue",
+ "ts": 1541152489252,
+ },
+ Timestamp: 1541152489252,
+ },
+ }
case "demo1":
data = []*xsql.Tuple{
{
@@ -487,6 +590,175 @@ func TestSingleSQL(t *testing.T) {
"source_demo_0_records_in_total": int64(5),
"source_demo_0_records_out_total": int64(5),
+ "op_filter_0_exceptions_total": int64(0),
+ "op_filter_0_process_latency_ms": int64(0),
+ "op_filter_0_records_in_total": int64(5),
+ "op_filter_0_records_out_total": int64(2),
+ },
+ }, {
+ name: `rule4`,
+ sql: `SELECT size as Int8, ts FROM demoE where size > 3`,
+ r: [][]map[string]interface{}{
+ {{
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(3)",
+ }},
+ {{
+ "Int8": float64(6),
+ "ts": float64(1541152486822),
+ }},
+ {{
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(7)",
+ }},
+ {{
+ "error": "error in preprocessor: invalid data type for size, expect bigint but found string(blue)",
+ }},
+ },
+ s: "op_filter_0_records_in_total",
+ m: map[string]interface{}{
+ "op_preprocessor_demoE_0_exceptions_total": int64(3),
+ "op_preprocessor_demoE_0_process_latency_ms": int64(0),
+ "op_preprocessor_demoE_0_records_in_total": int64(5),
+ "op_preprocessor_demoE_0_records_out_total": int64(2),
+
+ "op_project_0_exceptions_total": int64(3),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(4),
+ "op_project_0_records_out_total": int64(1),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(4),
+ "sink_mockSink_0_records_out_total": int64(4),
+
+ "source_demoE_0_exceptions_total": int64(0),
+ "source_demoE_0_records_in_total": int64(5),
+ "source_demoE_0_records_out_total": int64(5),
+
+ "op_filter_0_exceptions_total": int64(3),
+ "op_filter_0_process_latency_ms": int64(0),
+ "op_filter_0_records_in_total": int64(5),
+ "op_filter_0_records_out_total": int64(1),
+ },
+ }, {
+ name: `rule4`,
+ sql: `SELECT size as Int8, ts FROM demoE where size > 3`,
+ r: [][]map[string]interface{}{
+ {{
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(3)",
+ }},
+ {{
+ "Int8": float64(6),
+ "ts": float64(1541152486822),
+ }},
+ {{
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(7)",
+ }},
+ {{
+ "error": "error in preprocessor: invalid data type for size, expect bigint but found string(blue)",
+ }},
+ },
+ s: "op_filter_0_records_in_total",
+ m: map[string]interface{}{
+ "op_preprocessor_demoE_0_exceptions_total": int64(3),
+ "op_preprocessor_demoE_0_process_latency_ms": int64(0),
+ "op_preprocessor_demoE_0_records_in_total": int64(5),
+ "op_preprocessor_demoE_0_records_out_total": int64(2),
+
+ "op_project_0_exceptions_total": int64(3),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(4),
+ "op_project_0_records_out_total": int64(1),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(4),
+ "sink_mockSink_0_records_out_total": int64(4),
+
+ "source_demoE_0_exceptions_total": int64(0),
+ "source_demoE_0_records_in_total": int64(5),
+ "source_demoE_0_records_out_total": int64(5),
+
+ "op_filter_0_exceptions_total": int64(3),
+ "op_filter_0_process_latency_ms": int64(0),
+ "op_filter_0_records_in_total": int64(5),
+ "op_filter_0_records_out_total": int64(1),
+ },
+ }, {
+ name: `rule5`,
+ sql: `SELECT meta(topic) as m, ts FROM demo`,
+ r: [][]map[string]interface{}{
+ {{
+ "m": "mock",
+ "ts": float64(1541152486013),
+ }},
+ {{
+ "m": "mock",
+ "ts": float64(1541152486822),
+ }},
+ {{
+ "m": "mock",
+ "ts": float64(1541152487632),
+ }},
+ {{
+ "m": "mock",
+ "ts": float64(1541152488442),
+ }},
+ {{
+ "m": "mock",
+ "ts": float64(1541152489252),
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_demo_0_exceptions_total": int64(0),
+ "op_preprocessor_demo_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo_0_records_in_total": int64(5),
+ "op_preprocessor_demo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(5),
+ "op_project_0_records_out_total": int64(5),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(5),
+ "sink_mockSink_0_records_out_total": int64(5),
+
+ "source_demo_0_exceptions_total": int64(0),
+ "source_demo_0_records_in_total": int64(5),
+ "source_demo_0_records_out_total": int64(5),
+ },
+ s: "sink_mockSink_0_records_out_total",
+ }, {
+ name: `rule6`,
+ sql: `SELECT color, ts FROM demo where size > 3 and meta(topic)="mock"`,
+ r: [][]map[string]interface{}{
+ {{
+ "color": "blue",
+ "ts": float64(1541152486822),
+ }},
+ {{
+ "color": "yellow",
+ "ts": float64(1541152488442),
+ }},
+ },
+ s: "op_filter_0_records_in_total",
+ m: map[string]interface{}{
+ "op_preprocessor_demo_0_exceptions_total": int64(0),
+ "op_preprocessor_demo_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo_0_records_in_total": int64(5),
+ "op_preprocessor_demo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(2),
+ "op_project_0_records_out_total": int64(2),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(2),
+ "sink_mockSink_0_records_out_total": int64(2),
+
+ "source_demo_0_exceptions_total": int64(0),
+ "source_demo_0_records_in_total": int64(5),
+ "source_demo_0_records_out_total": int64(5),
+
"op_filter_0_exceptions_total": int64(0),
"op_filter_0_process_latency_ms": int64(0),
"op_filter_0_records_in_total": int64(5),
@@ -571,49 +843,716 @@ func TestSingleSQL(t *testing.T) {
}
}
-func TestWindow(t *testing.T) {
- var tests = []struct {
- name string
- sql string
- size int
- r [][]map[string]interface{}
- m map[string]interface{}
- }{
- {
- name: `rule1`,
- sql: `SELECT * FROM demo GROUP BY HOPPINGWINDOW(ss, 2, 1)`,
- size: 5,
- r: [][]map[string]interface{}{
- {{
- "color": "red",
- "size": float64(3),
- "ts": float64(1541152486013),
- }, {
- "color": "blue",
- "size": float64(6),
- "ts": float64(1541152486822),
- }},
- {{
+func getMockSourceL(name string, done <-chan int, size int) *nodes.SourceNode {
+ var data []*xsql.Tuple
+ switch name {
+ case "ldemo":
+ data = []*xsql.Tuple{
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
"color": "red",
- "size": float64(3),
- "ts": float64(1541152486013),
- }, {
+ "size": 3,
+ "ts": 1541152486013,
+ },
+ Timestamp: 1541152486013,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
"color": "blue",
- "size": float64(6),
- "ts": float64(1541152486822),
- }, {
+ "size": "string",
+ "ts": 1541152486822,
+ },
+ Timestamp: 1541152486822,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "size": 3,
+ "ts": 1541152487632,
+ },
+ Timestamp: 1541152487632,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": 49,
+ "size": 2,
+ "ts": 1541152488442,
+ },
+ Timestamp: 1541152488442,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "red",
+ "ts": 1541152489252,
+ },
+ Timestamp: 1541152489252,
+ },
+ }
+ case "ldemo1":
+ data = []*xsql.Tuple{
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 25.5,
+ "hum": 65,
+ "ts": 1541152486013,
+ },
+ Timestamp: 1541152486013,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 27.5,
+ "hum": 59,
+ "ts": 1541152486823,
+ },
+ Timestamp: 1541152486823,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 28.1,
+ "hum": 75,
+ "ts": 1541152487632,
+ },
+ Timestamp: 1541152487632,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 27.4,
+ "hum": 80,
+ "ts": "1541152488442",
+ },
+ Timestamp: 1541152488442,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 25.5,
+ "hum": 62,
+ "ts": 1541152489252,
+ },
+ Timestamp: 1541152489252,
+ },
+ }
+ case "lsessionDemo":
+ data = []*xsql.Tuple{
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 25.5,
+ "hum": 65,
+ "ts": 1541152486013,
+ },
+ Timestamp: 1541152486013,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 27.5,
+ "hum": 59,
+ "ts": 1541152486823,
+ },
+ Timestamp: 1541152486823,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 28.1,
+ "hum": 75,
+ "ts": 1541152487932,
+ },
+ Timestamp: 1541152487932,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 27.4,
+ "hum": 80,
+ "ts": 1541152488442,
+ },
+ Timestamp: 1541152488442,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 25.5,
+ "hum": 62,
+ "ts": 1541152489252,
+ },
+ Timestamp: 1541152489252,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 26.2,
+ "hum": 63,
+ "ts": 1541152490062,
+ },
+ Timestamp: 1541152490062,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 26.8,
+ "hum": 71,
+ "ts": 1541152490872,
+ },
+ Timestamp: 1541152490872,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 28.9,
+ "hum": 85,
+ "ts": 1541152491682,
+ },
+ Timestamp: 1541152491682,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 29.1,
+ "hum": 92,
+ "ts": 1541152492492,
+ },
+ Timestamp: 1541152492492,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 2.2,
+ "hum": 99,
+ "ts": 1541152493202,
+ },
+ Timestamp: 1541152493202,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "temp": 30.9,
+ "hum": 87,
+ "ts": 1541152494112,
+ },
+ Timestamp: 1541152494112,
+ },
+ }
+ }
+ return nodes.NewSourceNodeWithSource(name, test.NewMockSource(data[:size], done, false), map[string]string{
+ "DATASOURCE": name,
+ })
+}
+func TestSingleSQLError(t *testing.T) {
+ var tests = []struct {
+ name string
+ sql string
+ r [][]map[string]interface{}
+ s string
+ m map[string]interface{}
+ }{
+ {
+ name: `rule1`,
+ sql: `SELECT color, ts FROM ldemo where size >= 3`,
+ r: [][]map[string]interface{}{
+ {{
+ "color": "red",
+ "ts": float64(1541152486013),
+ }},
+ {{
+ "error": "run Where error: invalid operation string(string) >= int64(3)",
+ }},
+ {{
+ "ts": float64(1541152487632),
+ }},
+ },
+ s: "op_filter_0_records_in_total",
+ m: map[string]interface{}{
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(1),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(3),
+ "op_project_0_records_out_total": int64(2),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(3),
+ "sink_mockSink_0_records_out_total": int64(3),
+
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
+
+ "op_filter_0_exceptions_total": int64(1),
+ "op_filter_0_process_latency_ms": int64(0),
+ "op_filter_0_records_in_total": int64(5),
+ "op_filter_0_records_out_total": int64(2),
+ },
+ }, {
+ name: `rule2`,
+ sql: `SELECT size * 5 FROM ldemo`,
+ r: [][]map[string]interface{}{
+ {{
+ "rengine_field_0": float64(15),
+ }},
+ {{
+ "error": "run Select error: invalid operation string(string) * int64(5)",
+ }},
+ {{
+ "rengine_field_0": float64(15),
+ }},
+ {{
+ "rengine_field_0": float64(10),
+ }},
+ {{}},
+ },
+ s: "op_filter_0_records_in_total",
+ m: map[string]interface{}{
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(1),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(5),
+ "op_project_0_records_out_total": int64(4),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(5),
+ "sink_mockSink_0_records_out_total": int64(5),
+
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
+ },
+ },
+ }
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ createSchemalessStreams(t)
+ defer dropSchemalessStreams(t)
+ //defer close(done)
+ for i, tt := range tests {
+ test.ResetClock(1541152486000)
+ p := NewRuleProcessor(DbDir)
+ parser := xsql.NewParser(strings.NewReader(tt.sql))
+ var (
+ sources []*nodes.SourceNode
+ syncs []chan int
+ )
+ if stmt, err := xsql.Language.Parse(parser); err != nil {
+ t.Errorf("parse sql %s error: %s", tt.sql, err)
+ } else {
+ if selectStmt, ok := stmt.(*xsql.SelectStatement); !ok {
+ t.Errorf("sql %s is not a select statement", tt.sql)
+ } else {
+ streams := xsql.GetStreams(selectStmt)
+ for _, stream := range streams {
+ next := make(chan int)
+ syncs = append(syncs, next)
+ source := getMockSourceL(stream, next, 5)
+ sources = append(sources, source)
+ }
+ }
+ }
+ tp, inputs, err := p.createTopoWithSources(&api.Rule{Id: tt.name, Sql: tt.sql, Options: map[string]interface{}{
+ "bufferLength": float64(100),
+ }}, sources)
+ if err != nil {
+ t.Error(err)
+ }
+ mockSink := test.NewMockSink()
+ sink := nodes.NewSinkNodeWithSink("mockSink", mockSink)
+ tp.AddSink(inputs, sink)
+ errCh := tp.Open()
+ func() {
+ for i := 0; i < 5; i++ {
+ syncs[i%len(syncs)] <- i
+ select {
+ case err = <-errCh:
+ t.Log(err)
+ tp.Cancel()
+ return
+ default:
+ }
+ }
+ for retry := 100; retry > 0; retry-- {
+ if err := compareMetrics(tp, tt.m, tt.sql); err == nil {
+ break
+ }
+ time.Sleep(time.Duration(retry) * time.Millisecond)
+ }
+ }()
+ results := mockSink.GetResults()
+ var maps [][]map[string]interface{}
+ for _, v := range results {
+ var mapRes []map[string]interface{}
+ err := json.Unmarshal(v, &mapRes)
+ if err != nil {
+ t.Errorf("Failed to parse the input into map")
+ continue
+ }
+ maps = append(maps, mapRes)
+ }
+ if !reflect.DeepEqual(tt.r, maps) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.r, maps)
+ continue
+ }
+ if err := compareMetrics(tp, tt.m, tt.sql); err != nil {
+ t.Errorf("%d. %q\n\n%v", i, tt.sql, err)
+ }
+ tp.Cancel()
+ }
+}
+
+func TestWindow(t *testing.T) {
+ var tests = []struct {
+ name string
+ sql string
+ size int
+ r [][]map[string]interface{}
+ m map[string]interface{}
+ }{
+ {
+ name: `rule1`,
+ sql: `SELECT * FROM demo GROUP BY HOPPINGWINDOW(ss, 2, 1)`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "color": "red",
+ "size": float64(3),
+ "ts": float64(1541152486013),
+ }, {
+ "color": "blue",
+ "size": float64(6),
+ "ts": float64(1541152486822),
+ }},
+ {{
+ "color": "red",
+ "size": float64(3),
+ "ts": float64(1541152486013),
+ }, {
+ "color": "blue",
+ "size": float64(6),
+ "ts": float64(1541152486822),
+ }, {
+ "color": "blue",
+ "size": float64(2),
+ "ts": float64(1541152487632),
+ }},
+ {{
+ "color": "blue",
+ "size": float64(2),
+ "ts": float64(1541152487632),
+ }, {
+ "color": "yellow",
+ "size": float64(4),
+ "ts": float64(1541152488442),
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_demo_0_exceptions_total": int64(0),
+ "op_preprocessor_demo_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo_0_records_in_total": int64(5),
+ "op_preprocessor_demo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(3),
+ "op_project_0_records_out_total": int64(3),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(3),
+ "sink_mockSink_0_records_out_total": int64(3),
+
+ "source_demo_0_exceptions_total": int64(0),
+ "source_demo_0_records_in_total": int64(5),
+ "source_demo_0_records_out_total": int64(5),
+
+ "op_window_0_exceptions_total": int64(0),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(5),
+ "op_window_0_records_out_total": int64(3),
+ },
+ }, {
+ name: `rule2`,
+ sql: `SELECT color, ts FROM demo where size > 2 GROUP BY tumblingwindow(ss, 1)`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "color": "red",
+ "ts": float64(1541152486013),
+ }, {
"color": "blue",
- "size": float64(2),
- "ts": float64(1541152487632),
+ "ts": float64(1541152486822),
}},
{{
+ "color": "yellow",
+ "ts": float64(1541152488442),
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_demo_0_exceptions_total": int64(0),
+ "op_preprocessor_demo_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo_0_records_in_total": int64(5),
+ "op_preprocessor_demo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(2),
+ "op_project_0_records_out_total": int64(2),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(2),
+ "sink_mockSink_0_records_out_total": int64(2),
+
+ "source_demo_0_exceptions_total": int64(0),
+ "source_demo_0_records_in_total": int64(5),
+ "source_demo_0_records_out_total": int64(5),
+
+ "op_window_0_exceptions_total": int64(0),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(5),
+ "op_window_0_records_out_total": int64(3),
+
+ "op_filter_0_exceptions_total": int64(0),
+ "op_filter_0_process_latency_ms": int64(0),
+ "op_filter_0_records_in_total": int64(3),
+ "op_filter_0_records_out_total": int64(2),
+ },
+ }, {
+ name: `rule3`,
+ sql: `SELECT color, temp, ts FROM demo INNER JOIN demo1 ON demo.ts = demo1.ts GROUP BY SlidingWindow(ss, 1)`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "color": "red",
+ "temp": 25.5,
+ "ts": float64(1541152486013),
+ }}, {{
+ "color": "red",
+ "temp": 25.5,
+ "ts": float64(1541152486013),
+ }}, {{
+ "color": "red",
+ "temp": 25.5,
+ "ts": float64(1541152486013),
+ }}, {{
"color": "blue",
- "size": float64(2),
+ "temp": 28.1,
+ "ts": float64(1541152487632),
+ }}, {{
+ "color": "blue",
+ "temp": 28.1,
+ "ts": float64(1541152487632),
+ }}, {{
+ "color": "blue",
+ "temp": 28.1,
"ts": float64(1541152487632),
}, {
"color": "yellow",
- "size": float64(4),
+ "temp": 27.4,
+ "ts": float64(1541152488442),
+ }}, {{
+ "color": "yellow",
+ "temp": 27.4,
+ "ts": float64(1541152488442),
+ }}, {{
+ "color": "yellow",
+ "temp": 27.4,
"ts": float64(1541152488442),
+ }, {
+ "color": "red",
+ "temp": 25.5,
+ "ts": float64(1541152489252),
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_demo_0_exceptions_total": int64(0),
+ "op_preprocessor_demo_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo_0_records_in_total": int64(5),
+ "op_preprocessor_demo_0_records_out_total": int64(5),
+
+ "op_preprocessor_demo1_0_exceptions_total": int64(0),
+ "op_preprocessor_demo1_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo1_0_records_in_total": int64(5),
+ "op_preprocessor_demo1_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(8),
+ "op_project_0_records_out_total": int64(8),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(8),
+ "sink_mockSink_0_records_out_total": int64(8),
+
+ "source_demo_0_exceptions_total": int64(0),
+ "source_demo_0_records_in_total": int64(5),
+ "source_demo_0_records_out_total": int64(5),
+
+ "source_demo1_0_exceptions_total": int64(0),
+ "source_demo1_0_records_in_total": int64(5),
+ "source_demo1_0_records_out_total": int64(5),
+
+ "op_window_0_exceptions_total": int64(0),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(10),
+ "op_window_0_records_out_total": int64(10),
+
+ "op_join_0_exceptions_total": int64(0),
+ "op_join_0_process_latency_ms": int64(0),
+ "op_join_0_records_in_total": int64(10),
+ "op_join_0_records_out_total": int64(8),
+ },
+ }, {
+ name: `rule4`,
+ sql: `SELECT color FROM demo GROUP BY SlidingWindow(ss, 2), color ORDER BY color`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "color": "red",
+ }}, {{
+ "color": "blue",
+ }, {
+ "color": "red",
+ }}, {{
+ "color": "blue",
+ }, {
+ "color": "red",
+ }}, {{
+ "color": "blue",
+ }, {
+ "color": "yellow",
+ }}, {{
+ "color": "blue",
+ }, {
+ "color": "red",
+ }, {
+ "color": "yellow",
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_demo_0_exceptions_total": int64(0),
+ "op_preprocessor_demo_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo_0_records_in_total": int64(5),
+ "op_preprocessor_demo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(5),
+ "op_project_0_records_out_total": int64(5),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(5),
+ "sink_mockSink_0_records_out_total": int64(5),
+
+ "source_demo_0_exceptions_total": int64(0),
+ "source_demo_0_records_in_total": int64(5),
+ "source_demo_0_records_out_total": int64(5),
+
+ "op_window_0_exceptions_total": int64(0),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(5),
+ "op_window_0_records_out_total": int64(5),
+
+ "op_aggregate_0_exceptions_total": int64(0),
+ "op_aggregate_0_process_latency_ms": int64(0),
+ "op_aggregate_0_records_in_total": int64(5),
+ "op_aggregate_0_records_out_total": int64(5),
+
+ "op_order_0_exceptions_total": int64(0),
+ "op_order_0_process_latency_ms": int64(0),
+ "op_order_0_records_in_total": int64(5),
+ "op_order_0_records_out_total": int64(5),
+ },
+ }, {
+ name: `rule5`,
+ sql: `SELECT temp FROM sessionDemo GROUP BY SessionWindow(ss, 2, 1) `,
+ size: 11,
+ r: [][]map[string]interface{}{
+ {{
+ "temp": 25.5,
+ }, {
+ "temp": 27.5,
+ }}, {{
+ "temp": 28.1,
+ }, {
+ "temp": 27.4,
+ }, {
+ "temp": 25.5,
+ }}, {{
+ "temp": 26.2,
+ }, {
+ "temp": 26.8,
+ }, {
+ "temp": 28.9,
+ }, {
+ "temp": 29.1,
+ }, {
+ "temp": 32.2,
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_sessionDemo_0_exceptions_total": int64(0),
+ "op_preprocessor_sessionDemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_sessionDemo_0_records_in_total": int64(11),
+ "op_preprocessor_sessionDemo_0_records_out_total": int64(11),
+
+ "op_project_0_exceptions_total": int64(0),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(3),
+ "op_project_0_records_out_total": int64(3),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(3),
+ "sink_mockSink_0_records_out_total": int64(3),
+
+ "source_sessionDemo_0_exceptions_total": int64(0),
+ "source_sessionDemo_0_records_in_total": int64(11),
+ "source_sessionDemo_0_records_out_total": int64(11),
+
+ "op_window_0_exceptions_total": int64(0),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(11),
+ "op_window_0_records_out_total": int64(3),
+ },
+ }, {
+ name: `rule6`,
+ sql: `SELECT max(temp) as m, count(color) as c FROM demo INNER JOIN demo1 ON demo.ts = demo1.ts GROUP BY SlidingWindow(ss, 1)`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "m": 25.5,
+ "c": float64(1),
+ }}, {{
+ "m": 25.5,
+ "c": float64(1),
+ }}, {{
+ "m": 25.5,
+ "c": float64(1),
+ }}, {{
+ "m": 28.1,
+ "c": float64(1),
+ }}, {{
+ "m": 28.1,
+ "c": float64(1),
+ }}, {{
+ "m": 28.1,
+ "c": float64(2),
+ }}, {{
+ "m": 27.4,
+ "c": float64(1),
+ }}, {{
+ "m": 27.4,
+ "c": float64(2),
}},
},
m: map[string]interface{}{
@@ -622,73 +1561,267 @@ func TestWindow(t *testing.T) {
"op_preprocessor_demo_0_records_in_total": int64(5),
"op_preprocessor_demo_0_records_out_total": int64(5),
+ "op_preprocessor_demo1_0_exceptions_total": int64(0),
+ "op_preprocessor_demo1_0_process_latency_ms": int64(0),
+ "op_preprocessor_demo1_0_records_in_total": int64(5),
+ "op_preprocessor_demo1_0_records_out_total": int64(5),
+
"op_project_0_exceptions_total": int64(0),
"op_project_0_process_latency_ms": int64(0),
- "op_project_0_records_in_total": int64(3),
- "op_project_0_records_out_total": int64(3),
+ "op_project_0_records_in_total": int64(8),
+ "op_project_0_records_out_total": int64(8),
"sink_mockSink_0_exceptions_total": int64(0),
- "sink_mockSink_0_records_in_total": int64(3),
- "sink_mockSink_0_records_out_total": int64(3),
+ "sink_mockSink_0_records_in_total": int64(8),
+ "sink_mockSink_0_records_out_total": int64(8),
"source_demo_0_exceptions_total": int64(0),
"source_demo_0_records_in_total": int64(5),
"source_demo_0_records_out_total": int64(5),
+ "source_demo1_0_exceptions_total": int64(0),
+ "source_demo1_0_records_in_total": int64(5),
+ "source_demo1_0_records_out_total": int64(5),
+
"op_window_0_exceptions_total": int64(0),
"op_window_0_process_latency_ms": int64(0),
- "op_window_0_records_in_total": int64(5),
- "op_window_0_records_out_total": int64(3),
+ "op_window_0_records_in_total": int64(10),
+ "op_window_0_records_out_total": int64(10),
+
+ "op_join_0_exceptions_total": int64(0),
+ "op_join_0_process_latency_ms": int64(0),
+ "op_join_0_records_in_total": int64(10),
+ "op_join_0_records_out_total": int64(8),
},
}, {
- name: `rule2`,
- sql: `SELECT color, ts FROM demo where size > 2 GROUP BY tumblingwindow(ss, 1)`,
+ name: `rule7`,
+ sql: `SELECT * FROM demoE GROUP BY HOPPINGWINDOW(ss, 2, 1)`,
size: 5,
r: [][]map[string]interface{}{
{{
- "color": "red",
- "ts": float64(1541152486013),
- }, {
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(3)",
+ }},
+ {{
"color": "blue",
+ "size": float64(6),
"ts": float64(1541152486822),
}},
{{
- "color": "yellow",
- "ts": float64(1541152488442),
+ "color": "blue",
+ "size": float64(6),
+ "ts": float64(1541152486822),
+ }, {
+ "color": "blue",
+ "size": float64(2),
+ "ts": float64(1541152487632),
+ }},
+ {{
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(7)",
+ }},
+ {{
+ "color": "blue",
+ "size": float64(2),
+ "ts": float64(1541152487632),
+ }},
+ {{
+ "error": "error in preprocessor: invalid data type for size, expect bigint but found string(blue)",
}},
},
m: map[string]interface{}{
- "op_preprocessor_demo_0_exceptions_total": int64(0),
- "op_preprocessor_demo_0_process_latency_ms": int64(0),
- "op_preprocessor_demo_0_records_in_total": int64(5),
- "op_preprocessor_demo_0_records_out_total": int64(5),
+ "op_preprocessor_demoE_0_exceptions_total": int64(3),
+ "op_preprocessor_demoE_0_process_latency_ms": int64(0),
+ "op_preprocessor_demoE_0_records_in_total": int64(5),
+ "op_preprocessor_demoE_0_records_out_total": int64(2),
- "op_project_0_exceptions_total": int64(0),
+ "op_project_0_exceptions_total": int64(3),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(6),
+ "op_project_0_records_out_total": int64(3),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(6),
+ "sink_mockSink_0_records_out_total": int64(6),
+
+ "source_demoE_0_exceptions_total": int64(0),
+ "source_demoE_0_records_in_total": int64(5),
+ "source_demoE_0_records_out_total": int64(5),
+
+ "op_window_0_exceptions_total": int64(3),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(5),
+ "op_window_0_records_out_total": int64(3),
+ },
+ },
+ }
+ fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+ createStreams(t)
+ defer dropStreams(t)
+ for i, tt := range tests {
+ test.ResetClock(1541152486000)
+ p := NewRuleProcessor(DbDir)
+ parser := xsql.NewParser(strings.NewReader(tt.sql))
+ var (
+ sources []*nodes.SourceNode
+ syncs []chan int
+ )
+ if stmt, err := xsql.Language.Parse(parser); err != nil {
+ t.Errorf("parse sql %s error: %s", tt.sql, err)
+ } else {
+ if selectStmt, ok := stmt.(*xsql.SelectStatement); !ok {
+ t.Errorf("sql %s is not a select statement", tt.sql)
+ } else {
+ streams := xsql.GetStreams(selectStmt)
+ for _, stream := range streams {
+ next := make(chan int)
+ syncs = append(syncs, next)
+ source := getMockSource(stream, next, tt.size)
+ sources = append(sources, source)
+ }
+ }
+ }
+ tp, inputs, err := p.createTopoWithSources(&api.Rule{Id: tt.name, Sql: tt.sql}, sources)
+ if err != nil {
+ t.Error(err)
+ }
+ mockSink := test.NewMockSink()
+ sink := nodes.NewSinkNodeWithSink("mockSink", mockSink)
+ tp.AddSink(inputs, sink)
+ errCh := tp.Open()
+ func() {
+ for i := 0; i < tt.size*len(syncs); i++ {
+ syncs[i%len(syncs)] <- i
+ for {
+ time.Sleep(1)
+ if getMetric(tp, "op_window_0_records_in_total") == (i + 1) {
+ break
+ }
+ }
+ select {
+ case err = <-errCh:
+ t.Log(err)
+ tp.Cancel()
+ return
+ default:
+ }
+ }
+ retry := 100
+ for ; retry > 0; retry-- {
+ if err := compareMetrics(tp, tt.m, tt.sql); err == nil {
+ break
+ }
+ t.Logf("wait to try another %d times", retry)
+ time.Sleep(time.Duration(retry) * time.Millisecond)
+ }
+ if retry == 0 {
+ err := compareMetrics(tp, tt.m, tt.sql)
+ t.Errorf("could not get correct metrics: %v", err)
+ }
+ }()
+ results := mockSink.GetResults()
+ var maps [][]map[string]interface{}
+ for _, v := range results {
+ var mapRes []map[string]interface{}
+ err := json.Unmarshal(v, &mapRes)
+ if err != nil {
+ t.Errorf("Failed to parse the input into map")
+ continue
+ }
+ maps = append(maps, mapRes)
+ }
+ if !reflect.DeepEqual(tt.r, maps) {
+ t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.r, maps)
+ }
+ if err := compareMetrics(tp, tt.m, tt.sql); err != nil {
+ t.Errorf("%d. %q\n\n%v", i, tt.sql, err)
+ }
+ tp.Cancel()
+ }
+}
+
+func TestWindowError(t *testing.T) {
+ var tests = []struct {
+ name string
+ sql string
+ size int
+ r [][]map[string]interface{}
+ m map[string]interface{}
+ }{
+ {
+ name: `rule1`,
+ sql: `SELECT size * 3 FROM ldemo GROUP BY TUMBLINGWINDOW(ss, 2)`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "error": "run Select error: invalid operation string(string) * int64(3)",
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(1),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(1),
+ "op_project_0_records_out_total": int64(0),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(1),
+ "sink_mockSink_0_records_out_total": int64(1),
+
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
+
+ "op_window_0_exceptions_total": int64(0),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(5),
+ "op_window_0_records_out_total": int64(1),
+ },
+ }, {
+ name: `rule2`,
+ sql: `SELECT color, ts FROM ldemo where size > 2 GROUP BY tumblingwindow(ss, 1)`,
+ size: 5,
+ r: [][]map[string]interface{}{
+ {{
+ "error": "run Where error: invalid operation string(string) > int64(2)",
+ }}, {{
+ "ts": float64(1541152487632),
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(1),
"op_project_0_process_latency_ms": int64(0),
"op_project_0_records_in_total": int64(2),
- "op_project_0_records_out_total": int64(2),
+ "op_project_0_records_out_total": int64(1),
"sink_mockSink_0_exceptions_total": int64(0),
"sink_mockSink_0_records_in_total": int64(2),
"sink_mockSink_0_records_out_total": int64(2),
- "source_demo_0_exceptions_total": int64(0),
- "source_demo_0_records_in_total": int64(5),
- "source_demo_0_records_out_total": int64(5),
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
"op_window_0_exceptions_total": int64(0),
"op_window_0_process_latency_ms": int64(0),
"op_window_0_records_in_total": int64(5),
"op_window_0_records_out_total": int64(3),
- "op_filter_0_exceptions_total": int64(0),
+ "op_filter_0_exceptions_total": int64(1),
"op_filter_0_process_latency_ms": int64(0),
"op_filter_0_records_in_total": int64(3),
- "op_filter_0_records_out_total": int64(2),
+ "op_filter_0_records_out_total": int64(1),
},
}, {
name: `rule3`,
- sql: `SELECT color, temp, ts FROM demo INNER JOIN demo1 ON demo.ts = demo1.ts GROUP BY SlidingWindow(ss, 1)`,
+ sql: `SELECT color, temp, ts FROM ldemo INNER JOIN ldemo1 ON ldemo.ts = ldemo1.ts GROUP BY SlidingWindow(ss, 1)`,
size: 5,
r: [][]map[string]interface{}{
{{
@@ -704,118 +1837,92 @@ func TestWindow(t *testing.T) {
"temp": 25.5,
"ts": float64(1541152486013),
}}, {{
- "color": "blue",
- "temp": 28.1,
- "ts": float64(1541152487632),
+ "temp": 28.1,
+ "ts": float64(1541152487632),
}}, {{
- "color": "blue",
- "temp": 28.1,
- "ts": float64(1541152487632),
+ "temp": 28.1,
+ "ts": float64(1541152487632),
}}, {{
- "color": "blue",
- "temp": 28.1,
- "ts": float64(1541152487632),
- }, {
- "color": "yellow",
- "temp": 27.4,
- "ts": float64(1541152488442),
+ "error": "run Join error: invalid operation int64(1541152487632) = string(1541152488442)",
}}, {{
- "color": "yellow",
- "temp": 27.4,
- "ts": float64(1541152488442),
+ "error": "run Join error: invalid operation int64(1541152488442) = string(1541152488442)",
}}, {{
- "color": "yellow",
- "temp": 27.4,
- "ts": float64(1541152488442),
- }, {
- "color": "red",
- "temp": 25.5,
- "ts": float64(1541152489252),
+ "error": "run Join error: invalid operation int64(1541152488442) = string(1541152488442)",
}},
},
m: map[string]interface{}{
- "op_preprocessor_demo_0_exceptions_total": int64(0),
- "op_preprocessor_demo_0_process_latency_ms": int64(0),
- "op_preprocessor_demo_0_records_in_total": int64(5),
- "op_preprocessor_demo_0_records_out_total": int64(5),
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
- "op_preprocessor_demo1_0_exceptions_total": int64(0),
- "op_preprocessor_demo1_0_process_latency_ms": int64(0),
- "op_preprocessor_demo1_0_records_in_total": int64(5),
- "op_preprocessor_demo1_0_records_out_total": int64(5),
+ "op_preprocessor_ldemo1_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo1_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo1_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo1_0_records_out_total": int64(5),
- "op_project_0_exceptions_total": int64(0),
+ "op_project_0_exceptions_total": int64(3),
"op_project_0_process_latency_ms": int64(0),
"op_project_0_records_in_total": int64(8),
- "op_project_0_records_out_total": int64(8),
+ "op_project_0_records_out_total": int64(5),
"sink_mockSink_0_exceptions_total": int64(0),
"sink_mockSink_0_records_in_total": int64(8),
"sink_mockSink_0_records_out_total": int64(8),
- "source_demo_0_exceptions_total": int64(0),
- "source_demo_0_records_in_total": int64(5),
- "source_demo_0_records_out_total": int64(5),
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
- "source_demo1_0_exceptions_total": int64(0),
- "source_demo1_0_records_in_total": int64(5),
- "source_demo1_0_records_out_total": int64(5),
+ "source_ldemo1_0_exceptions_total": int64(0),
+ "source_ldemo1_0_records_in_total": int64(5),
+ "source_ldemo1_0_records_out_total": int64(5),
"op_window_0_exceptions_total": int64(0),
"op_window_0_process_latency_ms": int64(0),
"op_window_0_records_in_total": int64(10),
"op_window_0_records_out_total": int64(10),
- "op_join_0_exceptions_total": int64(0),
+ "op_join_0_exceptions_total": int64(3),
"op_join_0_process_latency_ms": int64(0),
"op_join_0_records_in_total": int64(10),
- "op_join_0_records_out_total": int64(8),
+ "op_join_0_records_out_total": int64(5),
},
}, {
name: `rule4`,
- sql: `SELECT color FROM demo GROUP BY SlidingWindow(ss, 2), color ORDER BY color`,
+ sql: `SELECT color FROM ldemo GROUP BY SlidingWindow(ss, 2), color having size >= 2 order by color`,
size: 5,
r: [][]map[string]interface{}{
{{
"color": "red",
}}, {{
- "color": "blue",
- }, {
- "color": "red",
+ "error": "run Having error: invalid operation string(string) >= int64(2)",
}}, {{
- "color": "blue",
- }, {
- "color": "red",
+ "error": "run Having error: invalid operation string(string) >= int64(2)",
}}, {{
- "color": "blue",
- }, {
- "color": "yellow",
+ "error": "run Having error: invalid operation string(string) >= int64(2)",
}}, {{
- "color": "blue",
- }, {
- "color": "red",
- }, {
- "color": "yellow",
- }},
+ "color": float64(49),
+ }, {}},
},
m: map[string]interface{}{
- "op_preprocessor_demo_0_exceptions_total": int64(0),
- "op_preprocessor_demo_0_process_latency_ms": int64(0),
- "op_preprocessor_demo_0_records_in_total": int64(5),
- "op_preprocessor_demo_0_records_out_total": int64(5),
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
- "op_project_0_exceptions_total": int64(0),
+ "op_project_0_exceptions_total": int64(3),
"op_project_0_process_latency_ms": int64(0),
"op_project_0_records_in_total": int64(5),
- "op_project_0_records_out_total": int64(5),
+ "op_project_0_records_out_total": int64(2),
"sink_mockSink_0_exceptions_total": int64(0),
"sink_mockSink_0_records_in_total": int64(5),
"sink_mockSink_0_records_out_total": int64(5),
- "source_demo_0_exceptions_total": int64(0),
- "source_demo_0_records_in_total": int64(5),
- "source_demo_0_records_out_total": int64(5),
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
"op_window_0_exceptions_total": int64(0),
"op_window_0_process_latency_ms": int64(0),
@@ -827,136 +1934,59 @@ func TestWindow(t *testing.T) {
"op_aggregate_0_records_in_total": int64(5),
"op_aggregate_0_records_out_total": int64(5),
- "op_order_0_exceptions_total": int64(0),
- "op_order_0_process_latency_ms": int64(0),
- "op_order_0_records_in_total": int64(5),
- "op_order_0_records_out_total": int64(5),
+ "op_having_0_exceptions_total": int64(3),
+ "op_having_0_process_latency_ms": int64(0),
+ "op_having_0_records_in_total": int64(5),
+ "op_having_0_records_out_total": int64(2),
},
}, {
name: `rule5`,
- sql: `SELECT temp FROM sessionDemo GROUP BY SessionWindow(ss, 2, 1) `,
- size: 11,
+ sql: `SELECT color, size FROM ldemo GROUP BY tumblingwindow(ss, 1) ORDER BY size`,
+ size: 5,
r: [][]map[string]interface{}{
{{
- "temp": 25.5,
- }, {
- "temp": 27.5,
+ "error": "run Order By error: incompatible types for comparison: int and string",
}}, {{
- "temp": 28.1,
- }, {
- "temp": 27.4,
- }, {
- "temp": 25.5,
+ "size": float64(3),
}}, {{
- "temp": 26.2,
- }, {
- "temp": 26.8,
- }, {
- "temp": 28.9,
- }, {
- "temp": 29.1,
- }, {
- "temp": 32.2,
+ "color": float64(49),
+ "size": float64(2),
}},
},
m: map[string]interface{}{
- "op_preprocessor_sessionDemo_0_exceptions_total": int64(0),
- "op_preprocessor_sessionDemo_0_process_latency_ms": int64(0),
- "op_preprocessor_sessionDemo_0_records_in_total": int64(11),
- "op_preprocessor_sessionDemo_0_records_out_total": int64(11),
+ "op_preprocessor_ldemo_0_exceptions_total": int64(0),
+ "op_preprocessor_ldemo_0_process_latency_ms": int64(0),
+ "op_preprocessor_ldemo_0_records_in_total": int64(5),
+ "op_preprocessor_ldemo_0_records_out_total": int64(5),
- "op_project_0_exceptions_total": int64(0),
+ "op_project_0_exceptions_total": int64(1),
"op_project_0_process_latency_ms": int64(0),
"op_project_0_records_in_total": int64(3),
- "op_project_0_records_out_total": int64(3),
+ "op_project_0_records_out_total": int64(2),
"sink_mockSink_0_exceptions_total": int64(0),
"sink_mockSink_0_records_in_total": int64(3),
"sink_mockSink_0_records_out_total": int64(3),
- "source_sessionDemo_0_exceptions_total": int64(0),
- "source_sessionDemo_0_records_in_total": int64(11),
- "source_sessionDemo_0_records_out_total": int64(11),
+ "source_ldemo_0_exceptions_total": int64(0),
+ "source_ldemo_0_records_in_total": int64(5),
+ "source_ldemo_0_records_out_total": int64(5),
"op_window_0_exceptions_total": int64(0),
"op_window_0_process_latency_ms": int64(0),
- "op_window_0_records_in_total": int64(11),
+ "op_window_0_records_in_total": int64(5),
"op_window_0_records_out_total": int64(3),
- },
- }, {
- name: `rule6`,
- sql: `SELECT max(temp) as m, count(color) as c FROM demo INNER JOIN demo1 ON demo.ts = demo1.ts GROUP BY SlidingWindow(ss, 1)`,
- size: 5,
- r: [][]map[string]interface{}{
- {{
- "m": 25.5,
- "c": float64(1),
- }}, {{
- "m": 25.5,
- "c": float64(1),
- }}, {{
- "m": 25.5,
- "c": float64(1),
- }}, {{
- "m": 28.1,
- "c": float64(1),
- }}, {{
- "m": 28.1,
- "c": float64(1),
- }}, {{
- "m": 28.1,
- "c": float64(2),
- }}, {{
- "m": 27.4,
- "c": float64(1),
- }}, {{
- "m": 27.4,
- "c": float64(2),
- }},
- },
- m: map[string]interface{}{
- "op_preprocessor_demo_0_exceptions_total": int64(0),
- "op_preprocessor_demo_0_process_latency_ms": int64(0),
- "op_preprocessor_demo_0_records_in_total": int64(5),
- "op_preprocessor_demo_0_records_out_total": int64(5),
-
- "op_preprocessor_demo1_0_exceptions_total": int64(0),
- "op_preprocessor_demo1_0_process_latency_ms": int64(0),
- "op_preprocessor_demo1_0_records_in_total": int64(5),
- "op_preprocessor_demo1_0_records_out_total": int64(5),
-
- "op_project_0_exceptions_total": int64(0),
- "op_project_0_process_latency_ms": int64(0),
- "op_project_0_records_in_total": int64(8),
- "op_project_0_records_out_total": int64(8),
-
- "sink_mockSink_0_exceptions_total": int64(0),
- "sink_mockSink_0_records_in_total": int64(8),
- "sink_mockSink_0_records_out_total": int64(8),
-
- "source_demo_0_exceptions_total": int64(0),
- "source_demo_0_records_in_total": int64(5),
- "source_demo_0_records_out_total": int64(5),
-
- "source_demo1_0_exceptions_total": int64(0),
- "source_demo1_0_records_in_total": int64(5),
- "source_demo1_0_records_out_total": int64(5),
-
- "op_window_0_exceptions_total": int64(0),
- "op_window_0_process_latency_ms": int64(0),
- "op_window_0_records_in_total": int64(10),
- "op_window_0_records_out_total": int64(10),
- "op_join_0_exceptions_total": int64(0),
- "op_join_0_process_latency_ms": int64(0),
- "op_join_0_records_in_total": int64(10),
- "op_join_0_records_out_total": int64(8),
+ "op_order_0_exceptions_total": int64(1),
+ "op_order_0_process_latency_ms": int64(0),
+ "op_order_0_records_in_total": int64(3),
+ "op_order_0_records_out_total": int64(2),
},
},
}
fmt.Printf("The test bucket size is %d.\n\n", len(tests))
- createStreams(t)
- defer dropStreams(t)
+ createSchemalessStreams(t)
+ defer dropSchemalessStreams(t)
for i, tt := range tests {
test.ResetClock(1541152486000)
p := NewRuleProcessor(DbDir)
@@ -975,7 +2005,7 @@ func TestWindow(t *testing.T) {
for _, stream := range streams {
next := make(chan int)
syncs = append(syncs, next)
- source := getMockSource(stream, next, tt.size)
+ source := getMockSourceL(stream, next, tt.size)
sources = append(sources, source)
}
}
@@ -1068,6 +2098,15 @@ func createEventStreams(t *testing.T) {
if err != nil {
t.Log(err)
}
+ demoErr := `CREATE STREAM demoErr (
+ color STRING,
+ size BIGINT,
+ ts BIGINT
+ ) WITH (DATASOURCE="demoErr", FORMAT="json", KEY="ts", TIMESTAMP="ts");`
+ _, err = p.ExecStmt(demoErr)
+ if err != nil {
+ t.Log(err)
+ }
}
func dropEventStreams(t *testing.T) {
@@ -1087,6 +2126,11 @@ func dropEventStreams(t *testing.T) {
if err != nil {
t.Log(err)
}
+ demoErr := `DROP STREAM demoErr`
+ _, err = p.ExecStmt(demoErr)
+ if err != nil {
+ t.Log(err)
+ }
}
func getEventMockSource(name string, done <-chan int, size int) *nodes.SourceNode {
@@ -1317,6 +2361,63 @@ func getEventMockSource(name string, done <-chan int, size int) *nodes.SourceNod
Timestamp: 1541152499202,
},
}
+ case "demoErr":
+ data = []*xsql.Tuple{
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "red",
+ "size": 3,
+ "ts": 1541152486013,
+ },
+ Timestamp: 1541152486013,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": 2,
+ "size": "blue",
+ "ts": 1541152487632,
+ },
+ Timestamp: 1541152487632,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "red",
+ "size": 1,
+ "ts": 1541152489252,
+ },
+ Timestamp: 1541152489252,
+ },
+ { //dropped item
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "blue",
+ "size": 6,
+ "ts": 1541152486822,
+ },
+ Timestamp: 1541152486822,
+ },
+ {
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "yellow",
+ "size": 4,
+ "ts": 1541152488442,
+ },
+ Timestamp: 1541152488442,
+ },
+ { //To lift the watermark and issue all windows
+ Emitter: name,
+ Message: map[string]interface{}{
+ "color": "yellow",
+ "size": 4,
+ "ts": 1541152492342,
+ },
+ Timestamp: 1541152488442,
+ },
+ }
}
return nodes.NewSourceNodeWithSource(name, test.NewMockSource(data[:size], done, true), map[string]string{
"DATASOURCE": name,
@@ -1679,6 +2780,66 @@ func TestEventWindow(t *testing.T) {
"op_join_0_records_in_total": int64(5),
"op_join_0_records_out_total": int64(5),
},
+ }, {
+ name: `rule7`,
+ sql: `SELECT * FROM demoErr GROUP BY HOPPINGWINDOW(ss, 2, 1)`,
+ size: 6,
+ r: [][]map[string]interface{}{
+ {{
+ "error": "error in preprocessor: invalid data type for color, expect string but found int(2)",
+ }},
+ {{
+ "color": "red",
+ "size": float64(3),
+ "ts": float64(1541152486013),
+ }},
+ {{
+ "color": "red",
+ "size": float64(3),
+ "ts": float64(1541152486013),
+ }},
+ {{
+ "color": "yellow",
+ "size": float64(4),
+ "ts": float64(1541152488442),
+ }}, {{
+ "color": "yellow",
+ "size": float64(4),
+ "ts": float64(1541152488442),
+ }, {
+ "color": "red",
+ "size": float64(1),
+ "ts": float64(1541152489252),
+ }}, {{
+ "color": "red",
+ "size": float64(1),
+ "ts": float64(1541152489252),
+ }},
+ },
+ m: map[string]interface{}{
+ "op_preprocessor_demoErr_0_exceptions_total": int64(1),
+ "op_preprocessor_demoErr_0_process_latency_ms": int64(0),
+ "op_preprocessor_demoErr_0_records_in_total": int64(6),
+ "op_preprocessor_demoErr_0_records_out_total": int64(5),
+
+ "op_project_0_exceptions_total": int64(1),
+ "op_project_0_process_latency_ms": int64(0),
+ "op_project_0_records_in_total": int64(6),
+ "op_project_0_records_out_total": int64(5),
+
+ "sink_mockSink_0_exceptions_total": int64(0),
+ "sink_mockSink_0_records_in_total": int64(6),
+ "sink_mockSink_0_records_out_total": int64(6),
+
+ "source_demoErr_0_exceptions_total": int64(0),
+ "source_demoErr_0_records_in_total": int64(6),
+ "source_demoErr_0_records_out_total": int64(6),
+
+ "op_window_0_exceptions_total": int64(1),
+ "op_window_0_process_latency_ms": int64(0),
+ "op_window_0_records_in_total": int64(6),
+ "op_window_0_records_out_total": int64(5),
+ },
},
}
fmt.Printf("The test bucket size is %d.\n\n", len(tests))
@@ -1787,9 +2948,9 @@ func getMetric(tp *xstream.TopologyNew, name string) int {
func compareMetrics(tp *xstream.TopologyNew, m map[string]interface{}, sql string) (err error) {
keys, values := tp.GetMetrics()
- //for i, k := range keys{
- // log.Printf("%s:%v", k, values[i])
- //}
+ for i, k := range keys {
+ log.Printf("%s:%v", k, values[i])
+ }
for k, v := range m {
var (
index int
diff --git a/xsql/valuer_eval_test.go b/xsql/valuer_eval_test.go
new file mode 100644
index 0000000000..ee8e9ba7f2
--- /dev/null
+++ b/xsql/valuer_eval_test.go
@@ -0,0 +1,241 @@
+package xsql
+
+import (
+ "errors"
+ "fmt"
+ "github.com/emqx/kuiper/common"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestComparison(t *testing.T) {
+ testTime, _ := common.InterfaceToTime(1541152488442, "")
+ data := []struct {
+ m Message
+ r []interface{}
+ }{
+ {
+ m: map[string]interface{}{
+ "a": float64(32),
+ "b": float64(72),
+ },
+ r: []interface{}{
+ false, true, errors.New("invalid operation float64(32) = string(string literal)"),
+ false, true, false, true,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": int64(32),
+ "b": int64(72),
+ },
+ r: []interface{}{
+ false, true, errors.New("invalid operation int64(32) = string(string literal)"),
+ false, true, false, true,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": "32",
+ "b": "72",
+ },
+ r: []interface{}{
+ errors.New("invalid operation string(32) > int64(72)"), errors.New("invalid operation string(32) <= int64(32)"), false,
+ false, true, false, true,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": []interface{}{32, 72},
+ "b": []interface{}{32, 72},
+ },
+ r: []interface{}{
+ errors.New("> is an invalid operation for []interface {}"), errors.New("<= is an invalid operation for []interface {}"), errors.New("= is an invalid operation for []interface {}"),
+ errors.New(">= is an invalid operation for []interface {}"), errors.New("< is an invalid operation for []interface {}"), errors.New("= is an invalid operation for []interface {}"), errors.New("!= is an invalid operation for []interface {}"),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": map[string]interface{}{"c": 5},
+ "b": map[string]interface{}{"d": 5},
+ },
+ r: []interface{}{
+ errors.New("> is an invalid operation for map[string]interface {}"), errors.New("<= is an invalid operation for map[string]interface {}"), errors.New("= is an invalid operation for map[string]interface {}"),
+ errors.New(">= is an invalid operation for map[string]interface {}"), errors.New("< is an invalid operation for map[string]interface {}"), errors.New("= is an invalid operation for map[string]interface {}"), errors.New("!= is an invalid operation for map[string]interface {}"),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": float64(55),
+ "b": int64(55),
+ },
+ r: []interface{}{
+ false, false, errors.New("invalid operation float64(55) = string(string literal)"),
+ true, false, true, false,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": testTime,
+ "b": int64(1541152388442),
+ },
+ r: []interface{}{
+ true, false, errors.New("invalid operation time.Time(2018-11-02 09:54:48.442 +0000 UTC) = string(string literal)"),
+ true, false, false, true,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": testTime,
+ "b": "2020-02-26T02:37:21.822Z",
+ },
+ r: []interface{}{
+ true, false, errors.New("invalid operation time.Time(2018-11-02 09:54:48.442 +0000 UTC) = string(string literal)"),
+ false, true, false, true,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": int64(1541152388442),
+ "b": testTime,
+ },
+ r: []interface{}{
+ true, false, errors.New("invalid operation int64(1541152388442) = string(string literal)"),
+ errors.New("invalid operation int64(1541152388442) >= time.Time(2018-11-02 09:54:48.442 +0000 UTC)"), errors.New("invalid operation int64(1541152388442) < time.Time(2018-11-02 09:54:48.442 +0000 UTC)"), errors.New("invalid operation int64(1541152388442) = time.Time(2018-11-02 09:54:48.442 +0000 UTC)"), errors.New("invalid operation int64(1541152388442) != time.Time(2018-11-02 09:54:48.442 +0000 UTC)"),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": "2020-02-26T02:37:21.822Z",
+ "b": testTime,
+ },
+ r: []interface{}{
+ errors.New("invalid operation string(2020-02-26T02:37:21.822Z) > int64(72)"), errors.New("invalid operation string(2020-02-26T02:37:21.822Z) <= int64(32)"), false,
+ errors.New("invalid operation string(2020-02-26T02:37:21.822Z) >= time.Time(2018-11-02 09:54:48.442 +0000 UTC)"), errors.New("invalid operation string(2020-02-26T02:37:21.822Z) < time.Time(2018-11-02 09:54:48.442 +0000 UTC)"), errors.New("invalid operation string(2020-02-26T02:37:21.822Z) = time.Time(2018-11-02 09:54:48.442 +0000 UTC)"), errors.New("invalid operation string(2020-02-26T02:37:21.822Z) != time.Time(2018-11-02 09:54:48.442 +0000 UTC)"),
+ },
+ }, {
+ m: map[string]interface{}{
+ "c": "nothing",
+ },
+ r: []interface{}{
+ false, false, false,
+ true, false, true, false,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": 12,
+ "c": "nothing",
+ },
+ r: []interface{}{
+ false, true, errors.New("invalid operation int64(12) = string(string literal)"),
+ false, false, false, true,
+ },
+ },
+ }
+ sqls := []string{
+ "select * from src where a > 72",
+ "select * from src where a <= 32",
+ "select * from src where a = \"string literal\"",
+ "select * from src where a >= b",
+ "select * from src where a < b",
+ "select * from src where a = b",
+ "select * from src where a != b",
+ }
+ var conditions []Expr
+ for _, sql := range sqls {
+ stmt, _ := NewParser(strings.NewReader(sql)).Parse()
+ conditions = append(conditions, stmt.Condition)
+ }
+
+ fmt.Printf("The test bucket size is %d.\n\n", len(data)*len(sqls))
+ for i, tt := range data {
+ for j, c := range conditions {
+ tuple := &Tuple{Emitter: "src", Message: tt.m, Timestamp: common.GetNowInMilli(), Metadata: nil}
+ ve := &ValuerEval{Valuer: MultiValuer(tuple, &FunctionValuer{})}
+ result := ve.Eval(c)
+ if !reflect.DeepEqual(tt.r[j], result) {
+ t.Errorf("%d-%d. \nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, j, tt.r[j], result)
+ }
+ }
+ }
+}
+
+func TestCalculation(t *testing.T) {
+ data := []struct {
+ m Message
+ r []interface{}
+ }{
+ {
+ m: map[string]interface{}{
+ "a": float64(32),
+ "b": float64(72),
+ },
+ r: []interface{}{
+ float64(104), float64(96), float64(0.4444444444444444), float64(32),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": int64(32),
+ "b": int64(72),
+ },
+ r: []interface{}{
+ int64(104), int64(96), int64(0), int64(32),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": "32",
+ "b": "72",
+ },
+ r: []interface{}{
+ errors.New("invalid operation string(32) + string(72)"), errors.New("invalid operation string(32) * int64(3)"),
+ errors.New("invalid operation string(32) / string(72)"), errors.New("invalid operation string(32) % string(72)"),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": float64(55),
+ "b": int64(55),
+ },
+ r: []interface{}{
+ float64(110), float64(165), float64(1), float64(0),
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": int64(55),
+ "b": float64(0),
+ },
+ r: []interface{}{
+ float64(55), int64(165), errors.New("divided by zero"), errors.New("divided by zero"),
+ },
+ }, {
+ m: map[string]interface{}{
+ "c": "nothing",
+ },
+ r: []interface{}{
+ nil, nil, nil, nil,
+ },
+ }, {
+ m: map[string]interface{}{
+ "a": 12,
+ "c": "nothing",
+ },
+ r: []interface{}{
+ nil, int64(36), nil, nil,
+ },
+ },
+ }
+ sqls := []string{
+ "select a + b as t from src",
+ "select a * 3 as t from src",
+ "select a / b as t from src",
+ "select a % b as t from src",
+ }
+ var projects []Expr
+ for _, sql := range sqls {
+ stmt, _ := NewParser(strings.NewReader(sql)).Parse()
+ projects = append(projects, stmt.Fields[0].Expr)
+ }
+
+ fmt.Printf("The test bucket size is %d.\n\n", len(data)*len(sqls))
+ for i, tt := range data {
+ for j, c := range projects {
+ tuple := &Tuple{Emitter: "src", Message: tt.m, Timestamp: common.GetNowInMilli(), Metadata: nil}
+ ve := &ValuerEval{Valuer: MultiValuer(tuple, &FunctionValuer{})}
+ result := ve.Eval(c)
+ if !reflect.DeepEqual(tt.r[j], result) {
+ t.Errorf("%d-%d. \nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, j, tt.r[j], result)
+ }
+ }
+ }
+}
diff --git a/xsql/xsql_stream_test.go b/xsql/xsql_stream_test.go
index 797417ce94..ed67a69e41 100644
--- a/xsql/xsql_stream_test.go
+++ b/xsql/xsql_stream_test.go
@@ -153,8 +153,28 @@ func TestParser_ParseCreateStream(t *testing.T) {
s: `CREATE STREAM demo (
) WITH (DATASOURCE="users", FORMAT="JSON", KEY="USERID");`,
- stmt: nil,
- err: `found ")", expect stream field name.`,
+ stmt: &StreamStmt{
+ Name: StreamName("demo"),
+ StreamFields: nil,
+ Options: map[string]string{
+ "DATASOURCE": "users",
+ "FORMAT": "JSON",
+ "KEY": "USERID",
+ },
+ },
+ },
+
+ {
+ s: `CREATE STREAM demo() WITH (DATASOURCE="users", FORMAT="JSON", KEY="USERID");`,
+ stmt: &StreamStmt{
+ Name: StreamName("demo"),
+ StreamFields: nil,
+ Options: map[string]string{
+ "DATASOURCE": "users",
+ "FORMAT": "JSON",
+ "KEY": "USERID",
+ },
+ },
},
{
diff --git a/xstream/extensions/edgex_source.go b/xstream/extensions/edgex_source.go
new file mode 100644
index 0000000000..4392d58859
--- /dev/null
+++ b/xstream/extensions/edgex_source.go
@@ -0,0 +1,240 @@
+// +build edgex
+
+package extensions
+
+import (
+ "context"
+ "fmt"
+ "github.com/edgexfoundry/go-mod-core-contracts/clients"
+ "github.com/edgexfoundry/go-mod-core-contracts/clients/coredata"
+ "github.com/edgexfoundry/go-mod-core-contracts/clients/urlclient/local"
+ "github.com/edgexfoundry/go-mod-core-contracts/models"
+ "github.com/edgexfoundry/go-mod-messaging/messaging"
+ "github.com/edgexfoundry/go-mod-messaging/pkg/types"
+ "github.com/emqx/kuiper/common"
+ "github.com/emqx/kuiper/xstream/api"
+ "strconv"
+ "strings"
+)
+
+type EdgexSource struct {
+ client messaging.MessageClient
+ subscribed bool
+ vdc coredata.ValueDescriptorClient
+ topic string
+ valueDescs map[string]string
+}
+
+func (es *EdgexSource) Configure(device string, props map[string]interface{}) error {
+ var protocol = "tcp";
+ if p, ok := props["protocol"]; ok {
+ protocol = p.(string)
+ }
+ var server = "localhost";
+ if s, ok := props["server"]; ok {
+ server = s.(string)
+ }
+ var port = 5570
+ if p, ok := props["port"]; ok {
+ port = p.(int)
+ }
+
+ if tpc, ok := props["topic"]; ok {
+ es.topic = tpc.(string)
+ }
+
+ var mbusType = messaging.ZeroMQ
+ if t, ok := props["type"]; ok {
+ mbusType = t.(string)
+ }
+
+ if messaging.ZeroMQ != strings.ToLower(mbusType) {
+ mbusType = messaging.MQTT
+ }
+
+ if serviceServer, ok := props["serviceServer"]; ok {
+ svr := serviceServer.(string) + clients.ApiValueDescriptorRoute
+ common.Log.Infof("Connect to value descriptor service at: %s \n", svr)
+ es.vdc = coredata.NewValueDescriptorClient(local.New(svr))
+ es.valueDescs = make(map[string]string)
+ } else {
+ return fmt.Errorf("The service server cannot be empty.")
+ }
+
+ mbconf := types.MessageBusConfig{SubscribeHost: types.HostInfo{Protocol: protocol, Host: server, Port: port}, Type: messaging.ZeroMQ}
+ common.Log.Infof("Use configuration for edgex messagebus %v\n", mbconf)
+
+ var optional = make(map[string]string)
+ if ops, ok := props["optional"]; ok {
+ if ops1, ok1 := ops.(map[interface{}]interface{}); ok1 {
+ for k, v := range ops1 {
+ k1 := k.(string)
+ v1 := v.(string)
+ optional[k1] = v1
+ }
+ }
+ mbconf.Optional = optional
+ }
+
+ if client, err := messaging.NewMessageClient(mbconf); err != nil {
+ return err
+ } else {
+ es.client = client
+ return nil
+ }
+
+}
+
+func (es *EdgexSource) Open(ctx api.StreamContext, consumer chan<- api.SourceTuple, errCh chan<- error) {
+ log := ctx.GetLogger()
+ if err := es.client.Connect(); err != nil {
+ errCh <- fmt.Errorf("Failed to connect to edgex message bus: " + err.Error())
+ }
+ log.Infof("The connection to edgex messagebus is established successfully.")
+ messages := make(chan types.MessageEnvelope)
+ topics := []types.TopicChannel{{Topic: es.topic, Messages: messages}}
+ err := make(chan error)
+ if e := es.client.Subscribe(topics, err); e != nil {
+ log.Errorf("Failed to subscribe to edgex messagebus topic %s.\n", e)
+ errCh <- e
+ } else {
+ es.subscribed = true
+ log.Infof("Successfully subscribed to edgex messagebus topic %s.", es.topic)
+ for {
+ select {
+ case e1 := <-err:
+ errCh <- e1
+ return
+ case env := <-messages:
+ if strings.ToLower(env.ContentType) == "application/json" {
+ e := models.Event{}
+ if err := e.UnmarshalJSON(env.Payload); err != nil {
+ log.Warnf("payload %s unmarshal fail: %v", env.Payload, err)
+ } else {
+ result := make(map[string]interface{})
+ meta := make(map[string]interface{})
+
+ log.Debugf("receive message from device %s", e.Device)
+ for _, r := range e.Readings {
+ if r.Name != "" {
+ if v, err := es.getValue(r, log); err != nil {
+ log.Warnf("fail to get value for %s: %v", r.Name, err)
+ } else {
+ result[strings.ToLower(r.Name)] = v
+ }
+ r_meta := map[string]interface{}{}
+ r_meta["id"] = r.Id
+ r_meta["created"] = r.Created
+ r_meta["modified"] = r.Modified
+ r_meta["origin"] = r.Origin
+ r_meta["pushed"] = r.Pushed
+ r_meta["device"] = r.Device
+ meta[strings.ToLower(r.Name)] = r_meta
+ } else {
+ log.Warnf("The name of readings should not be empty!")
+ }
+ }
+ if len(result) > 0 {
+ meta["id"] = e.ID
+ meta["pushed"] = e.Pushed
+ meta["device"] = e.Device
+ meta["created"] = e.Created
+ meta["modified"] = e.Modified
+ meta["origin"] = e.Origin
+ meta["correlationid"] = env.CorrelationID
+
+ select {
+ case consumer <- api.NewDefaultSourceTuple(result, meta):
+ log.Debugf("send data to device node")
+ case <-ctx.Done():
+ return
+ }
+ } else {
+ log.Warnf("got an empty result, ignored")
+ }
+ }
+ } else {
+ log.Errorf("Unsupported data type %s.", env.ContentType)
+ }
+ }
+ }
+ }
+}
+
+func (es *EdgexSource) getValue(r models.Reading, logger api.Logger) (interface{}, error) {
+ t, err := es.getType(r.Name, logger)
+ if err != nil {
+ return nil, err
+ }
+ t = strings.ToUpper(t)
+ logger.Debugf("name %s with type %s", r.Name, t)
+ v := r.Value
+ switch t {
+ case "BOOL":
+ if r, err := strconv.ParseBool(v); err != nil {
+ return nil, err
+ } else {
+ return r, nil
+ }
+ case "INT8", "INT16", "INT32", "INT64", "UINT8", "UINT16", "UINT32", "UINT64":
+ if r, err := strconv.Atoi(v); err != nil {
+ return nil, err
+ } else {
+ return r, nil
+ }
+ case "FLOAT32", "FLOAT64":
+ if r, err := strconv.ParseFloat(v, 64); err != nil {
+ return nil, err
+ } else {
+ return r, nil
+ }
+ case "STRING":
+ return v, nil
+ default:
+ logger.Warnf("unknown type %s return the string value", t)
+ return v, nil
+ }
+}
+
+func (es *EdgexSource) fetchAllDataDescriptors() error {
+ if vdArr, err := es.vdc.ValueDescriptors(context.Background()); err != nil {
+ return err
+ } else {
+ for _, vd := range vdArr {
+ es.valueDescs[vd.Name] = vd.Type
+ }
+ if len(vdArr) == 0 {
+ common.Log.Infof("Cannot find any value descriptors from value descriptor services.")
+ } else {
+ common.Log.Infof("Get %d of value descriptors from service.", len(vdArr))
+ for i, v := range vdArr {
+ common.Log.Debugf("%d: %s - %s ", i, v.Name, v.Type)
+ }
+ }
+ }
+ return nil
+}
+
+func (es *EdgexSource) getType(id string, logger api.Logger) (string, error) {
+ if t, ok := es.valueDescs[id]; ok {
+ return t, nil
+ } else {
+ if e := es.fetchAllDataDescriptors(); e != nil {
+ return "", e
+ }
+ if t, ok := es.valueDescs[id]; ok {
+ return t, nil
+ } else {
+ return "", fmt.Errorf("cannot find type info for %s in value descriptor.", id)
+ }
+ }
+}
+
+func (es *EdgexSource) Close(ctx api.StreamContext) error {
+ if es.subscribed {
+ if e := es.client.Disconnect(); e != nil {
+ return e
+ }
+ }
+ return nil
+}
\ No newline at end of file
diff --git a/xstream/extensions/edgex_source_test.go b/xstream/extensions/edgex_source_test.go
new file mode 100644
index 0000000000..caeab9f837
--- /dev/null
+++ b/xstream/extensions/edgex_source_test.go
@@ -0,0 +1,158 @@
+// +build edgex
+
+package extensions
+
+import (
+ "fmt"
+ "github.com/edgexfoundry/go-mod-core-contracts/models"
+ "github.com/emqx/kuiper/common"
+ "testing"
+)
+
+var es = EdgexSource{valueDescs: map[string]string{
+ "b1" : "bool",
+ "i1" : "int8",
+ "i2" : "INT16",
+ "i3" : "INT32",
+ "i4" : "INT64",
+ "i5" : "UINT8",
+ "i6" : "UINT16",
+ "i7" : "UINT32",
+ "i8" : "UINT64",
+ "f1" : "FLOAT32",
+ "f2" : "FLOAT64",
+ "s1" : "String",
+ },
+}
+
+func TestGetValue_Int(t *testing.T) {
+ var testEvent = models.Event{Device: "test"}
+ for i := 1; i < 9; i++{
+ r1 := models.Reading{Name: fmt.Sprintf("i%d", i), Value: "1"}
+ testEvent.Readings = append(testEvent.Readings, r1)
+ }
+
+ for _, r := range testEvent.Readings {
+ if v, e := es.getValue(r, common.Log); e != nil {
+ t.Errorf("%s", e)
+ } else {
+ expectOne(t, v)
+ }
+ }
+}
+
+func expectOne(t *testing.T, expected interface{}) {
+ if v1, ok := expected.(int); ok {
+ if v1 != 1 {
+ t.Errorf("expected 1, but it's %d.", v1)
+ }
+ } else {
+ t.Errorf("expected int type, but it's %t.", expected)
+ }
+}
+
+func TestGetValue_Float(t *testing.T) {
+ var testEvent = models.Event{Device: "test"}
+ for i := 1; i < 3; i++{
+ r1 := models.Reading{Name: fmt.Sprintf("f%d", i), Value: "3.14"}
+ testEvent.Readings = append(testEvent.Readings, r1)
+ }
+
+ for _, r := range testEvent.Readings {
+ if v, e := es.getValue(r, common.Log); e != nil {
+ t.Errorf("%s", e)
+ } else {
+ expectPi(t, v)
+ }
+ }
+}
+
+func expectPi(t *testing.T, expected interface{}) {
+ if v1, ok := expected.(float64); ok {
+ if v1 != 3.14 {
+ t.Errorf("expected 3.14, but it's %f.", v1)
+ }
+ } else {
+ t.Errorf("expected float type, but it's %t.", expected)
+ }
+}
+
+
+func TestGetValue_Bool(t *testing.T) {
+ ///////////True
+ trues := []string{"1", "t", "T", "true", "TRUE", "True"}
+ for _, v := range trues {
+ r1 := models.Reading{Name: "b1", Value: v}
+ if v, e := es.getValue(r1, common.Log); e != nil {
+ t.Errorf("%s", e)
+ } else {
+ expectTrue(t, v)
+ }
+ }
+
+ r1 := models.Reading{Name: "b1", Value: "TRue"}
+ if _, e := es.getValue(r1, common.Log); e == nil {
+ t.Errorf("%s", e)
+ }
+
+ ///////////False
+ falses := []string{"0", "f", "F", "false", "FALSE", "False"}
+ for _, v := range falses {
+ r1 := models.Reading{Name: "b1", Value: v}
+ if v, e := es.getValue(r1, common.Log); e != nil {
+ t.Errorf("%s", e)
+ } else {
+ expectFalse(t, v)
+ }
+ }
+
+ r1 = models.Reading{Name: "b1", Value: "FAlse"}
+ if _, e := es.getValue(r1, common.Log); e == nil {
+ t.Errorf("%s", e)
+ }
+}
+
+func expectTrue(t *testing.T, expected interface{}) {
+ if v1, ok := expected.(bool); ok {
+ if !v1 {
+ t.Errorf("expected true, but it's false.")
+ }
+ } else {
+ t.Errorf("expected boolean type, but it's %t.", expected)
+ }
+}
+
+func expectFalse(t *testing.T, expected interface{}) {
+ if v1, ok := expected.(bool); ok {
+ if v1 {
+ t.Errorf("expected false, but it's true.")
+ }
+ } else {
+ t.Errorf("expected boolean type, but it's %t.", expected)
+ }
+}
+
+func TestWrongType(t *testing.T) {
+ es1 := EdgexSource{valueDescs: map[string]string{
+ "f": "FLOAT", //A not exsited type
+ },
+ }
+ r1 := models.Reading{Name: "f", Value: "100"}
+ if v, _ := es1.getValue(r1, common.Log); v != "100" {
+ t.Errorf("Expected 100, but it's %s!", v)
+ }
+}
+
+func TestWrongValue(t *testing.T) {
+ var testEvent = models.Event{Device: "test"}
+ r1 := models.Reading{Name: "b1", Value: "100"} // "100" cannot be converted to a boolean value
+ r2 := models.Reading{Name: "i1", Value: "int"} // 'int' string cannot be converted to an int value
+ r3 := models.Reading{Name: "f1", Value: "float"} // 'float' string cannot be converted to a float value
+ testEvent.Readings = append(testEvent.Readings, r1, r2, r3)
+
+ for _, v := range testEvent.Readings {
+ if _, e := es.getValue(v, common.Log); e == nil {
+ t.Errorf("Expected an error!")
+ }
+ }
+}
diff --git a/xstream/extensions/mqtt_source.go b/xstream/extensions/mqtt_source.go
index 858f8fdf8f..b0ba6c47ce 100644
--- a/xstream/extensions/mqtt_source.go
+++ b/xstream/extensions/mqtt_source.go
@@ -154,8 +154,8 @@ func subscribe(topic string, client MQTT.Client, ctx api.StreamContext, consumer
result = xsql.LowercaseKeyMap(result)
meta := make(map[string]interface{})
- meta[xsql.INTERNAL_MQTT_TOPIC_KEY] = msg.Topic()
- meta[xsql.INTERNAL_MQTT_MSG_ID_KEY] = strconv.Itoa(int(msg.MessageID()))
+ meta["topic"] = msg.Topic()
+ meta["messageid"] = strconv.Itoa(int(msg.MessageID()))
select {
case consumer <- api.NewDefaultSourceTuple(result, meta):
log.Debugf("send data to source node")
diff --git a/xstream/nodes/sink_node.go b/xstream/nodes/sink_node.go
index cc629ee409..24c9bc71d0 100644
--- a/xstream/nodes/sink_node.go
+++ b/xstream/nodes/sink_node.go
@@ -86,7 +86,7 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
retryInterval = t
}
}
- cacheLength := 10240
+ cacheLength := 1024
if c, ok := m.options["cacheLength"]; ok {
if t, err := common.ToInt(c); err != nil || t < 0 {
logger.Warnf("invalid type for cacheLength property, should be positive integer but found %t", c)
@@ -168,10 +168,19 @@ func doCollect(sink api.Sink, item *CacheTuple, stats StatManager, retryInterval
stats.IncTotalRecordsIn()
stats.ProcessTimeStart()
logger := ctx.GetLogger()
+ var outdata []byte
+ switch val := item.data.(type) {
+ case []byte:
+ outdata = val
+ case error:
+ outdata = []byte(fmt.Sprintf(`[{"error":"%s"}]`, val.Error()))
+ default:
+ outdata = []byte(fmt.Sprintf(`[{"error":"result is not a string but found %#v"}]`, val))
+ }
for {
- if err := sink.Collect(ctx, item.data); err != nil {
+ if err := sink.Collect(ctx, outdata); err != nil {
stats.IncTotalExceptions()
- logger.Warnf("sink node %s instance %d publish %s error: %v", ctx.GetOpId(), ctx.GetInstanceId(), item.data, err)
+ logger.Warnf("sink node %s instance %d publish %s error: %v", ctx.GetOpId(), ctx.GetInstanceId(), outdata, err)
if retryInterval > 0 {
time.Sleep(time.Duration(retryInterval) * time.Millisecond)
logger.Debugf("try again")
@@ -188,7 +197,7 @@ func doCollect(sink api.Sink, item *CacheTuple, stats StatManager, retryInterval
stats.ProcessTimeEnd()
}
-func getSink(name string, action map[string]interface{}) (api.Sink, error) {
+func doGetSink(name string, action map[string]interface{}) (api.Sink, error) {
var s api.Sink
switch name {
case "log":
diff --git a/xstream/nodes/source_node.go b/xstream/nodes/source_node.go
index 4df150a52e..1089d89a29 100644
--- a/xstream/nodes/source_node.go
+++ b/xstream/nodes/source_node.go
@@ -146,7 +146,7 @@ func (m *SourceNode) reset() {
m.statManagers = nil
}
-func getSource(t string) (api.Source, error) {
+func doGetSource(t string) (api.Source, error) {
var s api.Source
var ok bool
switch t {
diff --git a/xstream/nodes/with_edgex.go b/xstream/nodes/with_edgex.go
new file mode 100644
index 0000000000..c513457691
--- /dev/null
+++ b/xstream/nodes/with_edgex.go
@@ -0,0 +1,29 @@
+// +build edgex
+
+package nodes
+
+import (
+ "github.com/emqx/kuiper/xstream/api"
+ "github.com/emqx/kuiper/xstream/extensions"
+ "github.com/emqx/kuiper/xstream/sinks"
+)
+
+func getSource(t string) (api.Source, error) {
+ if t == "edgex" {
+ return &extensions.EdgexSource{}, nil
+ }
+ return doGetSource(t)
+}
+
+
+func getSink(name string, action map[string]interface{}) (api.Sink, error) {
+ if name == "edgex" {
+ s := &sinks.EdgexMsgBusSink{}
+ if err := s.Configure(action); err != nil {
+ return nil, err
+ } else {
+ return s, nil
+ }
+ }
+ return doGetSink(name, action)
+}
\ No newline at end of file
diff --git a/xstream/nodes/without_edgex.go b/xstream/nodes/without_edgex.go
new file mode 100644
index 0000000000..0465f3200e
--- /dev/null
+++ b/xstream/nodes/without_edgex.go
@@ -0,0 +1,13 @@
+// +build !edgex
+
+package nodes
+
+import "github.com/emqx/kuiper/xstream/api"
+
+func getSource(t string) (api.Source, error) {
+ return doGetSource(t)
+}
+
+func getSink(name string, action map[string]interface{}) (api.Sink, error) {
+ return doGetSink(name, action)
+}
\ No newline at end of file
diff --git a/xstream/operators/operations.go b/xstream/operators/operations.go
index d30c66a787..615f9ce1fe 100644
--- a/xstream/operators/operations.go
+++ b/xstream/operators/operations.go
@@ -137,9 +137,9 @@ func (o *UnaryOperator) doOp(ctx api.StreamContext, errCh chan<- error) {
switch val := result.(type) {
case nil:
continue
- case error: //TODO error handling
- logger.Infoln(val)
- logger.Infoln(val.Error())
+ case error:
+ logger.Errorf("Operation %s error: %s", ctx.GetOpId(), val)
+ nodes.Broadcast(o.outputs, val, ctx)
stats.IncTotalExceptions()
continue
default:
diff --git a/xstream/operators/watermark.go b/xstream/operators/watermark.go
index bbaf311874..8fd585dcb9 100644
--- a/xstream/operators/watermark.go
+++ b/xstream/operators/watermark.go
@@ -7,6 +7,7 @@ import (
"github.com/emqx/kuiper/common"
"github.com/emqx/kuiper/xsql"
"github.com/emqx/kuiper/xstream/api"
+ "github.com/emqx/kuiper/xstream/nodes"
"math"
"sort"
"time"
@@ -204,11 +205,12 @@ func (o *WindowOperator) execEventWindow(ctx api.StreamContext, errCh chan<- err
o.statManager.IncTotalExceptions()
break
}
- if d, ok := item.(xsql.Event); !ok {
- log.Errorf("Expect xsql.Event type")
+ switch d := item.(type) {
+ case error:
+ o.statManager.IncTotalRecordsIn()
+ nodes.Broadcast(o.outputs, d, ctx)
o.statManager.IncTotalExceptions()
- break
- } else {
+ case xsql.Event:
if d.IsWatermark() {
watermarkTs := d.GetTimestamp()
windowEndTs := nextWindowEndTs
@@ -238,6 +240,10 @@ func (o *WindowOperator) execEventWindow(ctx api.StreamContext, errCh chan<- err
}
}
o.statManager.ProcessTimeEnd()
+ default:
+ o.statManager.IncTotalRecordsIn()
+ nodes.Broadcast(o.outputs, fmt.Errorf("run Window error: expect xsql.Event type but got %[1]T(%[1]v)", d), ctx)
+ o.statManager.IncTotalExceptions()
}
// is cancelling
case <-ctx.Done():
diff --git a/xstream/operators/window_op.go b/xstream/operators/window_op.go
index 830d9ad196..9a409ff818 100644
--- a/xstream/operators/window_op.go
+++ b/xstream/operators/window_op.go
@@ -141,11 +141,11 @@ func (o *WindowOperator) execProcessingWindow(ctx api.StreamContext, errCh chan<
o.statManager.IncTotalExceptions()
break
}
- if d, ok := item.(*xsql.Tuple); !ok {
- log.Errorf("Expect xsql.Tuple type")
+ switch d := item.(type) {
+ case error:
+ nodes.Broadcast(o.outputs, d, ctx)
o.statManager.IncTotalExceptions()
- break
- } else {
+ case *xsql.Tuple:
log.Debugf("Event window receive tuple %s", d.Message)
inputs = append(inputs, d)
switch o.window.Type {
@@ -162,9 +162,12 @@ func (o *WindowOperator) execProcessingWindow(ctx api.StreamContext, errCh chan<
timeout = timeoutTicker.C
}
}
+ o.statManager.ProcessTimeEnd()
+ o.statManager.SetBufferLength(int64(len(o.input)))
+ default:
+ nodes.Broadcast(o.outputs, fmt.Errorf("run Window error: expect xsql.Tuple type but got %[1]T(%[1]v)", d), ctx)
+ o.statManager.IncTotalExceptions()
}
- o.statManager.ProcessTimeEnd()
- o.statManager.SetBufferLength(int64(len(o.input)))
case now := <-c:
if len(inputs) > 0 {
o.statManager.ProcessTimeStart()
diff --git a/xstream/sinks/edgex_sink.go b/xstream/sinks/edgex_sink.go
new file mode 100644
index 0000000000..ad3eece3af
--- /dev/null
+++ b/xstream/sinks/edgex_sink.go
@@ -0,0 +1,142 @@
+// +build edgex
+
+package sinks
+
+import (
+ "fmt"
+ "github.com/edgexfoundry/go-mod-messaging/messaging"
+ "github.com/edgexfoundry/go-mod-messaging/pkg/types"
+ "github.com/emqx/kuiper/common"
+ "github.com/emqx/kuiper/xstream/api"
+)
+
+type EdgexMsgBusSink struct {
+ protocol string
+ host string
+ port int
+ ptype string
+
+ topic string
+ contentType string
+
+ optional *OptionalConf
+ client messaging.MessageClient
+}
+
+type OptionalConf struct {
+ clientid string
+ username string
+ password string
+}
+
+func (ems *EdgexMsgBusSink) Configure(ps map[string]interface{}) error {
+ ems.host = "*"
+ ems.protocol = "tcp"
+ ems.port = 5570
+ ems.contentType = "application/json"
+ ems.ptype = messaging.ZeroMQ
+
+ if host, ok := ps["host"]; ok {
+ ems.host = host.(string)
+ } else {
+ common.Log.Infof("Not find host conf, will use default value '*'.")
+ }
+
+ if pro, ok := ps["protocol"]; ok {
+ ems.protocol = pro.(string)
+ } else {
+ common.Log.Infof("Not find protocol conf, will use default value 'tcp'.")
+ }
+
+ if port, ok := ps["port"]; ok {
+ if pv, ok := port.(float64); ok {
+ ems.port = int(pv)
+ } else if pv, ok := port.(float32); ok {
+ ems.port = int(pv)
+ } else {
+ common.Log.Infof("Not valid port value, will use default value '5570'.")
+ }
+
+ } else {
+ common.Log.Infof("Not find port conf, will use default value '5570'.")
+ }
+
+ if topic, ok := ps["topic"]; ok {
+ ems.topic = topic.(string)
+ } else {
+ return fmt.Errorf("Topic must be specified.")
+ }
+
+ if contentType, ok := ps["contentType"]; ok {
+ ems.contentType = contentType.(string)
+ } else {
+ common.Log.Infof("Not find contentType conf, will use default value 'application/json'.")
+ }
+
+ if optIntf, ok := ps["optional"]; ok {
+ if opt, ok1 := optIntf.(map[string]interface{}); ok1 {
+ optional := &OptionalConf{}
+ ems.optional = optional
+ if cid, ok2 := opt["clientid"]; ok2 {
+ optional.clientid = cid.(string)
+ }
+ if uname, ok2 := opt["username"]; ok2 {
+ optional.username = uname.(string)
+ }
+ if password, ok2 := opt["password"]; ok2 {
+ optional.password = password.(string)
+ }
+ }
+ }
+ return nil
+}
+
+func (ems *EdgexMsgBusSink) Open(ctx api.StreamContext) error {
+ log := ctx.GetLogger()
+ conf := types.MessageBusConfig{
+ PublishHost: types.HostInfo{
+ Host: ems.host,
+ Port: ems.port,
+ Protocol: ems.protocol,
+ },
+ Type: ems.ptype,
+ }
+ log.Infof("Using configuration for EdgeX message bus sink: %+v", conf)
+ if msgClient, err := messaging.NewMessageClient(conf); err != nil {
+ return err
+ } else {
+ if ec := msgClient.Connect(); ec != nil {
+ return ec
+ } else {
+ ems.client = msgClient
+ }
+ }
+ return nil
+}
+
+func (ems *EdgexMsgBusSink) Collect(ctx api.StreamContext, item interface{}) error {
+ logger := ctx.GetLogger()
+ if payload, ok := item.([]byte); ok {
+ logger.Debugf("EdgeX message bus sink: %s\n", payload)
+ env := types.NewMessageEnvelope(payload, ctx)
+ env.ContentType = ems.contentType
+ if e := ems.client.Publish(env, ems.topic); e != nil {
+ logger.Errorf("Found error %s when publish to EdgeX message bus.\n", e)
+ return e
+ }
+ } else {
+ return fmt.Errorf("Unkown type %t, the message cannot be published.\n", item)
+ }
+ return nil
+}
+
+func (ems *EdgexMsgBusSink) Close(ctx api.StreamContext) error {
+ logger := ctx.GetLogger()
+ logger.Infof("Closing edgex sink")
+ if ems.client != nil {
+ if e := ems.client.Disconnect(); e != nil {
+ return e
+ }
+ }
+ return nil
+}
diff --git a/xstream/test/mock_source.go b/xstream/test/mock_source.go
index 650ddb28ba..db01f9b342 100644
--- a/xstream/test/mock_source.go
+++ b/xstream/test/mock_source.go
@@ -34,7 +34,7 @@ func (m *MockSource) Open(ctx api.StreamContext, consumer chan<- api.SourceTuple
} else {
mockClock.Add(1000 * time.Millisecond)
}
- consumer <- api.NewDefaultSourceTuple(d.Message, nil)
+ consumer <- api.NewDefaultSourceTuple(d.Message, xsql.Metadata{"topic": "mock"})
time.Sleep(1)
}
}