浏览代码

Merge pull request #204 from emqx/develop

Develop
jinfahua 5 年之前
父节点
当前提交
b2816f45bd
共有 73 个文件被更改,包括 2980 次插入296 次删除
  1. 1 1
      .github/workflows/fvt_tests.yaml
  2. 3 3
      .github/workflows/run_test_case.yaml
  3. 8 1
      Makefile
  4. 8 2
      common/data.go
  5. 0 45
      common/plugin_manager/manager.go
  6. 2 2
      deploy/chart/kuiper/Chart.yaml
  7. 6 0
      deploy/chart/kuiper/templates/StatefulSet.yaml
  8. 2 2
      deploy/chart/kuiper/values.yaml
  9. 3 1
      deploy/docker/Dockerfile
  10. 18 0
      deploy/docker/Dockerfile-dev
  11. 21 2
      deploy/docker/README.md
  12. 1 1
      deploy/docker/docker-entrypoint.sh
  13. 1 0
      docs/en_US/cli/overview.md
  14. 94 0
      docs/en_US/cli/plugins.md
  15. 4 4
      docs/en_US/cli/rules.md
  16. 69 48
      docs/en_US/edgex/edgex_rule_engine_tutorial.md
  17. 12 1
      docs/en_US/extension/overview.md
  18. 1 0
      docs/en_US/restapi/overview.md
  19. 80 0
      docs/en_US/restapi/plugins.md
  20. 1 0
      docs/en_US/rules/overview.md
  21. 103 5
      docs/en_US/rules/sinks/edgex.md
  22. 14 12
      docs/en_US/rules/sinks/mqtt.md
  23. 9 13
      docs/en_US/rules/sources/edgex.md
  24. 二进制
      docs/zh_CN/edgex/arch_dark.png
  25. 二进制
      docs/zh_CN/edgex/arch_light.png
  26. 二进制
      docs/zh_CN/edgex/bus_data.png
  27. 二进制
      docs/zh_CN/edgex/create_stream.png
  28. 87 0
      docs/zh_CN/edgex/edgex_meta.md
  29. 268 0
      docs/zh_CN/edgex/edgex_rule_engine_tutorial.md
  30. 二进制
      docs/zh_CN/edgex/sql.png
  31. 70 69
      docs/zh_CN/rules/overview.md
  32. 127 0
      docs/zh_CN/rules/sinks/edgex.md
  33. 108 0
      docs/zh_CN/rules/sources/edgex.md
  34. 2 2
      etc/sources/edgex.yaml
  35. 1 1
      fvt_scripts/edgex/pub.go
  36. 1 1
      fvt_scripts/edgex/valuedesc/vd_server.go
  37. 15 5
      fvt_scripts/edgex_sink_rule.jmx
  38. 2 2
      go.mod
  39. 417 0
      plugins/manager.go
  40. 235 0
      plugins/manager_test.go
  41. 47 0
      plugins/plugins.http
  42. 二进制
      plugins/testzips/functions/echo2.zip
  43. 二进制
      plugins/testzips/functions/zipMissSo.zip
  44. 二进制
      plugins/testzips/sinks/file2.zip
  45. 二进制
      plugins/testzips/sinks/zipWrongName.zip
  46. 二进制
      plugins/testzips/sources/random2.zip
  47. 二进制
      plugins/testzips/sources/random3.zip
  48. 二进制
      plugins/testzips/sources/zipMissConf.zip
  49. 11 2
      xsql/ast.go
  50. 2 2
      xsql/funcs_aggregate.go
  51. 2 2
      xsql/funcs_ast_validator.go
  52. 13 1
      xsql/funcs_misc.go
  53. 4 4
      xsql/functions.go
  54. 5 1
      xsql/parser.go
  55. 70 3
      xsql/plans/misc_func_test.go
  56. 51 14
      xsql/plans/preprocessor.go
  57. 204 2
      xsql/plans/preprocessor_test.go
  58. 19 0
      xsql/plans/project_test.go
  59. 2 0
      xsql/processors/extension_test.go
  60. 3 3
      xsql/processors/xsql_processor_test.go
  61. 186 20
      xstream/cli/main.go
  62. 1 1
      xstream/extensions/edgex_source.go
  63. 17 5
      xstream/nodes/sink_node.go
  64. 2 2
      xstream/nodes/source_node.go
  65. 1 1
      xstream/server/main.go
  66. 107 0
      xstream/server/server/rest.go
  67. 90 1
      xstream/server/server/rpc.go
  68. 6 0
      xstream/server/server/server.go
  69. 123 5
      xstream/sinks/edgex_sink.go
  70. 197 0
      xstream/sinks/edgex_sink_test.go
  71. 11 1
      xstream/sinks/mqtt_sink.go
  72. 3 3
      xstream/sinks/rest_sink.go
  73. 9 0
      xstream/util_test.go

+ 1 - 1
.github/workflows/fvt_tests.yaml

@@ -221,7 +221,7 @@ jobs:
             sleep 10
           done
           kuiper_address=$(kubectl get svc --namespace default kuiper -o jsonpath="{.spec.clusterIP}")
-          if [ $(curl -w %{http_code} -fsSL -o /dev/null $kuiper_address:9081/rules) != 200 ];then exit 1; fi
+          if [ $(curl -w %{http_code} -fsSL -o /dev/null $kuiper_address:9081) != 200 ];then exit 1; fi
       - name: check kuiper
         env:
           KUBECONFIG: "/etc/rancher/k3s/k3s.yaml"

+ 3 - 3
.github/workflows/run_test_case.yaml

@@ -22,10 +22,10 @@ jobs:
         - name: run test case
           run: |
             mkdir -p data
-            go build --buildmode=plugin -o plugins/sources/Random.so plugins/sources/random.go
-            go build --buildmode=plugin -o plugins/sinks/File.so plugins/sinks/file.go
+            go build --buildmode=plugin -o plugins/sources/Random@v1.0.0.so plugins/sources/random.go
+            go build --buildmode=plugin -o plugins/sinks/File@v1.0.0.so plugins/sinks/file.go
             go build --buildmode=plugin -o plugins/functions/Echo.so plugins/functions/echo.go
-            go build --buildmode=plugin -o plugins/functions/CountPlusOne.so plugins/functions/countPlusOne.go
+            go build --buildmode=plugin -o plugins/functions/CountPlusOne@v1.0.0.so plugins/functions/countPlusOne.go
             go test ./...
             go test --tags=edgex ./...
     

+ 8 - 1
Makefile

@@ -72,7 +72,7 @@ build_with_edgex: build_prepare
 	@mv ./cli ./server $(BUILD_PATH)/$(PACKAGE_NAME)/bin
 	@echo "Build successfully"
 
-.PHONY: pkg_whit_edgex
+.PHONY: pkg_with_edgex
 pkg_whit_edgex: build_with_edgex 
 	@make real_pkg
 
@@ -114,6 +114,7 @@ cross_build: cross_prepare
 .PHONY: docker
 docker:
 	docker build --no-cache -t $(TARGET):$(VERSION) -f deploy/docker/Dockerfile .
+	docker build --no-cache -t $(TARGET):$(VERSION)-dev -f deploy/docker/Dockerfile-dev .
 
 .PHONY:cross_docker
 cross_docker: cross_prepare
@@ -123,6 +124,12 @@ cross_docker: cross_prepare
 	-f deploy/docker/Dockerfile . \
 	--push
 
+	docker buildx build --no-cache \
+	--platform=linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/ppc64le \
+	-t $(TARGET):$(VERSION)-dev \
+	-f deploy/docker/Dockerfile-dev . \
+	--push
+
 .PHONY: clean
 clean:
 	@rm -rf cross_build.tar linux_amd64 linux_arm64 linux_arm_v7 linux_ppc64le linux_386

+ 8 - 2
common/data.go

@@ -1,5 +1,11 @@
 package common
 
-type Rule struct {
+type RuleDesc struct {
 	Name, Json string
-}
+}
+
+type PluginDesc struct {
+	RuleDesc
+	Type int
+	Stop bool
+}

+ 0 - 45
common/plugin_manager/manager.go

@@ -1,45 +0,0 @@
-package plugin_manager
-
-import (
-	"fmt"
-	"github.com/emqx/kuiper/common"
-	"path"
-	"plugin"
-	"unicode"
-)
-
-var registry map[string]plugin.Symbol
-
-func init() {
-	registry = make(map[string]plugin.Symbol)
-}
-
-func GetPlugin(t string, ptype string) (plugin.Symbol, error) {
-	t = ucFirst(t)
-	key := ptype + "/" + t
-	var nf plugin.Symbol
-	nf, ok := registry[key]
-	if !ok {
-		loc, err := common.GetLoc("/plugins/")
-		if err != nil {
-			return nil, fmt.Errorf("cannot find the plugins folder")
-		}
-		mod := path.Join(loc, ptype, t+".so")
-		plug, err := plugin.Open(mod)
-		if err != nil {
-			return nil, fmt.Errorf("cannot open %s: %v", mod, err)
-		}
-		nf, err = plug.Lookup(t)
-		if err != nil {
-			return nil, fmt.Errorf("cannot find symbol %s, please check if it is exported", t)
-		}
-	}
-	return nf, nil
-}
-
-func ucFirst(str string) string {
-	for i, v := range str {
-		return string(unicode.ToUpper(v)) + str[i+1:]
-	}
-	return ""
-}

+ 2 - 2
deploy/chart/kuiper/Chart.yaml

@@ -14,8 +14,8 @@ type: application
 
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
-version: 0.2.1
+version: 0.3.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application.
-appVersion: 0.2.1
+appVersion: 0.3.0

+ 6 - 0
deploy/chart/kuiper/templates/StatefulSet.yaml

@@ -105,6 +105,12 @@ spec:
           - name: kuiper-config
             mountPath: "/kuiper/etc/sources/zmq.yaml"
             subPath: "zmq.yaml"
+          readinessProbe:
+            httpGet:
+              {{ $restPort := index .Values "kuiperConfig" "kuiper.yaml" "basic" "restPort" }}
+              port: {{ $restPort | default 9081 }}
+            initialDelaySeconds: 5
+            periodSeconds: 5
           {{ $certificationSecretName := index .Values "kuiperConfig" "mqtt_source.yaml" "default" "certificationSecretName" }}
           {{- if $certificationSecretName }}
           - name: kuiper-certification

+ 2 - 2
deploy/chart/kuiper/values.yaml

@@ -76,9 +76,9 @@ kuiperConfig:
     default:
       protocol: tcp
       server: localhost
-      port: 5570
+      port: 5563
       topic: events
-      serviceServer: http://localhost:10080
+      serviceServer: http://localhost:48080
     #  optional:
     #    ClientId: client1
     #    Username: user1

+ 3 - 1
deploy/docker/Dockerfile

@@ -15,7 +15,9 @@ RUN apk add sed libzmq
 
 WORKDIR /kuiper
 
-EXPOSE 9801 20498
+EXPOSE 9081 20498
+
+ENV KUIPER_HOME /kuiper
 
 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
 

+ 18 - 0
deploy/docker/Dockerfile-dev

@@ -0,0 +1,18 @@
+FROM golang:1.13.4-alpine AS builder
+
+COPY . /go/kuiper
+
+WORKDIR /go/kuiper
+
+RUN apk add vim upx gcc make git sed libc-dev binutils-gold pkgconfig zeromq-dev libzmq \
+    && make build_with_edgex \
+    && ln -s /go/kuiper/_build/kuiper-$(git describe --tags --always)-$(uname -s | tr "[A-Z]" "[a-z]")-$(uname -m) /usr/local/kuiper \
+    && ln -s /go/kuiper/deploy/docker/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
+
+EXPOSE 9081 20498
+
+ENV KUIPER_HOME /usr/local/kuiper
+
+ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
+
+CMD ["sh", "-c", "cd /usr/local/kuiper && ./bin/server"]

+ 21 - 2
deploy/docker/README.md

@@ -162,9 +162,28 @@ Use the environment variable to configure `etc/sources/edgex.yaml`  on the Kuipe
 | ---------------------------| -------------------------| ------------------------- |
 | EDGEX_PROTOCOL             | tcp                      | default.protocol          |
 | EDGEX_SERVER               | localhost                | default.server            |
-| EDGEX_PORT                 | 5570                     | default.port              |
+| EDGEX_PORT                 | 5563                     | default.port              |
 | EDGEX_TOPIC                | events                   | default.topic             |
-| EDGEX_SERVICE_SERVER       | http://localhost:10080   | default.serviceServer     |
+| EDGEX_SERVICE_SERVER       | http://localhost:48080   | default.serviceServer     |
+
+All of the environment variables should be set with the corresponding values that are configured in the file ``cmd/core-data/res/configuration.toml`` of the EdgeX core-data service, as listed below.
+
+```
+[MessageQueue]
+Protocol = 'tcp'
+Host = '*'
+Port = 5563
+Type = 'zero'
+Topic = 'events'
+```
+
+```
+[Service]
+...
+Host = 'localhost'
+Port = 48080
+...
+```
 
 If you want to configure more options, you can mount the configuration file into Kuiper container, like this:
 ```

+ 1 - 1
deploy/docker/docker-entrypoint.sh

@@ -5,7 +5,7 @@ else
     set -e
 fi
 
-KUIPER_HOME="/kuiper"
+KUIPER_HOME=${KUIPER_HOME:-"/kuiper"}
 
 CONFIG="$KUIPER_HOME/etc/mqtt_source.yaml"
 

+ 1 - 0
docs/en_US/cli/overview.md

@@ -7,4 +7,5 @@ The Kuiper CLI acts as a client to the Kuiper server. The Kuiper server runs the
 
 - [Streams](streams.md)
 - [Rules](rules.md)
+- [Plugins](plugins.md)
 

+ 94 - 0
docs/en_US/cli/plugins.md

@@ -0,0 +1,94 @@
+# Plugins management
+
+The Kuiper plugin command line tool allows you to manage plugins, such as creating, showing and dropping plugins. Notice that dropping a plugin requires restarting Kuiper to take effect. To update a plugin, do the following:
+1. Drop the plugin.
+2. Restart Kuiper.
+3. Create the plugin with the new configuration.
+
+## create a plugin
+
+The command is used for creating a plugin.  The plugin's definition is specified with JSON format.
+
+```shell
+create plugin $plugin_type $plugin_name $plugin_json | create plugin $plugin_type $plugin_name -f $plugin_def_file
+```
+
+The plugin can be created in two ways. 
+
+- Specify the plugin definition in command line.
+
+Sample:
+
+```shell
+# bin/cli create plugin source random {"file":"http://127.0.0.1/plugins/sources/random.zip"}
+```
+
+The command creates a source plugin named ``random``. 
+
+- Specify the plugin definition in a file. If the plugin is complex, or the plugin is already written in text files with well organized formats, you can just specify the plugin definition through the ``-f`` option.
+
+Sample:
+
+```shell
+# bin/cli create plugin sink plugin1 -f /tmp/plugin1.txt
+```
+
+Below is the contents of ``plugin1.txt``.
+
+```json
+{
+  "file":"http://127.0.0.1/plugins/sources/random.zip"
+}
+```
+### parameters
+1. plugin_type: the type of the plugin. Available values are `["source", "sink", "functions"]`
+2. plugin_name: a unique name of the plugin. The name must be the same as the camel case version of the plugin with lowercase first letter. For example, if the exported plugin name is `Random`, then the name of this plugin is `random`.
+3. file: the url of the plugin files. It must be a zip file with: a compiled so file and the yaml file(only required for sources). The name of the files must match the name of the plugin. Please check [Extension](../extension/overview.md) for the naming rule.
+
+## show plugins
+
+The command is used for displaying all plugins defined in the server for a plugin type.
+
+```shell
+show plugins function
+```
+
+Sample:
+
+```shell
+# bin/cli show plugins function
+function1
+function2
+```
+
+## describe a plugin
+The command is used to print out the detailed definition of a plugin.
+
+```shell
+describe plugin $plugin_type $plugin_name
+```
+
+Sample: 
+
+```shell
+# bin/cli describe plugin source plugin1
+{
+  "name": "plugin1",
+  "version": "1.0.0"
+}
+```
+
+## drop a plugin
+
+The command is used for dropping the plugin.
+
+```shell
+drop plugin $plugin_type $plugin_name -s $stop 
+```
+In which, `-s $stop` is an optional boolean parameter. If it is set to true, the Kuiper server will be stopped for the delete to take effect. The user will need to restart it manually.
+Sample:
+
+```shell
+# bin/cli drop plugin source random
+Plugin random is dropped.
+```

+ 4 - 4
docs/en_US/cli/rules.md

@@ -105,7 +105,7 @@ Sample:
 
 ```shell
 # bin/cli drop rule rule1
-rule rule1 dropped
+Rule rule1 is dropped.
 ```
 
 ## start a rule
@@ -120,7 +120,7 @@ Sample:
 
 ```shell
 # bin/cli start rule rule1
-rule rule1 started
+Rule rule1 was started.
 ```
 
 ## stop a rule
@@ -135,7 +135,7 @@ Sample:
 
 ```shell
 # bin/cli stop rule rule1
-rule rule1 stopped
+Rule rule1 was stopped.
 ```
 
 ## restart a rule
@@ -150,7 +150,7 @@ Sample:
 
 ```shell
 # bin/cli restart rule rule1
-rule rule1 restarted
+Rule rule1 was restarted.
 ```
 
 ## get the status of a rule

+ 69 - 48
docs/en_US/edgex/edgex_rule_engine_tutorial.md

@@ -40,49 +40,50 @@ EdgeX uses [message bus](https://github.com/edgexfoundry/go-mod-messaging) to ex
 
 ## Start to use
 
-### Pull Kuiper Docker and run
In our tutorial, we will use the [Random Integer Device Service](https://github.com/edgexfoundry/device-random) which is shipped in the officially released EdgeX, and run rules against the data generated by this sample device service.
 
-It's **STRONGLY** recommended to use Docker, since related dependency libraries (such ZeroMQ lib) are already installed in Docker images.
+### Run EdgeX Docker instances
 
-```shell
-docker pull emqx/kuiper:0.2.1
-```
After EdgeX Geneva is officially released, you can just follow the steps in [this doc](https://fuji-docs.edgexfoundry.org/Ch-QuickStart.html) to start the services. But for now, since Kuiper has not been officially released yet, you have to download the Docker compose file from [here](https://github.com/edgexfoundry/developer-scripts/blob/master/releases/nightly-build/compose-files/docker-compose-nexus-mongo-no-secty.yml), and then bring up the EdgeX Docker instances. 
 
-<u>TODO: After offcially releasing of EdgeX Geneva, the Kuiper docker image will be pulled automatically by EdgeX docker composer files. The command will be updated by then.</u>  
-
-**Run Docker**
+```shell
+$ wget https://github.com/edgexfoundry/developer-scripts/raw/master/releases/nightly-build/compose-files/docker-compose-nexus-redis-no-secty.yml
 
+$ docker-compose -f ./docker-compose-nexus-redis-no-secty.yml up -d --build
 ```
-docker run -d --name kuiper emqx/kuiper:0.2.1
-```
-
-If the docker instance is failed to start, please use ``docker logs kuiper`` to see the log files.
 
-Notice 1: The default EdgeX message bus configuration could be updated when bring-up the Docker instance.  As listed in below, override the default configurations for message bus server, port and service server address for getting value descriptors in Kuiper instance.
+After all of the Docker instances are started, you can use the ``docker ps`` command to verify that all of the services are running correctly.
 
 ```shell
-docker run -d --name kuiper -e EDGEX_SERVER=10.211.55.2 -e EDGEX_PORT=9999 -e EDGEX_SERVICE_SERVER=http://10.211.55.2:8888 emqx/kuiper:0.2.1
+$ docker ps
+CONTAINER ID        IMAGE                                                                  COMMAND                  CREATED             STATUS              PORTS                                                                                              NAMES
+5618c93027a9        nexus3.edgexfoundry.org:10004/docker-device-virtual-go:master          "/device-virtual --p…"   37 minutes ago      Up 37 minutes       0.0.0.0:49990->49990/tcp                                                                           edgex-device-virtual
+fabe6b9052f5        nexus3.edgexfoundry.org:10004/docker-edgex-ui-go:master                "./edgex-ui-server"      37 minutes ago      Up 37 minutes       0.0.0.0:4000->4000/tcp                                                                             edgex-ui-go
+83ef687fe546        emqx/kuiper:0.2.1                                                      "/usr/bin/docker-ent…"   37 minutes ago      Up 37 minutes       0.0.0.0:9081->9081/tcp, 0.0.0.0:20498->20498/tcp, 9801/tcp                                         edgex-kuiper
+c49b0d6f9347        nexus3.edgexfoundry.org:10004/docker-support-scheduler-go:master       "/support-scheduler …"   37 minutes ago      Up 37 minutes       0.0.0.0:48085->48085/tcp                                                                           edgex-support-scheduler
+4265dcc2bb48        nexus3.edgexfoundry.org:10004/docker-core-command-go:master            "/core-command -cp=c…"   37 minutes ago      Up 37 minutes       0.0.0.0:48082->48082/tcp                                                                           edgex-core-command
+4667160e2f41        nexus3.edgexfoundry.org:10004/docker-app-service-configurable:master   "/app-service-config…"   37 minutes ago      Up 37 minutes       48095/tcp, 0.0.0.0:48100->48100/tcp                                                                edgex-app-service-configurable-rules
+9bbfe95993f5        nexus3.edgexfoundry.org:10004/docker-core-metadata-go:master           "/core-metadata -cp=…"   37 minutes ago      Up 37 minutes       0.0.0.0:48081->48081/tcp, 48082/tcp                                                                edgex-core-metadata
+2e342a3aae81        nexus3.edgexfoundry.org:10004/docker-support-notifications-go:master   "/support-notificati…"   37 minutes ago      Up 37 minutes       0.0.0.0:48060->48060/tcp                                                                           edgex-support-notifications
+3cfc628e013a        nexus3.edgexfoundry.org:10004/docker-sys-mgmt-agent-go:master          "/sys-mgmt-agent -cp…"   37 minutes ago      Up 37 minutes       0.0.0.0:48090->48090/tcp                                                                           edgex-sys-mgmt-agent
+f69e9c4d6cc8        nexus3.edgexfoundry.org:10004/docker-core-data-go:master               "/core-data -cp=cons…"   37 minutes ago      Up 37 minutes       0.0.0.0:5563->5563/tcp, 0.0.0.0:48080->48080/tcp                                                   edgex-core-data
+9e5091928409        nexus3.edgexfoundry.org:10004/docker-support-logging-go:master         "/support-logging -c…"   37 minutes ago      Up 37 minutes       0.0.0.0:48061->48061/tcp                                                                           edgex-support-logging
+74e8668f892c        redis:5.0.7-alpine                                                     "docker-entrypoint.s…"   37 minutes ago      Up 37 minutes       0.0.0.0:6379->6379/tcp                                                                             edgex-redis
+9b341bb217f9        consul:1.3.1                                                           "docker-entrypoint.s…"   37 minutes ago      Up 37 minutes       0.0.0.0:8400->8400/tcp, 8300-8302/tcp, 8301-8302/udp, 8600/tcp, 8600/udp, 0.0.0.0:8500->8500/tcp   edgex-core-consul
+ed7ad5ae08b2        nexus3.edgexfoundry.org:10004/docker-edgex-volume:master               "/bin/sh -c '/usr/bi…"   37 minutes ago      Up 37 minutes                                                                                                          edgex-files
 ```
 
-For more detailed supported Docer environment varialbles, please refer to [this link](https://hub.docker.com/r/emqx/kuiper).
-
-*Notice 2: If you'd like to use Kuiper with EdgeX support seperately (without Docker), you could build Kuiper by yourself with ``make pkg_with_edgex`` command.*
-
-### Create a device service
-
-In this tutorial, we use a very simple mock-up device service. Please follow the steps in [this doc](https://fuji-docs.edgexfoundry.org/Ch-GettingStartedSDK-Go.html) to develop and run the random number service.  
-
 ### Create a stream
 
 There are two approaches to manage stream, you can use your preferred approach.
 
 #### Option 1: Use Rest API
 
-The next step is to create a stream that can consume data from EdgeX message bus. Please change ``$your_server`` to Kuiper docker instance IP address.
+The next step is to create a stream that can consume data from EdgeX message bus. Please change ``$kuiper_docker`` to Kuiper docker instance IP address.
 
 ```shell
 curl -X POST \
-  http://$your_server:9081/streams \
+  http://$kuiper_docker:9081/streams \
   -H 'Content-Type: application/json' \
   -d '{
   "sql": "create stream demo() WITH (FORMAT=\"JSON\", TYPE=\"edgex\")"
@@ -96,7 +97,7 @@ For other Rest APIs, please refer to [this doc](../restapi/overview.md).
 Run following command to enter the running Kuiper docker instance.
 
 ```shell
-docker exec -it kuiper /bin/sh
+docker exec -it edgex-kuiper /bin/sh
 ```
 
 Use following command to create a stream named ``demo``.
@@ -116,9 +117,9 @@ Now the stream is created. But you maybe curious about how Kuiper knows the mess
 default:
   protocol: tcp
   server: localhost
-  port: 5570
+  port: 5563
   topic: events
-  serviceServer: http://localhost:10080
+  serviceServer: http://localhost:48080
 .....  
 ```
 
@@ -128,17 +129,17 @@ For more detailed information of configuration file, please refer to [this doc](
 
 Let's create a rule that send result data to an MQTT broker, for detailed information of MQTT sink, please refer to [this link](../rules/sinks/mqtt.md).  Similar to create a stream, you can also choose REST or CLI to manage rules. 
 
-So the below rule will filter all of ``randomnumber`` that is less than 31. The sink result will be published to topic ``result`` of public MQTT broker ``broker.emqx.io``. 
+So the below rule will get all of values from ``event`` topic. The sink result will be published to topic ``result`` of public MQTT broker ``broker.emqx.io``. 
 
 #### Option 1: Use Rest API
 
 ```shell
 curl -X POST \
-  http://$your_server:9081/rules \
+  http://$kuiper_docker:9081/rules \
   -H 'Content-Type: application/json' \
   -d '{
   "id": "rule1",
-  "sql": "SELECT * FROM demo WHERE randomnumber > 30",
+  "sql": "SELECT * FROM demo",
   "actions": [
     {
       "mqtt": {
@@ -157,7 +158,7 @@ You can create a rule file with any text editor, and copy following contents int
 
 ```
 {
-  "sql": "SELECT * from demo where randomnumber > 30",
+  "sql": "SELECT * from demo",
   "actions": [
     {
       "mqtt": {
@@ -173,7 +174,7 @@ You can create a rule file with any text editor, and copy following contents int
 In the running Kuiper instance, and execute following command.
 
 ```shell
-# bin/cli create rule rule1 -f rule.txt
+$ bin/cli create rule rule1 -f rule.txt
 Connecting to 127.0.0.1:20498...
 Creating a new rule from file rule.txt.
 Rule rule1 was created, please use 'cli getstatus rule $rule_name' command to get rule status.
@@ -186,13 +187,23 @@ If you want to send analysis result to another sink, please refer to [other sink
 Now you can also take a look at the log file under ``log/stream.log``, see detailed info of rule. 
 
 ```
-time="2020-03-19T10:23:40+08:00" level=info msg="open source node 1 instances" rule=rule1
-time="2020-03-19T10:23:40+08:00" level=info msg="Connect to value descriptor service at: http://localhost:48080/api/v1/valuedescriptor \n"
-time="2020-03-19T10:23:40+08:00" level=info msg="Use configuration for edgex messagebus {{ 0 } {localhost 5563 tcp} zero map[]}\n"
-time="2020-03-19T10:23:40+08:00" level=info msg="Start source demo instance 0 successfully" rule=rule1
-time="2020-03-19T10:23:40+08:00" level=info msg="The connection to edgex messagebus is established successfully." rule=rule1
-time="2020-03-19T10:23:40+08:00" level=info msg="Successfully subscribed to edgex messagebus topic events." rule=rule1
-time="2020-03-19T10:23:40+08:00" level=info msg="The connection to server tcp://broker.emqx.io:1883 was established successfully" rule=rule1
+time="2020-04-07T03:33:28Z" level=info msg="db location is /kuiper/data/"
+time="2020-04-07T03:33:28Z" level=info msg="Starting rules"
+time="2020-04-07T03:33:28Z" level=info msg="Serving kuiper (version - 0.2.1) on port 20498, and restful api on port 9081. \n"
+time="2020-04-07T03:35:35Z" level=info msg="Rule rule1 is created."
+time="2020-04-07T03:35:35Z" level=info msg="Init rule with options {isEventTime: false, lateTolerance: 0, concurrency: 1, bufferLength: 1024"
+time="2020-04-07T03:35:35Z" level=info msg="Opening stream" rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="open source node demo with option map[FORMAT:JSON TYPE:edgex]" rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="open sink node 1 instances" rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="open source node 1 instances" rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="Opening mqtt sink for rule rule1." rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="Connect to value descriptor service at: http://edgex-core-data:48080/api/v1/valuedescriptor \n"
+time="2020-04-07T03:35:35Z" level=info msg="Use configuration for edgex messagebus {{ 0 } {edgex-core-data 5563 tcp} zero map[]}\n"
+time="2020-04-07T03:35:35Z" level=info msg="Start source demo instance 0 successfully" rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="The connection to edgex messagebus is established successfully." rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="Connect MQTT broker with username and password." rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="Successfully subscribed to edgex messagebus topic events." rule=rule1
+time="2020-04-07T03:35:35Z" level=info msg="The connection to server tcp://broker.emqx.io:1883 was established successfully" rule=rule1
 ```
 
 ### Monitor analysis result
@@ -200,17 +211,23 @@ time="2020-03-19T10:23:40+08:00" level=info msg="The connection to server tcp://
 Since all of the analysis results are published to ``tcp://broker.emqx.io:1883``, you can just use the ``mosquitto_sub`` command below to monitor the result. You can also use other [MQTT client tools](https://www.emqx.io/blog/mqtt-client-tools).
 
 ```shell
-# mosquitto_sub -h broker.emqx.io -t result
-[{"randomnumber":81}]
-[{"randomnumber":87}]
-[{"randomnumber":47}]
-[{"randomnumber":59}]
-[{"randomnumber":81}]
-...
+$ mosquitto_sub -h broker.emqx.io -t result
+[{"bool":true}]
+[{"bool":false}]
+[{"bool":true}]
+[{"randomvalue_int16":3287}]
+[{"float64":8.41326e+306}]
+[{"randomvalue_int32":-1872949486}]
+[{"randomvalue_int8":-53}]
+[{"int64":-1829499332806053678}]
+[{"int32":-1560624981}]
+[{"int16":8991}]
+[{"int8":-4}]
+[{"bool":true}]
+[{"bool":false}]
+[{"float64":1.737076e+306}]
 ```
 
-You'll find that only those randomnumber larger than 30 will be published to ``result`` topic.
-
 You can also type below command to look at the rule execution status. The corresponding REST API is also available for getting rule status, please check [related document](../restapi/overview.md).
 
 ```shell
@@ -254,6 +271,10 @@ Connecting to 127.0.0.1:20498...
 
 In this tutorial, we introduce a very simple use of the EdgeX Kuiper rule engine. If you have any issues regarding the use of the Kuiper rule engine, you can open issues in the EdgeX or Kuiper Github repository.
 
+### More Exercise 
+
+The current rule does not filter any of the data that are sent to Kuiper, so how can you filter data? Please [drop the rule](../cli/rules.md) and change the SQL in the previous rule accordingly. After updating the rule file, deploy the rule again. Then monitor the result topic of the MQTT broker, and verify whether the rule works or not.
+
 #### Extended Reading
 
 - Read [EdgeX source](../rules/sources/edgex.md) for more detailed information of configurations and data type conversion.

+ 12 - 1
docs/en_US/extension/overview.md

@@ -10,11 +10,22 @@ Kuiper extensions are based on golang plugin system. The general steps to make e
 1. Create the plugin package that implements required source, sink or function interface.
 2. Compile the plugin into a _.so_ file, and put it into sources or sinks or functions folder under _plugins_ folder.
 
+Currently golang plugins are only supported on Linux and macOS which poses the same limitation for Kuiper extensions.
+
+## Naming
+
 Notice that, there are some restrictions for the names:
 1. The name of _.so_ file must be camel case with an upper case first letter. For example, _MySource.so_ or _MySink.so_.
 2. The name of the export symbol of the plugin must be camel case with an upper case first letter.
 
-Currently golang plugins are only supported on Linux and macOS which poses the same limitation for Kuiper extensions.
+### Version
+
+The user can **optionally** add a version string to the name of _.so_ to help identify the version of the plugin. The version can be then retrieved through describe CLI command or REST API. The naming convention is to add a version string to the name after _@v_. The version can be any string. Below are some typical examples.
+
+- _MySource@v1.0.0.so_ : version is 1.0.0
+- _MySource@v20200331.so_:  version is 20200331
+
+If multiple versions of plugins with the same name are in place, only the latest version (ordered by the version string) will take effect.
 
 ## Setup the plugin developing environment
 It is required to build the plugin with exactly the same version of dependencies. And the plugin must implement interfaces exported by Kuiper, so the Kuiper project must be in the gopath. 

+ 1 - 0
docs/en_US/restapi/overview.md

@@ -4,4 +4,5 @@ By default, the REST API are running in port 9081. You can change the port in `/
 
 - [Streams](streams.md)
 - [Rules](rules.md)
+- [Plugins](plugins.md)
 

+ 80 - 0
docs/en_US/restapi/plugins.md

@@ -0,0 +1,80 @@
+# Plugins management
+
+The Kuiper REST API for plugins allows you to manage plugins, such as creating, dropping and listing plugins. Notice that dropping a plugin requires restarting Kuiper to take effect. To update a plugin, do the following:
+1. Drop the plugin.
+2. Restart Kuiper.
+3. Create the plugin with the new configuration.
+
+## create a plugin
+
+The API accepts a JSON content to create a new plugin. Each plugin type has a standalone endpoint. The supported types are `["sources", "sinks", "functions"]`. The plugin is identified by the name. The name must be unique.
+```shell
+POST http://localhost:9081/plugins/sources
+POST http://localhost:9081/plugins/sinks
+POST http://localhost:9081/plugins/functions
+```
+Request Sample
+
+```json
+{
+  "name":"random",
+  "file":"http://127.0.0.1/plugins/sources/random.zip"
+}
+```
+
+### Parameters
+
+1. name: a unique name of the plugin. The name must be the same as the camel case version of the plugin with lowercase first letter. For example, if the exported plugin name is `Random`, then the name of this plugin is `random`.
+2. file: the url of the plugin files. It must be a zip file with: a compiled so file and the yaml file(only required for sources). The name of the files must match the name of the plugin. Please check [Extension](../extension/overview.md) for the naming rule.
+
+
+## show plugins
+
+The API is used for displaying all of the plugins defined in the server for a plugin type.
+
+```shell
+GET http://localhost:9081/plugins/sources
+GET http://localhost:9081/plugins/sinks
+GET http://localhost:9081/plugins/functions
+```
+
+Response Sample:
+
+```json
+["plugin1","plugin2"]
+```
+
+## describe a plugin
+
+The API is used to print out the detailed definition of a plugin.
+
+```shell
+GET http://localhost:9081/plugins/sources/{name}
+GET http://localhost:9081/plugins/sinks/{name}
+GET http://localhost:9081/plugins/functions/{name}
+```
+
+Path parameter `name` is the name of the plugin.
+
+Response Sample: 
+
+```json
+{
+  "name": "plugin1",
+  "version": "1.0.0"
+}
+```
+
+## drop a plugin
+
+The API is used for dropping the plugin. The Kuiper server needs to be restarted for the deletion to take effect.
+
+```shell
+DELETE http://localhost:9081/plugins/sources/{name}
+DELETE http://localhost:9081/plugins/sinks/{name}
+DELETE http://localhost:9081/plugins/functions/{name}
+```
+The user can pass a query parameter to decide if Kuiper should be stopped after the delete in order to make the deletion take effect. The parameter is `restart`, and only when its value is `1` will Kuiper be stopped. The user then has to restart it manually.
+```shell
+DELETE http://localhost:9081/plugins/sources/{name}?restart=1
+```

+ 1 - 0
docs/en_US/rules/overview.md

@@ -64,6 +64,7 @@ Each action can define its own properties. There are 3 common properties:
 | retryInterval   | int:1000   | Specify how many milliseconds will the sink retry to send data out if the previous send failed  |
 | cacheLength     | int:10240   | Specify how many messages can be cached. The cached messages will be resent to external system until the data sent out successfully. The cached message will be sent in order except in runAsync or concurrent mode. The cached message will be saved to disk in fixed intervals.  |
 | cacheSaveInterval  | int:1000   | Specify the interval to save cached message to the disk. Notice that, if the rule is closed in plan, all the cached messages will be saved at close. A larger value can reduce the saving overhead but may lose more cache messages when the system is interrupted in error.  |
+| omitIfEmpty | bool: false | If the configuration item is set to true, when the SELECT result is empty, the result will not be fed to the sink operator. |
 
 Actions could be customized to support different kinds of outputs, see [extension](../extension/overview.md) for more detailed info.
 

+ 103 - 5
docs/en_US/rules/sinks/edgex.md

@@ -6,20 +6,118 @@ The action is used for publish output message into EdgeX message bus.
 | ------------- | -------- | ------------------------------------------------------------ |
 | protocol      | true     | If it's not specified, then use default value ``tcp``.       |
 | host          | true     | The host of message bus. If not specified, then use default value ``*``. |
-| port          | true     | The port of message bus. If not specified, then use default value ``5570``. |
-| topic         | false    | The topic to be published. The property must be specified.   |
+| port          | true     | The port of message bus. If not specified, then use default value ``5563``. |
+| topic         | true     | The topic to be published. If not specified, then use default value ``events``. |
 | contentType   | true     | The content type of message to be published. If not specified, then use the default value ``application/json``. |
+| metadata      | true     | The property is a field name that allows user to specify a field name of SQL  select clause,  the field name should use ``meta(*) AS xxx``  to select all of EdgeX metadata from message. |
+| deviceName    | true     | Allows user to specify the device name in the event structure that are sent from Kuiper. |
+
+## Examples
+
+### Publish result to EdgeX message bus without keeping original metadata
+In this case, the original metadata values (such as ``id, pushed, created, modified, origin`` in the ``Events`` structure, and ``id, created, modified, origin, pushed, device`` in the ``Reading`` structure) will not be kept. Kuiper acts as another EdgeX micro service here, and it has its own ``device name``. A ``deviceName`` property is provided, which allows the user to specify the device name of Kuiper. Below is one example,
+
+1) Data received from EdgeX message bus ``events`` topic,
+```
+{
+  "Device": "demo", "Created": 000, …
+  "readings": 
+  [
+     {"Name": "Temperature", value: "30", "Created":123 …},
+     {"Name": "Humidity", value: "20", "Created":456 …}
+  ]
+}
+```
+2) Use following rule,  and specify ``deviceName`` with ``kuiper`` in ``edgex`` action.
+
+```json
+{
+  "id": "rule1",
+  "sql": "SELECT temperature * 3 AS t1, humidity FROM events",
+  "actions": [
+    {
+      "edgex": {
+        "protocol": "tcp",
+        "host": "*",
+        "port": 5571,
+        "topic": "application",
+        "deviceName": "kuiper",
+        "contentType": "application/json"
+      }
+    }
+  ]
+}
+```
+3) The data sent to EdgeX message bus.
+```
+{
+  "Device": "kuiper", "Created": 0, …
+  "readings": 
+  [
+     {"Name": "t1", value: "90" , "Created": 0 …},
+     {"Name": "humidity", value: "20" , "Created": 0 …}
+  ]
+}
+```
+Please notice that, 
+- The device name of ``Event`` structure is changed to ``kuiper``
+- All of metadata for ``Events and Readings`` structure will be updated with new value. ``Created`` field is updated to another value generated by Kuiper (here is ``0``).
+
+### Publish result to EdgeX message bus with keeping original metadata
+But for some scenarios, you may want to keep some of the original metadata, such as keeping the device name as the original value published to Kuiper (``demo`` in the sample), and also other metadata of the readings arrays. In such a case, Kuiper acts as a filter - it filters out messages that are not of concern, but still keeps the original data.
+
+Below is an example,
+
+1) Data received from EdgeX message bus ``events`` topic,
+```
+{
+  "Device": "demo", "Created": 000, …
+  "readings": 
+  [
+     {"Name": "Temperature", value: "30", "Created":123 …},
+     {"Name": "Humidity", value: "20", "Created":456 …}
+  ]
+}
+```
+2) Use following rule,  and specify ``metadata`` with ``edgex_meta``  in ``edgex`` action.
 
-Below is sample configuration for publish result message to ``applicaton`` topic of EdgeX Message Bus.
 ```json
-	{
+{
+  "id": "rule1",
+  "sql": "SELECT meta(*) AS edgex_meta, temperature * 3 AS t1, humidity FROM events WHERE temperature > 30",
+  "actions": [
+    {
       "edgex": {
         "protocol": "tcp",
         "host": "*",
         "port": 5571,
         "topic": "application",
+        "metadata": "edgex_meta",
         "contentType": "application/json"
       }
-  }
+    }
+  ]
+}
 ```
+Please notice that,
+- User need to add ``meta(*) AS edgex_meta`` in the SQL clause, the ``meta(*)`` returns all of metadata.
+- In ``edgex`` action, value ``edgex_meta``  is specified for ``metadata`` property. This property specifies which field contains metadata of message.
 
+3) The data sent to EdgeX message bus.
+```
+{
+  "Device": "demo", "Created": 000, …
+  "readings": 
+  [
+     {"Name": "t1", value: "90" , "Created": 0 …},
+     {"Name": "humidity", value: "20", "Created":456 …}
+  ]
+}
+```
+Please notice that,
+- The metadata of ``Events`` structure is still kept, such as ``Device`` & ``Created``.
+- For the reading that can be found in original message, the metadata will be kept. Such as ``humidity`` metadata will be the ``old values`` received from EdgeX message bus.
+- For the reading that can NOT be found in the original message, the original metadata will not be set. For example, the metadata of ``t1`` in the sample will be filled with a default value generated by Kuiper. 
+- If your SQL has aggregated function, then it does not make sense to keep these metadata, but Kuiper will still fill with metadata from a particular message in the time window. For example, with following SQL, 
+```SELECT avg(temperature) AS temperature, meta(*) AS edgex_meta FROM ... GROUP BY TUMBLINGWINDOW(ss, 10)```. 
+In this case, there are possibly several messages in the window, the metadata value for ``temperature`` will be filled with value from 1st message that received from bus.

+ 14 - 12
docs/en_US/rules/sinks/mqtt.md

@@ -2,17 +2,18 @@
 
 The action is used for publish output message into a MQTT server. 
 
-| Property name     | Optional | Description                                                  |
-| ----------------- | -------- | ------------------------------------------------------------ |
-| server            | false    | The broker address of the mqtt server, such as ``tcp://127.0.0.1:1883`` |
-| topic             | false    | The mqtt topic, such as ``analysis/result``                  |
-| clientId          | true     | The client id for mqtt connection. If not specified, an uuid will be used |
-| protocolVersion   | true     | 3.1 (also refer as MQTT 3) or 3.1.1 (also refer as MQTT 4).  If not specified, the default value is 3.1. |
-| qos               | true     | The QoS for message delivery.                                |
-| username          | true     | The user name for the connection.                            |
-| password          | true     | The password for the connection.                             |
-| certificationPath | true     | The certification path. It can be an absolute path, or a relative path. If it is an relative path, then the base path is where you excuting the ``server`` command. For example, if you run ``bin/server`` from ``/var/kuiper``, then the base path is ``/var/kuiper``; If you run ``./server`` from ``/var/kuiper/bin``, then the base path is ``/var/kuiper/bin``. |
-| privateKeyPath    | true     | The private key path. It can be either absolute path, or relative path. For more detailed information, please refer to ``certificationPath``. |
+| Property name      | Optional | Description                                                  |
+| ------------------ | -------- | ------------------------------------------------------------ |
+| server             | false    | The broker address of the mqtt server, such as ``tcp://127.0.0.1:1883`` |
+| topic              | false    | The mqtt topic, such as ``analysis/result``                  |
+| clientId           | true     | The client id for mqtt connection. If not specified, an uuid will be used |
+| protocolVersion    | true     | 3.1 (also refer as MQTT 3) or 3.1.1 (also refer as MQTT 4).  If not specified, the default value is 3.1. |
+| qos                | true     | The QoS for message delivery.                                |
+| username           | true     | The user name for the connection.                            |
+| password           | true     | The password for the connection.                             |
+| certificationPath  | true     | The certification path. It can be an absolute path, or a relative path. If it is an relative path, then the base path is where you excuting the ``server`` command. For example, if you run ``bin/server`` from ``/var/kuiper``, then the base path is ``/var/kuiper``; If you run ``./server`` from ``/var/kuiper/bin``, then the base path is ``/var/kuiper/bin``. |
+| privateKeyPath     | true     | The private key path. It can be either absolute path, or relative path. For more detailed information, please refer to ``certificationPath``. |
+| insecureSkipVerify | true     | If InsecureSkipVerify is ``true``, TLS accepts any certificate presented by the server and any host name in that certificate.  In this mode, TLS is susceptible to man-in-the-middle attacks. The default value is ``false``. The configuration item can only be used with TLS connections. |
 
 Below is sample configuration for connecting to Azure IoT Hub by using SAS authentication.
 ```json
@@ -39,7 +40,8 @@ Below is another sample configuration for connecting to AWS IoT by using certifi
         "qos": 1,
         "clientId": "demo_001",
         "certificationPath": "keys/d3807d9fa5-certificate.pem",
-        "privateKeyPath": "keys/d3807d9fa5-private.pem.key"
+        "privateKeyPath": "keys/d3807d9fa5-private.pem.key", 
+        "insecureSkipVerify": false
       }
     }
 ```

+ 9 - 13
docs/en_US/rules/sources/edgex.md

@@ -21,7 +21,7 @@ The types defined in EdgeX value descriptors will be converted into related [dat
 
 ### Boolean
 
-If  ``Type`` value of ``ValueDescriptor`` is ``B``, ``Bool`` or ``Boolean``, then Kuiper tries to convert to ``boolean`` type. Following values will be converted into ``true``.
+If  ``Type`` value of ``ValueDescriptor`` is ``Bool``, then Kuiper tries to convert to ``boolean`` type. Following values will be converted into ``true``.
 
 - "1", "t", "T", "true", "TRUE", "True" 
 
@@ -31,19 +31,15 @@ Following will be converted into ``false``.
 
 ### Bigint
 
-If  ``Type`` value of ``ValueDescriptor`` is ``I``, ``INT``,  ``INT8`` , ``INT16``, ``INT32``,  ``INT64``,``UINT`` , ``UINT8`` , ``UINT16`` ,  ``UINT32`` , ``UINT64`` then Kuiper tries to convert to ``Bigint`` type. 
+If  ``Type`` value of ``ValueDescriptor`` is ``INT8`` , ``INT16``, ``INT32``,  ``INT64``,``UINT`` , ``UINT8`` , ``UINT16`` ,  ``UINT32`` , ``UINT64`` then Kuiper tries to convert to ``Bigint`` type. 
 
 ### Float
 
-If  ``Type`` value of ``ValueDescriptor`` is ``F``, ``FLOAT``,  ``FLOAT16`` , ``FLOAT32``, ``FLOAT64``then Kuiper tries to convert to ``Float`` type. 
+If  ``Type`` value of ``ValueDescriptor`` is ``FLOAT16`` , ``FLOAT32``, ``FLOAT64``then Kuiper tries to convert to ``Float`` type. 
 
 ### String
 
-If  ``Type`` value of ``ValueDescriptor`` is ``S``, ``String``, then Kuiper tries to convert to ``String`` type. 
-
-### Struct
-
-If  ``Type`` value of ``ValueDescriptor`` is ``J``, ``Json``, then Kuiper tries to convert to ``Struct`` type. 
+If  ``Type`` value of ``ValueDescriptor`` is ``String``, then Kuiper tries to convert to ``String`` type. 
 
 # Global configurations
 
@@ -54,9 +50,9 @@ The configuration file of EdgeX source is at ``$kuiper/etc/sources/edgex.yaml``.
 default:
   protocol: tcp
   server: localhost
-  port: 5570
+  port: 5573
   topic: events
-  serviceServer: http://localhost:10080
+  serviceServer: http://localhost:48080
 #  optional:
 #    ClientId: client1
 #    Username: user1
@@ -77,7 +73,7 @@ The server address of  EdgeX message bus, default value is ``localhost``.
 
 ## port
 
-The port of EdgeX message bus, default value is ``5570``.
+The port of EdgeX message bus, default value is ``5573``.
 
 ## topic
 
@@ -89,14 +85,14 @@ The base service address for getting value descriptors, the value of ``serviceSe
 
 ## Override the default settings
 
-In some cases, maybe you want to consume message from multiple topics or event bus.  Kuiper supports to specify another configuration, and use the ``CONF_KEY`` to specify the newly created key when you create a stream.
+In some cases, maybe you want to consume message from multiple topics from message bus.  Kuiper supports to specify another configuration, and use the ``CONF_KEY`` to specify the newly created key when you create a stream.
 
 ```yaml
 #Override the global configurations
 demo1: #Conf_key
   protocol: tcp
   server: 10.211.55.6
-  port: 5570
+  port: 5571
   topic: events
 ```
 

二进制
docs/zh_CN/edgex/arch_dark.png


二进制
docs/zh_CN/edgex/arch_light.png


二进制
docs/zh_CN/edgex/bus_data.png


二进制
docs/zh_CN/edgex/create_stream.png


+ 87 - 0
docs/zh_CN/edgex/edgex_meta.md

@@ -0,0 +1,87 @@
+# 如何使用 meta 函数抽取在 EdgeX 消息总线中发送的其它信息?
+
+当数据被发布到 EdgeX 消息总线的时候,除了真正的设备发出的值之外,还包含了一些额外的值,例如事件创建的时间,修改时间。有时在数据分析的时候需要这些值,本文描述如何使用 Kuiper 提供的函数来实现这个功能。
+
+## EdgeX 消息总线上收到的消息模型
+
+在 EdgeX 消息总线上收到的数据结构如下,一个 ``Event`` 结构体封装了相关的「元数据」(ID, Pushed, Device, Created, Modified, Origin),以及从设备服务中采集到的实际数据 (在 ``Readings`` 字段中) 。
+
+与``Event`` 类似, ``Reading`` 也包含了一些元数据 (ID, Pushed... 等)。
+
+- Event
+  - ID
+  - Pushed
+  - Device
+  - Created
+  - Modified
+  - Origin
+  - Readings
+    - reading [0]
+      - Id
+      - Pushed
+      - Created
+      - Origin
+      - Modified
+      - Device
+      - Name
+      - Value
+    - reading [1]
+      - ... // The same as in reading[0]
+      - ...
+    - reading [n] ...
+
+## Kuiper 中的 EdgeX 数据模型
+
+那么在 Kuiper 中, EdgeX 数据是如何被管理的?让我们来看个例子。
+
+如下所示,首先用户创建了一个名为 ``events`` 的 EdgeX 流定义(以黄色高亮标示)。
+
+<img src="create_stream.png" style="zoom:50%;" />
+
+其次,如下所示,一条消息被发送到消息总线。
+
+- Device name 为 ``demo``,以绿色高亮标示
+- Reading 名称为 ``temperature`` & ``Humidity`` ,用红色高亮标示
+- 这里有些 ``元数据`` 是没有必要「可见」的,但是这些值在分析的时候可能会被用到,例如``Event`` 结构体中的 ``Created`` 字段。Kuiper 将这些值保存在 Kuiper 消息中的名为 metadata 的字段中,用户在分析阶段可以获取到这些值。
+
+<img src="bus_data.png" style="zoom:50%;" />
+
+最后,提供一条 SQL 用于数据分析,此处请注意,
+
+- FROM 子句中的 ``events`` 为黄色高亮,就是在第一步中定义的流名字。
+- SELECT 中的 ``temperature`` & ``humidity`` 字段为红色高亮,它们是 readings 中的 ``Name`` 字段的值。
+- WHERE 子句中的 ``meta(device)`` 为绿色高亮,用于从 ``Events ``结构体中抽取 ``device`` 字段。该 SQL 语句将过滤所有设备名称不是 ``demo`` 的记录。
+
+<img src="sql.png" style="zoom:50%;" />
+
+以下是使用 ``meta`` 函数抽取别的元数据的一些例子。
+
+1. ``meta(created)``: 000  
+
+   从 Event 结构体中获取 'created' 元数据
+
+2. ``meta(temperature -> created)``: 123 
+
+   从 reading[0] 中获取  'created' 元数据,以 'temperature'  为 key
+
+3. ``meta(humidity -> created)``: 456 
+
+   从 reading[1] 中获取  'created' 元数据,以 'humidity' 为 key
+
+请注意,如果你想从 readings 中获取元数据,你需要使用 ``reading-name -> key`` 操作符来访问这些值。在前述例子中,``temperature`` & ``humidity``  是  ``reading-names``,并且  ``key`` 是 readings 中的字段名字。
+
+但是,如果你从 ``Events`` 中获取元数据,只需直接指定 key,如第一个例子所示。
+
+``meta`` 函数也可以用在 ``SELECT`` 子句中,以下为另外一个例子。请注意,如果在 ``SELECT`` 子句中使用了多个 ``meta`` 函数,你应该使用 ``AS`` 来指定一个别名,否则在前面的字段中的值将会被覆盖(不加别名,都有 meta 作为字段名)。
+
+```sql
+SELECT temperature,humidity, meta(id) AS eid,meta(Created) AS ec, meta(temperature->pushed) AS tpush, meta(temperature->Created) AS tcreated, meta(temperature->Origin) AS torigin, meta(Humidity->Device) AS hdevice, meta(Humidity->Modified) AS hmodified FROM demo WHERE meta(device)="demo2"
+```
+
+## 总结
+
+Kuiper 的 ``meta`` 函数可以用于访问元数据,以下列出了所有在 EdgeX 的 ``Events`` 和 ``Reading`` 中支持的 key,
+
+- Events: id, pushed, device, created, modified, origin, correlationid
+- Reading: id, created, modified, origin, pushed, device
+

文件差异内容过多而无法显示
+ 268 - 0
docs/zh_CN/edgex/edgex_rule_engine_tutorial.md


二进制
docs/zh_CN/edgex/sql.png


+ 70 - 69
docs/zh_CN/rules/overview.md

@@ -1,69 +1,70 @@
-# 规则
-
-规则由JSON定义,下面是一个示例。
-
-```json
-{
-  "id": "rule1",
-  "sql": "SELECT demo.temperature, demo1.temp FROM demo left join demo1 on demo.timestamp = demo1.timestamp where demo.temperature > demo1.temp GROUP BY demo.temperature, HOPPINGWINDOW(ss, 20, 10)",
-  "actions": [
-    {
-      "log": {}
-    },
-    {
-      "mqtt": {
-        "server": "tcp://47.52.67.87:1883",
-        "topic": "demoSink"
-      }
-    }
-  ]
-}
-```
-
-创建规则需要以下3个参数。
-
-## 参数
-
-| 参数名 | 是否可选 | 说明                |
-| ------------- | -------- | ------------------------------------------------------------ |
-| id | false   | 规则id |
-| sql        | false   | 为规则运行的sql查询 |
-| actions           | false    | Sink动作数组 |
-| options           | 是       | A map of options        |
-
-## id
-
-规则的标识。 规则名称不能在同一Kuiper实例中重复。
-
-## sql
-
-为规则运行的sql查询。
-
-- Kuiper支持嵌入式MQTT源,有关更多详细信息,请参阅[MQTT source stream](sources/mqtt.md)。
-- 有关Kuiper SQL的更多信息,请参阅[SQL](../sqls/overview.md)。
-- 可以自定义来源,请参阅 [extension](../extension/overview.md)了解更多详细信息。
-
-### 动作
-
-当前,支持两种操作: [log](sinks/logs.md) 、[mqtt](sinks/mqtt.md) 和 [rest](sinks/rest.md)。 每个动作可以定义自己的属性。当前有三个公共属性:
-
-| 属性名 | 类型和默认值 | 描述                                                  |
-| ------------- | -------- | ------------------------------------------------------------ |
-| concurrency | int: 1   | 设置运行的线程数。该参数值大于1时,消息发出的顺序可能无法保证。 |
-| bufferLength | int: 1024   | 设置可缓存消息数目。若缓存消息数超过此限制,sink将阻塞消息接收,直到缓存消息被消费使得缓存消息数目小于限制为止。|
-| runAsync        | bool:false   | 设置是否异步运行输出操作以提升性能。请注意,异步运行的情况下,输出结果顺序不能保证。  |
-| retryInterval   | int:1000   | 设置信息发送失败后重试等待时间,单位为毫秒|
-| cacheLength     | int:10240   | 设置最大消息缓存数量。缓存的消息会一直保留直到消息发送成功。缓存消息将按顺序发送,除非运行在异步或者并发模式下。缓存消息会定期存储到磁盘中。  |
-| cacheSaveInterval  | int:1000   | 设置缓存存储间隔时间,单位为毫秒。需要注意的是,当规则关闭时,缓存会自动存储。该值越大,则缓存保存开销越小,但系统意外退出时缓存丢失的风险变大。 |
-
-可以自定义动作以支持不同种类的输出,有关更多详细信息,请参见 [extension](../extension/overview.md) 。
-
-### 选项
-当前的选项包括:
-
-| 选项名 | 类型和默认值 | Description                                                  |
-| ------------- | -------- | ------------------------------------------------------------ |
-| isEventTime | 布尔值:false | 使用事件时间还是将时间用作事件的时间戳。 如果使用事件时间,则将从有效负载中提取时间戳。 必须通过[stream]([extension](../sqls/streams.md))定义指定时间戳记。 |
-| lateTolerance        | int64:0   | 在使用事件时间窗口时,可能会出现元素延迟到达的情况。 LateTolerance可以指定在删除元素之前可以延迟多少时间(单位为毫秒)。 默认情况下,该值为0,表示后期元素将被删除。 |
-| concurrency | int: 1   | 一条规则运行时会根据sql语句分解成多个plan运行。该参数设置每个plan运行的线程数。该参数值大于1时,消息处理顺序可能无法保证。 |
-| bufferLength | int: 1024   | 指定每个plan可缓存消息数。若缓存消息数超过此限制,plan将阻塞消息接收,直到缓存消息被消费使得缓存消息数目小于限制为止。此选项值越大,则消息吞吐能力越强,但是内存占用也会越多。|
+# 规则
+
+规则由JSON定义,下面是一个示例。
+
+```json
+{
+  "id": "rule1",
+  "sql": "SELECT demo.temperature, demo1.temp FROM demo left join demo1 on demo.timestamp = demo1.timestamp where demo.temperature > demo1.temp GROUP BY demo.temperature, HOPPINGWINDOW(ss, 20, 10)",
+  "actions": [
+    {
+      "log": {}
+    },
+    {
+      "mqtt": {
+        "server": "tcp://47.52.67.87:1883",
+        "topic": "demoSink"
+      }
+    }
+  ]
+}
+```
+
+创建规则需要以下3个参数。
+
+## 参数
+
+| 参数名 | 是否可选 | 说明                |
+| ------------- | -------- | ------------------------------------------------------------ |
+| id | false   | 规则id |
+| sql        | false   | 为规则运行的sql查询 |
+| actions           | false    | Sink动作数组 |
+| options           | true     | 规则选项集合        |
+
+## id
+
+规则的标识。 规则名称不能在同一Kuiper实例中重复。
+
+## sql
+
+为规则运行的sql查询。
+
+- Kuiper支持嵌入式MQTT源,有关更多详细信息,请参阅[MQTT source stream](sources/mqtt.md)。
+- 有关Kuiper SQL的更多信息,请参阅[SQL](../sqls/overview.md)。
+- 可以自定义来源,请参阅 [extension](../extension/overview.md)了解更多详细信息。
+
+### 动作
+
+当前,支持三种操作: [log](sinks/logs.md) 、[mqtt](sinks/mqtt.md) 和 [rest](sinks/rest.md)。 每个动作可以定义自己的属性。当前有如下公共属性:
+
+| 属性名 | 类型和默认值 | 描述                                                  |
+| ------------- | -------- | ------------------------------------------------------------ |
+| concurrency | int: 1   | 设置运行的线程数。该参数值大于1时,消息发出的顺序可能无法保证。 |
+| bufferLength | int: 1024   | 设置可缓存消息数目。若缓存消息数超过此限制,sink将阻塞消息接收,直到缓存消息被消费使得缓存消息数目小于限制为止。|
+| runAsync        | bool:false   | 设置是否异步运行输出操作以提升性能。请注意,异步运行的情况下,输出结果顺序不能保证。  |
+| retryInterval   | int:1000   | 设置信息发送失败后重试等待时间,单位为毫秒|
+| cacheLength     | int:10240   | 设置最大消息缓存数量。缓存的消息会一直保留直到消息发送成功。缓存消息将按顺序发送,除非运行在异步或者并发模式下。缓存消息会定期存储到磁盘中。  |
+| cacheSaveInterval  | int:1000   | 设置缓存存储间隔时间,单位为毫秒。需要注意的是,当规则关闭时,缓存会自动存储。该值越大,则缓存保存开销越小,但系统意外退出时缓存丢失的风险变大。 |
+| omitIfEmpty | bool: false | 如果该配置项设置为 true,当 SELECT 结果为空时,该结果将不会发送到 sink。 |
+
+可以自定义动作以支持不同种类的输出,有关更多详细信息,请参见 [extension](../extension/overview.md) 。
+
+### 选项
+当前的选项包括:
+
+| 选项名 | 类型和默认值 | Description                                                  |
+| ------------- | -------- | ------------------------------------------------------------ |
+| isEventTime | 布尔值:false | 使用事件时间还是将时间用作事件的时间戳。 如果使用事件时间,则将从有效负载中提取时间戳。 必须通过[stream](../sqls/streams.md)定义指定时间戳记。 |
+| lateTolerance        | int64:0   | 在使用事件时间窗口时,可能会出现元素延迟到达的情况。 LateTolerance可以指定在删除元素之前可以延迟多少时间(单位为毫秒)。 默认情况下,该值为0,表示后期元素将被删除。 |
+| concurrency | int: 1   | 一条规则运行时会根据sql语句分解成多个plan运行。该参数设置每个plan运行的线程数。该参数值大于1时,消息处理顺序可能无法保证。 |
+| bufferLength | int: 1024   | 指定每个plan可缓存消息数。若缓存消息数超过此限制,plan将阻塞消息接收,直到缓存消息被消费使得缓存消息数目小于限制为止。此选项值越大,则消息吞吐能力越强,但是内存占用也会越多。|

+ 127 - 0
docs/zh_CN/rules/sinks/edgex.md

@@ -0,0 +1,127 @@
+# EdgeX 消息总线目标
+
+该目标用于将消息发送到 EdgeX 消息总线上。
+
+| name        | Optional | Description                                                  |
+| ----------- | -------- | ------------------------------------------------------------ |
+| protocol    | true     | 如未指定,使用缺省值 ``tcp``.                                |
+| host        | true     | 消息总线目标主机地址,使用缺省值 ``*``.                      |
+| port        | true     | 消息总线端口号。 如未指定,使用缺省值 ``5563``.              |
+| topic       | true     | 发布的主题名称,如未指定,使用缺省值 ``events``.             |
+| contentType | true     | 发布消息的内容类型,如未指定,使用缺省值 ``application/json``. |
+| metadata    | true     | 该属性为一个字段名称,该字段是 SQL SELECT 子句的一个字段名称,这个字段应该类似于 ``meta(*) AS xxx`` ,用于选出消息中所有的 EdgeX 元数据. |
+| deviceName  | true     | 允许用户指定设备名称,该名称将作为从 Kuiper 中发送出来的 Event 结构体的设备名称. |
+
+## 例子
+
+### 发布结果到 EdgeX 消息总线,而不保留原有的元数据
+在此情况下,原有的元数据 (例如``Events`` 结构体中的 ``id, pushed, created, modified, origin``,以及``Reading`` 结构体中的  ``id, created, modified, origin, pushed, device`` 不会被保留)。Kuiper 在此情况下作为 EdgeX 的一个单独微服务,它有自己的 ``device name``。 提供了属性 ``deviceName``, 该属性允许用户指定 Kuiper 的设备名称。如下所示,
+
+1) 从 EdgeX 消息总线上的 ``events`` 主题上收到的消息,
+
+```
+{
+  "Device": "demo", "Created": 000, …
+  "readings": 
+  [
+     {"Name": "Temperature", value: "30", "Created":123 …},
+     {"Name": "Humidity", value: "20", "Created":456 …}
+  ]
+}
+```
+2) 使用如下的规则,并且在 ``edgex`` action 中给属性 ``deviceName`` 指定 ``kuiper``。
+
+```json
+{
+  "id": "rule1",
+  "sql": "SELECT temperature * 3 AS t1, humidity FROM events",
+  "actions": [
+    {
+      "edgex": {
+        "protocol": "tcp",
+        "host": "*",
+        "port": 5571,
+        "topic": "application",
+        "deviceName": "kuiper",
+        "contentType": "application/json"
+      }
+    }
+  ]
+}
+```
+3) 发送到 EdgeX 消息总线上的数据。
+
+```
+{
+  "Device": "kuiper", "Created": 0, …
+  "readings": 
+  [
+     {"Name": "t1", value: "90" , "Created": 0 …},
+     {"Name": "humidity", value: "20" , "Created": 0 …}
+  ]
+}
+```
+请注意,
+- Event 结构体中的设备名称( `` Device``)变成了 ``kuiper``
+- ``Events and Readings`` 结构体中的数据被更新为新的值. 字段 ``Created`` 被 Kuiper 更新为新的值 (这里为 ``0``).
+
+### 发布结果到 EdgeX 消息总线,并保留原有的元数据
+但是在某些场景中,你可能需要保留原来的元数据。比如保留发送到 Kuiper 的设备名称,在本例中为 ``demo``, 还有 reading 数组中的其它元数据。在此情况下,Kuiper 更像是一个过滤器 - 将不关心的数据过滤掉,但是依然保留原有的数据。
+
+参考以下的例子,
+
+1) 从 EdgeX 消息总线上的 ``events`` 主题上收到的消息,
+
+```
+{
+  "Device": "demo", "Created": 000, …
+  "readings": 
+  [
+     {"Name": "Temperature", value: "30", "Created":123 …},
+     {"Name": "Humidity", value: "20", "Created":456 …}
+  ]
+}
+```
+2) 使用如下规则,在``edgex`` action 中,为 ``metadata`` 指定值 ``edgex_meta`` 。
+
+```json
+{
+  "id": "rule1",
+  "sql": "SELECT meta(*) AS edgex_meta, temperature * 3 AS t1, humidity FROM events WHERE temperature > 30",
+  "actions": [
+    {
+      "edgex": {
+        "protocol": "tcp",
+        "host": "*",
+        "port": 5571,
+        "topic": "application",
+        "metadata": "edgex_meta",
+        "contentType": "application/json"
+      }
+    }
+  ]
+}
+```
+请注意,
+- 用户需要在 SQL 子句中加 ``meta(*) AS edgex_meta`` ,函数 ``meta(*)`` 返回所有的元数据。
+- 在 ``edgex`` action里, 属性 ``metadata`` 指定值 ``edgex_meta`` 。该属性指定哪个字段包含了元数据。
+
+3) 发送给 EdgeX 消息总线的数据
+
+```
+{
+  "Device": "demo", "Created": 000, …
+  "readings": 
+  [
+     {"Name": "t1", value: "90" , "Created": 0 …},
+     {"Name": "humidity", value: "20", "Created":456 …}
+  ]
+}
+```
+请注意,
+- ``Events`` 结构体的元数据依然保留,例如 ``Device`` & ``Created``.
+- 对于在原有消息中可以找到的 reading,元数据将继续保留。 比如 ``humidity`` 的元数据就是从 EdgeX 消息总线里接收到的``原值 - 或者说是旧值``。
+- 对于在原有消息中无法找到的 reading,元数据将不会被设置。如例子中的``t1`` 的元数据被设置为 Kuiper 产生的缺省值。
+- 如果你的 SQL 包含了聚合函数,那保留原有的元数据就没有意义,但是 Kuiper 还是会使用时间窗口中的某一条记录的元数据。例如,在下面的 SQL 里,
+```SELECT avg(temperature) AS temperature, meta(*) AS edgex_meta FROM ... GROUP BY TUMBLINGWINDOW(ss, 10)```. 
+这种情况下,在时间窗口中可能有几条数据,Kuiper 会使用窗口中的第一条数据的元数据来填充 ``temperature`` 的元数据。

+ 108 - 0
docs/zh_CN/rules/sources/edgex.md

@@ -0,0 +1,108 @@
+
+
+# EdgeX 源
+
+Kuiper 提供了内置的 EdgeX 源支持,它可以被用来订阅来自于[EdgeX 消息总线](https://github.com/edgexfoundry/go-mod-messaging)的数据,并且将数据放入 Kuiper 数据处理流水线中。
+
+## EdgeX 流定义
+
+EdgeX 在 [value descriptors](https://github.com/edgexfoundry/go-mod-core-contracts) 已经定义了数据类型,因此在 Kuiper 中建议采用 schema-less 方式的 EdgeX 流式定义,如下所示。
+
+```shell
+# cd $kuiper_base
+# bin/cli CREATE STREAM demo'() with(format="json", datasource="demo", type="edgex")'
+```
+
+EdgeX 源会试图取得某个字段的类型,
+
+- 如果在 value descriptors 中可找到其数据类型,就将其转换为对应类型;
+- 如果在 value descriptors 中找不到其数据类型,将保留原值;
+- 如果类型转换失败,该值将被**丢弃**,并在日志上打印一条告警消息;
+
+在 EdgeX value descriptors 中定义的数据类型,将被转换为 Kuiper 中相应支持的[数据类型](../../sqls/streams.md)。
+
+### Boolean
+
+如果 ``ValueDescriptor`` 中  ``Type`` 的值为 ``Bool`` ,那么 Kuiper 会试着将其转换为 ``boolean`` 类型,以下的值将被转化为 ``true``。
+
+- "1", "t", "T", "true", "TRUE", "True" 
+
+以下值将被转换为 ``false``。
+
+- "0", "f", "F", "false", "FALSE", "False"
+
+### Bigint
+
+如果 ``ValueDescriptor`` 中  ``Type`` 的值为 ``INT8`` , ``INT16``, ``INT32``,  ``INT64`` , ``UINT8`` , ``UINT16`` ,  ``UINT32`` , ``UINT64`` 那么 Kuiper 会试着将其转换为 ``Bigint`` 类型。 
+
+### Float
+
+如果 ``ValueDescriptor`` 中  ``Type`` 的值为 ``FLOAT32``, ``FLOAT64`` ,那么 Kuiper 会试着将其转换为 ``Float`` 类型。 
+
+### String
+
+如果 ``ValueDescriptor`` 中  ``Type`` 的值为 ``String``,那么 Kuiper 会试着将其转换为 ``String`` 类型。 
+
+# 全局配置
+
+EdgeX 源配置文件为 ``$kuiper/etc/sources/edgex.yaml``,以下配置文件内容。
+
+```yaml
+#Global Edgex configurations
+default:
+  protocol: tcp
+  server: localhost
+  port: 5573
+  topic: events
+  serviceServer: http://localhost:48080
+#  optional:
+#    ClientId: client1
+#    Username: user1
+#    Password: password
+```
+
+用户可以在此指定全局的 EdgeX 配置。在 ``default`` 部分中指定的配置将作为所有 EdgeX 源的缺省配置。
+
+## protocol
+
+连接到 EdgeX 消息总线的协议,缺省为 ``tcp``
+
+## server
+
+EdgeX 消息总线的地址,缺省为 ``localhost``
+
+## port
+
+EdgeX 消息总线的端口,缺省为 ``5573``.
+
+## topic
+
+EdgeX 消息总线上监听的主题名称,缺省为 ``events``.
+
+## serviceServer
+
+访问 value descriptors 的基础服务地址,配置项 ``serviceServer`` 的值与 ``/api/v1/valuedescriptor`` 拼接后,用于获取 EdgeX 服务器上定义的所有 value descriptors。
+
+## 重载缺省设置
+
+在某些情况下,你可能想消费来自于多个主题的数据。Kuiper 支持指定别的配置,并且在创建流定义的时候使用 ``CONF_KEY`` 来指定新的配置。
+
+```yaml
+#Override the global configurations
+demo1: #Conf_key
+  protocol: tcp
+  server: 10.211.55.6
+  port: 5571
+  topic: events
+```
+
+如果你有个特定的源需要覆盖缺省的设置,你可以定义一个自定义的配置段。在上面的例子中,我们创建了一个新的配置 ``demo1``,然后你在创建流定义的时候可以使用选项 ``CONF_KEY`` 来使用新的配置 (参考 [流定义规范](../../sqls/streams.md) 获取更多详细信息)。
+
+**例子**
+
+```
+create stream demo1() WITH (FORMAT="JSON", type="edgex", CONF_KEY="demo1");
+```
+
+在自定义的配置中,能够使用的配置项与 ``default`` 部分的是一样的,任何在自定义段中设置的值将覆盖 ``default`` 部分里的配置。
+

+ 2 - 2
etc/sources/edgex.yaml

@@ -2,9 +2,9 @@
 default:
   protocol: tcp
   server: localhost
-  port: 5570
+  port: 5563
   topic: events
-  serviceServer: http://localhost:10080
+  serviceServer: http://localhost:48080
 #  optional:
 #    ClientId: client1
 #    Username: user1

+ 1 - 1
fvt_scripts/edgex/pub.go

@@ -16,7 +16,7 @@ import (
 var msgConfig1 = types.MessageBusConfig{
 	PublishHost: types.HostInfo{
 		Host:     "*",
-		Port:     5570,
+		Port:     5563,
 		Protocol: "tcp",
 	},
 	Type:messaging.ZeroMQ,

+ 1 - 1
fvt_scripts/edgex/valuedesc/vd_server.go

@@ -33,7 +33,7 @@ var vd6 = models.ValueDescriptor{Id: "s1", Name: "s1", Formatting: "%s", Type:"S
 
 func main() {
 	http.HandleFunc(clients.ApiValueDescriptorRoute, Hello)
-	if e := http.ListenAndServe(":10080", nil); e != nil {
+	if e := http.ListenAndServe(":48080", nil); e != nil {
 		log.Fatal(e)
 	}
 }

+ 15 - 5
fvt_scripts/edgex_sink_rule.jmx

@@ -133,7 +133,7 @@
                   <boolProp name="HTTPArgument.always_encode">false</boolProp>
                   <stringProp name="Argument.value">{&#xd;
   &quot;id&quot;: &quot;rule1&quot;,&#xd;
-  &quot;sql&quot;: &quot;SELECT * FROM demo WHERE temperature = 72&quot;,&#xd;
+  &quot;sql&quot;: &quot;SELECT temperature, humidity, meta(*) AS metadt FROM demo WHERE temperature = 72&quot;,&#xd;
   &quot;actions&quot;: [&#xd;
     {&#xd;
       &quot;edgex&quot;: {&#xd;
@@ -141,6 +141,7 @@
         &quot;host&quot;: &quot;*&quot;,&#xd;
         &quot;port&quot;: 5571,&#xd;
         &quot;topic&quot;: &quot;application&quot;,&#xd;
+        &quot;metadata&quot;: &quot;metadt&quot;,&#xd;
         &quot;contentType&quot;: &quot;application/json&quot;&#xd;
       }&#xd;
     }&#xd;
@@ -416,9 +417,9 @@
           <stringProp name="SystemSampler.directory">${__property(fvt,,)}</stringProp>
         </SystemSampler>
         <hashTree>
-          <JSONPathAssertion guiclass="JSONPathAssertionGui" testclass="JSONPathAssertion" testname="temperature Assertion" enabled="true">
-            <stringProp name="JSON_PATH">$.[0].temperature</stringProp>
-            <stringProp name="EXPECTED_VALUE">72</stringProp>
+          <JSONPathAssertion guiclass="JSONPathAssertionGui" testclass="JSONPathAssertion" testname="device Assertion" enabled="true">
+            <stringProp name="JSON_PATH">$.device</stringProp>
+            <stringProp name="EXPECTED_VALUE">demo</stringProp>
             <boolProp name="JSONVALIDATION">true</boolProp>
             <boolProp name="EXPECT_NULL">false</boolProp>
             <boolProp name="INVERT">false</boolProp>
@@ -426,7 +427,7 @@
           </JSONPathAssertion>
           <hashTree/>
           <JSONPathAssertion guiclass="JSONPathAssertionGui" testclass="JSONPathAssertion" testname="humidity Assertion" enabled="true">
-            <stringProp name="JSON_PATH">$.[0].humidity</stringProp>
+            <stringProp name="JSON_PATH">$.readings[0].value</stringProp>
             <stringProp name="EXPECTED_VALUE">81</stringProp>
             <boolProp name="JSONVALIDATION">true</boolProp>
             <boolProp name="EXPECT_NULL">false</boolProp>
@@ -434,6 +435,15 @@
             <boolProp name="ISREGEX">false</boolProp>
           </JSONPathAssertion>
           <hashTree/>
+          <JSONPathAssertion guiclass="JSONPathAssertionGui" testclass="JSONPathAssertion" testname="temperature Assertion" enabled="true">
+            <stringProp name="JSON_PATH">$.readings[1].value</stringProp>
+            <stringProp name="EXPECTED_VALUE">72</stringProp>
+            <boolProp name="JSONVALIDATION">true</boolProp>
+            <boolProp name="EXPECT_NULL">false</boolProp>
+            <boolProp name="INVERT">false</boolProp>
+            <boolProp name="ISREGEX">false</boolProp>
+          </JSONPathAssertion>
+          <hashTree/>
         </hashTree>
       </hashTree>
     </hashTree>

+ 2 - 2
go.mod

@@ -4,8 +4,8 @@ require (
 	github.com/benbjohnson/clock v1.0.0
 	github.com/buger/jsonparser v0.0.0-20191004114745-ee4c978eae7e
 	github.com/eclipse/paho.mqtt.golang v1.2.0
-	github.com/edgexfoundry/go-mod-core-contracts v0.1.48
-	github.com/edgexfoundry/go-mod-messaging v0.1.14
+	github.com/edgexfoundry/go-mod-core-contracts v0.1.53
+	github.com/edgexfoundry/go-mod-messaging v0.1.16
 	github.com/go-yaml/yaml v2.1.0+incompatible
 	github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
 	github.com/google/uuid v1.1.1

+ 417 - 0
plugins/manager.go

@@ -0,0 +1,417 @@
+package plugins
+
+import (
+	"archive/zip"
+	"errors"
+	"fmt"
+	"github.com/emqx/kuiper/common"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"plugin"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+	"unicode"
+)
+
+type Plugin struct {
+	Name string `json:"name"`
+	File string `json:"file"`
+}
+
+type PluginType int
+
+const (
+	SOURCE PluginType = iota
+	SINK
+	FUNCTION
+)
+
+var (
+	PluginTypes = []string{"sources", "sinks", "functions"}
+	once        sync.Once
+	singleton   *Manager
+)
+
+//Registry is append only because plugin cannot delete or reload. To delete a plugin, restart the server to reindex
+type Registry struct {
+	sync.RWMutex
+	internal []map[string]string
+}
+
+func (rr *Registry) Store(t PluginType, name string, version string) {
+	rr.Lock()
+	rr.internal[t][name] = version
+	rr.Unlock()
+}
+
+func (rr *Registry) List(t PluginType) []string {
+	rr.RLock()
+	result := rr.internal[t]
+	rr.RUnlock()
+	keys := make([]string, 0, len(result))
+	for k := range result {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (rr *Registry) Get(t PluginType, name string) (string, bool) {
+	rr.RLock()
+	result := rr.internal[t]
+	rr.RUnlock()
+	r, ok := result[name]
+	return r, ok
+}
+
+//func (rr *Registry) Delete(t PluginType, value string) {
+//	rr.Lock()
+//	s := rr.internal[t]
+//	for i, f := range s{
+//		if f == value{
+//			s[len(s)-1], s[i] = s[i], s[len(s)-1]
+//			rr.internal[t] = s
+//			break
+//		}
+//	}
+//	rr.Unlock()
+//}
+
+var symbolRegistry = make(map[string]plugin.Symbol)
+
+func GetPlugin(t string, pt PluginType) (plugin.Symbol, error) {
+	ut := ucFirst(t)
+	ptype := PluginTypes[pt]
+	key := ptype + "/" + t
+	var nf plugin.Symbol
+	nf, ok := symbolRegistry[key]
+	if !ok {
+		loc, err := common.GetLoc("/plugins/")
+		if err != nil {
+			return nil, fmt.Errorf("cannot find the plugins folder")
+		}
+		m, err := NewPluginManager()
+		if err != nil {
+			return nil, fmt.Errorf("fail to initialize the plugin manager")
+		}
+		soFile, err := getSoFileName(m, pt, t)
+		if err != nil {
+			return nil, fmt.Errorf("cannot get the plugin file name: %v", err)
+		}
+		mod := path.Join(loc, ptype, soFile)
+		plug, err := plugin.Open(mod)
+		if err != nil {
+			return nil, fmt.Errorf("cannot open %s: %v", mod, err)
+		}
+		nf, err = plug.Lookup(ut)
+		if err != nil {
+			return nil, fmt.Errorf("cannot find symbol %s, please check if it is exported", t)
+		}
+		symbolRegistry[key] = nf
+	}
+	return nf, nil
+}
+
+type Manager struct {
+	pluginDir string
+	etcDir    string
+	registry  *Registry
+}
+
+func NewPluginManager() (*Manager, error) {
+	var err error
+	once.Do(func() {
+		dir, err := common.GetLoc("/plugins")
+		if err != nil {
+			err = fmt.Errorf("cannot find plugins folder: %s", err)
+			return
+		}
+		etcDir, err := common.GetLoc("/etc")
+		if err != nil {
+			err = fmt.Errorf("cannot find etc folder: %s", err)
+			return
+		}
+
+		plugins := make([]map[string]string, 3)
+		for i := 0; i < 3; i++ {
+			names, err := findAll(PluginType(i), dir)
+			if err != nil {
+				err = fmt.Errorf("fail to find existing plugins: %s", err)
+				return
+			}
+			plugins[i] = names
+		}
+		registry := &Registry{internal: plugins}
+
+		singleton = &Manager{
+			pluginDir: dir,
+			etcDir:    etcDir,
+			registry:  registry,
+		}
+	})
+	return singleton, err
+}
+
+func findAll(t PluginType, pluginDir string) (result map[string]string, err error) {
+	result = make(map[string]string)
+	dir := path.Join(pluginDir, PluginTypes[t])
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return
+	}
+
+	for _, file := range files {
+		baseName := filepath.Base(file.Name())
+		if strings.HasSuffix(baseName, ".so") {
+			n, v := parseName(baseName)
+			result[n] = v
+		}
+	}
+	return
+}
+
+func (m *Manager) List(t PluginType) (result []string, err error) {
+	return m.registry.List(t), nil
+}
+
+func (m *Manager) Register(t PluginType, j *Plugin) error {
+	name, uri := j.Name, j.File
+	//Validation
+	name = strings.Trim(name, " ")
+	if name == "" {
+		return fmt.Errorf("invalid name %s: should not be empty", name)
+	}
+	if !isValidUrl(uri) || !strings.HasSuffix(uri, ".zip") {
+		return fmt.Errorf("invalid uri %s", uri)
+	}
+
+	for _, n := range m.registry.List(t) {
+		if n == name {
+			return fmt.Errorf("invalid name %s: duplicate", name)
+		}
+	}
+	zipPath := path.Join(m.pluginDir, name+".zip")
+	var unzipFiles []string
+	//clean up: delete zip file and unzip files in error
+	defer os.Remove(zipPath)
+	//download
+	err := downloadFile(zipPath, uri)
+	if err != nil {
+		return fmt.Errorf("fail to download file %s: %s", uri, err)
+	}
+	//unzip and copy to destination
+	unzipFiles, version, err := m.unzipAndCopy(t, name, zipPath)
+	if err != nil {
+		if t == SOURCE && len(unzipFiles) == 1 { //source that only copy so file
+			os.Remove(unzipFiles[0])
+		}
+		return fmt.Errorf("fail to unzip file %s: %s", uri, err)
+	}
+
+	m.registry.Store(t, name, version)
+	return nil
+}
+
+func (m *Manager) Delete(t PluginType, name string, stop bool) error {
+	name = strings.Trim(name, " ")
+	if name == "" {
+		return fmt.Errorf("invalid name %s: should not be empty", name)
+	}
+	soFile, err := getSoFileName(m, t, name)
+	if err != nil {
+		return err
+	}
+	var results []string
+	paths := []string{
+		path.Join(m.pluginDir, PluginTypes[t], soFile),
+	}
+	if t == SOURCE {
+		paths = append(paths, path.Join(m.etcDir, PluginTypes[t], name+".yaml"))
+	}
+	for _, p := range paths {
+		_, err := os.Stat(p)
+		if err == nil {
+			err = os.Remove(p)
+			if err != nil {
+				results = append(results, err.Error())
+			}
+		} else {
+			results = append(results, fmt.Sprintf("can't find %s", p))
+		}
+	}
+
+	if len(results) > 0 {
+		return errors.New(strings.Join(results, "\n"))
+	} else {
+		if stop {
+			go func() {
+				time.Sleep(1 * time.Second)
+				os.Exit(100)
+			}()
+		}
+		return nil
+	}
+}
+func (m *Manager) Get(t PluginType, name string) (map[string]string, bool) {
+	v, ok := m.registry.Get(t, name)
+	if ok {
+		m := map[string]string{
+			"name":    name,
+			"version": v,
+		}
+		return m, ok
+	}
+	return nil, false
+}
+
+func getSoFileName(m *Manager, t PluginType, name string) (string, error) {
+	v, ok := m.registry.Get(t, name)
+	if !ok {
+		return "", fmt.Errorf("invalid name %s: not exist", name)
+	}
+
+	soFile := ucFirst(name) + ".so"
+	if v != "" {
+		soFile = fmt.Sprintf("%s@v%s.so", ucFirst(name), v)
+	}
+	return soFile, nil
+}
+
+func (m *Manager) unzipAndCopy(t PluginType, name string, src string) ([]string, string, error) {
+	var filenames []string
+	r, err := zip.OpenReader(src)
+	if err != nil {
+		return filenames, "", err
+	}
+	defer r.Close()
+
+	soPrefix := regexp.MustCompile(fmt.Sprintf(`^%s(@v.*)?\.so$`, ucFirst(name)))
+	var yamlFile, yamlPath, version string
+	expFiles := 1
+	if t == SOURCE {
+		yamlFile = name + ".yaml"
+		yamlPath = path.Join(m.etcDir, PluginTypes[t], yamlFile)
+		expFiles = 2
+	}
+	for _, file := range r.File {
+		fileName := file.Name
+		if yamlFile == fileName {
+			err = unzipTo(file, yamlPath)
+			if err != nil {
+				return filenames, "", err
+			}
+			filenames = append(filenames, yamlPath)
+		}
+		if soPrefix.Match([]byte(fileName)) {
+			soPath := path.Join(m.pluginDir, PluginTypes[t], fileName)
+			err = unzipTo(file, soPath)
+			if err != nil {
+				return filenames, "", err
+			}
+			filenames = append(filenames, soPath)
+			_, version = parseName(fileName)
+		}
+	}
+	if len(filenames) != expFiles {
+		return filenames, version, fmt.Errorf("invalid zip file: so file or conf file is missing")
+	}
+	return filenames, version, nil
+}
+
+func parseName(n string) (string, string) {
+	result := strings.Split(n, ".so")
+	result = strings.Split(result[0], "@v")
+	name := lcFirst(result[0])
+	if len(result) > 1 {
+		return name, result[1]
+	}
+	return name, ""
+}
+
+func unzipTo(f *zip.File, fpath string) error {
+	_, err := os.Stat(fpath)
+	if err == nil || !os.IsNotExist(err) {
+		return fmt.Errorf("%s already exist", fpath)
+	}
+
+	if f.FileInfo().IsDir() {
+		return fmt.Errorf("%s: not a file, but a directory", fpath)
+	}
+
+	if err := os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
+		return err
+	}
+
+	outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+	if err != nil {
+		return err
+	}
+
+	rc, err := f.Open()
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(outFile, rc)
+
+	outFile.Close()
+	rc.Close()
+	return err
+}
+
+func isValidUrl(uri string) bool {
+	_, err := url.ParseRequestURI(uri)
+	if err != nil {
+		return false
+	}
+
+	u, err := url.Parse(uri)
+	if err != nil || u.Scheme == "" || u.Host == "" {
+		return false
+	}
+
+	return true
+}
+
+func downloadFile(filepath string, url string) error {
+	// Get the data
+	resp, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("cannot download the file with status: %s", resp.Status)
+	}
+	defer resp.Body.Close()
+
+	// Create the file
+	out, err := os.Create(filepath)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	// Write the body to file
+	_, err = io.Copy(out, resp.Body)
+	return err
+}
+
+func ucFirst(str string) string {
+	for i, v := range str {
+		return string(unicode.ToUpper(v)) + str[i+1:]
+	}
+	return ""
+}
+
+func lcFirst(str string) string {
+	for i, v := range str {
+		return string(unicode.ToLower(v)) + str[i+1:]
+	}
+	return ""
+}

+ 235 - 0
plugins/manager_test.go

@@ -0,0 +1,235 @@
+package plugins
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path"
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func TestManager_Register(t *testing.T) {
+	s := httptest.NewServer(
+		http.FileServer(http.Dir("testzips")),
+	)
+	defer s.Close()
+	endpoint := s.URL
+
+	data := []struct {
+		t   PluginType
+		n   string
+		u   string
+		v   string
+		err error
+	}{
+		{
+			t:   SOURCE,
+			n:   "",
+			u:   "",
+			err: errors.New("invalid name : should not be empty"),
+		}, {
+			t:   SOURCE,
+			n:   "zipMissConf",
+			u:   endpoint + "/sources/zipMissConf.zip",
+			err: errors.New("fail to unzip file " + endpoint + "/sources/zipMissConf.zip: invalid zip file: so file or conf file is missing"),
+		}, {
+			t:   SINK,
+			n:   "urlerror",
+			u:   endpoint + "/sinks/nozip",
+			err: errors.New("invalid uri " + endpoint + "/sinks/nozip"),
+		}, {
+			t:   SINK,
+			n:   "zipWrongname",
+			u:   endpoint + "/sinks/zipWrongName.zip",
+			err: errors.New("fail to unzip file " + endpoint + "/sinks/zipWrongName.zip: invalid zip file: so file or conf file is missing"),
+		}, {
+			t:   FUNCTION,
+			n:   "zipMissSo",
+			u:   endpoint + "/functions/zipMissSo.zip",
+			err: errors.New("fail to unzip file " + endpoint + "/functions/zipMissSo.zip: invalid zip file: so file or conf file is missing"),
+		}, {
+			t: SOURCE,
+			n: "random2",
+			u: endpoint + "/sources/random2.zip",
+		}, {
+			t: SOURCE,
+			n: "random3",
+			u: endpoint + "/sources/random3.zip",
+			v: "1.0.0",
+		}, {
+			t: SINK,
+			n: "file2",
+			u: endpoint + "/sinks/file2.zip",
+		}, {
+			t: FUNCTION,
+			n: "echo2",
+			u: endpoint + "/functions/echo2.zip",
+		}, {
+			t:   FUNCTION,
+			n:   "echo2",
+			u:   endpoint + "/functions/echo2.zip",
+			err: errors.New("invalid name echo2: duplicate"),
+		},
+	}
+	manager, err := NewPluginManager()
+	if err != nil {
+		t.Error(err)
+	}
+
+	fmt.Printf("The test bucket size is %d.\n\n", len(data))
+	for i, tt := range data {
+		err = manager.Register(tt.t, &Plugin{
+			Name: tt.n,
+			File: tt.u,
+		})
+		if !reflect.DeepEqual(tt.err, err) {
+			t.Errorf("%d: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.err, err)
+		} else if tt.err == nil {
+			err := checkFile(manager.pluginDir, manager.etcDir, tt.t, tt.n, tt.v)
+			if err != nil {
+				t.Errorf("%d: error : %s\n\n", i, err)
+			}
+		}
+	}
+
+}
+
+func TestManager_List(t *testing.T) {
+	data := []struct {
+		t PluginType
+		r []string
+	}{
+		{
+			t: SOURCE,
+			r: []string{"random", "random2", "random3"},
+		}, {
+			t: SINK,
+			r: []string{"file", "file2"},
+		}, {
+			t: FUNCTION,
+			r: []string{"countPlusOne", "echo", "echo2"},
+		},
+	}
+	manager, err := NewPluginManager()
+	if err != nil {
+		t.Error(err)
+	}
+	fmt.Printf("The test bucket size is %d.\n\n", len(data))
+
+	for i, p := range data {
+		result, err := manager.List(p.t)
+		if err != nil {
+			t.Errorf("%d: list error : %s\n\n", i, err)
+			return
+		}
+		sort.Strings(result)
+		if !reflect.DeepEqual(p.r, result) {
+			t.Errorf("%d: result mismatch:\n  exp=%v\n  got=%v\n\n", i, p.r, result)
+		}
+	}
+}
+
+func TestManager_Desc(t *testing.T) {
+	data := []struct {
+		t PluginType
+		n string
+		r map[string]string
+	}{
+		{
+			t: SOURCE,
+			n: "random2",
+			r: map[string]string{
+				"name":    "random2",
+				"version": "",
+			},
+		}, {
+			t: SOURCE,
+			n: "random3",
+			r: map[string]string{
+				"name":    "random3",
+				"version": "1.0.0",
+			},
+		}, {
+			t: FUNCTION,
+			n: "echo2",
+			r: map[string]string{
+				"name":    "echo2",
+				"version": "",
+			},
+		},
+	}
+	manager, err := NewPluginManager()
+	if err != nil {
+		t.Error(err)
+	}
+	fmt.Printf("The test bucket size is %d.\n\n", len(data))
+
+	for i, p := range data {
+		result, ok := manager.Get(p.t, p.n)
+		if !ok {
+			t.Errorf("%d: get error : not found\n\n", i)
+			return
+		}
+		if !reflect.DeepEqual(p.r, result) {
+			t.Errorf("%d: result mismatch:\n  exp=%v\n  got=%v\n\n", i, p.r, result)
+		}
+	}
+}
+
+func TestManager_Delete(t *testing.T) {
+	data := []struct {
+		t   PluginType
+		n   string
+		err error
+	}{
+		{
+			t: SOURCE,
+			n: "random2",
+		}, {
+			t: SINK,
+			n: "file2",
+		}, {
+			t: FUNCTION,
+			n: "echo2",
+		}, {
+			t: SOURCE,
+			n: "random3",
+		},
+	}
+	manager, err := NewPluginManager()
+	if err != nil {
+		t.Error(err)
+	}
+	fmt.Printf("The test bucket size is %d.\n\n", len(data))
+
+	for i, p := range data {
+		err = manager.Delete(p.t, p.n, false)
+		if err != nil {
+			t.Errorf("%d: delete error : %s\n\n", i, err)
+		}
+	}
+}
+
+func checkFile(pluginDir string, etcDir string, t PluginType, name string, version string) error {
+	soName := ucFirst(name) + ".so"
+	if version != "" {
+		soName = fmt.Sprintf("%s@v%s.so", ucFirst(name), version)
+	}
+	soPath := path.Join(pluginDir, PluginTypes[t], soName)
+	_, err := os.Stat(soPath)
+	if err != nil {
+		return err
+	}
+	if t == SOURCE {
+		etcPath := path.Join(etcDir, PluginTypes[t], name+".yaml")
+		_, err = os.Stat(etcPath)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

+ 47 - 0
plugins/plugins.http

@@ -0,0 +1,47 @@
+###
+
+POST http://127.0.0.1:9081/plugins/sources
+Content-Type: application/json
+
+{"name":"random3","file":"http://127.0.0.1/testzips/sources/random3.zip"}
+
+###
+GET http://127.0.0.1:9081/plugins/sources
+
+###
+GET http://127.0.0.1:9081/plugins/sources/random3
+
+###
+DELETE http://127.0.0.1:9081/plugins/sources/random3
+
+###
+POST http://127.0.0.1:9081/plugins/sinks
+Content-Type: application/json
+
+{"name":"file2","file":"http://127.0.0.1/testzips/sinks/file2.zip"}
+
+###
+GET http://127.0.0.1:9081/plugins/sinks
+
+###
+GET http://127.0.0.1:9081/plugins/sinks/file2
+
+###
+DELETE http://127.0.0.1:9081/plugins/sinks/file2?stop=1
+
+###
+POST http://127.0.0.1:9081/plugins/functions
+Content-Type: application/json
+
+{"name":"echo2","file":"http://127.0.0.1/testzips/functions/echo2.zip"}
+
+###
+GET http://127.0.0.1:9081/plugins/functions
+
+###
+GET http://127.0.0.1:9081/plugins/functions/echo2
+
+###
+DELETE http://127.0.0.1:9081/plugins/functions/echo2
+
+###

二进制
plugins/testzips/functions/echo2.zip


二进制
plugins/testzips/functions/zipMissSo.zip


二进制
plugins/testzips/sinks/file2.zip


二进制
plugins/testzips/sinks/zipWrongName.zip


二进制
plugins/testzips/sources/random2.zip


二进制
plugins/testzips/sources/random3.zip


二进制
plugins/testzips/sources/zipMissConf.zip


+ 11 - 2
xsql/ast.go

@@ -3,7 +3,7 @@ package xsql
 import (
 	"fmt"
 	"github.com/emqx/kuiper/common"
-	"github.com/emqx/kuiper/common/plugin_manager"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/api"
 	"math"
 	"reflect"
@@ -542,6 +542,9 @@ func (m Message) Value(key string) (interface{}, bool) {
 }
 
 func (m Message) Meta(key string) (interface{}, bool) {
+	if key == "*" {
+		return map[string]interface{}(m), true
+	}
 	return m.Value(key)
 }
 
@@ -558,6 +561,9 @@ func (m Metadata) Value(key string) (interface{}, bool) {
 }
 
 func (m Metadata) Meta(key string) (interface{}, bool) {
+	if key == "*" {
+		return map[string]interface{}(m), true
+	}
 	msg := Message(m)
 	return msg.Meta(key)
 }
@@ -574,6 +580,9 @@ func (t *Tuple) Value(key string) (interface{}, bool) {
 }
 
 func (t *Tuple) Meta(key string) (interface{}, bool) {
+	if key == "*" {
+		return map[string]interface{}(t.Metadata), true
+	}
 	return t.Metadata.Value(key)
 }
 
@@ -1680,7 +1689,7 @@ func isAggFunc(f *Call) bool {
 	} else if _, ok := mathFuncMap[fn]; ok {
 		return false
 	} else {
-		if nf, err := plugin_manager.GetPlugin(f.Name, "functions"); err == nil {
+		if nf, err := plugins.GetPlugin(f.Name, plugins.FUNCTION); err == nil {
 			if ef, ok := nf.(api.Function); ok && ef.IsAggregate() {
 				return true
 			}

+ 2 - 2
xsql/funcs_aggregate.go

@@ -3,7 +3,7 @@ package xsql
 import (
 	"fmt"
 	"github.com/emqx/kuiper/common"
-	"github.com/emqx/kuiper/common/plugin_manager"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/api"
 	"strings"
 )
@@ -140,7 +140,7 @@ func (v AggregateFunctionValuer) Call(name string, args []interface{}) (interfac
 		return 0, true
 	default:
 		common.Log.Debugf("run aggregate func %s", name)
-		if nf, err := plugin_manager.GetPlugin(name, "functions"); err != nil {
+		if nf, err := plugins.GetPlugin(name, plugins.FUNCTION); err != nil {
 			return nil, false
 		} else {
 			f, ok := nf.(api.Function)

+ 2 - 2
xsql/funcs_ast_validator.go

@@ -2,7 +2,7 @@ package xsql
 
 import (
 	"fmt"
-	"github.com/emqx/kuiper/common/plugin_manager"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/api"
 	"strings"
 )
@@ -26,7 +26,7 @@ func validateFuncs(funcName string, args []Expr) error {
 	} else if _, ok := aggFuncMap[lowerName]; ok {
 		return validateAggFunc(lowerName, args)
 	} else {
-		if nf, err := plugin_manager.GetPlugin(funcName, "functions"); err != nil {
+		if nf, err := plugins.GetPlugin(funcName, plugins.FUNCTION); err != nil {
 			return err
 		} else {
 			f, ok := nf.(api.Function)

+ 13 - 1
xsql/funcs_misc.go

@@ -12,6 +12,7 @@ import (
 	"hash"
 	"io"
 	"math"
+	"reflect"
 	"strconv"
 	"strings"
 	"time"
@@ -192,7 +193,18 @@ func hashCall(name string, args []interface{}) (interface{}, bool) {
 func otherCall(name string, args []interface{}) (interface{}, bool) {
 	switch name {
 	case "isnull":
-		return args[0] == nil, true
+		if args[0] == nil {
+			return true, true
+		} else {
+			v := reflect.ValueOf(args[0])
+			switch v.Kind() {
+			case reflect.Slice, reflect.Map:
+				return v.IsNil(), true
+			default:
+				return false, true
+			}
+		}
+		return false, true
 	case "newuuid":
 		if uuid, err := uuid.NewUUID(); err != nil {
 			return err, false

+ 4 - 4
xsql/functions.go

@@ -2,7 +2,7 @@ package xsql
 
 import (
 	"github.com/emqx/kuiper/common"
-	"github.com/emqx/kuiper/common/plugin_manager"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/api"
 	"strings"
 )
@@ -56,7 +56,7 @@ var hashFuncMap = map[string]string{"md5": "",
 	"sha1": "", "sha256": "", "sha384": "", "sha512": "",
 }
 
-var otherFuncMap = map[string]string{"isNull": "",
+var otherFuncMap = map[string]string{"isnull": "",
 	"newuuid": "", "timestamp": "", "mqtt": "", "meta": "",
 }
 
@@ -76,8 +76,8 @@ func (*FunctionValuer) Call(name string, args []interface{}) (interface{}, bool)
 		return nil, false
 	} else {
 		common.Log.Debugf("run func %s", name)
-		if nf, err := plugin_manager.GetPlugin(name, "functions"); err != nil {
-			return nil, false
+		if nf, err := plugins.GetPlugin(name, plugins.FUNCTION); err != nil {
+			return err, false
 		} else {
 			f, ok := nf.(api.Function)
 			if !ok {

+ 5 - 1
xsql/parser.go

@@ -609,7 +609,11 @@ func (p *Parser) parseCall(name string) (Expr, error) {
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 != RPAREN {
 				return nil, fmt.Errorf("found %q, expected right paren.", lit2)
 			} else {
-				args = append(args, &StringLiteral{Val: "*"})
+				if p.inmeta {
+					args = append(args, &MetaRef{StreamName: "", Name: "*"})
+				} else {
+					args = append(args, &StringLiteral{Val: "*"})
+				}
 				return &Call{Name: name, Args: args}, nil
 			}
 		} else {

+ 70 - 3
xsql/plans/misc_func_test.go

@@ -11,7 +11,7 @@ import (
 	"testing"
 )
 
-func TestHashFunc_Apply1(t *testing.T) {
+func TestMiscFunc_Apply1(t *testing.T) {
 	var tests = []struct {
 		sql    string
 		data   *xsql.Tuple
@@ -132,12 +132,51 @@ func TestHashFunc_Apply1(t *testing.T) {
 				"a":     "devices/device_001/message",
 			}},
 		},
+		{
+			sql: "SELECT isNull(arr) as r FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"temperature": 43.2,
+					"arr":         []int{},
+				},
+			},
+			result: []map[string]interface{}{{
+				"r": false,
+			}},
+		},
+		{
+			sql: "SELECT isNull(arr) as r FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"temperature": 43.2,
+					"arr":         []float64(nil),
+				},
+			},
+			result: []map[string]interface{}{{
+				"r": true,
+			}},
+		}, {
+			sql: "SELECT isNull(rec) as r FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"temperature": 43.2,
+					"rec":         map[string]interface{}(nil),
+				},
+			},
+			result: []map[string]interface{}{{
+				"r": true,
+			}},
+		},
 	}
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
-	contextLogger := common.Log.WithField("rule", "TestHashFunc_Apply1")
+	contextLogger := common.Log.WithField("rule", "TestMiscFunc_Apply1")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
+
 		stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
 		if err != nil || stmt == nil {
 			t.Errorf("parse sql %s error %v", tt.sql, err)
@@ -257,12 +296,40 @@ func TestMetaFunc_Apply1(t *testing.T) {
 				"r": "device2",
 			}},
 		},
+		{
+			sql: "SELECT meta(*) as r FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"temperature": 43.2,
+				},
+				Metadata: xsql.Metadata{
+					"temperature": map[string]interface{}{
+						"id":     "dfadfasfas",
+						"device": "device2",
+					},
+					"device": "gateway",
+				},
+			},
+			result: []map[string]interface{}{{
+				"r": map[string]interface{}{
+					"temperature": map[string]interface{}{
+						"id":     "dfadfasfas",
+						"device": "device2",
+					},
+					"device": "gateway",
+				},
+			}},
+		},
 	}
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
-	contextLogger := common.Log.WithField("rule", "TestHashFunc_Apply1")
+	contextLogger := common.Log.WithField("rule", "TestMetaFunc_Apply1")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
+		if i != 2 {
+			continue
+		}
 		stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
 		if err != nil || stmt == nil {
 			t.Errorf("parse sql %s error %v", tt.sql, err)

+ 51 - 14
xsql/plans/preprocessor.go

@@ -174,7 +174,9 @@ func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{},
 			}
 		case *xsql.ArrayType:
 			var s []interface{}
-			if jtype == reflect.Slice {
+			if t == nil {
+				s = nil
+			} else if jtype == reflect.Slice {
 				s = t.([]interface{})
 			} else if jtype == reflect.String {
 				err := json.Unmarshal([]byte(t.(string)), &s)
@@ -186,13 +188,17 @@ func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{},
 			}
 
 			if tempArr, err := p.addArrayField(st, s); err != nil {
-				return err
+				return fmt.Errorf("fail to parse field %s: %s", n, err)
 			} else {
 				r[n] = tempArr
 			}
 		case *xsql.RecType:
 			nextJ := make(map[string]interface{})
-			if jtype == reflect.Map {
+			if t == nil {
+				nextJ = nil
+				r[n] = nextJ
+				return nil
+			} else if jtype == reflect.Map {
 				nextJ, ok = t.(map[string]interface{})
 				if !ok {
 					return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
@@ -228,11 +234,16 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 	if ft.FieldType != nil { //complex type array or struct
 		switch st := ft.FieldType.(type) { //Only two complex types supported here
 		case *xsql.ArrayType: //TODO handle array of array. Now the type is treated as interface{}
-			var tempSlice [][]interface{}
+			if srcSlice == nil {
+				return [][]interface{}(nil), nil
+			}
 			var s []interface{}
+			var tempSlice reflect.Value
 			for i, t := range srcSlice {
 				jtype := reflect.ValueOf(t).Kind()
-				if jtype == reflect.Slice || jtype == reflect.Array {
+				if t == nil {
+					s = nil
+				} else if jtype == reflect.Slice || jtype == reflect.Array {
 					s = t.([]interface{})
 				} else if jtype == reflect.String {
 					err := json.Unmarshal([]byte(t.(string)), &s)
@@ -245,17 +256,28 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 				if tempArr, err := p.addArrayField(st, s); err != nil {
 					return nil, err
 				} else {
-					tempSlice = append(tempSlice, tempArr.([]interface{}))
+					if !tempSlice.IsValid() {
+						s := reflect.TypeOf(tempArr)
+						tempSlice = reflect.MakeSlice(reflect.SliceOf(s), 0, 0)
+					}
+					tempSlice = reflect.Append(tempSlice, reflect.ValueOf(tempArr))
 				}
 			}
-			return tempSlice, nil
+			return tempSlice.Interface(), nil
 		case *xsql.RecType:
-			var tempSlice []map[string]interface{}
+			if srcSlice == nil {
+				return []map[string]interface{}(nil), nil
+			}
+			tempSlice := make([]map[string]interface{}, 0)
 			for i, t := range srcSlice {
 				jtype := reflect.ValueOf(t).Kind()
 				j := make(map[string]interface{})
 				var ok bool
-				if jtype == reflect.Map {
+				if t == nil {
+					j = nil
+					tempSlice = append(tempSlice, j)
+					continue
+				} else if jtype == reflect.Map {
 					j, ok = t.(map[string]interface{})
 					if !ok {
 						return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
@@ -287,7 +309,10 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 		case xsql.UNKNOWN:
 			return nil, fmt.Errorf("invalid data type unknown defined for %s, please checke the stream definition", srcSlice)
 		case xsql.BIGINT:
-			var tempSlice []int
+			if srcSlice == nil {
+				return []int(nil), nil
+			}
+			tempSlice := make([]int, 0)
 			for i, t := range srcSlice {
 				jtype := reflect.ValueOf(t).Kind()
 				if jtype == reflect.Float64 {
@@ -304,7 +329,10 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 			}
 			return tempSlice, nil
 		case xsql.FLOAT:
-			var tempSlice []float64
+			if srcSlice == nil {
+				return []float64(nil), nil
+			}
+			tempSlice := make([]float64, 0)
 			for i, t := range srcSlice {
 				jtype := reflect.ValueOf(t).Kind()
 				if jtype == reflect.Float64 {
@@ -321,7 +349,10 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 			}
 			return tempSlice, nil
 		case xsql.STRINGS:
-			var tempSlice []string
+			if srcSlice == nil {
+				return []string(nil), nil
+			}
+			tempSlice := make([]string, 0)
 			for i, t := range srcSlice {
 				if reflect.ValueOf(t).Kind() == reflect.String {
 					tempSlice = append(tempSlice, t.(string))
@@ -331,7 +362,10 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 			}
 			return tempSlice, nil
 		case xsql.DATETIME:
-			var tempSlice []time.Time
+			if srcSlice == nil {
+				return []time.Time(nil), nil
+			}
+			tempSlice := make([]time.Time, 0)
 			for i, t := range srcSlice {
 				jtype := reflect.ValueOf(t).Kind()
 				switch jtype {
@@ -353,7 +387,10 @@ func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{})
 			}
 			return tempSlice, nil
 		case xsql.BOOLEAN:
-			var tempSlice []bool
+			if srcSlice == nil {
+				return []bool(nil), nil
+			}
+			tempSlice := make([]bool, 0)
 			for i, t := range srcSlice {
 				jtype := reflect.ValueOf(t).Kind()
 				if jtype == reflect.Bool {

+ 204 - 2
xsql/plans/preprocessor_test.go

@@ -33,6 +33,16 @@ func TestPreprocessor_Apply(t *testing.T) {
 		},
 		{
 			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "abc", FieldType: &xsql.BasicType{Type: xsql.BIGINT}},
+				},
+			},
+			data:   []byte(`{"abc": null}`),
+			result: errors.New("error in preprocessor: invalid data type for abc, expect bigint but found <nil>(<nil>)"),
+		},
+		{
+			stmt: &xsql.StreamStmt{
 				Name:         xsql.StreamName("demo"),
 				StreamFields: nil,
 			},
@@ -164,7 +174,6 @@ func TestPreprocessor_Apply(t *testing.T) {
 			},
 			},
 		},
-		//Rec type
 		{
 			stmt: &xsql.StreamStmt{
 				Name: xsql.StreamName("demo"),
@@ -223,6 +232,113 @@ func TestPreprocessor_Apply(t *testing.T) {
 		},
 		{
 			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.ArrayType{
+						Type: xsql.STRUCT,
+						FieldType: &xsql.RecType{
+							StreamFields: []xsql.StreamField{
+								{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							},
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": []}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": make([]map[string]interface{}, 0),
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.ArrayType{
+						Type: xsql.STRUCT,
+						FieldType: &xsql.RecType{
+							StreamFields: []xsql.StreamField{
+								{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							},
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": null}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": []map[string]interface{}(nil),
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.ArrayType{
+						Type: xsql.STRUCT,
+						FieldType: &xsql.RecType{
+							StreamFields: []xsql.StreamField{
+								{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							},
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": [null, {"b" : "hello2"}]}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": []map[string]interface{}{
+					nil,
+					{"b": "hello2"},
+				},
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.ArrayType{
+						Type: xsql.ARRAY,
+						FieldType: &xsql.ArrayType{
+							Type: xsql.BIGINT,
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": [[50, 60, 70],[66], [77]]}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": [][]int{
+					{50, 60, 70},
+					{66},
+					{77},
+				},
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.ArrayType{
+						Type: xsql.ARRAY,
+						FieldType: &xsql.ArrayType{
+							Type: xsql.BIGINT,
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": [null, [66], [77]]}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": [][]int{
+					[]int(nil),
+					{66},
+					{77},
+				},
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
 				Name:         xsql.StreamName("demo"),
 				StreamFields: nil,
 			},
@@ -297,6 +413,93 @@ func TestPreprocessor_Apply(t *testing.T) {
 		},
 		{
 			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.RecType{
+						StreamFields: []xsql.StreamField{
+							{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							{Name: "c", FieldType: &xsql.RecType{
+								StreamFields: []xsql.StreamField{
+									{Name: "d", FieldType: &xsql.BasicType{Type: xsql.BIGINT}},
+								},
+							}},
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": null}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": map[string]interface{}(nil),
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.RecType{
+						StreamFields: []xsql.StreamField{
+							{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							{Name: "c", FieldType: &xsql.ArrayType{
+								Type: xsql.FLOAT,
+							}},
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": {"b" : "hello", "c": [35.2, 38.2]}}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": map[string]interface{}{
+					"b": "hello",
+					"c": []float64{
+						35.2, 38.2,
+					},
+				},
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.RecType{
+						StreamFields: []xsql.StreamField{
+							{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							{Name: "c", FieldType: &xsql.ArrayType{
+								Type: xsql.FLOAT,
+							}},
+						},
+					}},
+				},
+			},
+			data: []byte(`{"a": {"b" : "hello", "c": null}}`),
+			result: &xsql.Tuple{Message: xsql.Message{
+				"a": map[string]interface{}{
+					"b": "hello",
+					"c": []float64(nil),
+				},
+			},
+			},
+		},
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.RecType{
+						StreamFields: []xsql.StreamField{
+							{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							{Name: "c", FieldType: &xsql.ArrayType{
+								Type: xsql.FLOAT,
+							}},
+						},
+					}},
+				},
+			},
+			data:   []byte(`{"a": {"b" : "hello", "c": [null, 35.4]}}`),
+			result: errors.New("error in preprocessor: fail to parse field c: invalid data type for [0], expect float but found <nil>(<nil>)"),
+		},
+		{
+			stmt: &xsql.StreamStmt{
 				Name:         xsql.StreamName("demo"),
 				StreamFields: nil,
 			},
@@ -319,7 +522,6 @@ func TestPreprocessor_Apply(t *testing.T) {
 	contextLogger := common.Log.WithField("rule", "TestPreprocessor_Apply")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
-
 		pp := &Preprocessor{streamStmt: tt.stmt}
 
 		dm := make(map[string]interface{})

+ 19 - 0
xsql/plans/project_test.go

@@ -158,6 +158,16 @@ func TestProjectPlan_Apply1(t *testing.T) {
 			data: &xsql.Tuple{
 				Emitter: "test",
 				Message: xsql.Message{
+					"a": map[string]interface{}(nil),
+				},
+			},
+			result: []map[string]interface{}{{}},
+		},
+		{
+			sql: `SELECT a->b AS ab FROM test`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
 					"name": "name",
 				},
 			},
@@ -1543,6 +1553,15 @@ func TestProjectPlanError(t *testing.T) {
 				},
 			},
 			result: errors.New("run Select error: call func sum error: requires int but found string(ddd)"),
+		}, {
+			sql: `SELECT a[0]->b AS ab FROM test`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"a": []map[string]interface{}(nil),
+				},
+			},
+			result: errors.New("run Select error: out of index: 0 of 0"),
 		},
 	}
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))

+ 2 - 0
xsql/processors/extension_test.go

@@ -1,3 +1,5 @@
+// +build !windows
+
 package processors
 
 import (

+ 3 - 3
xsql/processors/xsql_processor_test.go

@@ -2948,9 +2948,9 @@ func getMetric(tp *xstream.TopologyNew, name string) int {
 
 func compareMetrics(tp *xstream.TopologyNew, m map[string]interface{}, sql string) (err error) {
 	keys, values := tp.GetMetrics()
-	for i, k := range keys {
-		log.Printf("%s:%v", k, values[i])
-	}
+	//for i, k := range keys {
+	//	log.Printf("%s:%v", k, values[i])
+	//}
 	for k, v := range m {
 		var (
 			index   int

+ 186 - 20
xstream/cli/main.go

@@ -130,7 +130,7 @@ func main() {
 		{
 			Name:    "create",
 			Aliases: []string{"create"},
-			Usage:   "create stream $stream_name | create stream $stream_name -f $stream_def_file | create rule $rule_name $rule_json | create rule $rule_name -f $rule_def_file",
+			Usage:   "create stream $stream_name | create stream $stream_name -f $stream_def_file | create rule $rule_name $rule_json | create rule $rule_name -f $rule_def_file | create plugin $plugin_type $plugin_name $plugin_json | create plugin $plugin_type $plugin_name -f $plugin_def_file",
 
 			Subcommands: []cli.Command{
 				{
@@ -146,13 +146,8 @@ func main() {
 					Action: func(c *cli.Context) error {
 						sfile := c.String("file")
 						if sfile != "" {
-							if _, err := os.Stat(c.String("file")); os.IsNotExist(err) {
-								fmt.Printf("The specified stream defintion file %s does not existed.\n", sfile)
-								return nil
-							}
-							fmt.Printf("Creating a new stream from file %s.\n", sfile)
-							if stream, err := ioutil.ReadFile(sfile); err != nil {
-								fmt.Printf("Failed to read from stream definition file %s.\n", sfile)
+							if stream, err := readDef(sfile, "stream"); err != nil {
+								fmt.Printf("%s", err)
 								return nil
 							} else {
 								args := strings.Join([]string{"CREATE STREAM ", string(stream)}, " ")
@@ -178,13 +173,8 @@ func main() {
 					Action: func(c *cli.Context) error {
 						sfile := c.String("file")
 						if sfile != "" {
-							if _, err := os.Stat(c.String("file")); os.IsNotExist(err) {
-								fmt.Printf("The specified rule defenition file %s is not existed.\n", sfile)
-								return nil
-							}
-							fmt.Printf("Creating a new rule from file %s.\n", sfile)
-							if rule, err := ioutil.ReadFile(sfile); err != nil {
-								fmt.Printf("Failed to read from rule definition file %s.\n", sfile)
+							if rule, err := readDef(sfile, "rule"); err != nil {
+								fmt.Printf("%s", err)
 								return nil
 							} else {
 								if len(c.Args()) != 1 {
@@ -193,7 +183,7 @@ func main() {
 								}
 								rname := c.Args()[0]
 								var reply string
-								args := &common.Rule{rname, string(rule)}
+								args := &common.RuleDesc{rname, string(rule)}
 								err = client.Call("Server.CreateRule", args, &reply)
 								if err != nil {
 									fmt.Println(err)
@@ -210,7 +200,7 @@ func main() {
 							rname := c.Args()[0]
 							rjson := c.Args()[1]
 							var reply string
-							args := &common.Rule{rname, rjson}
+							args := &common.RuleDesc{rname, rjson}
 							err = client.Call("Server.CreateRule", args, &reply)
 							if err != nil {
 								fmt.Println(err)
@@ -221,12 +211,68 @@ func main() {
 						}
 					},
 				},
+				{
+					Name:  "plugin",
+					Usage: "create plugin $plugin_type $plugin_name [$plugin_json | -f plugin_def_file]",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name:     "file, f",
+							Usage:    "the location of plugin definition file",
+							FilePath: "/home/myplugin.txt",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						if len(c.Args()) < 2 {
+							fmt.Printf("Expect plugin type and name.\n")
+							return nil
+						}
+						ptype, err := getPluginType(c.Args()[0])
+						if err != nil {
+							fmt.Printf("%s\n", err)
+							return nil
+						}
+						pname := c.Args()[1]
+						sfile := c.String("file")
+						args := &common.PluginDesc{
+							RuleDesc: common.RuleDesc{
+								Name: pname,
+							},
+							Type: ptype,
+						}
+						if sfile != "" {
+							if len(c.Args()) != 2 {
+								fmt.Printf("Expect plugin type, name.\nBut found %d args:%s.\n", len(c.Args()), c.Args())
+								return nil
+							}
+							if p, err := readDef(sfile, "plugin"); err != nil {
+								fmt.Printf("%s", err)
+								return nil
+							} else {
+								args.Json = string(p)
+							}
+						} else {
+							if len(c.Args()) != 3 {
+								fmt.Printf("Expect plugin type, name and json.\nBut found %d args:%s.\n", len(c.Args()), c.Args())
+								return nil
+							}
+							args.Json = c.Args()[2]
+						}
+						var reply string
+						err = client.Call("Server.CreatePlugin", args, &reply)
+						if err != nil {
+							fmt.Println(err)
+						} else {
+							fmt.Println(reply)
+						}
+						return nil
+					},
+				},
 			},
 		},
 		{
 			Name:    "describe",
 			Aliases: []string{"describe"},
-			Usage:   "describe stream $stream_name | describe rule $rule_name",
+			Usage:   "describe stream $stream_name | describe rule $rule_name | describe plugin $plugin_type $plugin_name",
 			Subcommands: []cli.Command{
 				{
 					Name:  "stream",
@@ -256,13 +302,41 @@ func main() {
 						return nil
 					},
 				},
+				{
+					Name:  "plugin",
+					Usage: "describe plugin $plugin_type $plugin_name",
+					//Flags: nflag,
+					Action: func(c *cli.Context) error {
+						ptype, err := getPluginType(c.Args()[0])
+						if err != nil {
+							fmt.Printf("%s\n", err)
+							return nil
+						}
+						pname := c.Args()[1]
+						args := &common.PluginDesc{
+							RuleDesc: common.RuleDesc{
+								Name: pname,
+							},
+							Type: ptype,
+						}
+
+						var reply string
+						err = client.Call("Server.DescPlugin", args, &reply)
+						if err != nil {
+							fmt.Println(err)
+						} else {
+							fmt.Println(reply)
+						}
+						return nil
+					},
+				},
 			},
 		},
 
 		{
 			Name:    "drop",
 			Aliases: []string{"drop"},
-			Usage:   "drop stream $stream_name | drop rule $rule_name",
+			Usage:   "drop stream $stream_name | drop rule $rule_name | drop plugin $plugin_type $plugin_name -r $stop",
 			Subcommands: []cli.Command{
 				{
 					Name:  "stream",
@@ -293,13 +367,56 @@ func main() {
 						return nil
 					},
 				},
+				{
+					Name:  "plugin",
+					Usage: "drop plugin $plugin_type $plugin_name -s stop",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name:  "stop, s",
+							Usage: "stop kuiper after the action",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						r := c.String("stop")
+						if r != "true" && r != "false" {
+							fmt.Printf("Expect r to be a boolean value.\n")
+							return nil
+						}
+						if len(c.Args()) < 2 || len(c.Args()) > 3 {
+							fmt.Printf("Expect plugin type and name.\n")
+							return nil
+						}
+						ptype, err := getPluginType(c.Args()[0])
+						if err != nil {
+							fmt.Printf("%s\n", err)
+							return nil
+						}
+						pname := c.Args()[1]
+						args := &common.PluginDesc{
+							RuleDesc: common.RuleDesc{
+								Name: pname,
+							},
+							Type: ptype,
+							Stop: r == "true",
+						}
+
+						var reply string
+						err = client.Call("Server.DropPlugin", args, &reply)
+						if err != nil {
+							fmt.Println(err)
+						} else {
+							fmt.Println(reply)
+						}
+						return nil
+					},
+				},
 			},
 		},
 
 		{
 			Name:    "show",
 			Aliases: []string{"show"},
-			Usage:   "show streams | show rules",
+			Usage:   "show streams | show rules | show plugins $plugin_type",
 
 			Subcommands: []cli.Command{
 				{
@@ -324,6 +441,29 @@ func main() {
 						return nil
 					},
 				},
+				{
+					Name:  "plugins",
+					Usage: "show plugins $plugin_type",
+					Action: func(c *cli.Context) error {
+						if len(c.Args()) != 1 {
+							fmt.Printf("Expect plugin type.\n")
+							return nil
+						}
+						ptype, err := getPluginType(c.Args()[0])
+						if err != nil {
+							fmt.Printf("%s\n", err)
+							return nil
+						}
+						var reply string
+						err = client.Call("Server.ShowPlugins", ptype, &reply)
+						if err != nil {
+							fmt.Println(err)
+						} else {
+							fmt.Println(reply)
+						}
+						return nil
+					},
+				},
 			},
 		},
 
@@ -455,3 +595,29 @@ func main() {
 		fmt.Printf("%v", err)
 	}
 }
+
+func getPluginType(arg string) (ptype int, err error) {
+	switch arg {
+	case "source":
+		ptype = 0
+	case "sink":
+		ptype = 1
+	case "function":
+		ptype = 2
+	default:
+		err = fmt.Errorf("Invalid plugin type %s, should be \"source\", \"sink\" or \"function\".\n", arg)
+	}
+	return
+}
+
+func readDef(sfile string, t string) ([]byte, error) {
+	if _, err := os.Stat(sfile); os.IsNotExist(err) {
+		return nil, fmt.Errorf("The specified %s defenition file %s is not existed.\n", t, sfile)
+	}
+	fmt.Printf("Creating a new %s from file %s.\n", t, sfile)
+	if rule, err := ioutil.ReadFile(sfile); err != nil {
+		return nil, fmt.Errorf("Failed to read from %s definition file %s.\n", t, sfile)
+	} else {
+		return rule, nil
+	}
+}

+ 1 - 1
xstream/extensions/edgex_source.go

@@ -34,7 +34,7 @@ func (es *EdgexSource) Configure(device string, props map[string]interface{}) er
 	if s, ok := props["server"]; ok {
 		server = s.(string)
 	}
-	var port = 5570
+	var port = 5563
 	if p, ok := props["port"]; ok {
 		port = p.(int)
 	}

+ 17 - 5
xstream/nodes/sink_node.go

@@ -3,7 +3,7 @@ package nodes
 import (
 	"fmt"
 	"github.com/emqx/kuiper/common"
-	"github.com/emqx/kuiper/common/plugin_manager"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/sinks"
 	"sync"
@@ -102,6 +102,15 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
 				cacheSaveInterval = t
 			}
 		}
+		omitIfEmpty := false
+		if c, ok := m.options["omitIfEmpty"]; ok {
+			if t, ok := c.(bool); !ok {
+				logger.Warnf("invalid type for omitIfEmpty property, should be a bool value 'true/false'.", c)
+			} else {
+				omitIfEmpty = t
+			}
+		}
+
 		m.reset()
 		logger.Infof("open sink node %d instances", m.concurrency)
 		for i := 0; i < m.concurrency; i++ { // workers
@@ -140,9 +149,9 @@ func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
 					case data := <-cache.Out:
 						stats.SetBufferLength(int64(cache.Length()))
 						if runAsync {
-							go doCollect(sink, data, stats, retryInterval, cache.Complete, ctx)
+							go doCollect(sink, data, stats, retryInterval, omitIfEmpty, cache.Complete, ctx)
 						} else {
-							doCollect(sink, data, stats, retryInterval, cache.Complete, ctx)
+							doCollect(sink, data, stats, retryInterval, omitIfEmpty,cache.Complete, ctx)
 						}
 					case <-ctx.Done():
 						logger.Infof("sink node %s instance %d done", m.name, instance)
@@ -164,7 +173,7 @@ func (m *SinkNode) reset() {
 	m.statManagers = nil
 }
 
-func doCollect(sink api.Sink, item *CacheTuple, stats StatManager, retryInterval int, signalCh chan<- int, ctx api.StreamContext) {
+func doCollect(sink api.Sink, item *CacheTuple, stats StatManager, retryInterval int, omitIfEmpty bool, signalCh chan<- int, ctx api.StreamContext) {
 	stats.IncTotalRecordsIn()
 	stats.ProcessTimeStart()
 	logger := ctx.GetLogger()
@@ -178,6 +187,9 @@ func doCollect(sink api.Sink, item *CacheTuple, stats StatManager, retryInterval
 		outdata = []byte(fmt.Sprintf(`[{"error":"result is not a string but found %#v"}]`, val))
 	}
 	for {
+		if omitIfEmpty && string(outdata) == "[{}]" {
+			break
+		}
 		if err := sink.Collect(ctx, outdata); err != nil {
 			stats.IncTotalExceptions()
 			logger.Warnf("sink node %s instance %d publish %s error: %v", ctx.GetOpId(), ctx.GetInstanceId(), outdata, err)
@@ -209,7 +221,7 @@ func doGetSink(name string, action map[string]interface{}) (api.Sink, error) {
 	case "rest":
 		s = &sinks.RestSink{}
 	default:
-		nf, err := plugin_manager.GetPlugin(name, "sinks")
+		nf, err := plugins.GetPlugin(name, plugins.SINK)
 		if err != nil {
 			return nil, err
 		}

+ 2 - 2
xstream/nodes/source_node.go

@@ -3,7 +3,7 @@ package nodes
 import (
 	"fmt"
 	"github.com/emqx/kuiper/common"
-	"github.com/emqx/kuiper/common/plugin_manager"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xsql"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/extensions"
@@ -153,7 +153,7 @@ func doGetSource(t string) (api.Source, error) {
 	case "mqtt":
 		s = &extensions.MQTTSource{}
 	default:
-		nf, err := plugin_manager.GetPlugin(t, "sources")
+		nf, err := plugins.GetPlugin(t, plugins.SOURCE)
 		if err != nil {
 			return nil, err
 		}

+ 1 - 1
xstream/server/main.go

@@ -2,7 +2,7 @@ package main
 
 import "github.com/emqx/kuiper/xstream/server/server"
 
-var Version string = "unknown"
+var Version = "unknown"
 
 func main() {
 	server.StartUp(Version)

+ 107 - 0
xstream/server/server/rest.go

@@ -3,6 +3,7 @@ package server
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/gorilla/mux"
 	"io"
@@ -51,6 +52,7 @@ func jsonResponse(i interface{}, w http.ResponseWriter, logger api.Logger) {
 
 func createRestServer(port int) *http.Server {
 	r := mux.NewRouter()
+	r.HandleFunc("/", rootHandler).Methods(http.MethodGet, http.MethodPost)
 	r.HandleFunc("/streams", streamsHandler).Methods(http.MethodGet, http.MethodPost)
 	r.HandleFunc("/streams/{name}", streamHandler).Methods(http.MethodGet, http.MethodDelete)
 	r.HandleFunc("/rules", rulesHandler).Methods(http.MethodGet, http.MethodPost)
@@ -60,6 +62,13 @@ func createRestServer(port int) *http.Server {
 	r.HandleFunc("/rules/{name}/stop", stopRuleHandler).Methods(http.MethodPost)
 	r.HandleFunc("/rules/{name}/restart", restartRuleHandler).Methods(http.MethodPost)
 
+	r.HandleFunc("/plugins/sources", sourcesHandler).Methods(http.MethodGet, http.MethodPost)
+	r.HandleFunc("/plugins/sources/{name}", sourceHandler).Methods(http.MethodDelete, http.MethodGet)
+	r.HandleFunc("/plugins/sinks", sinksHandler).Methods(http.MethodGet, http.MethodPost)
+	r.HandleFunc("/plugins/sinks/{name}", sinkHandler).Methods(http.MethodDelete, http.MethodGet)
+	r.HandleFunc("/plugins/functions", functionsHandler).Methods(http.MethodGet, http.MethodPost)
+	r.HandleFunc("/plugins/functions/{name}", functionHandler).Methods(http.MethodDelete, http.MethodGet)
+
 	server := &http.Server{
 		Addr: fmt.Sprintf("0.0.0.0:%d", port),
 		// Good practice to set timeouts to avoid Slowloris attacks.
@@ -72,6 +81,16 @@ func createRestServer(port int) *http.Server {
 	return server
 }
 
+//The handler for root
+func rootHandler(w http.ResponseWriter, r *http.Request) {
+	defer r.Body.Close()
+	switch r.Method {
+	case http.MethodGet, http.MethodPost:
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte("OK\n"))
+	}
+}
+
 //list or create streams
 func streamsHandler(w http.ResponseWriter, r *http.Request) {
 	defer r.Body.Close()
@@ -248,3 +267,91 @@ func restartRuleHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusOK)
 	w.Write([]byte(fmt.Sprintf("Rule %s was restarted", name)))
 }
+
+func pluginsHandler(w http.ResponseWriter, r *http.Request, t plugins.PluginType) {
+	defer r.Body.Close()
+	switch r.Method {
+	case http.MethodGet:
+		content, err := pluginManager.List(t)
+		if err != nil {
+			handleError(w, fmt.Errorf("%s plugins list command error: %s", plugins.PluginTypes[t], err), http.StatusBadRequest, logger)
+			return
+		}
+		jsonResponse(content, w, logger)
+	case http.MethodPost:
+		sd := plugins.Plugin{}
+		err := json.NewDecoder(r.Body).Decode(&sd)
+		// Problems decoding
+		if err != nil {
+			handleError(w, fmt.Errorf("Invalid body: Error decoding the %s plugin json: %v", plugins.PluginTypes[t], err), http.StatusBadRequest, logger)
+			return
+		}
+		err = pluginManager.Register(t, &sd)
+		if err != nil {
+			handleError(w, fmt.Errorf("%s plugins create command error: %s", plugins.PluginTypes[t], err), http.StatusBadRequest, logger)
+			return
+		}
+		w.WriteHeader(http.StatusCreated)
+		w.Write([]byte(fmt.Sprintf("%s plugin %s is created", plugins.PluginTypes[t], sd.Name)))
+	}
+}
+
+func pluginHandler(w http.ResponseWriter, r *http.Request, t plugins.PluginType) {
+	defer r.Body.Close()
+	vars := mux.Vars(r)
+	name := vars["name"]
+	cb := r.URL.Query().Get("stop")
+
+	switch r.Method {
+	case http.MethodDelete:
+		r := cb == "1"
+		err := pluginManager.Delete(t, name, r)
+		if err != nil {
+			handleError(w, fmt.Errorf("delete %s plugin %s error: %s", plugins.PluginTypes[t], name, err), http.StatusBadRequest, logger)
+			return
+		}
+		w.WriteHeader(http.StatusOK)
+		result := fmt.Sprintf("%s plugin %s is deleted", plugins.PluginTypes[t], name)
+		if r {
+			result = fmt.Sprintf("%s and Kuiper will be stopped", result)
+		}
+		w.Write([]byte(result))
+	case http.MethodGet:
+		j, ok := pluginManager.Get(t, name)
+		if !ok {
+			handleError(w, fmt.Errorf("describe %s plugin %s error: not found", plugins.PluginTypes[t], name), http.StatusBadRequest, logger)
+			return
+		}
+		jsonResponse(j, w, logger)
+	}
+}
+
+//list or create source plugin
+func sourcesHandler(w http.ResponseWriter, r *http.Request) {
+	pluginsHandler(w, r, plugins.SOURCE)
+}
+
+//delete a source plugin
+func sourceHandler(w http.ResponseWriter, r *http.Request) {
+	pluginHandler(w, r, plugins.SOURCE)
+}
+
+//list or create sink plugin
+func sinksHandler(w http.ResponseWriter, r *http.Request) {
+	pluginsHandler(w, r, plugins.SINK)
+}
+
+//delete a sink plugin
+func sinkHandler(w http.ResponseWriter, r *http.Request) {
+	pluginHandler(w, r, plugins.SINK)
+}
+
+//list or create function plugin
+func functionsHandler(w http.ResponseWriter, r *http.Request) {
+	pluginsHandler(w, r, plugins.FUNCTION)
+}
+
+//delete a function plugin
+func functionHandler(w http.ResponseWriter, r *http.Request) {
+	pluginHandler(w, r, plugins.FUNCTION)
+}

+ 90 - 1
xstream/server/server/rpc.go

@@ -1,8 +1,11 @@
 package server
 
 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
 	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xstream/sinks"
 	"strings"
 	"time"
@@ -72,7 +75,7 @@ func (t *Server) Stream(stream string, reply *string) error {
 	return nil
 }
 
-func (t *Server) CreateRule(rule *common.Rule, reply *string) error {
+func (t *Server) CreateRule(rule *common.RuleDesc, reply *string) error {
 	r, err := ruleProcessor.ExecCreate(rule.Name, rule.Json)
 	if err != nil {
 		return fmt.Errorf("Create rule error : %s.", err)
@@ -158,6 +161,92 @@ func (t *Server) DropRule(name string, reply *string) error {
 	return nil
 }
 
+func (t *Server) CreatePlugin(arg *common.PluginDesc, reply *string) error {
+	pt := plugins.PluginType(arg.Type)
+	p, err := getPluginByJson(arg)
+	if err != nil {
+		return fmt.Errorf("Create plugin error: %s", err)
+	}
+	if p.File == "" {
+		return fmt.Errorf("Create plugin error: Missing plugin file url.")
+	}
+	err = pluginManager.Register(pt, p)
+	if err != nil {
+		return fmt.Errorf("Create plugin error: %s", err)
+	} else {
+		*reply = fmt.Sprintf("Plugin %s is created.", p.Name)
+	}
+	return nil
+}
+
+func (t *Server) DropPlugin(arg *common.PluginDesc, reply *string) error {
+	pt := plugins.PluginType(arg.Type)
+	p, err := getPluginByJson(arg)
+	if err != nil {
+		return fmt.Errorf("Drop plugin error: %s", err)
+	}
+	err = pluginManager.Delete(pt, p.Name, arg.Stop)
+	if err != nil {
+		return fmt.Errorf("Drop plugin error: %s", err)
+	} else {
+		if arg.Stop {
+			*reply = fmt.Sprintf("Plugin %s is dropped and Kuiper will be stopped.", p.Name)
+		} else {
+			*reply = fmt.Sprintf("Plugin %s is dropped.", p.Name)
+		}
+
+	}
+	return nil
+}
+
+func (t *Server) ShowPlugins(arg int, reply *string) error {
+	pt := plugins.PluginType(arg)
+	l, err := pluginManager.List(pt)
+	if err != nil {
+		return fmt.Errorf("Drop plugin error: %s", err)
+	} else {
+		if len(l) == 0 {
+			l = append(l, "No plugin is found.")
+		}
+		*reply = strings.Join(l, "\n")
+	}
+	return nil
+}
+
+func (t *Server) DescPlugin(arg *common.PluginDesc, reply *string) error {
+	pt := plugins.PluginType(arg.Type)
+	p, err := getPluginByJson(arg)
+	if err != nil {
+		return fmt.Errorf("Describe plugin error: %s", err)
+	}
+	m, ok := pluginManager.Get(pt, p.Name)
+	if !ok {
+		return fmt.Errorf("Describe plugin error: not found")
+	} else {
+		s, err := json.Marshal(m)
+		if err != nil {
+			return fmt.Errorf("Describe plugin error: invalid json %v", m)
+		}
+		dst := &bytes.Buffer{}
+		if err := json.Indent(dst, s, "", "  "); err != nil {
+			return fmt.Errorf("Describe plugin error: indent json error %v", err)
+		}
+		*reply = dst.String()
+	}
+	return nil
+}
+
+func getPluginByJson(arg *common.PluginDesc) (*plugins.Plugin, error) {
+	var p plugins.Plugin
+	if arg.Json != "" {
+		if err := json.Unmarshal([]byte(arg.Json), &p); err != nil {
+			return nil, fmt.Errorf("Parse plugin %s error : %s.", arg.Json, err)
+		}
+	}
+	p.Name = arg.Name
+	return &p, nil
+}
+
 func init() {
 	ticker := time.NewTicker(time.Second * 5)
 	go func() {

+ 6 - 0
xstream/server/server/server.go

@@ -3,6 +3,7 @@ package server
 import (
 	"fmt"
 	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/xsql/processors"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"net"
@@ -17,6 +18,7 @@ var (
 
 	ruleProcessor   *processors.RuleProcessor
 	streamProcessor *processors.StreamProcessor
+	pluginManager   *plugins.Manager
 )
 
 func StartUp(Version string) {
@@ -31,6 +33,10 @@ func StartUp(Version string) {
 	}
 	ruleProcessor = processors.NewRuleProcessor(path.Dir(dataDir))
 	streamProcessor = processors.NewStreamProcessor(path.Join(path.Dir(dataDir), "stream"))
+	pluginManager, err = plugins.NewPluginManager()
+	if err != nil {
+		logger.Panic(err)
+	}
 
 	registry = &RuleRegistry{internal: make(map[string]*RuleState)}
 

+ 123 - 5
xstream/sinks/edgex_sink.go

@@ -3,7 +3,11 @@
 package sinks
 
 import (
+	"encoding/json"
 	"fmt"
+	"github.com/edgexfoundry/go-mod-core-contracts/clients/coredata"
+	"github.com/edgexfoundry/go-mod-core-contracts/clients/urlclient/local"
+	"github.com/edgexfoundry/go-mod-core-contracts/models"
 	"github.com/edgexfoundry/go-mod-messaging/messaging"
 	"github.com/edgexfoundry/go-mod-messaging/pkg/types"
 	"github.com/emqx/kuiper/common"
@@ -19,6 +23,9 @@ type EdgexMsgBusSink struct {
 	topic       string
 	contentType string
 
+	deviceName string
+	metadata   string
+
 	optional *OptionalConf
 	client   messaging.MessageClient
 }
@@ -32,7 +39,8 @@ type OptionalConf struct {
 func (ems *EdgexMsgBusSink) Configure(ps map[string]interface{}) error {
 	ems.host = "*"
 	ems.protocol = "tcp"
-	ems.port = 5570
+	ems.port = 5573
+	ems.topic = "events"
 	ems.contentType = "application/json"
 	ems.ptype = messaging.ZeroMQ
 
@@ -54,17 +62,17 @@ func (ems *EdgexMsgBusSink) Configure(ps map[string]interface{}) error {
 		} else if pv, ok := port.(float32); ok {
 			ems.port = int(pv)
 		} else {
-			common.Log.Infof("Not valid port value, will use default value '5570'.")
+			common.Log.Infof("Not valid port value, will use default value '5563'.")
 		}
 
 	} else {
-		common.Log.Infof("Not find port conf, will use default value '5570'.")
+		common.Log.Infof("Not find port conf, will use default value '5563'.")
 	}
 
 	if topic, ok := ps["topic"]; ok {
 		ems.topic = topic.(string)
 	} else {
-		return fmt.Errorf("Topic must be specified.")
+		common.Log.Infof("Not find topic conf, will use default value 'events'.")
 	}
 
 	if contentType, ok := ps["contentType"]; ok {
@@ -73,6 +81,14 @@ func (ems *EdgexMsgBusSink) Configure(ps map[string]interface{}) error {
 		common.Log.Infof("Not find contentType conf, will use default value 'application/json'.")
 	}
 
+	if dname, ok := ps["deviceName"]; ok {
+		ems.deviceName = dname.(string)
+	}
+
+	if metadata, ok := ps["metadata"]; ok {
+		ems.metadata = metadata.(string)
+	}
+
 	if optIntf, ok := ps["optional"]; ok {
 		if opt, ok1 := optIntf.(map[string]interface{}); ok1 {
 			optional := &OptionalConf{}
@@ -114,12 +130,114 @@ func (ems *EdgexMsgBusSink) Open(ctx api.StreamContext) error {
 	return nil
 }
 
+func (ems *EdgexMsgBusSink) produceEvents(result []byte) (*models.Event, error) {
+	var m []map[string]interface{}
+	if err := json.Unmarshal(result, &m); err == nil {
+		m1, f := ems.getMeta(m)
+		var event = &models.Event{}
+		if f {
+			event.Device = m1.getStrVal("device")
+			event.Created = m1.getIntVal("created")
+			event.Modified = m1.getIntVal("modified")
+			event.Origin = m1.getIntVal("origin")
+			event.ID = m1.getStrVal("id")
+			event.Pushed = m1.getIntVal("pushed")
+		}
+		//Override the devicename if user specified the value
+		if ems.deviceName != "" {
+			event.Device = ems.deviceName
+		}
+
+		for _, v := range m {
+			for k1, v1 := range v {
+				if k1 == ems.metadata {
+					continue
+				} else {
+					value := fmt.Sprintf("%v", v1)
+					r := models.Reading{Name: k1, Value: value}
+					if m, ok := m1[k1]; ok {
+						if mm, ok1 := m.(map[string]interface{}); ok1 {
+							mm1 := meta(mm)
+							r.Created = mm1.getIntVal("created")
+							r.Device = mm1.getStrVal("device")
+							r.Id = mm1.getStrVal("id")
+							r.Modified = mm1.getIntVal("modified")
+							r.Origin = mm1.getIntVal("origin")
+							r.Pushed = mm1.getIntVal("pushed")
+						}
+					}
+					event.Readings = append(event.Readings, r)
+				}
+			}
+		}
+		return event, nil
+	} else {
+		return nil, err
+	}
+}
+
+// meta is a decoded JSON object carrying EdgeX metadata (device, id,
+// created, modified, origin, pushed) for an event or a single reading.
+type meta map[string]interface{}
+
+func (ems *EdgexMsgBusSink) getMeta(result []map[string]interface{}) (meta, bool) {
+	if ems.metadata == "" {
+		return nil, false
+	}
+	//Try to get the meta field
+	for _, v := range result {
+		if m, ok := v[ems.metadata]; ok {
+			if m1, ok1 := m.(map[string]interface{}); ok1 {
+				return meta(m1), true
+			} else {
+				common.Log.Infof("Specified a meta field, but the field does not contains any EdgeX metadata.")
+			}
+		}
+	}
+	return nil, false
+}
+
+func (m meta) getIntVal(k string) (int64) {
+	if v, ok := m[k]; ok {
+		if v1, ok1 := v.(float64); ok1 {
+			return int64(v1)
+		}
+	}
+	return 0
+}
+
+func (m meta) getStrVal(k string) (string) {
+	if v, ok := m[k]; ok {
+		if v1, ok1 := v.(string); ok1 {
+			return v1
+		}
+	}
+	return ""
+}
+
+func (ems *EdgexMsgBusSink) getMetaValueAsMap(m meta, k string) (map[string]interface{}) {
+	if v, ok := m[k]; ok {
+		if v1, ok1 := v.(map[string]interface{}); ok1 {
+			return v1
+		}
+	}
+	return nil
+}
+
 func (ems *EdgexMsgBusSink) Collect(ctx api.StreamContext, item interface{}) error {
 	logger := ctx.GetLogger()
+	client := coredata.NewEventClient(local.New(""))
 	if payload, ok := item.([]byte); ok {
 		logger.Debugf("EdgeX message bus sink: %s\n", payload)
-		env := types.NewMessageEnvelope(payload, ctx)
+		evt, err := ems.produceEvents(payload)
+		if err != nil {
+			return fmt.Errorf("Failed to convert to EdgeX event: %s.", err.Error())
+		}
+		data, err := client.MarshalEvent(*evt)
+		if err != nil {
+			return fmt.Errorf("unexpected error MarshalEvent %v", err)
+		}
+		env := types.NewMessageEnvelope([]byte(data), ctx)
 		env.ContentType = ems.contentType
+
 		if e := ems.client.Publish(env, ems.topic); e != nil {
 			logger.Errorf("Found error %s when publish to EdgeX message bus.\n", e)
 			return e

+ 197 - 0
xstream/sinks/edgex_sink_test.go

@@ -0,0 +1,197 @@
+// +build edgex
+
+package sinks
+
+import (
+	"fmt"
+	"github.com/edgexfoundry/go-mod-core-contracts/models"
+	"reflect"
+	"testing"
+)
+
+func TestProduceEvents(t1 *testing.T) {
+	var tests = []struct {
+		input      string
+		deviceName string
+		expected   *models.Event
+		error      string
+	}{
+		{
+			input: `[
+						{"meta":{
+							"correlationid":"","created":1,"device":"demo","id":"","modified":2,"origin":3,"pushed":0,
+							"humidity":{"created":11,"device":"test device name1","id":"12","modified":13,"origin":14,"pushed":15},
+							"temperature":{"created":21,"device":"test device name2","id":"22","modified":23,"origin":24,"pushed":25}
+							}
+						},
+						{"humidity":100},
+						{"temperature":50}
+					]`,
+			expected: &models.Event{
+				ID:       "",
+				Pushed:   0,
+				Device:   "demo",
+				Created:  1,
+				Modified: 2,
+				Origin:   3,
+				Readings: []models.Reading{
+					{
+						Name: "humidity",
+						Value: "100",
+						Created: 11,
+						Device: "test device name1",
+						Id: "12",
+						Modified: 13,
+						Origin: 14,
+						Pushed: 15,
+					},
+					{
+						Name: "temperature",
+						Value: "50",
+						Created: 21,
+						Device: "test device name2",
+						Id: "22",
+						Modified: 23,
+						Origin: 24,
+						Pushed: 25,
+					},
+				},
+			},
+			error:     "",
+		},
+
+		{
+			input: `[
+						{"meta":{
+							"correlationid":"","created":1,"device":"demo","id":"","modified":2,"origin":3,"pushed":0,
+							"humidity":{"created":11,"device":"test device name1","id":"12","modified":13,"origin":14,"pushed":15},
+							"temperature":{"created":21,"device":"test device name2","id":"22","modified":23,"origin":24,"pushed":25}
+							}
+						},
+						{"h1":100}
+					]`,
+			expected: &models.Event{
+				ID:       "",
+				Pushed:   0,
+				Device:   "demo",
+				Created:  1,
+				Modified: 2,
+				Origin:   3,
+				Readings: []models.Reading{
+					{
+						Name: "h1",
+						Value: "100",
+						Created: 0,
+						Device: "",
+						Id: "",
+						Modified: 0,
+						Origin: 0,
+						Pushed: 0,
+					},
+				},
+			},
+			error:     "",
+		},
+
+		{
+			input: `[
+						{"meta": 50},
+						{"h1":100}
+					]`,
+			expected: &models.Event{
+				ID:       "",
+				Pushed:   0,
+				Device:   "",
+				Created:  0,
+				Modified: 0,
+				Origin:   0,
+				Readings: []models.Reading{
+					{
+						Name: "h1",
+						Value: "100",
+						Created: 0,
+						Device: "",
+						Id: "",
+						Modified: 0,
+						Origin: 0,
+						Pushed: 0,
+					},
+				},
+			},
+			error:     "",
+		},
+
+		{
+			input: `[
+						{"meta1": 50},
+						{"h1":100}
+					]`,
+			expected: &models.Event{
+				ID:       "",
+				Pushed:   0,
+				Device:   "",
+				Created:  0,
+				Modified: 0,
+				Origin:   0,
+				Readings: []models.Reading{
+					{
+						Name: "meta1",
+						Value: "50",
+						Created: 0,
+						Device: "",
+						Id: "",
+						Modified: 0,
+						Origin: 0,
+						Pushed: 0,
+					},
+					{
+						Name: "h1",
+						Value: "100",
+						Created: 0,
+						Device: "",
+						Id: "",
+						Modified: 0,
+						Origin: 0,
+						Pushed: 0,
+					},
+				},
+			},
+			error:     "",
+		},
+
+		{
+			input: `[]`,
+			deviceName: "kuiper",
+			expected: &models.Event{
+				ID:       "",
+				Pushed:   0,
+				Device:   "kuiper",
+				Created:  0,
+				Modified: 0,
+				Origin:   0,
+				Readings: nil,
+			},
+			error:     "",
+		},
+	}
+
+	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+	for i, t := range tests {
+		ems := EdgexMsgBusSink{deviceName: t.deviceName, metadata: "meta"}
+		result, err := ems.produceEvents([]byte(t.input))
+
+		if !reflect.DeepEqual(t.error, errstring(err)) {
+			t1.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, t.input, t.error, err)
+		} else if t.error == "" && !reflect.DeepEqual(t.expected, result) {
+			t1.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, t.input, t.expected, result)
+		}
+	}
+}
+
+// errstring returns the string representation of an error.
+func errstring(err error) string {
+	if err != nil {
+		return err.Error()
+	}
+	return ""
+}

+ 11 - 1
xstream/sinks/mqtt_sink.go

@@ -20,6 +20,8 @@ type MQTTSink struct {
 	certPath string
 	pkeyPath string
 
+	insecureSkipVerify bool
+
 	conn MQTT.Client
 }
 
@@ -86,6 +88,13 @@ func (ms *MQTTSink) Configure(ps map[string]interface{}) error {
 		}
 	}
 
+	insecureSkipVerify := false
+	if pk, ok := ps["insecureSkipVerify"]; ok {
+		if v, ok := pk.(bool); ok {
+			insecureSkipVerify = v
+		}
+	}
+
 	ms.srv = srv.(string)
 	ms.tpc = tpc.(string)
 	ms.clientid = clientid.(string)
@@ -94,6 +103,7 @@ func (ms *MQTTSink) Configure(ps map[string]interface{}) error {
 	ms.password = password
 	ms.certPath = certPath
 	ms.pkeyPath = pKeyPath
+	ms.insecureSkipVerify = insecureSkipVerify
 
 	return nil
 }
@@ -110,7 +120,7 @@ func (ms *MQTTSink) Open(ctx api.StreamContext) error {
 				if cer, err2 := tls.LoadX509KeyPair(cp, kp); err2 != nil {
 					return err2
 				} else {
-					opts.SetTLSConfig(&tls.Config{Certificates: []tls.Certificate{cer}})
+					opts.SetTLSConfig(&tls.Config{Certificates: []tls.Certificate{cer}, InsecureSkipVerify: ms.insecureSkipVerify})
 				}
 			} else {
 				return err1

+ 3 - 3
xstream/sinks/rest_sink.go

@@ -280,13 +280,13 @@ func (ms *RestSink) send(v interface{}, logger api.Logger) error {
 	}
 	logger.Debugf("do request: %s %s with %s", ms.method, ms.url, req.Body)
 	resp, err := ms.client.Do(req)
-	if resp.StatusCode < 200 || resp.StatusCode > 299 {
-		return fmt.Errorf("rest sink fails to err http return code: %d.", resp.StatusCode)
-	}
 	if err != nil {
 		return fmt.Errorf("rest sink fails to send out the data")
 	} else {
 		logger.Debugf("rest sink got response %v", resp)
+		if resp.StatusCode < 200 || resp.StatusCode > 299 {
+			return fmt.Errorf("rest sink fails to err http return code: %d.", resp.StatusCode)
+		}
 	}
 	return nil
 }

+ 9 - 0
xstream/util_test.go

@@ -36,3 +36,12 @@ func TestConf(t *testing.T) {
 	}
 
 }
+
+// TestConf2 checks that GetConfAsString resolves the "conf_string" key of
+// test/testconf.json to the value "test" without returning an error.
+func TestConf2(t *testing.T) {
+	var file = "test/testconf.json"
+
+	if v, e := GetConfAsString(file, "conf_string"); e != nil || (v != "test") {
+		t.Errorf("Expect %s, actual %s; error is %s. \n", "test", v, e)
+	}
+
+}