浏览代码

Merge branch 'develop'

RockyJin 5 年之前
父节点
当前提交
99bbb725b9
共有 98 个文件被更改,包括 2102 次插入1358 次删除
  1. 11 11
      Makefile
  2. 4 4
      README.md
  3. 18 14
      common/util.go
  4. 0 10
      docs/cli/overview.md
  5. 二进制
      docs/cli/resources/arch.png
  6. 0 0
      docs/en_US/JSON_Expressions.pptx
  7. 10 0
      docs/en_US/cli/overview.md
  8. 二进制
      docs/en_US/cli/resources/arch.png
  9. 1 1
      docs/cli/rules.md
  10. 5 5
      docs/cli/streams.md
  11. 14 0
      docs/en_US/extension/overview.md
  12. 15 15
      docs/getting_started.md
  13. 12 0
      docs/en_US/index.md
  14. 0 0
      docs/en_US/json_expr.md
  15. 1 1
      docs/operation/configuration_file.md
  16. 19 0
      docs/en_US/operation/install/cent-os.md
  17. 5 5
      docs/operation/install/overview.md
  18. 12 0
      docs/en_US/operation/operations.md
  19. 6 0
      docs/en_US/operation/overview.md
  20. 0 0
      docs/en_US/resources/stream_storage.png
  21. 3 3
      docs/rules/overview.md
  22. 1 1
      docs/rules/sinks/logs.md
  23. 25 0
      docs/en_US/rules/sinks/mqtt.md
  24. 2 2
      docs/rules/sources/mqtt.md
  25. 9 7
      docs/sqls/built-in_functions.md
  26. 1 1
      docs/sqls/data_types.md
  27. 0 0
      docs/en_US/sqls/json_expr.md
  28. 8 0
      docs/en_US/sqls/overview.md
  29. 5 5
      docs/sqls/query_language_elements.md
  30. 0 0
      docs/en_US/sqls/resources/hoppingWindow.png
  31. 0 0
      docs/en_US/sqls/resources/sessionWindow.png
  32. 0 0
      docs/en_US/sqls/resources/slidingWindow.png
  33. 0 0
      docs/en_US/sqls/resources/stream_storage.png
  34. 0 0
      docs/en_US/sqls/resources/tumblingWindow.png
  35. 3 3
      docs/sqls/streams.md
  36. 2 2
      docs/sqls/windows.md
  37. 0 0
      docs/en_US/streaming_class_diagram.pdf
  38. 7 7
      docs/streams.md
  39. 2 2
      docs/tutorial.md
  40. 0 14
      docs/extension/overview.md
  41. 0 20
      docs/operation/install/cent-os.md
  42. 0 12
      docs/operation/operations.md
  43. 0 6
      docs/operation/overview.md
  44. 0 10
      docs/rules/sinks/mqtt.md
  45. 0 8
      docs/sqls/overview.md
  46. 0 0
      docs/zh_CN/index.md
  47. 0 0
      etc/kuiper.yaml
  48. 76 5
      xsql/ast.go
  49. 37 7
      xsql/funcs_ast_validator.go
  50. 51 21
      xsql/funcs_ast_validator_test.go
  51. 1 1
      xsql/funcs_math.go
  52. 7 0
      xsql/funcs_misc.go
  53. 9 0
      xsql/funcs_str.go
  54. 2 2
      xsql/functions.go
  55. 5 5
      xsql/lexical.go
  56. 30 0
      xsql/metadata_util.go
  57. 10 6
      xsql/parser.go
  58. 498 486
      xsql/parser_test.go
  59. 3 4
      xsql/plans/aggregate_operator.go
  60. 3 4
      xsql/plans/filter_operator.go
  61. 83 0
      xsql/plans/having_operator.go
  62. 156 0
      xsql/plans/having_test.go
  63. 4 4
      xsql/plans/join_operator.go
  64. 33 0
      xsql/plans/join_test.go
  65. 92 1
      xsql/plans/misc_func_test.go
  66. 3 5
      xsql/plans/order_operator.go
  67. 18 5
      xsql/plans/preprocessor.go
  68. 30 24
      xsql/plans/project_operator.go
  69. 64 0
      xsql/plans/str_func_test.go
  70. 33 24
      xsql/processors/xsql_processor.go
  71. 22 21
      xsql/processors/xsql_processor_test.go
  72. 33 0
      xsql/sql_validator.go
  73. 1 1
      xsql/util.go
  74. 1 1
      xsql/xsql_manager.go
  75. 1 1
      xsql/xsql_parser_tree_test.go
  76. 61 0
      xstream/api/stream.go
  77. 5 6
      xstream/cli/main.go
  78. 15 37
      xstream/collectors/func.go
  79. 84 0
      xstream/contexts/default.go
  80. 1 1
      xstream/demo/func_visitor.go
  81. 0 63
      xstream/demo/test.go
  82. 0 92
      xstream/demo/testWindow.go
  83. 79 83
      xstream/extensions/mqtt_source.go
  84. 5 5
      xstream/funcs.go
  85. 21 0
      xstream/nodes/common_func.go
  86. 56 0
      xstream/nodes/sink_node.go
  87. 69 0
      xstream/nodes/source_node.go
  88. 20 44
      xstream/operators/operations.go
  89. 13 14
      xstream/operators/watermark.go
  90. 19 23
      xstream/operators/window_op.go
  91. 4 3
      xstream/server/main.go
  92. 7 8
      xstream/sinks/log_sink.go
  93. 69 45
      xstream/sinks/mqtt_sink.go
  94. 30 32
      xstream/streams.go
  95. 20 32
      xstream/test/mock_sink.go
  96. 10 26
      xstream/test/mock_source.go
  97. 0 40
      xstream/types.go
  98. 7 7
      xstream/util_test.go

+ 11 - 11
Makefile

@@ -9,13 +9,13 @@ GOARCH ?= ""
 
 .PHONY: build
 build:
-	@mkdir -p $(BUILD_PATH)/engine/bin
-	@mkdir -p $(BUILD_PATH)/engine/etc
-	@mkdir -p $(BUILD_PATH)/engine/data
-	@mkdir -p $(BUILD_PATH)/engine/plugins
-	@mkdir -p $(BUILD_PATH)/engine/log
+	@mkdir -p $(BUILD_PATH)/kuiper/bin
+	@mkdir -p $(BUILD_PATH)/kuiper/etc
+	@mkdir -p $(BUILD_PATH)/kuiper/data
+	@mkdir -p $(BUILD_PATH)/kuiper/plugins
+	@mkdir -p $(BUILD_PATH)/kuiper/log
 
-	@cp -r etc/* $(BUILD_PATH)/engine/etc
+	@cp -r etc/* $(BUILD_PATH)/kuiper/etc
 
 	@if [ ! -z $(GOOS) ] && [ ! -z $(GOARCH) ];then \
 		GO111MODULE=on GOPROXY=https://goproxy.io GOOS=$(GOOS) $(GOARCH)=$(GOARCH) CGO_ENABLED=0 go build -ldflags="-s -w" -o cli xstream/cli/main.go; \
@@ -25,20 +25,20 @@ build:
 		GO111MODULE=on GOPROXY=https://goproxy.io CGO_ENABLED=0 go build -ldflags="-s -w" -o server xstream/server/main.go; \
 	fi
 	@if [ ! -z $$(which upx) ]; then upx ./cli; upx ./server; fi
-	@mv ./cli ./server $(BUILD_PATH)/engine/bin
+	@mv ./cli ./server $(BUILD_PATH)/kuiper/bin
 	@echo "Build successfully"
 
 .PHONY: pkg
 pkg: build
 	@mkdir -p $(PACKAGES_PATH)
 	@if [ ! -z $(GOOS) ] && [ ! -z $(GOARCH) ];then \
-		package_name=engine_$(GOARCH); \
+		package_name=kuiper_$(GOARCH); \
 	else \
-		package_name=engine; \
+		package_name=kuiper; \
 	fi; \
 	cd $(BUILD_PATH); \
-	zip -rq $${package_name}.zip engine; \
-	tar -czf $${package_name}.tar.gz engine; \
+	zip -rq $${package_name}.zip kuiper; \
+	tar -czf $${package_name}.tar.gz kuiper; \
 	mv $${package_name}.zip $${package_name}.tar.gz ../$(PACKAGES_PATH)
 	@echo "Package build success"
 

+ 4 - 4
README.md

@@ -1,8 +1,8 @@
-# Rule Engine for Edge
+# A lightweight IoT edge analytic software
 
 ## Highlight
 
-A SQL based lightweight IoT streaming rule engine running at resource constrained edge devices.
+A SQL based lightweight IoT analytics/streaming software running at resource constrained edge devices.
 - Native run with small overhead ( ~7MB package), support Linux/Windows/Mac OS
 - SQL based, easy to use
 - Built-in support for MQTT source
@@ -11,8 +11,8 @@ A SQL based lightweight IoT streaming rule engine running at resource constraine
 
 ## Document
 
-- [Getting started](docs/getting_started.md)
-- [Reference guide](docs/index.md)
+- [Getting started](docs/en_US/getting_started.md)
+- [Reference guide](docs/en_US/index.md)
 
 ## Build from source code
 

+ 18 - 14
common/util.go

@@ -2,19 +2,18 @@ package common
 
 import (
 	"bytes"
-	"context"
 	"fmt"
 	"github.com/dgraph-io/badger"
 	"github.com/go-yaml/yaml"
 	"github.com/sirupsen/logrus"
 	"io/ioutil"
 	"os"
+	"path"
 	"path/filepath"
 )
 
 const (
 	logFileName = "stream.log"
-	LoggerKey = "logger"
 	etc_dir = "/etc/"
 	data_dir = "/data/"
 	log_dir = "/log/"
@@ -50,16 +49,6 @@ func (l *logRedirect) Debugf(f string, v ...interface{}) {
 	Log.Debug(fmt.Sprintf(f, v...))
 }
 
-func GetLogger(ctx context.Context) *logrus.Entry {
-	if ctx != nil{
-		l, ok := ctx.Value(LoggerKey).(*logrus.Entry)
-		if l != nil && ok {
-			return l
-		}
-	}
-	return Log.WithField("caller", "default")
-}
-
 func LoadConf(confName string) []byte {
 	confDir, err := GetConfLoc()
 	if err != nil {
@@ -79,7 +68,7 @@ type XStreamConf struct {
 	Port int `yaml:"port"`
 }
 
-var StreamConf = "xstream.yaml"
+var StreamConf = "kuiper.yaml"
 
 func init(){
 	Log = logrus.New()
@@ -94,7 +83,7 @@ func init(){
 	}
 
 	if c, ok := cfg["basic"]; !ok{
-		Log.Fatal("no basic config in xstream.yaml")
+		Log.Fatal("no basic config in kuiper.yaml")
 	}else{
 		Config = &c
 	}
@@ -241,6 +230,21 @@ func GetLoc(subdir string)(string, error) {
 	return "", fmt.Errorf("conf dir not found")
 }
 
+func GetAndCreateDataLoc(dir string) (string, error) {
+	dataDir, err := GetDataLoc()
+	if err != nil {
+		return "", err
+	}
+	d := path.Join(path.Dir(dataDir), dir)
+	if _, err := os.Stat(d); os.IsNotExist(err) {
+		err = os.MkdirAll(d, 0755)
+		if err != nil {
+			return "", err
+		}
+	}
+	return d, nil
+}
+
 //Time related. For Mock
 func GetTicker(duration int) Ticker {
 	if IsTesting{

+ 0 - 10
docs/cli/overview.md

@@ -1,10 +0,0 @@
-The XStream CLI (command line interface) tools provides streams and rules management. 
-
-The XStream CLI acts as a client to the XStream server. The XStream server runs the engine that executes the stream or rule queries. This includes processing stream or rule definitions, manage rule status and io.
-
-*XStream CLI Architecture*
-![CLI Arch](resources/arch.png)
-
-- [Streams](streams.md)
-- [Rules](rules.md)
-

二进制
docs/cli/resources/arch.png


docs/JSON_Expressions.pptx → docs/en_US/JSON_Expressions.pptx


+ 10 - 0
docs/en_US/cli/overview.md

@@ -0,0 +1,10 @@
+The Kuiper CLI (command line interface) tools provides streams and rules management. 
+
+The Kuiper CLI acts as a client to the Kuiper server. The Kuiper server runs the engine that executes the stream or rule queries. This includes processing stream or rule definitions, manage rule status and io.
+
+*Kuiper CLI Architecture*
+![CLI Arch](resources/arch.png)
+
+- [Streams](streams.md)
+- [Rules](rules.md)
+

二进制
docs/en_US/cli/resources/arch.png


+ 1 - 1
docs/cli/rules.md

@@ -1,6 +1,6 @@
 # Rules management
 
-The XStream rule command line tools allows you to manage rules, such as create, show, drop, describe, start, stop and restart rules. 
+The Kuiper rule command line tools allows you to manage rules, such as create, show, drop, describe, start, stop and restart rules. 
 
 ## create a rule
 

+ 5 - 5
docs/cli/streams.md

@@ -1,6 +1,6 @@
 # Streams management
 
-The XStream stream command line tools allows you to manage the streams, such as create, describe, show and drop stream definitions.
+The Kuiper stream command line tools allows you to manage the streams, such as create, describe, show and drop stream definitions.
 
 ## create a stream
 
@@ -100,19 +100,19 @@ Sample:
 
 ```shell
 # bin/cli query
-xstream > 
+kuiper > 
 ```
 
-After typing ``query`` sub-command, it prompts ``xstream > ``, then type SQLs (see [XStream SQL reference](../sqls/overview.md) for how to use XStream SQL) in the command prompt and press enter. 
+After typing ``query`` sub-command, it prompts ``kuiper > ``, then type SQLs (see [Kuiper SQL reference](../sqls/overview.md) for how to use Kuiper SQL) in the command prompt and press enter. 
 
 The results will be print in the console.
 
 ```shell
-xstream > SELECT * FROM my_stream WHERE id > 10;
+kuiper > SELECT * FROM my_stream WHERE id > 10;
 [{"...":"..." ....}]
 ...
 ```
 - Press ``CTRL + C`` to stop the query; 
 
-- If no SQL are type, you can type ``quit`` or ``exit`` to quit the ``xstream`` prompt console.
+- If no SQL are type, you can type ``quit`` or ``exit`` to quit the ``kuiper`` prompt console.
 

+ 14 - 0
docs/en_US/extension/overview.md

@@ -0,0 +1,14 @@
+# Extensions
+
+Kuiper allows user to customize the different kinds of extensions.  
+
+- The source extension is used for extending different stream source, such as consuming data from other message brokers. Kuiper has built-in source support for [MQTT broker](../rules/sources/mqtt.md).
+- Sink/Action extension is used for extending pub/push data to different targets, such as database, other message system, web interfaces or file systems. Built-in action support in Kuiper, see [MQTT](../rules/sinks/mqtt.md) & [log files](../rules/sinks/logs.md).
+- Functions extension allows user to extend different functions that used in SQL. Built-in functions supported in Kuiper, see [functions](../sqls/built-in_functions.md).
+
+Please read below for how to realize the different extensions.
+
+- [Source extension](#)
+- [Sink/Action extension](#)
+- [Functions extension](#)
+

+ 15 - 15
docs/getting_started.md

@@ -2,14 +2,14 @@
 
 ## Download & install
 
-Download the latest release from https://github.com/emqx/edge-rule-engine/releases, and unzip file.
+Download the latest release from https://github.com/emqx/kuiper/releases, and unzip file.
 
 ## Directory structure 
 
-Below is the installation directory structure after installing xstream. 
+Below is the installation directory structure after installing Kuiper. 
 
 ```
-xstream_installed_dir
+kuiper_installed_dir
   bin
     server
     cli
@@ -26,23 +26,23 @@ xstream_installed_dir
 
 ## Run the first rule stream
 
-XStream rule is composed by a SQL and multiple actions. XStream SQL is an easy to use SQL-like language to specify the logic of the rule stream. By providing the rule through CLI, a rule stream will be created in the rule engine and run continuously. The user can then manage the rules through CLI.
+Kuiper rule is composed by a SQL and multiple actions. Kuiper SQL is an easy to use SQL-like language to specify the logic of the rule stream. By providing the rule through CLI, a rule stream will be created in the rule engine and run continuously. The user can then manage the rules through CLI.
 
-XStream has a lot of built-in functions and extensions available for complex analysis, and you can find more information about the grammer and its functions from the [XStream SQL reference](sqls/overview.md).
+Kuiper has a lot of built-in functions and extensions available for complex analysis, and you can find more information about the grammer and its functions from the [Kuiper SQL reference](sqls/overview.md).
 
-Let's consider a sample scenario where we are receiving temperature and humidity record from a sensor through MQTT service and we want to issue an alert when the temperature is bigger than 30 degrees celcius in a time window. We can write a XStream rule for the above scenario using the following several steps.
+Let's consider a sample scenario where we are receiving temperature and humidity record from a sensor through MQTT service and we want to issue an alert when the temperature is bigger than 30 degrees celcius in a time window. We can write a Kuiper rule for the above scenario using the following several steps.
 
 ### Prerequisite
 
-We assume there is already a MQTT broker as the data source of XStream server. If you don't have one, EMQX is recommended. Please follow the [EMQ Installation Guide](https://docs.emqx.io/broker/v3/en/install.html) to setup a mqtt broker.
+We assume there is already a MQTT broker as the data source of Kuiper server. If you don't have one, EMQX is recommended. Please follow the [EMQ Installation Guide](https://docs.emqx.io/broker/v3/en/install.html) to setup a mqtt broker.
 
-### Start the XStream Engine Server
+### Start the Kuiper Engine Server
 
-Run bin/server to start the XStream Enginer Server
+Run bin/server to start the Kuiper Server
 ```sh
 $ bin/server
 ```
-You should see a succesul message `Serving Rule server on port 20498` 
+You should see a succesul message `Serving Kuiper server on port 20498` 
 
 ### Defining the input stream
 
@@ -65,17 +65,17 @@ You can use command ``cli show streams`` to see if the ``demo`` stream was creat
 
 ### Testing the stream through query tool
 
-Now the stream is created, it can be tested from ``cli query`` command. The ``xstream`` prompt is displayed as below after typing ``cli query``.
+Now the stream is created, it can be tested from ``cli query`` command. The ``kuiper`` prompt is displayed as below after typing ``cli query``.
 
 ```sh
 $ bin/cli query
-xstream > 
+kuiper > 
 ```
 
-In the ``xstream`` prompt, you can type SQL and validate the SQL against the stream.
+In the ``kuiper`` prompt, you can type SQL and validate the SQL against the stream.
 
 ```sh
-xstream > select count(*), avg(humidity) as avg_hum, max(humidity) as max_hum from demo where temperature > 30 group by TUMBLINGWINDOW(ss, 5);
+kuiper > select count(*), avg(humidity) as avg_hum, max(humidity) as max_hum from demo where temperature > 30 group by TUMBLINGWINDOW(ss, 5);
 
 query is submit successfully.
 ```
@@ -83,7 +83,7 @@ query is submit successfully.
 Now if any data are publish to the MQTT server available at ``tcp://127.0.0.1:1883``, then it prints message as following.
 
 ```
-xstream > [{"avg_hum":41,"count":4,"max_hum":91}]
+kuiper > [{"avg_hum":41,"count":4,"max_hum":91}]
 [{"avg_hum":62,"count":5,"max_hum":96}]
 [{"avg_hum":36,"count":3,"max_hum":63}]
 [{"avg_hum":48,"count":3,"max_hum":71}]

+ 12 - 0
docs/en_US/index.md

@@ -0,0 +1,12 @@
+
+
+
+
+Refer to the following topics for guidance on using the Kuiper.
+
+- [Install and operation](operation/overview.md)
+- [Command line interface tools - CLI](cli/overview.md)
+- [Kuiper SQL reference](sqls/overview.md)
+- [Rules](rules/overview.md)
+- [Extend Kuiper](extension/overview.md)
+

docs/json_expr.md → docs/en_US/json_expr.md


+ 1 - 1
docs/operation/configuration_file.md

@@ -1,5 +1,5 @@
 # Basic configurations
-The configuration file for XStream is at ``$xstream/etc/xstream.yaml``. The configuration file is yaml format.
+The configuration file for Kuiper is at ``$kuiper/etc/kuiper.yaml``. The configuration file is yaml format.
 
 ## Log level
 

+ 19 - 0
docs/en_US/operation/install/cent-os.md

@@ -0,0 +1,19 @@
+# CentOS
+
+This document describes how to install on CentOS.
+
+## Install from zip
+
+Unzip the installation package.
+
+``unzip kuiper-centos7-v0.0.1.zip``
+
+Run the ``cli`` to verify Kuiper is installed successfully or not.
+
+```shell
+# cd kuiper
+# bin/cli --version
+kuiper version 0.0.1
+```
+
+If it can print the version, then Kuiper is installed successfully. 

+ 5 - 5
docs/operation/install/overview.md

@@ -26,15 +26,15 @@ The ``bin`` directory includes all of executable files. Such as ``cli`` command.
 
 ## etc
 
-The ``etc`` directory contains the configuration files of XStream. Such as MQTT source configurations etc.
+The ``etc`` directory contains the configuration files of Kuiper. Such as MQTT source configurations etc.
 
 ## data
 
-XStream persistences all the definitions of streams and rules, and all of message will be stored in this folder  for long duration operations.
+Kuiper persistences all the definitions of streams and rules, and all of message will be stored in this folder  for long duration operations.
 
 ## plugins
 
-XStream allows users to develop your own plugins, and put these plugins into this folder.  See [extension](../../extension/overview.md) for more info for how to extend the XStream.
+Kuiper allows users to develop your own plugins, and put these plugins into this folder.  See [extension](../../extension/overview.md) for more info for how to extend the Kuiper.
 
 ## log
 
@@ -42,6 +42,6 @@ All of the log files are under this folder. The default log file name is ``strea
 
 # Next steps
 
-- See [getting started](../../getting_started.md) for your first XStream experience.
-- See [CLI tools](../../cli/overview.md) for usage of XStream CLI tools.
+- See [getting started](../../getting_started.md) for your first Kuiper experience.
+- See [CLI tools](../../cli/overview.md) for usage of Kuiper CLI tools.
 

+ 12 - 0
docs/en_US/operation/operations.md

@@ -0,0 +1,12 @@
+# Configuration
+
+- [Kuiper basic configuration](configuration_file.md)
+- [MQTT source configuration](../rules/sources/mqtt.md)
+
+# Restful APIs
+
+Kuiper provides some RESTful management APIs.
+
+
+
+

+ 6 - 0
docs/en_US/operation/overview.md

@@ -0,0 +1,6 @@
+
+Kuiper is developed by Golang, and it can be run at different operating systems. See below docs for how to install and operating Kuiper.
+
+- [Install instruction](install/overview.md)
+- [Operation guide](operations.md)
+

docs/sqls/resources/stream_storage.png → docs/en_US/resources/stream_storage.png


+ 3 - 3
docs/rules/overview.md

@@ -33,14 +33,14 @@ The following 3 parameters are required for creating a rule.
 
 ## id
 
-The identification of the rule. The rule name cannot be duplicated in the same XStream instance.
+The identification of the rule. The rule name cannot be duplicated in the same Kuiper instance.
 
 ## sql
 
 The sql query to run for the rule. 
 
-- XStream provides embeded support MQTT source, see  [MQTT source stream](sources/mqtt.md) for more detailed info.
-- See [SQL](../sqls/overview.md) for more info of XStream SQL.
+- Kuiper provides embeded support MQTT source, see  [MQTT source stream](sources/mqtt.md) for more detailed info.
+- See [SQL](../sqls/overview.md) for more info of Kuiper SQL.
 - Sources can be customized, see [extension](../extension/overview.md) for more detailed info.
 
 ### actions

+ 1 - 1
docs/rules/sinks/logs.md

@@ -1,6 +1,6 @@
 # Log action
 
-The action is used for print output message into log file, the log file is at  `` $xstream_install/log/stream.log`` by default.
+The action is used for print output message into log file, the log file is at  `` $kuiper_install/log/stream.log`` by default.
 
 No properties can be specified for the action.
 

+ 25 - 0
docs/en_US/rules/sinks/mqtt.md

@@ -0,0 +1,25 @@
+# MQTT action
+
+The action is used for publish output message into a MQTT server. 
+
+| Property name    | Optional | Description                                                  |
+| ---------------- | -------- | ------------------------------------------------------------ |
+| server           | false    | The broker address of the mqtt server, such as ``tcp://127.0.0.1:1883`` |
+| topic            | false    | The mqtt topic, such as ``analysis/result``                  |
+| clientId         | true     | The client id for mqtt connection. If not specified, an uuid will be used |
+| protocol_version | true     | 3.1 (also refer as MQTT 3) or 3.1.1 (also refer as MQTT 4).  If not specified, the default value is 3.1. |
+| username         | true     | The user name for the connection.                        |
+| password         | true     | The password for the connection.                             |
+
+Below is one of the sample configuration.
+```json
+{
+  "mqtt": {
+  	"server": "tcp://sink_server:1883",
+  	"topic": "demoSink",
+  	"clientId": "client_id_1",
+    "protocol_version": "3.1.1"
+  }
+}
+```
+

+ 2 - 2
docs/rules/sources/mqtt.md

@@ -1,6 +1,6 @@
 # MQTT source 
 
-XStream provides built-in support for MQTT source stream, which can subscribe the message from MQTT broker and feed into the XStream processing pipeline.  The configuration file of MQTT source is at ``$xstream/etc/mqtt_source.yaml``. Below is the file format.
+Kuiper provides built-in support for MQTT source stream, which can subscribe the message from MQTT broker and feed into the Kuiper processing pipeline.  The configuration file of MQTT source is at ``$kuiper/etc/mqtt_source.yaml``. Below is the file format.
 
 ```yaml
 #Global MQTT configurations
@@ -27,7 +27,7 @@ The default subscription QoS level.
 
 ### sharedsubscription
 
-Whether use the shared subscription mode or not. If using the shared subscription mode, then if there are multiple XStream process can be load balanced.
+Whether use the shared subscription mode or not. If using the shared subscription mode, then if there are multiple Kuiper process can be load balanced.
 
 ### servers
 

+ 9 - 7
docs/sqls/built-in_functions.md

@@ -1,6 +1,6 @@
 # Functions
 
-XStream has many built-in functions for performing calculations on data.
+Kuiper has many built-in functions for performing calculations on data.
 
 ## Aggregate Functions
 Aggregate functions perform a calculation on a set of values and return a single value. Aggregate functions can be used as expressions only in the following:
@@ -56,7 +56,7 @@ Aggregate functions perform a calculation on a set of values and return a single
 | lower    | lower(col1) | Returns the lowercase version of the given String.                                                                         |
 | lpad     | lpad(col1, 2) | Returns the String argument, padded on the left side with the number of spaces specified by the second argument.         |
 | ltrim    | ltrim(col1) | Removes all leading whitespace (tabs and spaces) from the provided String.                                                |
-| numbytes | numbytes(col1) | Returns the number of bytes in the UTF-8 encoding of the provided string.                                         | 
+| numbytes | numbytes(col1) | Returns the number of bytes in the UTF-8 encoding of the provided string.                                         |
 | regexp_matches| regexp_matches(col1, regex) | Returns true if the string (first argument) contains a match for the regular expression.            |
 | regexp_replace| regexp_matches(col1, regex, str) | Replaces all occurrences of the second argument (regular expression) in the first argument with the third argument.                                                          |
 | regexp_substr| regexp_substr(col1, regex) | Finds the first match of the 2nd parameter (regex) in the first parameter.                            |
@@ -64,6 +64,7 @@ Aggregate functions perform a calculation on a set of values and return a single
 | rtrim    | rtrim(col1) | Removes all trailing whitespace (tabs and spaces) from the provided String.                                                |
 | substring| substring(col1, start, end) |  returns the substring of the provided String from the provided Int index (0-based, inclusive) to the end of the String.                                                           |
 | startswith| startswith(col1, str) | Returns Boolean, whether the first string argument starts with the second string argument.                  |
+| split_value | split_value(col1, str_splitter, index) | Split the value of the 1st parameter with the 2nd parameter, and return the value of split array that indexed with the 3rd parameter.<br />``split_value("/test/device001/message","/",0) AS a``, the returned value of function is empty; <br />``split_value("/test/device001/message","/",3) AS a``, the returned value of function is ``message``; |
 | trim      | trim(col1) | Removes all leading and trailing whitespace (tabs and spaces) from the provided String.                                    |
 | upper     | upper(col1)| Returns the uppercase version of the given String.|
 
@@ -86,8 +87,9 @@ Aggregate functions perform a calculation on a set of values and return a single
 | sha512   | sha512(col1)| Hashed value of the argument                   |
 
 ## Other Functions
-| Function | Example     | Description                                    |
-| -------- | ----------- | ---------------------------------------------- |
-| isNull   | isNull(col1)| Returns true if the argument is the Null value.|
-| newuuid  | newuuid()   | Returns a random 16-byte UUID.                 |
-| timestamp| timestamp() | Returns the current timestamp in milliseconds from 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970 |
+| Function  | Example      | Description                                                  |
+| --------- | ------------ | ------------------------------------------------------------ |
+| isNull    | isNull(col1) | Returns true if the argument is the Null value.              |
+| newuuid   | newuuid()    | Returns a random 16-byte UUID.                               |
+| timestamp | timestamp()  | Returns the current timestamp in milliseconds from 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970 |
+| mqtt      | mqtt(topic)  | Returns the MQTT meta-data of specified key. The current supported keys<br />- topic: return the topic of message.  If there are multiple stream source, then specify the source name in parameter. Such as ``mqtt(src1.topic)``<br />- messageid: return the message id of message. If there are multiple stream source, then specify the source name in parameter. Such as ``mqtt(src2.messageid)`` |

+ 1 - 1
docs/sqls/data_types.md

@@ -1,6 +1,6 @@
 # Data types
 
-In XStream, each column or an expression has a related data type. A data type describes (and constrains) the set of values that a column of that type can hold or an expression of that type can produce.
+In Kuiper, each column or an expression has a related data type. A data type describes (and constrains) the set of values that a column of that type can hold or an expression of that type can produce.
 
 
 

docs/sqls/json_expr.md → docs/en_US/sqls/json_expr.md


+ 8 - 0
docs/en_US/sqls/overview.md

@@ -0,0 +1,8 @@
+Kuiper offers a SQL-like query language for performing transformations and computations over streams of events. This document describes the syntax, usage and best practices for the Kuiper query language. 
+
+- [Stream specifications](streams.md)
+
+- [Query languange element](query_language_elements.md)
+- [Windows](windows.md)
+- [Built-in functions](built-in_functions.md)
+

+ 5 - 5
docs/sqls/query_language_elements.md

@@ -1,11 +1,11 @@
 
 # Query language elements
 
-XStream provides a variety of elements for building queries. They are summarized below.
+Kuiper provides a variety of elements for building queries. They are summarized below.
 
 | Element               | Summary                                                      |
 | --------------------- | ------------------------------------------------------------ |
-| [SELECT](#SELECT)     | SELECT is used to retrieve rows from input streams and enables the selection of one or many columns from one or many input streams in XStream. |
+| [SELECT](#SELECT)     | SELECT is used to retrieve rows from input streams and enables the selection of one or many columns from one or many input streams in Kuiper. |
 | [FROM](#FROM)         | FROM specifies the input stream. The FROM clause is always required for any SELECT statement. |
 | [JOIN](#JOIN)         | JOIN is used to combine records from two or more input streams. JOIN includes LEFT, RIGHT, FULL & CROSS. |
 | [WHERE](#WHERE)       | WHERE specifies the search condition for the rows returned by the query. |
@@ -18,7 +18,7 @@ XStream provides a variety of elements for building queries. They are summarized
 
 ## SELECT
 
-Retrieves rows from input streams and enables the selection of one or many columns from one or many input streams in XStream.
+Retrieves rows from input streams and enables the selection of one or many columns from one or many input streams in Kuiper.
 
 ### Syntax
 
@@ -240,7 +240,7 @@ GROUP BY <group by spec>
 
 **<window_type>**
 
-Specifies any XStream supported Windowing, see [windows](windows.md) for more info.
+Specifies any Kuiper supported Windowing, see [windows](windows.md) for more info.
 
 **< column_expression >**
 
@@ -254,7 +254,7 @@ GROUP BY column_name
 
 ### HAVING
 
-Specifies a search condition for a group or an aggregate. HAVING can be used only with the SELECT expression. HAVING is typically used in a GROUP BY clause. When GROUP BY is not used, HAVING behaves like a WHERE clause.
+The HAVING clause was added to SQL because the WHERE keyword could not be used with aggregate functions. Specifies a search condition for a group or an aggregate. HAVING can be used only with the SELECT expression. HAVING is typically used in a GROUP BY clause. 
 
 #### Syntax
 

docs/sqls/resources/hoppingWindow.png → docs/en_US/sqls/resources/hoppingWindow.png


docs/sqls/resources/sessionWindow.png → docs/en_US/sqls/resources/sessionWindow.png


docs/sqls/resources/slidingWindow.png → docs/en_US/sqls/resources/slidingWindow.png


docs/resources/stream_storage.png → docs/en_US/sqls/resources/stream_storage.png


docs/sqls/resources/tumblingWindow.png → docs/en_US/sqls/resources/tumblingWindow.png


+ 3 - 3
docs/sqls/streams.md

@@ -2,7 +2,7 @@
 
 ## Data types
 
-In XStream, each column or an expression has a related data type. A data type describes (and constrains) the set of values that a column of that type can hold or an expression of that type can produce.
+In Kuiper, each column or an expression has a related data type. A data type describes (and constrains) the set of values that a column of that type can hold or an expression of that type can produce.
 
 Below is the list of data types supported.
 
@@ -44,7 +44,7 @@ my_stream
 WITH ( datasource = "topic/temperature", FORMAT = "json", KEY = "id");
 ```
 
-The stream will subscribe to MQTT topic ``topic/temperature``, the server connection uses ``servers`` key of ``default`` section in configuration file ``$xstream/etc/mqtt_source.yaml``. 
+The stream will subscribe to MQTT topic ``topic/temperature``, the server connection uses ``servers`` key of ``default`` section in configuration file ``$kuiper/etc/mqtt_source.yaml``. 
 
 - See [MQTT source](../rules/sources/mqtt.md) for more info.
 
@@ -61,7 +61,7 @@ demo (
 	) WITH (datasource="test/", FORMAT="JSON", KEY="USERID", CONF_KEY="demo");
 ```
 
-The stream will subscribe to MQTT topic ``test/``, the server connection uses settings of ``demo`` section in configuration file ``$xstream/etc/mqtt_source.yaml``. 
+The stream will subscribe to MQTT topic ``test/``, the server connection uses settings of ``demo`` section in configuration file ``$kuiper/etc/mqtt_source.yaml``. 
 
 - See [MQTT source](../rules/sources/mqtt.md) for more info.
 

+ 2 - 2
docs/sqls/windows.md

@@ -1,8 +1,8 @@
 # Windows
 
-In time-streaming scenarios, performing operations on the data contained in temporal windows is a common pattern. XStream has native support for windowing functions, enabling you to author complex stream processing jobs with minimal effort.
+In time-streaming scenarios, performing operations on the data contained in temporal windows is a common pattern. Kuiper has native support for windowing functions, enabling you to author complex stream processing jobs with minimal effort.
 
-There are four kinds of windows to use: [Tumbling window](#TUMBLING WINDOW), [Hopping window](#Hopping window), [Sliding window](#Sliding window), and [Session window](#Session window). You use the window functions in the GROUP BY clause of the query syntax in your XStream queries. 
+There are four kinds of windows to use: [Tumbling window](#tumbling-window), [Hopping window](#hopping-window), [Sliding window](#sliding-window), and [Session window](#session-window). You use the window functions in the GROUP BY clause of the query syntax in your Kuiper queries. 
 
 All the windowing operations output results at the end of the window. The output of the window will be single event based on the aggregate function used. 
 

docs/streaming_class_diagram.pdf → docs/en_US/streaming_class_diagram.pdf


+ 7 - 7
docs/streams.md

@@ -37,7 +37,7 @@ CREATE STREAM
 | KEY           | true     | It will be used in future for GROUP BY statements ??         |
 | TYPE     | false    | Is it required in future if more & more sources are supported? By default, it would be MQTT type. |
 | StrictValidation     | false    | To control validation behavior of message field against stream schema. |
-| KEY_CONF | false | If additional configuration items are requied to be configured, then specify the config key here.<br />XStream currently propose yaml file format. |
+| KEY_CONF | false | If additional configuration items are required to be configured, then specify the config key here.<br />Kuiper currently proposes the yaml file format. |
 
 **Introduction for StrictValidation**
 
@@ -80,7 +80,7 @@ CREATE STREAM my_stream
 
 
 
-The configuration of MQTT source is specified with yaml format, and the configuration file location is at ``$xstream/etc/mqtt_source.yaml``.  Below is the file format.
+The configuration of MQTT source is specified with yaml format, and the configuration file location is at ``$kuiper/etc/mqtt_source.yaml``.  Below is the file format.
 
 ```yaml
 #Global MQTT configurations
@@ -150,16 +150,16 @@ my_stream, iot_stream
 
 ### A simple CLI
 
-A simple command line tool is implemented in ``stream/cli/main.go``. To build the command line tool, run command ``go install -x engine/xstream/cli``.
+A simple command line tool is implemented in ``stream/cli/main.go``. To build the command line tool, run command ``go install -x engine/kuiper/cli``.
 
 #### Run sql to manage streams
 
-Run `cli stream` command, after `xstream >` prompt shown, enter stream related sql statements such as create, drop, describe, explain and show stream statements to execute.
+Run `cli stream` command, after `kuiper >` prompt shown, enter stream related sql statements such as create, drop, describe, explain and show stream statements to execute.
 
 ```bash
 cli stream
-xstream > CREATE STREAM sname (count bigint) WITH (source="users", FORMAT="AVRO", KEY="USERID"
-xstream > DESCRIBE STREAM sname
+kuiper > CREATE STREAM sname (count bigint) WITH (source="users", FORMAT="AVRO", KEY="USERID");
+kuiper > DESCRIBE STREAM sname
 ...
 ```
 
@@ -168,7 +168,7 @@ xstream > DESCRIBE STREAM sname
 
 ```bash
 cli query
-xstream > select USERID from demo;
+kuiper > select USERID from demo;
 ...
 ```
 

+ 2 - 2
docs/tutorial.md

@@ -2,10 +2,10 @@
 
 ## Directory structure 
 
-Below is the installation directory structure after installing xstream. 
+Below is the installation directory structure after installing Kuiper. 
 
 ```
-xstream_installed_dir
+kuiper_installed_dir
   bin
     cli
   etc

+ 0 - 14
docs/extension/overview.md

@@ -1,14 +0,0 @@
-# Extensions
-
-XStream allows user to customize the different kinds of extensions.  
-
-- The source extension is used for extending different stream source, such as consuming data from other message brokers. XStream has built-in source support for [MQTT broker](../rules/sources/mqtt.md).
-- Sink/Action extension is used for extending pub/push data to different targets, such as database, other message system, web interfaces or file systems. Built-in action support in XStream, see [MQTT](../rules/sinks/mqtt.md) & [log files](../rules/sinks/logs.md).
-- Functions extension allows user to extend different functions that used in SQL. Built-in functions supported in XStream, see [functions](../sqls/built-in_functions.md).
-
-Please read below for how to realize the different extensions.
-
-- [Source extension](#)
-- [Sink/Action extension](#)
-- [Functions extension](#)
-

+ 0 - 20
docs/operation/install/cent-os.md

@@ -1,20 +0,0 @@
-# CentOS
-
-This document describes how to install on CentOS.
-
-## Install from zip
-
-Unzip the installation package.
-
-``unzip xstream-centos7-v0.0.1.zip``
-
-Run the ``cli`` to verify XStream is installed successfully or not.
-
-```shell
-# cd xstream
-# bin/cli --version
-xstream version 0.0.1
-```
-
-If it can print the version, then XStream is installed successfully. 
-

+ 0 - 12
docs/operation/operations.md

@@ -1,12 +0,0 @@
-# Configuration
-
-- [XStream basic configuration](configuration_file.md)
-- [MQTT source configuration](../rules/sources/mqtt.md)
-
-# Restful APIs
-
-XStream provides some RESTful management APIs.
-
-
-
-

+ 0 - 6
docs/operation/overview.md

@@ -1,6 +0,0 @@
-
-XStream is developed by Golang, and it can be run at different operating systems. See below docs for how to install and operating XStream.
-
-- [Install instruction](install/overview.md)
-- [Operation guide](operations.md)
-

+ 0 - 10
docs/rules/sinks/mqtt.md

@@ -1,10 +0,0 @@
-# MQTT action
-
-The action is used for publish output message into a MQTT server. 
-
-| Property name | Optional | Description                                                  |
-| ------------- | -------- | ------------------------------------------------------------ |
-| server        | false    | The broker address of the mqtt server, such as ``tcp://127.0.0.1:1883`` |
-| topic         | false    | The mqtt topic, such as ``analysis/result``                  |
-| clientId      | true     | The client id for mqtt connection. If not specified, an uuid will be used |
-

+ 0 - 8
docs/sqls/overview.md

@@ -1,8 +0,0 @@
-XStream offers a SQL-like query language for performing transformations and computations over streams of events. This document describes the syntax, usage and best practices for the XStream query language. 
-
-- [Stream specifications](streams.md)
-
-- [Query languange element](query_language_elements.md)
-- [Windows](windows.md)
-- [Built-in functions](built-in_functions.md)
-

docs/index.md → docs/zh_CN/index.md


etc/xstream.yaml → etc/kuiper.yaml


+ 76 - 5
xsql/ast.go

@@ -529,14 +529,35 @@ type Event interface {
 	IsWatermark() bool
 }
 
+type Metadata map[string]interface{}
+
 type Tuple struct {
 	Emitter   string
 	Message   Message
 	Timestamp int64
+	Metadata  Metadata
+}
+
+// Value returns the value for a key in the Message.
+func (m Metadata) Value(key string) (interface{}, bool) {
+	key = strings.ToLower(key)
+	if keys := strings.Split(key, "."); len(keys) == 1 {
+		v, ok := m[key]
+		return v, ok
+	} else if len(keys) == 2 {
+		v, ok := m[keys[1]]
+		return v, ok
+	}
+	common.Log.Println("Invalid key: " + key + ", expect source.field or field.")
+	return nil, false
 }
 
 func (t *Tuple) Value(key string) (interface{}, bool) {
-	return t.Message.Value(key)
+	if v, ok := t.Message.Value(key); ok {
+		return v, ok
+	} else {
+		return t.Metadata.Value(key)
+	}
 }
 
 func (t *Tuple) All(stream string) (interface{}, bool) {
@@ -551,6 +572,11 @@ func (t *Tuple) GetTimestamp() int64 {
 	return t.Timestamp
 }
 
+func (t *Tuple) GetMetadata() Metadata {
+	return t.Metadata
+}
+
+
 func (t *Tuple) IsWatermark() bool {
 	return false
 }
@@ -954,7 +980,6 @@ func (v *ValuerEval) Eval(expr Expr) interface{} {
 	default:
 		return nil
 	}
-	return nil
 }
 
 
@@ -1486,8 +1511,8 @@ func toFloat64(para interface{}) float64 {
 	return 0
 }
 
-func IsAggStatement(node Node) (bool) {
-	var r bool = false
+func IsAggStatement(node Node) bool {
+	var r = false
 	WalkFunc(node, func(n Node) {
 		if f, ok := n.(*Call); ok {
 			fn := strings.ToLower(f.Name)
@@ -1502,6 +1527,52 @@ func IsAggStatement(node Node) (bool) {
 				return
 			}
 		}
+	})
+	return r
+}
+func HasAggFuncs(node Node) bool {
+	if node == nil{
+		return false
+	}
+	var r bool = false
+	WalkFunc(node, func(n Node) {
+		if f, ok := n.(*Call); ok {
+			fn := strings.ToLower(f.Name)
+			if _, ok1 := aggFuncMap[fn]; ok1 {
+				r = true
+				return
+			}
+		}
 	});
 	return r
-}
+}
+
+func HasNoAggFuncs(node Node) bool {
+	if node == nil{
+		return false
+	}
+	var r bool = false
+	WalkFunc(node, func(n Node) {
+		if f, ok := n.(*Call); ok {
+			fn := strings.ToLower(f.Name)
+			if _, ok1 := mathFuncMap[fn]; ok1 {
+				r = true
+				return
+			} else if _, ok1 := strFuncMap[fn]; ok1 {
+				r = true
+				return
+			} else if _, ok1 := convFuncMap[fn]; ok1 {
+				r = true
+				return
+			} else if _, ok1 := hashFuncMap[fn]; ok1 {
+				r = true
+				return
+			} else if _, ok1 := otherFuncMap[fn]; ok1 {
+				r = true
+				return
+			}
+		}
+	});
+	return r
+}
+

+ 37 - 7
xsql/funcs_ast_validator.go

@@ -9,7 +9,7 @@ type AllowTypes struct {
 	types []Literal
 }
 
-func validateFuncs(funcName string, args []Expr) (error) {
+func validateFuncs(funcName string, args []Expr) error {
 	lowerName := strings.ToLower(funcName)
 	if _, ok := mathFuncMap[lowerName]; ok {
 		return validateMathFunc(funcName, args)
@@ -25,7 +25,7 @@ func validateFuncs(funcName string, args []Expr) (error) {
 	return nil
 }
 
-func validateMathFunc(name string, args []Expr) (error) {
+func validateMathFunc(name string, args []Expr) error {
 	len := len(args)
 	switch name {
 	case "abs", "acos", "asin", "atan", "ceil", "cos", "cosh", "exp", "ln", "log", "round", "sign", "sin", "sinh",
@@ -74,7 +74,7 @@ func validateMathFunc(name string, args []Expr) (error) {
 	return nil
 }
 
-func validateStrFunc(name string, args []Expr) (error) {
+func validateStrFunc(name string, args []Expr) error {
 	len := len(args)
 	switch name {
 	case "concat":
@@ -160,11 +160,29 @@ func validateStrFunc(name string, args []Expr) (error) {
 				}
 			}
 		}
+	case "split_value":
+		if len != 3 {
+			return fmt.Errorf("the arguments for split_value should be 3")
+		}
+		if isNumericArg(args[0]) || isTimeArg(args[0]) || isBooleanArg(args[0]) {
+			return produceErrInfo(name, 0, "string")
+		}
+		if isNumericArg(args[1]) || isTimeArg(args[1]) || isBooleanArg(args[1]) {
+			return produceErrInfo(name, 1, "string")
+		}
+		if isFloatArg(args[2]) || isTimeArg(args[2]) || isBooleanArg(args[2]) || isStringArg(args[2]) {
+			return produceErrInfo(name, 2, "int")
+		}
+		if s, ok := args[2].(*IntegerLiteral); ok {
+			if s.Val < 0 {
+				return fmt.Errorf("The index should not be a nagtive integer.")
+			}
+		}
 	}
 	return nil
 }
 
-func validateConvFunc(name string, args []Expr) (error) {
+func validateConvFunc(name string, args []Expr) error {
 	len := len(args)
 	switch name {
 	case "cast":
@@ -221,7 +239,7 @@ func validateConvFunc(name string, args []Expr) (error) {
 	return nil
 }
 
-func validateHashFunc(name string, args []Expr) (error) {
+func validateHashFunc(name string, args []Expr) error {
 	len := len(args)
 	switch name {
 	case "md5", "sha1", "sha224", "sha256", "sha384", "sha512":
@@ -236,7 +254,7 @@ func validateHashFunc(name string, args []Expr) (error) {
 	return nil
 }
 
-func validateOtherFunc(name string, args []Expr) (error) {
+func validateOtherFunc(name string, args []Expr) error {
 	len := len(args)
 	switch name {
 	case "isNull":
@@ -254,6 +272,18 @@ func validateOtherFunc(name string, args []Expr) (error) {
 		if err := validateLen(name, 0, len); err != nil {
 			return  err
 		}
+	case "mqtt":
+		if err := validateLen(name, 1, len); err != nil {
+			return err
+		}
+		if isIntegerArg(args[0]) || isTimeArg(args[0]) || isBooleanArg(args[0]) || isStringArg(args[0]) || isFloatArg(args[0]) {
+			return produceErrInfo(name, 0, "field reference")
+		}
+		if p, ok := args[0].(*FieldRef); ok {
+			if _, ok := SpecialKeyMapper[p.Name]; !ok {
+				return fmt.Errorf("Parameter of mqtt function can be only topic or messageid.")
+			}
+		}
 	}
 	return nil
 }
@@ -266,7 +296,7 @@ func produceErrInfo(name string, index int, expect string) (err error) {
 	return
 }
 
-func validateLen(funcName string, exp, actual int) (error) {
+func validateLen(funcName string, exp, actual int) error {
 	if actual != exp {
 		return fmt.Errorf("The arguments for %s should be %d.", funcName, exp)
 	}

+ 51 - 21
xsql/funcs_ast_validator_test.go

@@ -16,14 +16,14 @@ func TestFuncValidator(t *testing.T) {
 	}{
 		{
 			s: `SELECT abs(1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "abs", Expr:&Call{Name:"abs", Args: []Expr{&IntegerLiteral{Val:1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "abs", Expr: &Call{Name: "abs", Args: []Expr{&IntegerLiteral{Val: 1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
 
 		{
 			s: `SELECT abs(field1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "abs", Expr:&Call{Name:"abs", Args: []Expr{&FieldRef{Name:"field1"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "abs", Expr: &Call{Name: "abs", Args: []Expr{&FieldRef{Name: "field1"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -36,7 +36,7 @@ func TestFuncValidator(t *testing.T) {
 
 		{
 			s: `SELECT abs(1.1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "abs", Expr:&Call{Name:"abs", Args: []Expr{&NumberLiteral{Val:1.1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "abs", Expr: &Call{Name: "abs", Args: []Expr{&NumberLiteral{Val: 1.1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -63,14 +63,14 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT sin(1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "sin", Expr:&Call{Name:"sin", Args: []Expr{&IntegerLiteral{Val:1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "sin", Expr: &Call{Name: "sin", Args: []Expr{&IntegerLiteral{Val: 1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
 
 		{
 			s: `SELECT sin(1.1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "sin", Expr:&Call{Name:"sin", Args: []Expr{&NumberLiteral{Val:1.1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "sin", Expr: &Call{Name: "sin", Args: []Expr{&NumberLiteral{Val: 1.1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -95,14 +95,14 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT tanh(1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "tanh", Expr:&Call{Name:"tanh", Args: []Expr{&IntegerLiteral{Val:1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "tanh", Expr: &Call{Name: "tanh", Args: []Expr{&IntegerLiteral{Val: 1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
 
 		{
 			s: `SELECT tanh(1.1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "tanh", Expr:&Call{Name:"tanh", Args: []Expr{&NumberLiteral{Val:1.1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "tanh", Expr: &Call{Name: "tanh", Args: []Expr{&NumberLiteral{Val: 1.1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -128,7 +128,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT bitxor(1, 2) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "bitxor", Expr:&Call{Name:"bitxor", Args: []Expr{&IntegerLiteral{Val:1}, &IntegerLiteral{Val:2}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "bitxor", Expr: &Call{Name: "bitxor", Args: []Expr{&IntegerLiteral{Val: 1}, &IntegerLiteral{Val: 2}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -160,7 +160,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT bitnot(1) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "bitnot", Expr:&Call{Name:"bitnot", Args: []Expr{&IntegerLiteral{Val:1}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "bitnot", Expr: &Call{Name: "bitnot", Args: []Expr{&IntegerLiteral{Val: 1}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -180,7 +180,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT mod(1, 2) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "mod", Expr:&Call{Name:"mod", Args: []Expr{&IntegerLiteral{Val:1}, &IntegerLiteral{Val:2}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "mod", Expr: &Call{Name: "mod", Args: []Expr{&IntegerLiteral{Val: 1}, &IntegerLiteral{Val: 2}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -206,7 +206,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT concat(field, "hello") FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "concat", Expr:&Call{Name:"concat", Args: []Expr{&FieldRef{Name:"field"}, &StringLiteral{Val:"hello"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "concat", Expr: &Call{Name: "concat", Args: []Expr{&FieldRef{Name: "field"}, &StringLiteral{Val: "hello"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -232,7 +232,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT regexp_matches(field, "hello") FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "regexp_matches", Expr:&Call{Name:"regexp_matches", Args: []Expr{&FieldRef{Name:"field"}, &StringLiteral{Val:"hello"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "regexp_matches", Expr: &Call{Name: "regexp_matches", Args: []Expr{&FieldRef{Name: "field"}, &StringLiteral{Val: "hello"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -252,7 +252,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT regexp_replace(field, "hello", "h") FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "regexp_replace", Expr:&Call{Name:"regexp_replace", Args: []Expr{&FieldRef{Name:"field"}, &StringLiteral{Val:"hello"}, &StringLiteral{Val:"h"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "regexp_replace", Expr: &Call{Name: "regexp_replace", Args: []Expr{&FieldRef{Name: "field"}, &StringLiteral{Val: "hello"}, &StringLiteral{Val: "h"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -266,7 +266,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT trim(field) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "trim", Expr:&Call{Name:"trim", Args: []Expr{&FieldRef{Name:"field"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "trim", Expr: &Call{Name: "trim", Args: []Expr{&FieldRef{Name: "field"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -280,7 +280,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT rpad(field, 3) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "rpad", Expr:&Call{Name:"rpad", Args: []Expr{&FieldRef{Name:"field"}, &IntegerLiteral{Val:3}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "rpad", Expr: &Call{Name: "rpad", Args: []Expr{&FieldRef{Name: "field"}, &IntegerLiteral{Val: 3}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -294,7 +294,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT substring(field, 3, 4) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "substring", Expr:&Call{Name:"substring", Args: []Expr{&FieldRef{Name:"field"}, &IntegerLiteral{Val:3}, &IntegerLiteral{Val:4}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "substring", Expr: &Call{Name: "substring", Args: []Expr{&FieldRef{Name: "field"}, &IntegerLiteral{Val: 3}, &IntegerLiteral{Val: 4}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -320,7 +320,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT cast(field, "bigint") FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "cast", Expr:&Call{Name:"cast", Args: []Expr{&FieldRef{Name:"field"}, &StringLiteral{Val:"bigint"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "cast", Expr: &Call{Name: "cast", Args: []Expr{&FieldRef{Name: "field"}, &StringLiteral{Val: "bigint"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -334,7 +334,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT chr(field) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "chr", Expr:&Call{Name:"chr", Args: []Expr{&FieldRef{Name:"field"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "chr", Expr: &Call{Name: "chr", Args: []Expr{&FieldRef{Name: "field"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -348,7 +348,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT encode(field, "base64") FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "encode", Expr:&Call{Name:"encode", Args: []Expr{&FieldRef{Name:"field"}, &StringLiteral{Val:"base64"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "encode", Expr: &Call{Name: "encode", Args: []Expr{&FieldRef{Name: "field"}, &StringLiteral{Val: "base64"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -362,7 +362,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT trunc(field, 3) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "trunc", Expr:&Call{Name:"trunc", Args: []Expr{&FieldRef{Name:"field"}, &IntegerLiteral{Val:3}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "trunc", Expr: &Call{Name: "trunc", Args: []Expr{&FieldRef{Name: "field"}, &IntegerLiteral{Val: 3}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -376,7 +376,7 @@ func TestFuncValidator(t *testing.T) {
 		///
 		{
 			s: `SELECT sha512(field) FROM tbl`,
-			stmt: &SelectStatement{Fields: []Field{Field{ AName:"",  Name: "sha512", Expr:&Call{Name:"sha512", Args: []Expr{&FieldRef{Name:"field"}}}}},
+			stmt: &SelectStatement{Fields: []Field{{AName: "", Name: "sha512", Expr: &Call{Name: "sha512", Args: []Expr{&FieldRef{Name: "field"}}}}},
 				Sources: []Source{&Table{Name:"tbl"}},
 			},
 		},
@@ -387,6 +387,36 @@ func TestFuncValidator(t *testing.T) {
 			err: "Expect string type for 1 parameter of function sha512.",
 		},
 
+		{
+			s: `SELECT mqtt("topic") FROM tbl`,
+			stmt: nil,
+			err: "Expect field reference type for 1 parameter of function mqtt.",
+		},
+
+		{
+			s: `SELECT mqtt(topic1) FROM tbl`,
+			stmt: nil,
+			err: "Parameter of mqtt function can be only topic or messageid.",
+		},
+
+		{
+			s: `SELECT split_value(topic1) FROM tbl`,
+			stmt: nil,
+			err: "the arguments for split_value should be 3",
+		},
+
+		{
+			s: `SELECT split_value(topic1, 3, 1) FROM tbl`,
+			stmt: nil,
+			err: "Expect string type for 2 parameter of function split_value.",
+		},
+
+		{
+			s: `SELECT split_value(topic1, "hello", -1) FROM tbl`,
+			stmt: nil,
+			err: "The index should not be a nagtive integer.",
+		},
+
 	}
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))

+ 1 - 1
xsql/funcs_math.go

@@ -11,7 +11,7 @@ func mathCall(name string, args []interface{}) (interface{}, bool) {
 	case "abs":
 		if v, ok := args[0].(int); ok {
 			t := float64(v)
-			var ret int = int(math.Abs(t))
+			var ret = int(math.Abs(t))
 			return ret, true
 		} else if v, ok := args[0].(float64); ok {
 			return math.Abs(v), true

+ 7 - 0
xsql/funcs_misc.go

@@ -201,7 +201,14 @@ func otherCall(name string, args []interface{}) (interface{}, bool) {
 		}
 	case "timestamp":
 		return common.TimeToUnixMilli(time.Now()), true
+	case "mqtt":
+		if v, ok := args[0].(string); ok {
+			return v, true
+		}
+		return nil, false
 	default:
 		return fmt.Errorf("unknown function name %s", name), false
 	}
 }
+
+

+ 9 - 0
xsql/funcs_str.go

@@ -102,6 +102,15 @@ func strCall(name string, args []interface{}) (interface{}, bool) {
 	case "startswith":
 		arg0, arg1 := common.ToString(args[0]), common.ToString(args[1])
 		return strings.HasPrefix(arg0, arg1), true
+	case "split_value":
+		arg0, arg1 := common.ToString(args[0]), common.ToString(args[1])
+		ss := strings.Split(arg0, arg1)
+		v, _ := common.ToInt(args[2])
+		if v > (len(ss) - 1) {
+			return fmt.Errorf("%d out of index array (size = %d)", v, ss), false
+		} else {
+			return ss[v], true
+		}
 	case "trim":
 		arg0 := common.ToString(args[0])
 		return strings.TrimSpace(arg0), true

+ 2 - 2
xsql/functions.go

@@ -35,7 +35,7 @@ var strFuncMap = map[string]string{"concat": "",
 	"length":   "", "lower": "", "lpad": "", "ltrim": "",
 	"numbytes":       "",
 	"regexp_matches": "", "regexp_replace": "", "regexp_substr": "", "rpad": "", "rtrim": "",
-	"substring": "", "startswith": "",
+	"substring": "", "startswith": "", "split_value": "",
 	"trim":  "",
 	"upper": "",
 }
@@ -50,7 +50,7 @@ var hashFuncMap = map[string]string{ "md5": "",
 }
 
 var otherFuncMap = map[string]string{"isNull": "",
-	"newuuid": "", "timestamp": "",
+	"newuuid": "", "timestamp": "", "mqtt": "",
 }
 
 func (*FunctionValuer) Call(name string, args []interface{}) (interface{}, bool) {

+ 5 - 5
xsql/lexical.go

@@ -276,7 +276,7 @@ func (s *Scanner) Scan() (tok Token, lit string) {
 		if r := s.read(); r == '-' {
 			s.skipUntilNewline()
 			return COMMENT, ""
-		} else if (r == '>'){
+		} else if r == '>' {
 			return ARROW, tokens[ARROW]
 		} else if isDigit(r) {
 			s.unread()
@@ -587,20 +587,20 @@ func isWhiteSpace(r rune) bool {
 
 func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
 
-func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
+func isDigit(ch rune) bool { return ch >= '0' && ch <= '9' }
 
 func isQuotation(ch rune) bool { return ch == '"' }
 
 func (tok Token) isOperator() bool { return (tok > operatorBeg && tok < operatorEnd) || tok == ASTERISK || tok == LBRACKET }
 
-func (tok Token) isTimeLiteral() bool { return (tok >= DD && tok <= MS) }
+func (tok Token) isTimeLiteral() bool { return tok >= DD && tok <= MS }
 
 func (tok Token) allowedSourceToken() bool {
-	return (tok == IDENT || tok == DIV || tok == HASH || tok == ADD)
+	return tok == IDENT || tok == DIV || tok == HASH || tok == ADD
 }
 
 //Allowed special field name token
-func (tok Token) allowedSFNToken() bool { return (tok == DOT) }
+func (tok Token) allowedSFNToken() bool { return tok == DOT }
 
 func (tok Token) Precedence() int {
 	switch tok {

+ 30 - 0
xsql/metadata_util.go

@@ -0,0 +1,30 @@
+package xsql
+
+import "strings"
+
+const INTERNAL_MQTT_TOPIC_KEY string = "internal_mqtt_topic_key_$$"
+const INTERNAL_MQTT_MSG_ID_KEY string = "internal_mqtt_msg_id_key_$$"
+
+//For functions such as mqtt(topic): if the field definitions also have a field named "topic", then an internal
+//key is needed for "topic" to avoid key conflicts.
+var SpecialKeyMapper = map[string]string{"topic" : INTERNAL_MQTT_TOPIC_KEY, "messageid" : INTERNAL_MQTT_MSG_ID_KEY}
+func AddSpecialKeyMap(left, right string) {
+	SpecialKeyMapper[left] = right
+}
+
+/**
+The function is used for rewriting the parameter names.
+For example, for the mqtt function, the arguments could be 'topic' or 'messageid'.
+If a field name defined in the stream happens to be 'topic' or 'messageid', it would conflict with them.
+ */
+func (c Call) rewrite_func() *Call {
+	if strings.ToLower(c.Name) == "mqtt" {
+		if f, ok := c.Args[0].(*FieldRef); ok {
+			if n, ok1 := SpecialKeyMapper[f.Name]; ok1 {
+				f.Name = n
+				c.Args[0] = f
+			}
+		}
+	}
+	return &c
+}

+ 10 - 6
xsql/parser.go

@@ -159,6 +159,10 @@ func (p *Parser) Parse() (*SelectStatement, error) {
 		return nil, fmt.Errorf("found %q, expected EOF.", lit)
 	}
 
+	if err := Validate(selects); err != nil {
+		return nil, err
+	}
+
 	return selects, nil
 }
 
@@ -232,7 +236,7 @@ func (p *Parser) parseJoins() (Joins, error) {
 	for {
 		if tok, lit := p.scanIgnoreWhitespace(); tok == INNER || tok == LEFT || tok == RIGHT || tok == FULL || tok == CROSS {
 			if tok1, _ := p.scanIgnoreWhitespace(); tok1 == JOIN {
-				var jt JoinType = INNER_JOIN
+				var jt = INNER_JOIN
 				switch tok {
 				case INNER:
 					jt = INNER_JOIN
@@ -588,13 +592,13 @@ func (p *Parser) parseCall(name string) (Expr, error) {
 	var args []Expr
 	for {
 		if tok, _ := p.scanIgnoreWhitespace(); tok == RPAREN {
-			return &Call{Name: name, Args: args}, nil
+			return Call{Name: name, Args: args}.rewrite_func(), nil
 		} else if tok == ASTERISK {
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 != RPAREN {
 				return nil, fmt.Errorf("found %q, expected right paren.", lit2)
 			} else {
 				args = append(args, &StringLiteral{Val:"*"})
-				return &Call{Name: name, Args: args}, nil
+				return Call{Name: name, Args: args}.rewrite_func(), nil
 			}
 		} else {
 			p.unscan()
@@ -619,7 +623,7 @@ func (p *Parser) parseCall(name string) (Expr, error) {
 		if valErr := validateFuncs(name, args); valErr != nil {
 			return nil, valErr
 		}
-		return &Call{Name: name, Args: args}, nil
+		return Call{Name: name, Args: args}.rewrite_func(), nil
 	} else {
 		if error != nil {
 			return nil, error
@@ -655,7 +659,7 @@ func validateWindows(name string, args []Expr) (WindowType, error) {
 	return NOT_WINDOW, nil
 }
 
-func validateWindow(funcName string, expectLen int, args []Expr) (error) {
+func validateWindow(funcName string, expectLen int, args []Expr) error {
 	if len(args) != expectLen {
 		return fmt.Errorf("The arguments for %s should be %d.\n", funcName, expectLen)
 	}
@@ -957,7 +961,7 @@ func (p *Parser) parseStreamStructType() (FieldType, error) {
 }
 
 func (p *Parser) parseStreamOptions() (map[string]string, error) {
-	var opts map[string]string = make(map[string]string)
+	var opts = make(map[string]string)
 	lStack := &stack.Stack{}
 	if tok, lit := p.scanIgnoreWhitespace(); tok == LPAREN {
 		lStack.Push(LPAREN)

文件差异内容过多而无法显示
+ 498 - 486
xsql/parser_test.go


+ 3 - 4
xsql/plans/aggregate_operator.go

@@ -1,9 +1,8 @@
 package plans
 
 import (
-	"context"
-	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"fmt"
 )
 
@@ -16,8 +15,8 @@ type AggregatePlan struct {
  *  input: *xsql.Tuple from preprocessor | xsql.WindowTuplesSet from windowOp | xsql.JoinTupleSets from joinOp
  *  output: xsql.GroupedTuplesSet
  */
-func (p *AggregatePlan) Apply(ctx context.Context, data interface{}) interface{} {
-	log := common.GetLogger(ctx)
+func (p *AggregatePlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
 	log.Debugf("aggregate plan receive %s", data)
 	var ms []xsql.DataValuer
 	switch input := data.(type) {

+ 3 - 4
xsql/plans/filter_operator.go

@@ -1,9 +1,8 @@
 package plans
 
 import (
-	"context"
-	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 )
 
 type FilterPlan struct {
@@ -14,8 +13,8 @@ type FilterPlan struct {
   *  input: *xsql.Tuple from preprocessor | xsql.WindowTuplesSet from windowOp | xsql.JoinTupleSets from joinOp
   *  output: *xsql.Tuple | xsql.WindowTuplesSet | xsql.JoinTupleSets
  */
-func (p *FilterPlan) Apply(ctx context.Context, data interface{}) interface{} {
-	log := common.GetLogger(ctx)
+func (p *FilterPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
 	log.Debugf("filter plan receive %s", data)
 	switch input := data.(type) {
 	case xsql.Valuer:

+ 83 - 0
xsql/plans/having_operator.go

@@ -0,0 +1,83 @@
+package plans
+
+import (
+	"engine/xsql"
+	"engine/xstream/api"
+)
+
+type HavingPlan struct {
+	Condition xsql.Expr
+}
+
+func (p *HavingPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
+	log.Debugf("having plan receive %s", data)
+	switch input := data.(type) {
+	case xsql.GroupedTuplesSet:
+		r := xsql.GroupedTuplesSet{}
+		for _, v := range input {
+			ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(v, &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: v})}
+			result, ok := ve.Eval(p.Condition).(bool)
+			if ok {
+				if result {
+					r = append(r, v)
+				}
+			} else {
+				log.Errorf("invalid condition that returns non-bool value")
+				return nil
+			}
+
+		}
+		if len(r) > 0 {
+			return r
+		}
+	case xsql.WindowTuplesSet:
+		if len(input) != 1 {
+			log.Infof("WindowTuplesSet with multiple tuples cannot be evaluated")
+			return nil
+		}
+		ms := input[0].Tuples
+		r := ms[:0]
+		for _, v := range ms {
+			//ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&v, &xsql.FunctionValuer{})}
+			ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(input, &v, &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: input}, &xsql.WildcardValuer{Data: &v})}
+			result, ok := ve.Eval(p.Condition).(bool)
+			if ok {
+				if result {
+					r = append(r, v)
+				}
+			} else {
+				log.Errorf("invalid condition that returns non-bool value")
+				return nil
+			}
+		}
+		if len(r) > 0 {
+			input[0].Tuples = r
+			return input
+		}
+	case xsql.JoinTupleSets:
+		ms := input
+		r := ms[:0]
+		for _, v := range ms {
+			//ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&v, &xsql.FunctionValuer{})}
+			ve := &xsql.ValuerEval{Valuer: xsql.MultiAggregateValuer(input, &v, &xsql.FunctionValuer{}, &xsql.AggregateFunctionValuer{Data: input}, &xsql.WildcardValuer{Data: &v})}
+			result, ok := ve.Eval(p.Condition).(bool)
+			if ok {
+				if result {
+					r = append(r, v)
+				}
+			} else {
+				log.Errorf("invalid condition that returns non-bool value")
+				return nil
+			}
+		}
+		if len(r) > 0{
+			return r
+		}
+	default:
+		log.Errorf("Expect xsql.Valuer or its array type.")
+		return nil
+	}
+	return nil
+}

+ 156 - 0
xsql/plans/having_test.go

@@ -0,0 +1,156 @@
+package plans
+
+import (
+	"engine/xsql"
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestHavingPlan_Apply(t *testing.T) {
+	var tests = []struct {
+		sql  string
+		data interface{}
+		result interface{}
+	}{
+		{
+			sql: `SELECT id1 FROM src1 HAVING avg(id1) > 1`,
+			data: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 2, "f1" : "v2"},
+						},{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 5, "f1" : "v1"},
+						},
+
+					},
+				},
+			},
+			result: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 2, "f1" : "v2"},
+						},{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 5, "f1" : "v1"},
+						},
+					},
+				},
+			},
+		},
+
+		{
+			sql: `SELECT id1 FROM src1 HAVING sum(id1) > 1`,
+			data: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},
+					},
+				},
+			},
+			result: nil,
+		},
+
+		{
+			sql: `SELECT id1 FROM src1 HAVING sum(id1) = 1`,
+			data: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},
+					},
+				},
+			},
+			result: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},
+					},
+				},
+			},
+		},
+
+
+		{
+			sql: `SELECT id1 FROM src1 HAVING max(id1) > 10`,
+			data: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},
+					},
+				},
+			},
+			result: nil,
+		},
+
+		{
+			sql: `SELECT id1 FROM src1 HAVING max(id1) = 1`,
+			data: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},
+					},
+				},
+			},
+			result: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{"id1" : 1, "f1" : "v1"},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+	for i, tt := range tests {
+		stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+		if err != nil {
+			t.Errorf("statement parse error %s", err)
+			break
+		}
+
+		pp := &HavingPlan{Condition:stmt.Having}
+		result := pp.Apply(nil, tt.data)
+		if !reflect.DeepEqual(tt.result, result) {
+			t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, result)
+		}
+	}
+}

+ 4 - 4
xsql/plans/join_operator.go

@@ -1,9 +1,9 @@
 package plans
 
 import (
-	"context"
 	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"fmt"
 )
 
@@ -15,8 +15,8 @@ type JoinPlan struct {
 
 // input:  xsql.WindowTuplesSet from windowOp, window is required for join
 // output: xsql.JoinTupleSets
-func (jp *JoinPlan) Apply(ctx context.Context, data interface{}) interface{} {
-	log := common.GetLogger(ctx)
+func (jp *JoinPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
 	var input xsql.WindowTuplesSet
 	if d, ok := data.(xsql.WindowTuplesSet); !ok {
 		log.Errorf("Expect WindowTuplesSet type.\n")
@@ -59,7 +59,7 @@ func getStreamNames(join *xsql.Join) ([]string, error) {
 			}
 			srcs = append(srcs, string(f.StreamName))
 		}
-	});
+	})
 	if len(srcs) != 2 {
 		return nil, fmt.Errorf("Not correct join expression, it requires exactly 2 sources at ON expression.")
 	}

+ 33 - 0
xsql/plans/join_test.go

@@ -561,6 +561,39 @@ func TestLeftJoinPlan_Apply(t *testing.T) {
 			},
 		},
 
+		{
+			sql: "SELECT id1, mqtt(src1.topic) AS a, mqtt(src2.topic) as b FROM src1 left join src2 on src1.id1 = src2.id2",
+			data: xsql.WindowTuplesSet{
+				xsql.WindowTuples{
+					Emitter:"src1",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src1",
+							Message: xsql.Message{ "id1" : 1, "f1" : "v1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type1/device001"},
+						},
+					},
+				},
+
+				xsql.WindowTuples{
+					Emitter:"src2",
+					Tuples:[]xsql.Tuple{
+						{
+							Emitter: "src2",
+							Message: xsql.Message{ "id2" : 1, "f2" : "w1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type2/device001" },
+						},
+					},
+				},
+			},
+			result: xsql.JoinTupleSets{
+				xsql.JoinTuple{
+					Tuples: []xsql.Tuple{
+						{Emitter: "src1", Message: xsql.Message{ "id1" : 1, "f1" : "v1" , xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type1/device001"},},
+						{Emitter: "src2", Message: xsql.Message{ "id2" : 1, "f2" : "w1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type2/device001" },},
+					},
+				},
+			},
+		},
+
 	}
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))

+ 92 - 1
xsql/plans/misc_func_test.go

@@ -85,6 +85,97 @@ func TestHashFunc_Apply1(t *testing.T) {
 				"a": strings.ToLower("07E547D9586F6A73F73FBAC0435ED76951218FB7D0C8D788A309D785436BBB642E93A252A954F23912547D1E8A3B5ED6E1BFD7097821233FA0538F3DB854FEE6"),
 			}},
 		},
+
+		{
+			sql: "SELECT mqtt(topic) AS a FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					xsql.INTERNAL_MQTT_TOPIC_KEY : "devices/device_001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"a": "devices/device_001/message",
+			}},
+		},
+
+		{
+			sql: "SELECT mqtt(topic) AS a FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					xsql.INTERNAL_MQTT_TOPIC_KEY : "devices/device_001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"a": "devices/device_001/message",
+			}},
+		},
+
+		{
+			sql: "SELECT topic, mqtt(topic) AS a FROM test",
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"topic" : "fff",
+					xsql.INTERNAL_MQTT_TOPIC_KEY : "devices/device_001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"topic": "fff",
+				"a": "devices/device_001/message",
+			}},
+		},
+
+	}
+
+	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+	for i, tt := range tests {
+		stmt, err := xsql.NewParser(strings.NewReader(tt.sql)).Parse()
+		if err != nil || stmt == nil {
+			t.Errorf("parse sql %s error %v", tt.sql, err)
+		}
+		pp := &ProjectPlan{Fields:stmt.Fields}
+		result := pp.Apply(nil, tt.data)
+		var mapRes []map[string]interface{}
+		if v, ok := result.([]byte); ok {
+			err := json.Unmarshal(v, &mapRes)
+			if err != nil {
+				t.Errorf("Failed to parse the input into map.\n")
+				continue
+			}
+			//fmt.Printf("%t\n", mapRes["rengine_field_0"])
+
+			if !reflect.DeepEqual(tt.result, mapRes) {
+				t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.result, mapRes)
+			}
+		} else {
+			t.Errorf("The returned result is not type of []byte\n")
+		}
+	}
+}
+func TestMqttFunc_Apply2(t *testing.T) {
+	var tests = []struct {
+		sql  string
+		data xsql.JoinTupleSets
+		result []map[string]interface{}
+	}{
+		{
+			sql: "SELECT id1, mqtt(src1.topic) AS a, mqtt(src2.topic) as b FROM src1 LEFT JOIN src2 ON src1.id1 = src2.id1",
+			data: xsql.JoinTupleSets{
+				xsql.JoinTuple{
+					Tuples: []xsql.Tuple{
+						{Emitter: "src1", Message: xsql.Message{ "id1" : "1", "f1" : "v1" , xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type1/device001"},},
+						{Emitter: "src2", Message: xsql.Message{ "id2" : "1", "f2" : "w1", xsql.INTERNAL_MQTT_TOPIC_KEY: "devices/type2/device001" },},
+					},
+				},
+			},
+			result: []map[string]interface{}{{
+				"id1": "1",
+				"a": "devices/type1/device001",
+				"b": "devices/type2/device001",
+			}},
+		},
 	}
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
@@ -111,4 +202,4 @@ func TestHashFunc_Apply1(t *testing.T) {
 			t.Errorf("The returned result is not type of []byte\n")
 		}
 	}
-}
+}

+ 3 - 5
xsql/plans/order_operator.go

@@ -1,9 +1,8 @@
 package plans
 
 import (
-	"context"
-	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 )
 
 type OrderPlan struct {
@@ -14,8 +13,8 @@ type OrderPlan struct {
   *  input: *xsql.Tuple from preprocessor | xsql.WindowTuplesSet from windowOp | xsql.JoinTupleSets from joinOp
   *  output: *xsql.Tuple | xsql.WindowTuplesSet | xsql.JoinTupleSets
  */
-func (p *OrderPlan) Apply(ctx context.Context, data interface{}) interface{} {
-	log := common.GetLogger(ctx)
+func (p *OrderPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
 	log.Debugf("order plan receive %s", data)
 	sorter := xsql.OrderedBy(p.SortFields)
 	switch input := data.(type) {
@@ -28,5 +27,4 @@ func (p *OrderPlan) Apply(ctx context.Context, data interface{}) interface{} {
 		log.Errorf("Expect xsql.Valuer or its array type.")
 		return nil
 	}
-	return nil
 }

+ 18 - 5
xsql/plans/preprocessor.go

@@ -1,9 +1,9 @@
 package plans
 
 import (
-	"context"
 	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"fmt"
 	"reflect"
 	"strings"
@@ -12,13 +12,14 @@ import (
 
 type Preprocessor struct {
 	streamStmt  *xsql.StreamStmt
+	fields xsql.Fields
 	isEventTime bool
 	timestampField string
 	timestampFormat string
 }
 
-func NewPreprocessor(s *xsql.StreamStmt, iet bool) (*Preprocessor, error){
-	p := &Preprocessor{streamStmt: s, isEventTime: iet}
+func NewPreprocessor(s *xsql.StreamStmt, fs xsql.Fields, iet bool) (*Preprocessor, error){
+	p := &Preprocessor{streamStmt: s, fields: fs, isEventTime: iet}
 	if iet {
 		if tf, ok := s.Options["TIMESTAMP"]; ok{
 			p.timestampField = tf
@@ -37,8 +38,8 @@ func NewPreprocessor(s *xsql.StreamStmt, iet bool) (*Preprocessor, error){
  *	input: *xsql.Tuple
  *	output: *xsql.Tuple
  */
-func (p *Preprocessor) Apply(ctx context.Context, data interface{}) interface{} {
-	log := common.GetLogger(ctx)
+func (p *Preprocessor) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
 	tuple, ok := data.(*xsql.Tuple)
 	if !ok {
 		log.Errorf("Expect tuple data type")
@@ -55,6 +56,18 @@ func (p *Preprocessor) Apply(ctx context.Context, data interface{}) interface{}
 			return nil
 		}
 	}
+
+	//If the field has alias name, then evaluate the alias field before transfer it to proceeding operators, and put it into result.
+	//Otherwise, the GROUP BY, ORDER BY statement cannot get the value.
+	for _, f := range p.fields {
+		if f.AName != "" && (!xsql.HasAggFuncs(f.Expr)) {
+			ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(tuple, &xsql.FunctionValuer{})}
+			if v := ve.Eval(f.Expr); v != nil {
+				result[f.AName] = v
+			}
+		}
+	}
+
 	tuple.Message = result
 	if p.isEventTime{
 		if t, ok := result[p.timestampField]; ok{

+ 30 - 24
xsql/plans/project_operator.go

@@ -1,10 +1,9 @@
 package plans
 
 import (
-	"context"
 	"encoding/json"
-	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"fmt"
 	"strconv"
 	"strings"
@@ -19,8 +18,8 @@ type ProjectPlan struct {
  *  input: *xsql.Tuple from preprocessor or filterOp | xsql.WindowTuplesSet from windowOp or filterOp | xsql.JoinTupleSets from joinOp or filterOp
  *  output: []map[string]interface{}
  */
-func (pp *ProjectPlan) Apply(ctx context.Context, data interface{}) interface{} {
-	log := common.GetLogger(ctx)
+func (pp *ProjectPlan) Apply(ctx api.StreamContext, data interface{}) interface{} {
+	log := ctx.GetLogger()
 	log.Debugf("project plan receive %s", data)
 	var results []map[string]interface{}
 	switch input := data.(type) {
@@ -78,29 +77,36 @@ func (pp *ProjectPlan) getVE(tuple xsql.DataValuer, agg xsql.AggregateData) *xsq
 func project(fs xsql.Fields, ve *xsql.ValuerEval) map[string]interface{} {
 	result := make(map[string]interface{})
 	for _, f := range fs {
-		v := ve.Eval(f.Expr)
-		if _, ok := f.Expr.(*xsql.Wildcard); ok || f.Name == "*"{
-			switch val := v.(type) {
-			case map[string]interface{} :
-				for k, v := range val{
-					if _, ok := result[k]; !ok{
-						result[k] = v
+		//Avoid to re-evaluate for non-agg field has alias name, which was already evaluated in pre-processor operator.
+		if f.AName != "" && (!xsql.HasAggFuncs(f.Expr)){
+			fr := &xsql.FieldRef{StreamName:"", Name:f.AName}
+			v := ve.Eval(fr)
+			result[f.AName] = v
+		} else {
+			v := ve.Eval(f.Expr)
+			if _, ok := f.Expr.(*xsql.Wildcard); ok || f.Name == "*"{
+				switch val := v.(type) {
+				case map[string]interface{} :
+					for k, v := range val{
+						if _, ok := result[k]; !ok{
+							result[k] = v
+						}
 					}
-				}
-			case xsql.Message:
-				for k, v := range val{
-					if _, ok := result[k]; !ok{
-						result[k] = v
+				case xsql.Message:
+					for k, v := range val{
+						if _, ok := result[k]; !ok{
+							result[k] = v
+						}
 					}
+				default:
+					fmt.Printf("Wildcarder does not return map")
 				}
-			default:
-				fmt.Printf("Wildcarder does not return map")
-			}
-		} else {
-			if v != nil {
-				n := assignName(f.Name, f.AName, result)
-				if _, ok := result[n]; !ok{
-					result[n] = v
+			} else {
+				if v != nil {
+					n := assignName(f.Name, f.AName, result)
+					if _, ok := result[n]; !ok{
+						result[n] = v
+					}
 				}
 			}
 		}

+ 64 - 0
xsql/plans/str_func_test.go

@@ -353,6 +353,70 @@ func TestStrFunc_Apply1(t *testing.T) {
 				"a": "NYCNICKS",
 			}},
 		},
+
+		{
+			sql: `SELECT split_value(a,"/",0) AS a FROM test1`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"a" : "test/device001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"a": "test",
+			}},
+		},
+
+		{
+			sql: `SELECT split_value(a,"/",1) AS a FROM test1`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"a" : "test/device001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"a": "device001",
+			}},
+		},
+
+		{
+			sql: `SELECT split_value(a,"/",2) AS a FROM test1`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"a" : "test/device001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"a": "message",
+			}},
+		},
+
+		{
+			sql: `SELECT split_value(a,"/",0) AS a, split_value(a,"/",3) AS b FROM test1`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"a" : "/test/device001/message",
+				},
+			},
+			result: []map[string]interface{}{{
+				"a": "",
+				"b": "message",
+			}},
+		},
+
+		{
+			sql: `SELECT split_value(a,"/",3) AS a FROM test1`,
+			data: &xsql.Tuple{
+				Emitter: "test",
+				Message: xsql.Message{
+					"a" : "test/device001/message",
+				},
+			},
+			result: []map[string]interface{}{map[string]interface {}{}},
+		},
 	}
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))

+ 33 - 24
xsql/processors/xsql_processor.go

@@ -7,7 +7,9 @@ import (
 	"engine/xsql"
 	"engine/xsql/plans"
 	"engine/xstream"
+	"engine/xstream/api"
 	"engine/xstream/extensions"
+	"engine/xstream/nodes"
 	"engine/xstream/operators"
 	"engine/xstream/sinks"
 	"fmt"
@@ -156,7 +158,7 @@ func NewRuleProcessor(d string) *RuleProcessor {
 	return processor
 }
 
-func (p *RuleProcessor) ExecCreate(name, ruleJson string) (*xstream.Rule, error) {
+func (p *RuleProcessor) ExecCreate(name, ruleJson string) (*api.Rule, error) {
 	rule, err := p.getRuleByJson(name, ruleJson)
 	if err != nil {
 		return nil, err
@@ -176,7 +178,7 @@ func (p *RuleProcessor) ExecCreate(name, ruleJson string) (*xstream.Rule, error)
 	return rule, nil
 }
 
-func (p *RuleProcessor) GetRuleByName(name string) (*xstream.Rule, error) {
+func (p *RuleProcessor) GetRuleByName(name string) (*api.Rule, error) {
 	db, err := common.DbOpen(path.Join(p.badgerDir, "rule"))
 	if err != nil {
 		return nil, err
@@ -189,8 +191,8 @@ func (p *RuleProcessor) GetRuleByName(name string) (*xstream.Rule, error) {
 	return p.getRuleByJson(name, s)
 }
 
-func (p *RuleProcessor) getRuleByJson(name, ruleJson string) (*xstream.Rule, error) {
-	var rule xstream.Rule
+func (p *RuleProcessor) getRuleByJson(name, ruleJson string) (*api.Rule, error) {
+	var rule api.Rule
 	if err := json.Unmarshal([]byte(ruleJson), &rule); err != nil {
 		return nil, fmt.Errorf("parse rule %s error : %s", ruleJson, err)
 	}
@@ -208,7 +210,7 @@ func (p *RuleProcessor) getRuleByJson(name, ruleJson string) (*xstream.Rule, err
 	return &rule, nil
 }
 
-func (p *RuleProcessor) ExecInitRule(rule *xstream.Rule) (*xstream.TopologyNew, error) {
+func (p *RuleProcessor) ExecInitRule(rule *api.Rule) (*xstream.TopologyNew, error) {
 	if tp, inputs, err := p.createTopo(rule); err != nil {
 		return nil, err
 	}else{
@@ -217,13 +219,13 @@ func (p *RuleProcessor) ExecInitRule(rule *xstream.Rule) (*xstream.TopologyNew,
 				switch name {
 				case "log":
 					log.Printf("Create log sink with %s", action)
-					tp.AddSink(inputs, sinks.NewLogSink("sink_log", rule.Id))
+					tp.AddSink(inputs, nodes.NewSinkNode("sink_log", sinks.NewLogSink()))
 				case "mqtt":
 					log.Printf("Create mqtt sink with %s", action)
-					if ms, err := sinks.NewMqttSink("mqtt_log", rule.Id, action); err != nil{
+					if ms, err := sinks.NewMqttSink(action); err != nil{
 						return nil, err
 					}else{
-						tp.AddSink(inputs, ms)
+						tp.AddSink(inputs, nodes.NewSinkNode("sink_mqtt", ms))
 					}
 				default:
 					return nil, fmt.Errorf("unsupported action: %s", name)
@@ -235,10 +237,10 @@ func (p *RuleProcessor) ExecInitRule(rule *xstream.Rule) (*xstream.TopologyNew,
 }
 
 func (p *RuleProcessor) ExecQuery(ruleid, sql string) (*xstream.TopologyNew, error) {
-	if tp, inputs, err := p.createTopo(&xstream.Rule{Id: ruleid, Sql: sql}); err != nil {
+	if tp, inputs, err := p.createTopo(&api.Rule{Id: ruleid, Sql: sql}); err != nil {
 		return nil, err
 	} else {
-		tp.AddSink(inputs, sinks.NewLogSinkToMemory("sink_log", ruleid))
+		tp.AddSink(inputs, nodes.NewSinkNode("sink_memory_log", sinks.NewLogSinkToMemory()))
 		go func() {
 			select {
 			case err := <-tp.Open():
@@ -306,12 +308,12 @@ func (p *RuleProcessor) ExecDrop(name string) (string, error) {
 	}
 }
 
-func (p *RuleProcessor) createTopo(rule *xstream.Rule) (*xstream.TopologyNew, []xstream.Emitter, error) {
+func (p *RuleProcessor) createTopo(rule *api.Rule) (*xstream.TopologyNew, []api.Emitter, error) {
 	return p.createTopoWithSources(rule, nil)
 }
 
 //For test to mock source
-func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstream.Source) (*xstream.TopologyNew, []xstream.Emitter, error){
+func (p *RuleProcessor) createTopoWithSources(rule *api.Rule, sources []*nodes.SourceNode) (*xstream.TopologyNew, []api.Emitter, error){
 	name := rule.Id
 	sql := rule.Sql
 	var isEventTime bool
@@ -340,7 +342,7 @@ func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstr
 			return nil, nil, fmt.Errorf("sql %s is not a select statement", sql)
 		} else {
 			tp := xstream.NewWithName(name)
-			var inputs []xstream.Emitter
+			var inputs []api.Emitter
 			streamsFromStmt := xsql.GetStreams(selectStmt)
 			if !shouldCreateSource && len(streamsFromStmt) != len(sources){
 				return nil, nil, fmt.Errorf("invalid parameter sources or streams, the length cannot match the statement, expect %d sources", len(streamsFromStmt))
@@ -356,23 +358,24 @@ func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstr
 				if err != nil {
 					return nil, nil, err
 				}
-				pp, err := plans.NewPreprocessor(streamStmt, isEventTime)
+				pp, err := plans.NewPreprocessor(streamStmt, selectStmt.Fields, isEventTime)
 				if err != nil{
 					return nil, nil, err
 				}
 				if shouldCreateSource{
-					mqs, err := extensions.NewWithName(string(streamStmt.Name), streamStmt.Options["DATASOURCE"], streamStmt.Options["CONF_KEY"])
+					mqs, err := extensions.NewMQTTSource(streamStmt.Options["DATASOURCE"], streamStmt.Options["CONF_KEY"])
 					if err != nil {
 						return nil, nil, err
 					}
-					tp.AddSrc(mqs)
+					node := nodes.NewSourceNode(string(streamStmt.Name), mqs)
+					tp.AddSrc(node)
 					preprocessorOp := xstream.Transform(pp, "preprocessor_"+s)
-					tp.AddOperator([]xstream.Emitter{mqs}, preprocessorOp)
+					tp.AddOperator([]api.Emitter{node}, preprocessorOp)
 					inputs = append(inputs, preprocessorOp)
 				}else{
 					tp.AddSrc(sources[i])
 					preprocessorOp := xstream.Transform(pp, "preprocessor_"+s)
-					tp.AddOperator([]xstream.Emitter{sources[i]}, preprocessorOp)
+					tp.AddOperator([]api.Emitter{sources[i]}, preprocessorOp)
 					inputs = append(inputs, preprocessorOp)
 				}
 			}
@@ -386,7 +389,7 @@ func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstr
 						return nil, nil, err
 					}
 					tp.AddOperator(inputs, wop)
-					inputs = []xstream.Emitter{wop}
+					inputs = []api.Emitter{wop}
 				}
 			}
 
@@ -395,7 +398,7 @@ func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstr
 				//TODO concurrency setting by command
 				//joinOp.SetConcurrency(3)
 				tp.AddOperator(inputs, joinOp)
-				inputs = []xstream.Emitter{joinOp}
+				inputs = []api.Emitter{joinOp}
 			}
 
 			if selectStmt.Condition != nil {
@@ -403,7 +406,7 @@ func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstr
 				//TODO concurrency setting by command
 				// filterOp.SetConcurrency(3)
 				tp.AddOperator(inputs, filterOp)
-				inputs = []xstream.Emitter{filterOp}
+				inputs = []api.Emitter{filterOp}
 			}
 
 			var ds xsql.Dimensions
@@ -412,20 +415,26 @@ func (p *RuleProcessor) createTopoWithSources(rule *xstream.Rule, sources []xstr
 				if ds != nil && len(ds) > 0 {
 					aggregateOp := xstream.Transform(&plans.AggregatePlan{Dimensions: ds}, "aggregate")
 					tp.AddOperator(inputs, aggregateOp)
-					inputs = []xstream.Emitter{aggregateOp}
+					inputs = []api.Emitter{aggregateOp}
 				}
 			}
 
+			if selectStmt.Having != nil {
+				havingOp := xstream.Transform(&plans.HavingPlan{selectStmt.Having}, "having")
+				tp.AddOperator(inputs, havingOp)
+				inputs = []api.Emitter{havingOp}
+			}
+
 			if selectStmt.SortFields != nil {
 				orderOp := xstream.Transform(&plans.OrderPlan{SortFields:selectStmt.SortFields}, "order")
 				tp.AddOperator(inputs, orderOp)
-				inputs = []xstream.Emitter{orderOp}
+				inputs = []api.Emitter{orderOp}
 			}
 
 			if selectStmt.Fields != nil {
 				projectOp := xstream.Transform(&plans.ProjectPlan{Fields: selectStmt.Fields, IsAggregate: xsql.IsAggStatement(selectStmt)}, "project")
 				tp.AddOperator(inputs, projectOp)
-				inputs = []xstream.Emitter{projectOp}
+				inputs = []api.Emitter{projectOp}
 			}
 			return tp, inputs, nil
 		}

+ 22 - 21
xsql/processors/xsql_processor_test.go

@@ -4,7 +4,8 @@ import (
 	"encoding/json"
 	"engine/common"
 	"engine/xsql"
-	"engine/xstream"
+	"engine/xstream/api"
+	"engine/xstream/nodes"
 	"engine/xstream/test"
 	"fmt"
 	"path"
@@ -16,13 +17,10 @@ import (
 
 var BadgerDir string
 func init(){
-	dataDir, err := common.GetDataLoc()
+	BadgerDir, err := common.GetAndCreateDataLoc("test")
 	if err != nil {
 		log.Panic(err)
-	}else{
-		log.Infof("db location is %s", dataDir)
 	}
-	BadgerDir = path.Join(path.Dir(dataDir), "test")
 	log.Infof("badge location is %s", BadgerDir)
 }
 
@@ -147,7 +145,7 @@ func dropStreams(t *testing.T){
 	}
 }
 
-func getMockSource(name string, done chan<- struct{}, size int) *test.MockSource{
+func getMockSource(name string, done chan<- struct{}, size int) *nodes.SourceNode{
 	var data []*xsql.Tuple
 	switch name{
 	case "demo":
@@ -349,7 +347,7 @@ func getMockSource(name string, done chan<- struct{}, size int) *test.MockSource
 			},
 		}
 	}
-	return test.NewMockSource(data[:size], name, done, false)
+	return nodes.NewSourceNode(name, test.NewMockSource(data[:size], done, false))
 }
 
 func TestSingleSQL(t *testing.T) {
@@ -411,7 +409,7 @@ func TestSingleSQL(t *testing.T) {
 	for i, tt := range tests {
 		p := NewRuleProcessor(BadgerDir)
 		parser := xsql.NewParser(strings.NewReader(tt.sql))
-		var sources []xstream.Source
+		var sources []*nodes.SourceNode
 		if stmt, err := xsql.Language.Parse(parser); err != nil{
 			t.Errorf("parse sql %s error: %s", tt.sql , err)
 		}else {
@@ -425,11 +423,12 @@ func TestSingleSQL(t *testing.T) {
 				}
 			}
 		}
-		tp, inputs, err := p.createTopoWithSources(&xstream.Rule{Id:tt.name, Sql: tt.sql}, sources)
+		tp, inputs, err := p.createTopoWithSources(&api.Rule{Id: tt.name, Sql: tt.sql}, sources)
 		if err != nil{
 			t.Error(err)
 		}
-		sink := test.NewMockSink("mockSink", tt.name)
+		mockSink := test.NewMockSink()
+		sink := nodes.NewSinkNode("MockSink", mockSink)
 		tp.AddSink(inputs, sink)
 		count := len(sources)
 		errCh := tp.Open()
@@ -453,7 +452,7 @@ func TestSingleSQL(t *testing.T) {
 				}
 			}
 		}()
-		results := sink.GetResults()
+		results := mockSink.GetResults()
 		var maps [][]map[string]interface{}
 		for _, v := range results{
 			var mapRes []map[string]interface{}
@@ -675,7 +674,7 @@ func TestWindow(t *testing.T) {
 	for i, tt := range tests {
 		p := NewRuleProcessor(BadgerDir)
 		parser := xsql.NewParser(strings.NewReader(tt.sql))
-		var sources []xstream.Source
+		var sources []*nodes.SourceNode
 		if stmt, err := xsql.Language.Parse(parser); err != nil{
 			t.Errorf("parse sql %s error: %s", tt.sql , err)
 		}else {
@@ -689,11 +688,12 @@ func TestWindow(t *testing.T) {
 				}
 			}
 		}
-		tp, inputs, err := p.createTopoWithSources(&xstream.Rule{Id:tt.name, Sql: tt.sql}, sources)
+		tp, inputs, err := p.createTopoWithSources(&api.Rule{Id: tt.name, Sql: tt.sql}, sources)
 		if err != nil{
 			t.Error(err)
 		}
-		sink := test.NewMockSink("mockSink", tt.name)
+		mockSink := test.NewMockSink()
+		sink := nodes.NewSinkNode("mockSink", mockSink)
 		tp.AddSink(inputs, sink)
 		count := len(sources)
 		errCh := tp.Open()
@@ -717,7 +717,7 @@ func TestWindow(t *testing.T) {
 				}
 			}
 		}()
-		results := sink.GetResults()
+		results := mockSink.GetResults()
 		var maps [][]map[string]interface{}
 		for _, v := range results{
 			var mapRes []map[string]interface{}
@@ -782,7 +782,7 @@ func dropEventStreams(t *testing.T){
 	}
 }
 
-func getEventMockSource(name string, done chan<- struct{}, size int) *test.MockSource{
+func getEventMockSource(name string, done chan<- struct{}, size int) *nodes.SourceNode{
 	var data []*xsql.Tuple
 	switch name{
 	case "demoE":
@@ -1011,7 +1011,7 @@ func getEventMockSource(name string, done chan<- struct{}, size int) *test.MockS
 			},
 		}
 	}
-	return test.NewMockSource(data[:size], name, done, true)
+	return nodes.NewSourceNode(name, test.NewMockSource(data[:size], done, true))
 }
 
 func TestEventWindow(t *testing.T) {
@@ -1218,7 +1218,7 @@ func TestEventWindow(t *testing.T) {
 	for i, tt := range tests {
 		p := NewRuleProcessor(BadgerDir)
 		parser := xsql.NewParser(strings.NewReader(tt.sql))
-		var sources []xstream.Source
+		var sources []*nodes.SourceNode
 		if stmt, err := xsql.Language.Parse(parser); err != nil{
 			t.Errorf("parse sql %s error: %s", tt.sql , err)
 		}else {
@@ -1232,7 +1232,7 @@ func TestEventWindow(t *testing.T) {
 				}
 			}
 		}
-		tp, inputs, err := p.createTopoWithSources(&xstream.Rule{
+		tp, inputs, err := p.createTopoWithSources(&api.Rule{
 			Id:tt.name, Sql: tt.sql,
 			Options: map[string]interface{}{
 				"isEventTime": true,
@@ -1242,7 +1242,8 @@ func TestEventWindow(t *testing.T) {
 		if err != nil{
 			t.Error(err)
 		}
-		sink := test.NewMockSink("mockSink", tt.name)
+		mockSink := test.NewMockSink()
+		sink := nodes.NewSinkNode("MockSink", mockSink)
 		tp.AddSink(inputs, sink)
 		count := len(sources)
 		errCh := tp.Open()
@@ -1266,7 +1267,7 @@ func TestEventWindow(t *testing.T) {
 				}
 			}
 		}()
-		results := sink.GetResults()
+		results := mockSink.GetResults()
 		var maps [][]map[string]interface{}
 		for _, v := range results{
 			var mapRes []map[string]interface{}

+ 33 - 0
xsql/sql_validator.go

@@ -0,0 +1,33 @@
+package xsql
+
+import "fmt"
+
+func Validate(stmt *SelectStatement) error {
+	if HasAggFuncs(stmt.Condition) {
+		return fmt.Errorf("Not allowed to call aggregate functions in WHERE clause.")
+	}
+
+	if HasNoAggFuncs(stmt.Having) {
+		return fmt.Errorf("Not allowed to call none-aggregate functions in HAVING clause.")
+	}
+
+	//Cannot GROUP BY alias fields with aggregate funcs
+	//if stmt.Dimensions != nil {
+	//	for _, d := range stmt.Dimensions {
+	//		if f, ok := d.Expr.(*FieldRef); ok {
+	//			for _, f1 := range stmt.Fields {
+	//				if f.Name == f1.Name || f.Name == f1.AName {
+	//					if HasAggFuncs(f1.Expr) {
+	//						return fmt.Errorf("Cannot group on %s.", f.Name)
+	//					}
+	//					break
+	//				}
+	//			}
+	//		} else {
+	//			return fmt.Errorf("Invalid use of group function")
+	//		}
+	//
+	//	}
+	//}
+	return nil
+}

+ 1 - 1
xsql/util.go

@@ -49,7 +49,7 @@ func GetStreams(stmt *SelectStatement) (result []string){
 	return
 }
 
-func LowercaseKeyMap(m map[string]interface{}) (map[string]interface{}) {
+func LowercaseKeyMap(m map[string]interface{}) map[string]interface{} {
 	m1 := make(map[string]interface{})
 	for k, v := range m {
 		if m2, ok := v.(map[string]interface{}); ok {

+ 1 - 1
xsql/xsql_manager.go

@@ -29,7 +29,7 @@ func (t *ParseTree) Handle(tok Token, fn func(*Parser) (Statement, error)) {
 
 
 func (pt *ParseTree) Parse(p *Parser) (Statement, error) {
-	tok, _ := p.scanIgnoreWhitespace();
+	tok, _ := p.scanIgnoreWhitespace()
 	p.unscan()
 	if f, ok  := pt.Handlers[tok]; ok {
 		return f(p)

+ 1 - 1
xsql/xsql_parser_tree_test.go

@@ -20,7 +20,7 @@ func TestParser_ParseTree(t *testing.T) {
 			stmt: &StreamStmt{
 				Name: StreamName("demo"),
 				StreamFields: []StreamField{
-					StreamField{Name: "USERID", FieldType: &BasicType{Type: BIGINT}},
+					{Name: "USERID", FieldType: &BasicType{Type: BIGINT}},
 				},
 				Options: map[string]string{
 					"DATASOURCE" : "users",

+ 61 - 0
xstream/api/stream.go

@@ -0,0 +1,61 @@
+package api
+
+import (
+	"context"
+	"github.com/sirupsen/logrus"
+)
+
+type ConsumeFunc func(data interface{})
+
+type Closable interface {
+	Close(StreamContext) error
+}
+
+type Source interface {
+	//Should be sync function for normal case. The container will run it in go func
+	Open(StreamContext, ConsumeFunc) error
+	Closable
+}
+
+type Sink interface {
+	//Should be sync function for normal case. The container will run it in go func
+	Open(StreamContext) error
+	Collect(StreamContext, interface{}) error
+	Closable
+}
+
+type Emitter interface {
+	AddOutput(chan<- interface{}, string) error
+}
+
+type Collector interface {
+	GetInput() (chan<- interface{}, string)
+}
+
+type TopNode interface {
+	GetName() string
+}
+
+type Rule struct {
+	Id      string                   `json:"id"`
+	Sql     string                   `json:"sql"`
+	Actions []map[string]interface{} `json:"actions"`
+	Options map[string]interface{}   `json:"options"`
+}
+
+type StreamContext interface {
+	context.Context
+	GetLogger()  *logrus.Entry
+	GetRuleId() string
+	GetOpId() string
+	WithMeta(ruleId string, opId string) StreamContext
+	WithCancel() (StreamContext, context.CancelFunc)
+}
+
+type Operator interface {
+	Emitter
+	Collector
+	Exec(StreamContext, chan<- error)
+	GetName() string
+}
+

+ 5 - 6
xstream/cli/main.go

@@ -38,7 +38,7 @@ func streamProcess(client *rpc.Client, args string) error {
 
 func main() {
 	app := cli.NewApp()
-	app.Version = "0.1"
+	app.Version = "0.0.3"
 
 	//nflag := []cli.Flag { cli.StringFlag{
 	//		Name: "name, n",
@@ -84,7 +84,7 @@ func main() {
 				ticker := time.NewTicker(time.Millisecond * 300)
 				defer ticker.Stop()
 				for {
-					fmt.Print("xstream > ")
+					fmt.Print("kuiper > ")
 
 					text, _ := reader.ReadString('\n')
 					inputs = append(inputs, text)
@@ -150,7 +150,6 @@ func main() {
 								args := strings.Join([]string{"CREATE STREAM ", string(stream)}, " ")
 								return streamProcess(client, args)
 							}
-							return nil
 						} else {
 							return streamProcess(client, "")
 						}
@@ -427,8 +426,8 @@ func main() {
 	}
 
 
-	app.Name = "xstream"
-	app.Usage = "The command line tool for EMQ X stream."
+	app.Name = "Kuiper"
+	app.Usage = "The command line tool for EMQ X Kuiper."
 
 	app.Action = func(c *cli.Context) error {
 		cli.ShowSubcommandHelp(c)
@@ -442,6 +441,6 @@ func main() {
 
 	err = app.Run(os.Args)
 	if err != nil {
-		fmt.Errorf("%s", err)
+		fmt.Printf("%v", err)
 	}
 }

+ 15 - 37
xstream/collectors/func.go

@@ -1,67 +1,45 @@
 package collectors
 
 import (
-	"context"
-	"engine/common"
+	"engine/xstream/api"
 	"errors"
 )
 
 // CollectorFunc is a function used to colllect
 // incoming stream data. It can be used as a
 // stream sink.
-type CollectorFunc func(context.Context, interface{}) error
+type CollectorFunc func(api.StreamContext, interface{}) error
 
 // FuncCollector is a colletor that uses a function
 // to collect data.  The specified function must be
 // of type:
 //   CollectorFunc
 type FuncCollector struct {
-	input chan interface{}
-	//logf  api.LogFunc
-	//errf  api.ErrorFunc
 	f     CollectorFunc
-	name  string
 }
 
 // Func creates a new value *FuncCollector that
 // will use the specified function parameter to
 // collect streaming data.
-func Func(name string, f CollectorFunc) *FuncCollector {
-	return &FuncCollector{f: f, name:name, input: make(chan interface{}, 1024)}
-}
-
-func (c *FuncCollector) GetName() string  {
-	return c.name
-}
-
-func (c *FuncCollector) GetInput() (chan<- interface{}, string)  {
-	return c.input, c.name
+func Func(f CollectorFunc) *FuncCollector {
+	return &FuncCollector{f: f}
 }
 
 // Open is the starting point that starts the collector
-func (c *FuncCollector) Open(ctx context.Context, result chan<- error) {
-	//c.logf = autoctx.GetLogFunc(ctx)
-	//c.errf = autoctx.GetErrFunc(ctx)
-	log := common.GetLogger(ctx)
+func (c *FuncCollector) Open(ctx api.StreamContext) error {
+	log := ctx.GetLogger()
 	log.Println("Opening func collector")
 
 	if c.f == nil {
-		err := errors.New("Func collector missing function")
-		log.Println(err)
-		go func() { result <- err }()
+		return errors.New("func collector missing function")
 	}
+	return nil
+}
 
-	go func() {
-		for {
-			select {
-			case item := <-c.input:
-				if err := c.f(ctx, item); err != nil {
-					log.Println(err)
-				}
-			case <-ctx.Done():
-				log.Infof("Func collector %s done", c.name)
-				return
-			}
-		}
-	}()
+func (c *FuncCollector) Collect(ctx api.StreamContext, item interface{}) error {
+	return c.f(ctx, item)
 }
+
+func (c *FuncCollector) Close(api.StreamContext) error {
+	return nil
+}

+ 84 - 0
xstream/contexts/default.go

@@ -0,0 +1,84 @@
+package contexts
+
+import (
+	"context"
+	"engine/common"
+	"engine/xstream/api"
+	"github.com/sirupsen/logrus"
+	"time"
+)
+
+const LoggerKey = "$$logger"
+
+type DefaultContext struct {
+	ruleId string
+	opId   string
+	ctx context.Context
+}
+
+func Background() *DefaultContext {
+	c := &DefaultContext{
+		ctx:context.Background(),
+	}
+	return c
+}
+
+func WithValue(parent *DefaultContext, key, val interface{}) *DefaultContext {
+	parent.ctx = context.WithValue(parent.ctx, key, val)
+	return parent
+}
+
+//Implement context interface
+func (c *DefaultContext) Deadline() (deadline time.Time, ok bool){
+	return c.ctx.Deadline()
+}
+
+func (c *DefaultContext) Done() <-chan struct{}{
+	return c.ctx.Done()
+}
+
+func (c *DefaultContext) Err() error{
+	return c.ctx.Err()
+}
+
+func (c *DefaultContext) Value(key interface{}) interface{}{
+	return c.ctx.Value(key)
+}
+
+// Stream metas
+func (c *DefaultContext) GetContext() context.Context{
+	return c.ctx
+}
+
+func (c *DefaultContext) GetLogger() *logrus.Entry {
+	l, ok := c.ctx.Value(LoggerKey).(*logrus.Entry)
+	if l != nil && ok {
+		return l
+	}
+	return common.Log.WithField("caller", "default")
+}
+
+func (c *DefaultContext) GetRuleId() string {
+	return c.ruleId
+}
+
+func (c *DefaultContext) GetOpId() string {
+	return c.opId
+}
+
+func (c *DefaultContext) WithMeta(ruleId string, opId string) api.StreamContext{
+	return &DefaultContext{
+		ruleId: ruleId,
+		opId: opId,
+		ctx:c.ctx,
+	}
+}
+
+func (c *DefaultContext) WithCancel() (api.StreamContext, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(c.ctx)
+	return &DefaultContext{
+		ruleId: c.ruleId,
+		opId: c.opId,
+		ctx: ctx,
+	}, cancel
+}

+ 1 - 1
xstream/demo/func_visitor.go

@@ -18,7 +18,7 @@ func main() {
 			}
 			srcs = append(srcs, string(f.StreamName))
 		}
-	});
+	})
 
 	for _, src := range srcs {
 		fmt.Println(src)

+ 0 - 63
xstream/demo/test.go

@@ -1,63 +0,0 @@
-package main
-
-import (
-	"engine/common"
-	"engine/xsql"
-	"engine/xsql/plans"
-	"engine/xstream"
-	"engine/xstream/collectors"
-	"engine/xstream/extensions"
-	"strings"
-)
-
-func main() {
-
-	log := common.Log
-
-	demo1Stream, err := xsql.NewParser(strings.NewReader("CREATE STREAM demo1 (count bigint) WITH (source=\"users\", FORMAT=\"AVRO\", KEY=\"USERID\")")).ParseCreateStreamStmt()
-	demo2Stream, err := xsql.NewParser(strings.NewReader("CREATE STREAM demo2 (abc bigint) WITH (source=\"users\", FORMAT=\"AVRO\", KEY=\"USERID\")")).ParseCreateStreamStmt()
-	stmt, err := xsql.NewParser(strings.NewReader("SELECT count FROM demo1 where demo1.count > 3")).Parse()
-	if err != nil {
-		log.Fatal("Failed to parse SQL for %s. \n", err)
-	}
-
-	tp := xstream.New()
-
-	mqs1, err := extensions.NewWithName("srv1", "demo1", "")
-	if err != nil {
-		log.Fatalf("Found error %s.\n", err)
-		return
-	}
-	tp.AddSrc(mqs1)
-
-	mqs2, err := extensions.NewWithName("srv2", "demo2", "")
-	if err != nil {
-		log.Fatalf("Found error %s.\n", err)
-		return
-	}
-	tp.AddSrc(mqs2)
-
-	preprocessorOp1 := xstream.Transform(&plans.Preprocessor{StreamStmt: demo1Stream}, "preprocessor1")
-	tp.AddOperator([]xstream.Emitter{mqs1}, preprocessorOp1)
-
-	preprocessorOp2 := xstream.Transform(&plans.Preprocessor{StreamStmt: demo2Stream}, "preprocessor2")
-	tp.AddOperator([]xstream.Emitter{mqs2}, preprocessorOp2)
-
-	filterOp := xstream.Transform(&plans.FilterPlan{Condition: stmt.Condition}, "filter plan")
-	filterOp.SetConcurrency(3)
-	tp.AddOperator([]xstream.Emitter{preprocessorOp1, preprocessorOp2}, filterOp)
-
-	projectOp := xstream.Transform(&plans.ProjectPlan{Fields: stmt.Fields}, "project plan")
-	tp.AddOperator([]xstream.Emitter{filterOp}, projectOp)
-
-
-	tp.AddSink([]xstream.Emitter{projectOp}, collectors.Func(func(data interface{}) error {
-		log.Println("sink result %s", data)
-		return nil
-	}))
-
-	if err := <-tp.Open(); err != nil {
-		log.Fatal(err)
-		return
-	}
-}

+ 0 - 92
xstream/demo/testWindow.go

@@ -1,92 +0,0 @@
-package main
-
-import (
-	"engine/common"
-	"engine/xsql"
-	"engine/xsql/plans"
-	"engine/xstream"
-	"engine/xstream/collectors"
-	"engine/xstream/extensions"
-	"engine/xstream/operators"
-	"strings"
-)
-
-func main() {
-
-	log := common.Log
-
-	demo1Stream, err := xsql.NewParser(strings.NewReader("CREATE STREAM demo (count bigint) WITH (datasource=\"demo\", FORMAT=\"AVRO\", KEY=\"USERID\")")).ParseCreateStreamStmt()
-	//demo2Stream, err := xsql.NewParser(strings.NewReader("CREATE STREAM demo2 (abc bigint) WITH (datasource=\"demo2\", FORMAT=\"AVRO\", KEY=\"USERID\")")).ParseCreateStreamStmt()
-	//stmt, err := xsql.NewParser(strings.NewReader("SELECT count FROM demo1 where demo1.count > 3")).Parse()
-	if err != nil {
-		log.Fatal("Failed to parse SQL for %s. \n", err)
-	}
-
-	tp := xstream.New()
-
-	mqs1, err := extensions.NewWithName("srv1", "demo", "")
-	if err != nil {
-		log.Fatalf("Found error %s.\n", err)
-		return
-	}
-	tp.AddSrc(mqs1)
-
-	//mqs2, err := extensions.NewWithName("srv1", "demo2")
-	//if err != nil {
-	//	log.Fatalf("Found error %s.\n", err)
-	//	return
-	//}
-	//tp.AddSrc(mqs2)
-
-	preprocessorOp1 := xstream.Transform(&plans.Preprocessor{StreamStmt: demo1Stream}, "preprocessor1")
-	tp.AddOperator([]xstream.Emitter{mqs1}, preprocessorOp1)
-
-	//preprocessorOp2 := xstream.Transform(&plans.Preprocessor{StreamStmt: demo2Stream}, "preprocessor2")
-	//tp.AddOperator([]xstream.Emitter{mqs2}, preprocessorOp2)
-
-	//filterOp := xstream.Transform(&plans.FilterPlan{Condition: stmt.Condition}, "filter plan")
-	//filterOp.SetConcurrency(3)
-	//tp.AddOperator([]xstream.Emitter{preprocessorOp1, preprocessorOp2}, filterOp)
-	//
-	//projectOp := xstream.Transform(&plans.ProjectPlan{Fields: stmt.Fields}, "project plan")
-	//tp.AddOperator([]xstream.Emitter{filterOp}, projectOp)
-
-	//windowOp := operators.NewWindowOp("windowOp", &operators.WindowConfig{
-	//	Type: operators.NO_WINDOW,
-	//})
-
-	//windowOp := operators.NewWindowOp("windowOp", &operators.WindowConfig{
-	//	Type: operators.TUMBLING_WINDOW,
-	//	Length: 30000,
-	//})
-
-	//windowOp := operators.NewWindowOp("windowOp", &operators.WindowConfig{
-	//	Type: operators.HOPPING_WINDOW,
-	//	Length: 20000,
-	//	Interval: 10000,
-	//})
-	//
-	//windowOp := operators.NewWindowOp("windowOp", &operators.WindowConfig{
-	//	Type: operators.SLIDING_WINDOW,
-	//	Length: 20000,
-	//})
-
-	windowOp := operators.NewWindowOp("windowOp", &operators.WindowConfig{
-		Type: operators.SESSION_WINDOW,
-		Length: 20000,
-		Interval: 6000,
-	})
-
-	tp.AddOperator([]xstream.Emitter{preprocessorOp1}, windowOp)
-
-
-	tp.AddSink([]xstream.Emitter{windowOp}, collectors.Func(func(data interface{}) error {
-		log.Println("sink result %s", data)
-		return nil
-	}))
-
-	if err := <-tp.Open(); err != nil {
-		log.Fatal(err)
-		return
-	}
-}

+ 79 - 83
xstream/extensions/mqtt_source.go

@@ -1,15 +1,16 @@
 package extensions
 
 import (
-	"context"
 	"encoding/json"
 	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"fmt"
 	MQTT "github.com/eclipse/paho.mqtt.golang"
 	"github.com/go-yaml/yaml"
 	"github.com/google/uuid"
-	"os"
+	"strconv"
+	"strings"
 	"time"
 )
 
@@ -17,12 +18,12 @@ type MQTTSource struct {
 	srv      string
 	tpc      string
 	clientid string
-	schema   map[string]interface{}
+	pVersion uint
+	uName 	 string
+	password string
 
-	outs  map[string]chan<- interface{}
+	schema   map[string]interface{}
 	conn MQTT.Client
-	name 		string
-	//ctx context.Context
 }
 
 
@@ -31,20 +32,21 @@ type MQTTConfig struct {
 	Sharedsubscription string `yaml:"sharedsubscription"`
 	Servers []string `yaml:"servers"`
 	Clientid string `yaml:"clientid"`
+	PVersion string `yaml:"protocolVersion"`
+	Uname string `yaml:"username"`
+	Password string `yaml:"password"`
 }
 
-
 const confName string = "mqtt_source.yaml"
 
-func NewWithName(name string, topic string, confKey string) (*MQTTSource, error) {
+func NewMQTTSource(topic string, confKey string) (*MQTTSource, error) {
 	b := common.LoadConf(confName)
 	var cfg map[string]MQTTConfig
 	if err := yaml.Unmarshal(b, &cfg); err != nil {
 		return nil, err
 	}
 
-	ms := &MQTTSource{tpc: topic, name: name}
-	ms.outs = make(map[string]chan<- interface{})
+	ms := &MQTTSource{tpc: topic}
 	if srvs := cfg[confKey].Servers; srvs != nil && len(srvs) > 1 {
 		return nil, fmt.Errorf("It only support one server in %s section.", confKey)
 	} else if srvs == nil {
@@ -63,94 +65,88 @@ func NewWithName(name string, topic string, confKey string) (*MQTTSource, error)
 	} else {
 		ms.clientid = cfg["default"].Clientid
 	}
-	return ms, nil
-}
 
-func fileExists(filename string) bool {
-	info, err := os.Stat(filename)
-	if os.IsNotExist(err) {
-		return false
+	var pversion uint = 3
+	if pv := cfg[confKey].PVersion; pv != "" {
+		if pv == "3.1.1" {
+			pversion = 4
+		}
+	}
+	ms.pVersion = pversion
+
+	if uname := cfg[confKey].Uname; uname != "" {
+		ms.uName = strings.Trim(uname, " ")
 	}
-	return !info.IsDir()
+
+	if password := cfg[confKey].Password; password != "" {
+		ms.password = strings.Trim(password, " ")
+	}
+
+	return ms, nil
 }
 
 func (ms *MQTTSource) WithSchema(schema string) *MQTTSource {
 	return ms
 }
 
-func (ms *MQTTSource) GetName() string {
-	return ms.name
-}
-
-func (ms *MQTTSource) AddOutput(output chan<- interface{}, name string) {
-	if _, ok := ms.outs[name]; !ok{
-		ms.outs[name] = output
-	}else{
-		common.Log.Warnf("fail to add output %s, operator %s already has an output of the same name", name, ms.name)
-	}
-}
+func (ms *MQTTSource) Open(ctx api.StreamContext, consume api.ConsumeFunc) error {
+	log := ctx.GetLogger()
 
-func (ms *MQTTSource) Open(ctx context.Context) error {
-	log := common.GetLogger(ctx)
-	go func() {
-		exeCtx, cancel := context.WithCancel(ctx)
-		opts := MQTT.NewClientOptions().AddBroker(ms.srv)
-
-		if ms.clientid == "" {
-			if uuid, err := uuid.NewUUID(); err != nil {
-				log.Printf("Failed to get uuid, the error is %s", err)
-				cancel()
-				return
-			} else {
-				opts.SetClientID(uuid.String())
-			}
+	opts := MQTT.NewClientOptions().AddBroker(ms.srv).SetProtocolVersion(ms.pVersion)
+	if ms.clientid == "" {
+		if uuid, err := uuid.NewUUID(); err != nil {
+			return fmt.Errorf("failed to get uuid, the error is %s", err)
 		} else {
-			opts.SetClientID(ms.clientid)
+			opts.SetClientID(uuid.String())
 		}
+	} else {
+		opts.SetClientID(ms.clientid)
+	}
+	if ms.uName != "" {
+		opts.SetUsername(ms.uName)
+	}
 
-		h := func(client MQTT.Client, msg MQTT.Message) {
-			if ms.tpc != msg.Topic() {
-				return
-			} else {
-				log.Infof("received %s", msg.Payload())
-
-				result := make(map[string]interface{})
-				//The unmarshal type can only be bool, float64, string, []interface{}, map[string]interface{}, nil
-				if e := json.Unmarshal(msg.Payload(), &result); e != nil {
-					log.Errorf("Invalid data format, cannot convert %s into JSON with error %s", string(msg.Payload()), e)
-					return
-				}
-				//Convert the keys to lowercase
-				result = xsql.LowercaseKeyMap(result)
-				tuple := &xsql.Tuple{Emitter: ms.tpc, Message:result, Timestamp: common.TimeToUnixMilli(time.Now())}
-				for _, out := range ms.outs{
-					out <- tuple
-				}
-			}
-		}
+	if ms.password != "" {
+		opts.SetPassword(ms.password)
+	}
 
-		opts.SetDefaultPublishHandler(h)
-		c := MQTT.NewClient(opts)
-		if token := c.Connect(); token.Wait() && token.Error() != nil {
-			log.Printf("Found error when connecting to %s for %s: %s", ms.srv, ms.name, token.Error())
-			cancel()
-			return
-		}
-		log.Printf("The connection to server %s was established successfully", ms.srv)
-		ms.conn = c
-		if token := c.Subscribe(ms.tpc, 0, nil); token.Wait() && token.Error() != nil {
-			log.Printf("Found error: %s", token.Error())
-			cancel()
+	h := func(client MQTT.Client, msg MQTT.Message) {
+		log.Infof("received %s", msg.Payload())
+
+		result := make(map[string]interface{})
+		//The unmarshal type can only be bool, float64, string, []interface{}, map[string]interface{}, nil
+		if e := json.Unmarshal(msg.Payload(), &result); e != nil {
+			log.Errorf("Invalid data format, cannot convert %s into JSON with error %s", string(msg.Payload()), e)
 			return
 		}
-		log.Printf("Successfully subscribe to topic %s", ms.tpc)
-		select {
-		case <-exeCtx.Done():
-			log.Println("Mqtt Source Done")
-			ms.conn.Disconnect(5000)
-			cancel()
-		}
-	}()
+		//Convert the keys to lowercase
+		result = xsql.LowercaseKeyMap(result)
+
+		meta := make(map[string]interface{})
+		meta[xsql.INTERNAL_MQTT_TOPIC_KEY] = msg.Topic()
+		meta[xsql.INTERNAL_MQTT_MSG_ID_KEY] = strconv.Itoa(int(msg.MessageID()))
+
+		tuple := &xsql.Tuple{Emitter: ms.tpc, Message:result, Timestamp: common.TimeToUnixMilli(time.Now()), Metadata:meta}
+		consume(tuple)
+	}
+	//TODO error listener?
+	opts.SetDefaultPublishHandler(h)
+	c := MQTT.NewClient(opts)
+	if token := c.Connect(); token.Wait() && token.Error() != nil {
+		return fmt.Errorf("found error when connecting to %s: %s", ms.srv, token.Error())
+	}
+	log.Printf("The connection to server %s was established successfully", ms.srv)
+	ms.conn = c
+	if token := c.Subscribe(ms.tpc, 0, nil); token.Wait() && token.Error() != nil {
+		return fmt.Errorf("Found error: %s", token.Error())
+	}
+	log.Printf("Successfully subscribe to topic %s", ms.tpc)
+
+	return nil
+}
 
+func (ms *MQTTSource) Close(ctx api.StreamContext) error{
+	ctx.GetLogger().Println("Mqtt Source Done")
+	ms.conn.Disconnect(5000)
 	return nil
 }

+ 5 - 5
xstream/funcs.go

@@ -2,10 +2,10 @@ package xstream
 
 import (
 	"context"
-	"fmt"
+	"engine/xstream/api"
 	"engine/xstream/operators"
+	"fmt"
 	"reflect"
-
 )
 
 type unaryFuncForm byte
@@ -35,7 +35,7 @@ func ProcessFunc(f interface{}) (operators.UnFunc, error) {
 
 	fnval := reflect.ValueOf(f)
 
-	return operators.UnFunc(func(ctx context.Context, data interface{}) interface{} {
+	return operators.UnFunc(func(ctx api.StreamContext, data interface{}) interface{} {
 		result := callOpFunc(fnval, ctx, data, funcForm)
 		return result.Interface()
 	}), nil
@@ -64,7 +64,7 @@ func FilterFunc(f interface{}) (operators.UnFunc, error) {
 	}
 
 	fnval := reflect.ValueOf(f)
-	return operators.UnFunc(func(ctx context.Context, data interface{}) interface{} {
+	return operators.UnFunc(func(ctx api.StreamContext, data interface{}) interface{} {
 		result := callOpFunc(fnval, ctx, data, funcForm)
 		predicate := result.Bool()
 		if !predicate {
@@ -104,7 +104,7 @@ func FlatMapFunc(f interface{}) (operators.UnFunc, error) {
 	}
 
 	fnval := reflect.ValueOf(f)
-	return operators.UnFunc(func(ctx context.Context, data interface{}) interface{} {
+	return operators.UnFunc(func(ctx api.StreamContext, data interface{}) interface{} {
 		result := callOpFunc(fnval, ctx, data, funcForm)
 		return result.Interface()
 	}), nil

+ 21 - 0
xstream/nodes/common_func.go

@@ -0,0 +1,21 @@
+package nodes
+
+import (
+	"engine/xstream/api"
+)
+
+func Broadcast(outputs map[string]chan<- interface{}, val interface{}, ctx api.StreamContext) int {
+	count := 0
+	logger := ctx.GetLogger()
+	for n, out := range outputs {
+		select {
+		case out <- val:
+			count++
+		default: //TODO channel full strategy?
+			logger.Errorf("send output from %s to %s fail: channel full", ctx.GetOpId(), n)
+		}
+	}
+	return count
+}
+
+

+ 56 - 0
xstream/nodes/sink_node.go

@@ -0,0 +1,56 @@
+package nodes
+
+import (
+	"engine/xstream/api"
+)
+
+type SinkNode struct {
+	sink   api.Sink
+	input  chan interface{}
+	name   string
+	ctx    api.StreamContext
+}
+
+func NewSinkNode(name string, sink api.Sink) *SinkNode{
+	return &SinkNode{
+		sink: sink,
+		input: make(chan interface{}, 1024),
+		name: name,
+		ctx: nil,
+	}
+}
+
+func (m *SinkNode) Open(ctx api.StreamContext, result chan<- error) {
+	m.ctx = ctx
+	logger := ctx.GetLogger()
+	logger.Debugf("open sink node %s", m.name)
+	go func() {
+		if err := m.sink.Open(ctx); err != nil{
+			go func() { result <- err }()
+			return
+		}
+		for {
+			select {
+			case item := <-m.input:
+				if err := m.sink.Collect(ctx, item); err != nil{
+					//TODO deal with publish error
+					logger.Errorf("sink node %s publish %v error: %v", ctx.GetOpId(), item, err)
+				}
+			case <-ctx.Done():
+				logger.Infof("sink node %s done", m.name)
+				if err := m.sink.Close(ctx); err != nil{
+					go func() { result <- err }()
+				}
+				return
+			}
+		}
+	}()
+}
+
+func (m *SinkNode) GetName() string{
+	return m.name
+}
+
+func (m *SinkNode) GetInput() (chan<- interface{}, string)  {
+	return m.input, m.name
+}

+ 69 - 0
xstream/nodes/source_node.go

@@ -0,0 +1,69 @@
+package nodes
+
+import (
+	"engine/xstream/api"
+	"fmt"
+)
+
+type SourceNode struct {
+	source api.Source
+	outs   map[string]chan<- interface{}
+	name   string
+	ctx    api.StreamContext
+}
+
+func NewSourceNode(name string, source api.Source) *SourceNode{
+	return &SourceNode{
+		source: source,
+		outs: make(map[string]chan<- interface{}),
+		name: name,
+		ctx: nil,
+	}
+}
+
+func (m *SourceNode) Open(ctx api.StreamContext, errCh chan<- error) {
+	m.ctx = ctx
+	logger := ctx.GetLogger()
+	logger.Debugf("open source node %s", m.name)
+	go func(){
+		if err := m.source.Open(ctx, func(data interface{}){
+			m.Broadcast(data)
+			logger.Debugf("%s consume data %v complete", m.name, data)
+		}); err != nil{
+			select {
+			case errCh <- err:
+			case <-ctx.Done():
+				if err := m.source.Close(ctx); err != nil{
+					go func() { errCh <- err }()
+				}
+			}
+		}
+		for {
+			select {
+			case <-ctx.Done():
+				logger.Infof("source %s done", m.name)
+				if err := m.source.Close(ctx); err != nil{
+					go func() { errCh <- err }()
+				}
+				return
+			}
+		}
+	}()
+}
+
+func (m *SourceNode) Broadcast(data interface{}) int{
+	return Broadcast(m.outs, data, m.ctx)
+}
+
+func (m *SourceNode) GetName() string{
+	return m.name
+}
+
+func (m *SourceNode) AddOutput(output chan<- interface{}, name string) (err error) {
+	if _, ok := m.outs[name]; !ok{
+		m.outs[name] = output
+	}else{
+		return fmt.Errorf("fail to add output %s, stream node %s already has an output of the same name", name, m.name)
+	}
+	return nil
+}

+ 20 - 44
xstream/operators/operations.go

@@ -1,22 +1,22 @@
 package operators
 
 import (
-	"context"
-	"engine/common"
+	"engine/xstream/api"
+	"engine/xstream/nodes"
 	"fmt"
 	"sync"
 )
 
 // UnOperation interface represents unary operations (i.e. Map, Filter, etc)
 type UnOperation interface {
-	Apply(ctx context.Context, data interface{}) interface{}
+	Apply(ctx api.StreamContext, data interface{}) interface{}
 }
 
 // UnFunc implements UnOperation as type func (context.Context, interface{})
-type UnFunc func(context.Context, interface{}) interface{}
+type UnFunc func(api.StreamContext, interface{}) interface{}
 
 // Apply implements UnOperation.Apply method
-func (f UnFunc) Apply(ctx context.Context, data interface{}) interface{} {
+func (f UnFunc) Apply(ctx api.StreamContext, data interface{}) interface{} {
 	return f(ctx, data)
 }
 
@@ -61,12 +61,13 @@ func (o *UnaryOperator) SetConcurrency(concurr int) {
 	}
 }
 
-func (o *UnaryOperator) AddOutput(output chan<- interface{}, name string) {
+func (o *UnaryOperator) AddOutput(output chan<- interface{}, name string) error{
 	if _, ok := o.outputs[name]; !ok{
 		o.outputs[name] = output
 	}else{
-		common.Log.Warnf("fail to add output %s, operator %s already has an output of the same name", name, o.name)
+		return fmt.Errorf("fail to add output %s, operator %s already has an output of the same name", name, o.name)
 	}
+	return nil
 }
 
 func (o *UnaryOperator) GetInput() (chan<- interface{}, string) {
@@ -74,12 +75,12 @@ func (o *UnaryOperator) GetInput() (chan<- interface{}, string) {
 }
 
 // Exec is the entry point for the executor
-func (o *UnaryOperator) Exec(ctx context.Context) (err error) {
-	log := common.GetLogger(ctx)
-	log.Printf("Unary operator %s is started", o.name)
+func (o *UnaryOperator) Exec(ctx api.StreamContext, errCh chan<- error ) {
+	log := ctx.GetLogger()
+	log.Tracef("Unary operator %s is started", o.name)
 
 	if len(o.outputs) <= 0 {
-		err = fmt.Errorf("no output channel found")
+		go func(){errCh <- fmt.Errorf("no output channel found")}()
 		return
 	}
 
@@ -96,7 +97,7 @@ func (o *UnaryOperator) Exec(ctx context.Context) (err error) {
 		for i := 0; i < o.concurrency; i++ { // workers
 			go func(wg *sync.WaitGroup) {
 				defer wg.Done()
-				o.doOp(ctx)
+				o.doOp(ctx, errCh)
 			}(&barrier)
 		}
 
@@ -117,17 +118,15 @@ func (o *UnaryOperator) Exec(ctx context.Context) (err error) {
 			return
 		}
 	}()
-
-	return nil
 }
 
-func (o *UnaryOperator) doOp(ctx context.Context) {
-	log := common.GetLogger(ctx)
+func (o *UnaryOperator) doOp(ctx api.StreamContext, errCh chan<- error) {
+	log := ctx.GetLogger()
 	if o.op == nil {
 		log.Println("Unary operator missing operation")
 		return
 	}
-	exeCtx, cancel := context.WithCancel(ctx)
+	exeCtx, cancel := ctx.WithCancel()
 
 	defer func() {
 		log.Infof("unary operator %s done, cancelling future items", o.name)
@@ -143,40 +142,16 @@ func (o *UnaryOperator) doOp(ctx context.Context) {
 			switch val := result.(type) {
 			case nil:
 				continue
-			//case api.StreamError:
-			//	fmt.Println( val)
-			//	fmt.Println( val)
-			//	if item := val.Item(); item != nil {
-			//		select {
-			//		case o.output <- *item:
-			//		case <-exeCtx.Done():
-			//			return
-			//		}
-			//	}
-			//	continue
-			//case api.PanicStreamError:
-			//	util.Logfn(o.logf, val)
-			//	autoctx.Err(o.errf, api.StreamError(val))
-			//	panic(val)
-			//case api.CancelStreamError:
-			//	util.Logfn(o.logf, val)
-			//	autoctx.Err(o.errf, api.StreamError(val))
-			//	return
-			case error:
+			case error: //TODO error handling
 				log.Println(val)
 				log.Println(val.Error())
 				continue
-
 			default:
-				for _, output := range o.outputs{
-					select {
-					case output <- val:
-					}
-				}
+				nodes.Broadcast(o.outputs, val, ctx)
 			}
 
 		// is cancelling
-		case <-exeCtx.Done():
+		case <-ctx.Done():
 			log.Printf("unary operator %s cancelling....", o.name)
 			o.mutex.Lock()
 			cancel()
@@ -186,3 +161,4 @@ func (o *UnaryOperator) doOp(ctx context.Context) {
 		}
 	}
 }
+

+ 13 - 14
xstream/operators/watermark.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"fmt"
 	"math"
 	"sort"
@@ -62,8 +63,8 @@ func NewWatermarkGenerator(window *WindowConfig, l int64, s []string, stream cha
 	return w, nil
 }
 
-func (w *WatermarkGenerator) track(s string, ts int64, ctx context.Context) bool {
-	log := common.GetLogger(ctx)
+func (w *WatermarkGenerator) track(s string, ts int64, ctx api.StreamContext) bool {
+	log := ctx.GetLogger()
 	log.Infof("watermark generator track event from topic %s at %d", s, ts)
 	currentVal, ok := w.topicToTs[s]
 	if !ok || ts > currentVal {
@@ -79,9 +80,8 @@ func (w *WatermarkGenerator) track(s string, ts int64, ctx context.Context) bool
 	return r
 }
 
-func (w *WatermarkGenerator) start(ctx context.Context) {
-	exeCtx, cancel := context.WithCancel(ctx)
-	log := common.GetLogger(ctx)
+func (w *WatermarkGenerator) start(ctx api.StreamContext) {
+	log := ctx.GetLogger()
 	var c <-chan time.Time
 
 	if w.ticker != nil {
@@ -91,19 +91,18 @@ func (w *WatermarkGenerator) start(ctx context.Context) {
 		select {
 		case <-c:
 			w.trigger(ctx)
-		case <-exeCtx.Done():
+		case <-ctx.Done():
 			log.Println("Cancelling watermark generator....")
 			if w.ticker != nil{
 				w.ticker.Stop()
 			}
-			cancel()
 			return
 		}
 	}
 }
 
-func (w *WatermarkGenerator) trigger(ctx context.Context) {
-	log := common.GetLogger(ctx)
+func (w *WatermarkGenerator) trigger(ctx api.StreamContext) {
+	log := ctx.GetLogger()
 	watermark := w.computeWatermarkTs(ctx)
 	log.Infof("compute watermark event at %d with last %d", watermark, w.lastWatermarkTs)
 	if watermark > w.lastWatermarkTs {
@@ -184,10 +183,10 @@ func (w *WatermarkGenerator) getNextWindow(inputs []*xsql.Tuple,current int64, w
 	}
 }
 
-func (o *WindowOperator) execEventWindow(ctx context.Context) {
-	exeCtx, cancel := context.WithCancel(ctx)
-	log := common.GetLogger(ctx)
-	go o.watermarkGenerator.start(ctx)
+func (o *WindowOperator) execEventWindow(ctx api.StreamContext, errCh chan<- error) {
+	exeCtx, cancel := ctx.WithCancel()
+	log := ctx.GetLogger()
+	go o.watermarkGenerator.start(exeCtx)
 	var (
 		inputs []*xsql.Tuple
 		triggered bool
@@ -236,7 +235,7 @@ func (o *WindowOperator) execEventWindow(ctx context.Context) {
 
 			}
 		// is cancelling
-		case <-exeCtx.Done():
+		case <-ctx.Done():
 			log.Println("Cancelling window....")
 			if o.ticker != nil{
 				o.ticker.Stop()

+ 19 - 23
xstream/operators/window_op.go

@@ -1,9 +1,10 @@
 package operators
 
 import (
-	"context"
 	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
+	"engine/xstream/nodes"
 	"fmt"
 	"github.com/sirupsen/logrus"
 	"math"
@@ -79,12 +80,13 @@ func (o *WindowOperator) GetName() string {
 	return o.name
 }
 
-func (o *WindowOperator) AddOutput(output chan<- interface{}, name string) {
+func (o *WindowOperator) AddOutput(output chan<- interface{}, name string) error {
 	if _, ok := o.outputs[name]; !ok{
 		o.outputs[name] = output
 	}else{
-		common.Log.Warnf("fail to add output %s, operator %s already has an output of the same name", name, o.name)
+		fmt.Errorf("fail to add output %s, operator %s already has an output of the same name", name, o.name)
 	}
+	return nil
 }
 
 func (o *WindowOperator) GetInput() (chan<- interface{}, string) {
@@ -94,26 +96,23 @@ func (o *WindowOperator) GetInput() (chan<- interface{}, string) {
 // Exec is the entry point for the executor
 // input: *xsql.Tuple from preprocessor
 // output: xsql.WindowTuplesSet
-func (o *WindowOperator) Exec(ctx context.Context) (err error) {
-	log := common.GetLogger(ctx)
+func (o *WindowOperator) Exec(ctx api.StreamContext, errCh chan<- error ){
+	log := ctx.GetLogger()
 	log.Printf("Window operator %s is started", o.name)
 
 	if len(o.outputs) <= 0 {
-		err = fmt.Errorf("no output channel found")
+		go func(){errCh <- fmt.Errorf("no output channel found")}()
 		return
 	}
 	if o.isEventTime{
-		go o.execEventWindow(ctx)
+		go o.execEventWindow(ctx, errCh)
 	}else{
-		go o.execProcessingWindow(ctx)
+		go o.execProcessingWindow(ctx, errCh)
 	}
-
-	return nil
 }
 
-func (o *WindowOperator) execProcessingWindow(ctx context.Context) {
-	exeCtx, cancel := context.WithCancel(ctx)
-	log := common.GetLogger(ctx)
+func (o *WindowOperator) execProcessingWindow(ctx api.StreamContext, errCh chan<- error) {
+	log := ctx.GetLogger()
 	var (
 		inputs []*xsql.Tuple
 		c <-chan time.Time
@@ -177,19 +176,18 @@ func (o *WindowOperator) execProcessingWindow(ctx context.Context) {
 				inputs = make([]*xsql.Tuple, 0)
 			}
 		// is cancelling
-		case <-exeCtx.Done():
+		case <-ctx.Done():
 			log.Println("Cancelling window....")
 			if o.ticker != nil{
 				o.ticker.Stop()
 			}
-			cancel()
 			return
 		}
 	}
 }
 
-func (o *WindowOperator) scan(inputs []*xsql.Tuple, triggerTime int64, ctx context.Context) ([]*xsql.Tuple, bool){
-	log := common.GetLogger(ctx)
+func (o *WindowOperator) scan(inputs []*xsql.Tuple, triggerTime int64, ctx api.StreamContext) ([]*xsql.Tuple, bool){
+	log := ctx.GetLogger()
 	log.Printf("window %s triggered at %s", o.name, time.Unix(triggerTime/1000, triggerTime%1000))
 	var delta int64
 	if o.window.Type == xsql.HOPPING_WINDOW || o.window.Type == xsql.SLIDING_WINDOW {
@@ -225,12 +223,10 @@ func (o *WindowOperator) scan(inputs []*xsql.Tuple, triggerTime int64, ctx conte
 		if o.isEventTime{
 			results.Sort()
 		}
-		for _, output := range o.outputs {
-			select {
-			case output <- results:
-				triggered = true
-			default: //TODO need to set buffer
-			}
+		count := nodes.Broadcast(o.outputs, results, ctx)
+		//TODO deal with partial fail
+		if count > 0{
+			triggered = true
 		}
 	}
 

+ 4 - 3
xstream/server/main.go

@@ -5,6 +5,7 @@ import (
 	"engine/common"
 	"engine/xsql/processors"
 	"engine/xstream"
+	"engine/xstream/api"
 	"engine/xstream/sinks"
 	"fmt"
 	"net"
@@ -28,7 +29,7 @@ var processor *processors.RuleProcessor
 
 type Server int
 
-var QUERY_RULE_ID string = "internal-xstream_query_rule"
+var QUERY_RULE_ID = "internal-xstream_query_rule"
 func (t *Server) CreateQuery(sql string, reply *string) error {
 	if _, ok := registry[QUERY_RULE_ID]; ok {
 		stopQuery()
@@ -105,7 +106,7 @@ func (t *Server) CreateRule(rule *common.Rule, reply *string) error{
 	return nil
 }
 
-func (t *Server) createRuleState(rule *xstream.Rule) (*RuleState, error){
+func (t *Server) createRuleState(rule *api.Rule) (*RuleState, error){
 	if tp, err := processor.ExecInitRule(rule); err != nil{
 		return nil, err
 	}else{
@@ -300,7 +301,7 @@ func main() {
 	if e != nil {
 		log.Fatal("Listen error: ", e)
 	}
-	msg := fmt.Sprintf("Serving Rule server on port %d", common.Config.Port)
+	msg := fmt.Sprintf("Serving Kuiper server on port %d", common.Config.Port)
 	log.Info(msg)
 	fmt.Println(msg)
 	// Start accept incoming HTTP connections

+ 7 - 8
xstream/sinks/log_sink.go

@@ -1,8 +1,7 @@
 package sinks
 
 import (
-	"context"
-	"engine/common"
+	"engine/xstream/api"
 	"engine/xstream/collectors"
 	"fmt"
 	"sync"
@@ -11,10 +10,10 @@ import (
 
 // log action, no properties now
 // example: {"log":{}}
-func NewLogSink(name string, ruleId string) *collectors.FuncCollector {
-	return collectors.Func(name, func(ctx context.Context, data interface{}) error {
-		log := common.GetLogger(ctx)
-		log.Printf("sink result for rule %s: %s", ruleId, data)
+func NewLogSink() *collectors.FuncCollector {
+	return collectors.Func(func(ctx api.StreamContext, data interface{}) error {
+		log := ctx.GetLogger()
+		log.Printf("sink result for rule %s: %s", ctx.GetRuleId(), data)
 		return nil
 	})
 }
@@ -27,9 +26,9 @@ type QueryResult struct {
 
 var QR = &QueryResult{LastFetch:time.Now()}
 
-func NewLogSinkToMemory(name string, ruleId string) *collectors.FuncCollector {
+func NewLogSinkToMemory() *collectors.FuncCollector {
 	QR.Results = make([]string, 10)
-	return collectors.Func(name, func(ctx context.Context, data interface{}) error {
+	return collectors.Func(func(ctx api.StreamContext, data interface{}) error {
 		QR.Mux.Lock()
 		QR.Results = append(QR.Results, fmt.Sprintf("%s", data))
 		QR.Mux.Unlock()

+ 69 - 45
xstream/sinks/mqtt_sink.go

@@ -1,26 +1,24 @@
 package sinks
 
 import (
-	"context"
-	"engine/common"
+	"engine/xstream/api"
 	"fmt"
 	MQTT "github.com/eclipse/paho.mqtt.golang"
 	"github.com/google/uuid"
+	"strings"
 )
 
 type MQTTSink struct {
 	srv      string
 	tpc      string
 	clientid string
-
-	input chan interface{}
+	pVersion uint
+	uName 	string
+	password string
 	conn MQTT.Client
-	ruleId   string
-	name 	 string
-	//ctx context.Context
 }
 
-func NewMqttSink(name string, ruleId string, properties interface{}) (*MQTTSink, error) {
+func NewMqttSink(properties interface{}) (*MQTTSink, error) {
 	ps, ok := properties.(map[string]interface{})
 	if !ok {
 		return nil, fmt.Errorf("expect map[string]interface{} type for the mqtt sink properties")
@@ -41,51 +39,77 @@ func NewMqttSink(name string, ruleId string, properties interface{}) (*MQTTSink,
 			clientid = uuid.String()
 		}
 	}
-	ms := &MQTTSink{name:name, ruleId: ruleId, input: make(chan interface{}), srv: srv.(string), tpc: tpc.(string), clientid: clientid.(string)}
-	return ms, nil
-}
+	var pVersion uint = 3
+	pVersionStr, ok := ps["protocol_version"];
+	if ok {
+		v, _ := pVersionStr.(string)
+		if v == "3.1" {
+			pVersion = 3
+		} else if v == "3.1.1" {
+			pVersion = 4
+		} else {
+			return nil, fmt.Errorf("Unknown protocol version {0}, the value could be only 3.1 or 3.1.1 (also refers to MQTT version 4).", pVersionStr)
+		}
+	}
 
-func (ms *MQTTSink) GetName() string {
-	return ms.name
-}
+	uName := ""
+	un, ok := ps["username"];
+	if ok {
+		v, _ := un.(string)
+		if strings.Trim(v, " ") != "" {
+			uName = v
+		}
+	}
 
-func (ms *MQTTSink) GetInput() (chan<- interface{}, string)  {
-	return ms.input, ms.name
-}
+	password := ""
+	pwd, ok := ps["password"];
+	if ok {
+		v, _ := pwd.(string)
+		if strings.Trim(v, " ") != "" {
+			password = v
+		}
+	}
 
-func (ms *MQTTSink) Open(ctx context.Context, result chan<- error) {
-	log := common.GetLogger(ctx)
-	log.Printf("Opening mqtt sink for rule %s", ms.ruleId)
+	ms := &MQTTSink{srv: srv.(string), tpc: tpc.(string), clientid: clientid.(string), pVersion:pVersion, uName:uName, password:password}
+	return ms, nil
+}
 
-	go func() {
-		exeCtx, cancel := context.WithCancel(ctx)
-		opts := MQTT.NewClientOptions().AddBroker(ms.srv).SetClientID(ms.clientid)
+func (ms *MQTTSink) Open(ctx api.StreamContext) error {
+	log := ctx.GetLogger()
+	log.Printf("Opening mqtt sink for rule %s", ctx.GetRuleId())
+	opts := MQTT.NewClientOptions().AddBroker(ms.srv).SetClientID(ms.clientid)
+	if ms.uName != "" {
+		opts = opts.SetUsername(ms.uName)
+	}
 
-		c := MQTT.NewClient(opts)
-		if token := c.Connect(); token.Wait() && token.Error() != nil {
-			result <- fmt.Errorf("Found error: %s", token.Error())
-			cancel()
-		}
-		log.Printf("The connection to server %s was established successfully", ms.srv)
-		ms.conn = c
+	if ms.password != "" {
+		opts = opts.SetPassword(ms.password)
+	}
 
-		for {
-			select {
-			case item := <-ms.input:
-				log.Infof("publish %s", item)
-				if token := c.Publish(ms.tpc, 0, false, item); token.Wait() && token.Error() != nil {
-					result <- fmt.Errorf("Publish error: %s", token.Error())
-				}
+	c := MQTT.NewClient(opts)
+	if token := c.Connect(); token.Wait() && token.Error() != nil {
+		return fmt.Errorf("Found error: %s", token.Error())
+	}
+	log.Printf("The connection to server %s was established successfully", ms.srv)
+	ms.conn = c
+	return nil
+}
 
-			case <-exeCtx.Done():
-				c.Disconnect(5000)
-				log.Infof("Closing mqtt sink")
-				cancel()
-				return
-			}
-		}
+func (ms *MQTTSink) Collect(ctx api.StreamContext, item interface{}) error {
+	logger := ctx.GetLogger()
+	c := ms.conn
+	logger.Infof("publish %s", item)
+	if token := c.Publish(ms.tpc, 0, false, item); token.Wait() && token.Error() != nil {
+		return fmt.Errorf("publish error: %s", token.Error())
+	}
+	return nil
+}
 
-	}()
+func (ms *MQTTSink) Close(ctx api.StreamContext) error {
+	logger := ctx.GetLogger()
+	logger.Infof("Closing mqtt sink")
+	ms.conn.Disconnect(5000)
+	return nil
 }
 
 

+ 30 - 32
xstream/streams.go

@@ -3,16 +3,19 @@ package xstream
 import (
 	"context"
 	"engine/common"
+	"engine/xstream/api"
+	"engine/xstream/contexts"
+	"engine/xstream/nodes"
 	"engine/xstream/operators"
 )
 
 type TopologyNew struct {
-	sources []Source
-	sinks []Sink
-	ctx context.Context
+	sources []*nodes.SourceNode
+	sinks []*nodes.SinkNode
+	ctx api.StreamContext
 	cancel context.CancelFunc
 	drain chan error
-	ops []Operator
+	ops []api.Operator
 	name string
 }
 
@@ -29,12 +32,12 @@ func (s *TopologyNew) Cancel(){
 	s.cancel()
 }
 
-func (s *TopologyNew) AddSrc(src Source) *TopologyNew {
+func (s *TopologyNew) AddSrc(src *nodes.SourceNode) *TopologyNew {
 	s.sources = append(s.sources, src)
 	return s
 }
 
-func (s *TopologyNew) AddSink(inputs []Emitter, snk Sink) *TopologyNew {
+func (s *TopologyNew) AddSink(inputs []api.Emitter, snk *nodes.SinkNode) *TopologyNew {
 	for _, input := range inputs{
 		input.AddOutput(snk.GetInput())
 	}
@@ -42,7 +45,7 @@ func (s *TopologyNew) AddSink(inputs []Emitter, snk Sink) *TopologyNew {
 	return s
 }
 
-func (s *TopologyNew) AddOperator(inputs []Emitter, operator Operator) *TopologyNew {
+func (s *TopologyNew) AddOperator(inputs []api.Emitter, operator api.Operator) *TopologyNew {
 	for _, input := range inputs{
 		input.AddOutput(operator.GetInput())
 	}
@@ -57,7 +60,7 @@ func Transform(op operators.UnOperation, name string) *operators.UnaryOperator {
 }
 
 func (s *TopologyNew) Map(f interface{}) *TopologyNew {
-	log := common.GetLogger(s.ctx)
+	log := s.ctx.GetLogger()
 	op, err := MapFunc(f)
 	if err != nil {
 		log.Println(err)
@@ -91,9 +94,9 @@ func (s *TopologyNew) Transform(op operators.UnOperation) *TopologyNew {
 // stream starts execution.
 func (s *TopologyNew) prepareContext() {
 	if s.ctx == nil || s.ctx.Err() != nil {
-		s.ctx, s.cancel = context.WithCancel(context.Background())
 		contextLogger := common.Log.WithField("rule", s.name)
-		s.ctx = context.WithValue(s.ctx, common.LoggerKey, contextLogger)
+		ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+		s.ctx, s.cancel = ctx.WithCancel()
 	}
 }
 
@@ -103,39 +106,34 @@ func (s *TopologyNew) drainErr(err error) {
 
 func (s *TopologyNew) Open() <-chan error {
 	s.prepareContext() // ensure context is set
-	log := common.GetLogger(s.ctx)
+	log := s.ctx.GetLogger()
 	log.Println("Opening stream")
 
 	// open stream
 	go func() {
-		// open source, if err bail
-		for _, src := range s.sources{
-			if err := src.Open(s.ctx); err != nil {
-				s.drainErr(err)
-				log.Println("Closing stream")
-				return
-			}
+		streamErr := make(chan error)
+		defer func() {
+			log.Println("Closing streamErr channel")
+			close(streamErr)
+		}()
+		// open stream sink, after log sink is ready.
+		for _, snk := range s.sinks{
+			snk.Open(s.ctx.WithMeta(s.name, snk.GetName()), streamErr)
 		}
 
 		//apply operators, if err bail
 		for _, op := range s.ops {
-			if err := op.Exec(s.ctx); err != nil {
-				s.drainErr(err)
-				log.Println("Closing stream")
-				return
-			}
+			op.Exec(s.ctx.WithMeta(s.name, op.GetName()), streamErr)
 		}
-		sinkErr := make(chan error)
-		defer func() {
-			log.Println("Closing sinkErr channel")
-			close(sinkErr)
-		}()
-		// open stream sink, after log sink is ready.
-		for _, snk := range s.sinks{
-			snk.Open(s.ctx, sinkErr)
+
+		// open source, if err bail
+		for _, node := range s.sources{
+			node.Open(s.ctx.WithMeta(s.name, node.GetName()), streamErr)
 		}
+
 		select {
-		case err := <- sinkErr:
+		case err := <-streamErr:
+			//TODO error handling
 			log.Println("Closing stream")
 			s.drain <- err
 		}

+ 20 - 32
xstream/test/mock_sink.go

@@ -1,53 +1,41 @@
 package test
 
 import (
-	"context"
-	"engine/common"
+	"engine/xstream/api"
 )
 
 type MockSink struct {
-	ruleId   string
-	name 	 string
 	results  [][]byte
-	input chan interface{}
 }
 
-func NewMockSink(name, ruleId string) *MockSink{
-	m := &MockSink{
-		ruleId:  ruleId,
-		name:    name,
-		input: make(chan interface{}),
-	}
+func NewMockSink() *MockSink{
+	m := &MockSink{}
 	return m
 }
 
-func (m *MockSink) Open(ctx context.Context, result chan<- error) {
-	log := common.GetLogger(ctx)
+func (m *MockSink) Open(ctx api.StreamContext) error {
+	log := ctx.GetLogger()
 	log.Trace("Opening mock sink")
 	m.results = make([][]byte, 0)
-	go func() {
-		for {
-			select {
-			case item := <-m.input:
-				if v, ok := item.([]byte); ok {
-					log.Infof("mock sink receive %s", item)
-					m.results = append(m.results, v)
-				}else{
-					log.Info("mock sink receive non byte data")
-				}
+	return nil
+}
 
-			case <-ctx.Done():
-				log.Infof("mock sink %s done", m.name)
-				return
-			}
-		}
-	}()
+func (m *MockSink) Collect(ctx api.StreamContext, item interface{}) error {
+	logger := ctx.GetLogger()
+	if v, ok := item.([]byte); ok {
+		logger.Infof("mock sink receive %s", item)
+		m.results = append(m.results, v)
+	}else{
+		logger.Info("mock sink receive non byte data")
+	}
+	return nil
 }
 
-func (m *MockSink) GetInput() (chan<- interface{}, string)  {
-	return m.input, m.name
+func (m *MockSink) Close(ctx api.StreamContext) error {
+	//do nothing
+	return nil
 }
 
 func (m *MockSink) GetResults() [][]byte {
 	return m.results
-}
+}

+ 10 - 26
xstream/test/mock_source.go

@@ -1,35 +1,32 @@
 package test
 
 import (
-	"context"
 	"engine/common"
 	"engine/xsql"
+	"engine/xstream/api"
 	"time"
 )
 
 type MockSource struct {
-	outs map[string]chan<- interface{}
 	data []*xsql.Tuple
-	name string
 	done chan<- struct{}
 	isEventTime bool
 }
 
 // New creates a new CsvSource
-func NewMockSource(data []*xsql.Tuple, name string, done chan<- struct{}, isEventTime bool) *MockSource {
+func NewMockSource(data []*xsql.Tuple, done chan<- struct{}, isEventTime bool) *MockSource {
 	mock := &MockSource{
 		data: data,
-		name: name,
-		outs: make(map[string]chan<- interface{}),
 		done: done,
 		isEventTime: isEventTime,
 	}
 	return mock
 }
 
-func (m *MockSource) Open(ctx context.Context) (err error) {
-	log := common.GetLogger(ctx)
-	log.Trace("Mocksource starts")
+func (m *MockSource) Open(ctx api.StreamContext, consume api.ConsumeFunc) (err error) {
+	log := ctx.GetLogger()
+
+	log.Trace("mock source starts")
 	go func(){
 		for _, d := range m.data{
 			log.Infof("mock source is sending data %s", d)
@@ -44,16 +41,7 @@ func (m *MockSource) Open(ctx context.Context) (err error) {
 					timer.DoTick(d.Timestamp)
 				}
 			}
-			for _, out := range m.outs{
-				select {
-				case out <- d:
-				case <-ctx.Done():
-					log.Trace("Mocksource stop")
-					return
-//				default:  TODO non blocking must have buffer?
-				}
-				time.Sleep(50 * time.Millisecond)
-			}
+			consume(d)
 			if m.isEventTime{
 				time.Sleep(1000 * time.Millisecond) //Let window run to make sure timers are set
 			}else{
@@ -70,10 +58,6 @@ func (m *MockSource) Open(ctx context.Context) (err error) {
 	return nil
 }
 
-func (m *MockSource) AddOutput(output chan<- interface{}, name string) {
-	if _, ok := m.outs[name]; !ok{
-		m.outs[name] = output
-	}else{
-		common.Log.Warnf("fail to add output %s, operator %s already has an output of the same name", name, m.name)
-	}
-}
+func (m *MockSource) Close(ctx api.StreamContext) error{
+	return nil
+}

+ 0 - 40
xstream/types.go

@@ -1,40 +0,0 @@
-package xstream
-
-import (
-	"context"
-)
-
-type Emitter interface {
-	AddOutput(chan<- interface{}, string)
-}
-
-type Source interface {
-	Emitter
-	Open(context context.Context) error
-}
-
-type Collector interface {
-	GetInput() (chan<- interface{}, string)
-}
-
-type Sink interface {
-	Collector
-	Open(context.Context, chan<- error)
-}
-
-type Operator interface{
-	Emitter
-	Collector
-	Exec(context context.Context) error
-}
-
-type TopNode interface{
-	GetName() string
-}
-
-type Rule struct{
-	Id string `json:"id"`
-	Sql string `json:"sql"`
-	Actions []map[string]interface{} `json:"actions"`
-	Options map[string]interface{} `json:"options"`
-}

+ 7 - 7
xstream/util_test.go

@@ -7,31 +7,31 @@ import (
 func TestConf(t *testing.T) {
 	var file = "test/testconf.json"
 
-	if v, e := GetConfAsString(file, "conf_string"); (e != nil || (v != "test")) {
+	if v, e := GetConfAsString(file, "conf_string"); e != nil || (v != "test") {
 		t.Errorf("Expect %s, actual %s; error is %s. \n", "test", v, e)
 	}
 
-	if v, e := GetConfAsInt(file, "conf_int"); (e != nil || (v != 10)) {
+	if v, e := GetConfAsInt(file, "conf_int"); e != nil || (v != 10) {
 		t.Errorf("Expect %s, actual %d. error is %s. \n ", "10", v, e)
 	}
 
-	if v, e := GetConfAsFloat(file, "conf_float"); (e != nil || (v != 32.3)) {
+	if v, e := GetConfAsFloat(file, "conf_float"); e != nil || (v != 32.3) {
 		t.Errorf("Expect %s, actual %f. error is %s. \n ", "32.3", v, e)
 	}
 
-	if v, e := GetConfAsBool(file, "conf_bool"); (e != nil || (v != true)) {
+	if v, e := GetConfAsBool(file, "conf_bool"); e != nil || (v != true) {
 		t.Errorf("Expect %s, actual %v. error is %s. \n", "true", v, e)
 	}
 
-	if v, e := GetConfAsString(file, "servers.srv1.addr"); (e != nil || (v != "127.0.0.1")) {
+	if v, e := GetConfAsString(file, "servers.srv1.addr"); e != nil || (v != "127.0.0.1") {
 		t.Errorf("Expect %s, actual %s. error is %s. \n", "127.0.0.1", v, e)
 	}
 
-	if v, e := GetConfAsString(file, "servers.srv1.clientid"); (e != nil || (v != "")) {
+	if v, e := GetConfAsString(file, "servers.srv1.clientid"); e != nil || (v != "") {
 		t.Errorf("Expect %s, actual %s. error is %s. \n", "", v, e)
 	}
 
-	if v, e := GetConfAsInt(file, "servers.srv2.port"); (e != nil || (v != 1883)) {
+	if v, e := GetConfAsInt(file, "servers.srv2.port"); e != nil || (v != 1883) {
 		t.Errorf("Expect %s, actual %d. error is %s. \n", "1883", v, e)
 	}