Browse Source

feat(table): support table for batch static data as data source

Modify parser, planner, operator, topo, rest, cli and tests
ngjaying 4 years ago
parent
commit
7d1eb0842a

+ 8 - 0
etc/sources/file.yaml

@@ -0,0 +1,8 @@
+default:
+  fileType: json
+  # The directory of the file relative to kuiper root or an absolute path.
+  # Do not include the file name here. The file name should be defined in the stream data source
+  path: data
+
+test:
+  path: fvt_scripts

+ 17 - 0
fvt_scripts/lookup.json

@@ -0,0 +1,17 @@
+[
+  {
+    "id": 1541152486013,
+    "name": "name1",
+    "size": 2
+  },
+  {
+    "id": 1541152487632,
+    "name": "name2",
+    "size": 6
+  },
+  {
+    "id": 1541152489252,
+    "name": "name3",
+    "size": 4
+  }
+]

+ 61 - 24
xsql/ast.go

@@ -15,6 +15,11 @@ type Node interface {
 	node()
 	node()
 }
 }
 
 
+type NameNode interface {
+	Node
+	GetName() string
+}
+
 type Expr interface {
 type Expr interface {
 	Node
 	Node
 	expr()
 	expr()
@@ -315,10 +320,23 @@ type StreamName string
 
 
 func (sn *StreamName) node() {}
 func (sn *StreamName) node() {}
 
 
+type StreamType int
+
+const (
+	TypeStream StreamType = iota
+	TypeTable
+)
+
+var StreamTypeMap = map[StreamType]string{
+	TypeStream: "stream",
+	TypeTable:  "table",
+}
+
 type StreamStmt struct {
 type StreamStmt struct {
 	Name         StreamName
 	Name         StreamName
 	StreamFields StreamFields
 	StreamFields StreamFields
 	Options      Options
 	Options      Options
+	StreamType   StreamType //default to TypeStream
 }
 }
 
 
 func (ss *StreamStmt) node() {}
 func (ss *StreamStmt) node() {}
@@ -388,14 +406,47 @@ type DropStreamStatement struct {
 func (ss *ShowStreamsStatement) Stmt() {}
 func (ss *ShowStreamsStatement) Stmt() {}
 func (ss *ShowStreamsStatement) node() {}
 func (ss *ShowStreamsStatement) node() {}
 
 
-func (dss *DescribeStreamStatement) Stmt() {}
-func (dss *DescribeStreamStatement) node() {}
+func (dss *DescribeStreamStatement) Stmt()           {}
+func (dss *DescribeStreamStatement) node()           {}
+func (dss *DescribeStreamStatement) GetName() string { return dss.Name }
+
+func (ess *ExplainStreamStatement) Stmt()           {}
+func (ess *ExplainStreamStatement) node()           {}
+func (ess *ExplainStreamStatement) GetName() string { return ess.Name }
+
+func (dss *DropStreamStatement) Stmt()           {}
+func (dss *DropStreamStatement) node()           {}
+func (dss *DropStreamStatement) GetName() string { return dss.Name }
+
+type ShowTablesStatement struct {
+}
+
+type DescribeTableStatement struct {
+	Name string
+}
+
+type ExplainTableStatement struct {
+	Name string
+}
+
+type DropTableStatement struct {
+	Name string
+}
 
 
-func (ess *ExplainStreamStatement) Stmt() {}
-func (ess *ExplainStreamStatement) node() {}
+func (ss *ShowTablesStatement) Stmt() {}
+func (ss *ShowTablesStatement) node() {}
 
 
-func (dss *DropStreamStatement) Stmt() {}
-func (dss *DropStreamStatement) node() {}
+func (dss *DescribeTableStatement) Stmt()           {}
+func (dss *DescribeTableStatement) node()           {}
+func (dss *DescribeTableStatement) GetName() string { return dss.Name }
+
+func (ess *ExplainTableStatement) Stmt()           {}
+func (ess *ExplainTableStatement) node()           {}
+func (ess *ExplainTableStatement) GetName() string { return ess.Name }
+
+func (dss *DropTableStatement) Stmt()           {}
+func (dss *DropTableStatement) node()           {}
+func (dss *DropTableStatement) GetName() string { return dss.Name }
 
 
 type Visitor interface {
 type Visitor interface {
 	Visit(Node) Visitor
 	Visit(Node) Visitor
@@ -478,25 +529,11 @@ func Walk(v Visitor, node Node) {
 		Walk(v, n.StreamFields)
 		Walk(v, n.StreamFields)
 		Walk(v, n.Options)
 		Walk(v, n.Options)
 
 
-	case *BasicType:
-		Walk(v, n)
-
-	case *ArrayType:
-		Walk(v, n)
-
-	case *RecType:
-		Walk(v, n)
-
-	case *ShowStreamsStatement:
-		Walk(v, n)
-
-	case *DescribeStreamStatement:
-		Walk(v, n)
-
-	case *ExplainStreamStatement:
+	case *BasicType, *ArrayType, *RecType:
 		Walk(v, n)
 		Walk(v, n)
 
 
-	case *DropStreamStatement:
+	case *ShowStreamsStatement, *DescribeStreamStatement, *ExplainStreamStatement, *DropStreamStatement,
+		*ShowTablesStatement, *DescribeTableStatement, *ExplainTableStatement, *DropTableStatement:
 		Walk(v, n)
 		Walk(v, n)
 	}
 	}
 }
 }
@@ -787,7 +824,7 @@ func (jt *JoinTuple) doGetValue(t string, key string) (interface{}, bool) {
 					return v, ok
 					return v, ok
 				}
 				}
 			}
 			}
-			common.Log.Infoln("Wrong key: ", key, ", not found")
+			common.Log.Debugf("Wrong key: %s not found", key)
 			return nil, false
 			return nil, false
 		} else {
 		} else {
 			return getTupleValue(tuples[0], t, key)
 			return getTupleValue(tuples[0], t, key)

+ 8 - 0
xsql/lexical.go

@@ -98,7 +98,9 @@ const (
 	DESCRIBE
 	DESCRIBE
 	SHOW
 	SHOW
 	STREAM
 	STREAM
+	TABLE
 	STREAMS
 	STREAMS
+	TABLES
 	WITH
 	WITH
 
 
 	XBIGINT
 	XBIGINT
@@ -188,7 +190,9 @@ var tokens = []string{
 	DESCRIBE: "DESCRIBE",
 	DESCRIBE: "DESCRIBE",
 	SHOW:     "SHOW",
 	SHOW:     "SHOW",
 	STREAM:   "STREAM",
 	STREAM:   "STREAM",
+	TABLE:    "TABLE",
 	STREAMS:  "STREAMS",
 	STREAMS:  "STREAMS",
+	TABLES:   "TABLES",
 	WITH:     "WITH",
 	WITH:     "WITH",
 
 
 	XBIGINT:   "BIGINT",
 	XBIGINT:   "BIGINT",
@@ -435,6 +439,10 @@ func (s *Scanner) ScanIdent() (tok Token, lit string) {
 		return STREAM, lit
 		return STREAM, lit
 	case "STREAMS":
 	case "STREAMS":
 		return STREAMS, lit
 		return STREAMS, lit
+	case "TABLE":
+		return TABLE, lit
+	case "TABLES":
+		return TABLES, lit
 	case "WITH":
 	case "WITH":
 		return WITH, lit
 		return WITH, lit
 	case "BIGINT":
 	case "BIGINT":

+ 105 - 49
xsql/parser.go

@@ -836,45 +836,52 @@ func (p *Parser) ConvertToWindows(wtype WindowType, args []Expr) (*Window, error
 	return win, nil
 	return win, nil
 }
 }
 
 
-func (p *Parser) ParseCreateStreamStmt() (*StreamStmt, error) {
-	stmt := &StreamStmt{}
+func (p *Parser) ParseCreateStmt() (Statement, error) {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == CREATE {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == CREATE {
-		if tok1, lit1 := p.scanIgnoreWhitespace(); tok1 == STREAM {
-			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
-				stmt.Name = StreamName(lit2)
-				if fields, err := p.parseStreamFields(); err != nil {
-					return nil, err
-				} else {
-					stmt.StreamFields = fields
-				}
-				if opts, err := p.parseStreamOptions(); err != nil {
-					return nil, err
-				} else {
-					stmt.Options = opts
-				}
-				if tok3, lit3 := p.scanIgnoreWhitespace(); tok3 == SEMICOLON {
-					p.unscan()
-				} else if tok3 == EOF {
-					//Finish parsing create stream statement. Jump to validate
-				} else {
-					return nil, fmt.Errorf("found %q, expected semicolon or EOF.", lit3)
-				}
+		tok1, lit1 := p.scanIgnoreWhitespace()
+		stmt := &StreamStmt{}
+		switch tok1 {
+		case STREAM:
+			stmt.StreamType = TypeStream
+		case TABLE:
+			stmt.StreamType = TypeTable
+		default:
+			return nil, fmt.Errorf("found %q, expected keyword stream or table.", lit1)
+		}
+		if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
+			stmt.Name = StreamName(lit2)
+			if fields, err := p.parseStreamFields(); err != nil {
+				return nil, err
 			} else {
 			} else {
-				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
+				stmt.StreamFields = fields
+			}
+			if opts, err := p.parseStreamOptions(); err != nil {
+				return nil, err
+			} else {
+				stmt.Options = opts
+			}
+			if tok3, lit3 := p.scanIgnoreWhitespace(); tok3 == SEMICOLON {
+				p.unscan()
+			} else if tok3 == EOF {
+				//Finish parsing create stream statement. Jump to validate
+			} else {
+				return nil, fmt.Errorf("found %q, expected semicolon or EOF.", lit3)
 			}
 			}
 		} else {
 		} else {
-			return nil, fmt.Errorf("found %q, expected keyword stream.", lit1)
+			return nil, fmt.Errorf("found %q, expected stream name.", lit2)
+		}
+		if valErr := validateStream(stmt); valErr != nil {
+			return nil, valErr
 		}
 		}
+		return stmt, nil
 	} else {
 	} else {
 		p.unscan()
 		p.unscan()
 		return nil, nil
 		return nil, nil
 	}
 	}
-	if valErr := validateStream(stmt); valErr != nil {
-		return nil, valErr
-	}
-	return stmt, nil
+
 }
 }
 
 
+// TODO more accurate validation for table
 func validateStream(stmt *StreamStmt) error {
 func validateStream(stmt *StreamStmt) error {
 	f, ok := stmt.Options["FORMAT"]
 	f, ok := stmt.Options["FORMAT"]
 	if !ok {
 	if !ok {
@@ -884,6 +891,9 @@ func validateStream(stmt *StreamStmt) error {
 	case common.FORMAT_JSON:
 	case common.FORMAT_JSON:
 		//do nothing
 		//do nothing
 	case common.FORMAT_BINARY:
 	case common.FORMAT_BINARY:
+		if stmt.StreamType == TypeTable {
+			return fmt.Errorf("'binary' format is not supported for table")
+		}
 		switch len(stmt.StreamFields) {
 		switch len(stmt.StreamFields) {
 		case 0:
 		case 0:
 			// do nothing for schemaless
 			// do nothing for schemaless
@@ -901,40 +911,66 @@ func validateStream(stmt *StreamStmt) error {
 	default:
 	default:
 		return fmt.Errorf("option 'format=%s' is invalid", f)
 		return fmt.Errorf("option 'format=%s' is invalid", f)
 	}
 	}
+
+	if stmt.StreamType == TypeTable {
+		if t, ok := stmt.Options["TYPE"]; ok {
+			if strings.ToLower(t) != "file" {
+				return fmt.Errorf("table only supports 'file' type")
+			}
+		}
+	}
 	return nil
 	return nil
 }
 }
 
 
-func (p *Parser) parseShowStreamsStmt() (*ShowStreamsStatement, error) {
-	ss := &ShowStreamsStatement{}
+func (p *Parser) parseShowStmt() (Statement, error) {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == SHOW {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == SHOW {
-		if tok1, lit1 := p.scanIgnoreWhitespace(); tok1 == STREAMS {
+		tok1, lit1 := p.scanIgnoreWhitespace()
+		switch tok1 {
+		case STREAMS:
+			ss := &ShowStreamsStatement{}
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == EOF || tok2 == SEMICOLON {
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == EOF || tok2 == SEMICOLON {
 				return ss, nil
 				return ss, nil
 			} else {
 			} else {
 				return nil, fmt.Errorf("found %q, expected semecolon or EOF.", lit2)
 				return nil, fmt.Errorf("found %q, expected semecolon or EOF.", lit2)
 			}
 			}
-		} else {
-			return nil, fmt.Errorf("found %q, expected keyword streams.", lit1)
+		case TABLES:
+			ss := &ShowTablesStatement{}
+			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == EOF || tok2 == SEMICOLON {
+				return ss, nil
+			} else {
+				return nil, fmt.Errorf("found %q, expected semecolon or EOF.", lit2)
+			}
+		default:
+			return nil, fmt.Errorf("found %q, expected keyword streams or tables.", lit1)
 		}
 		}
 	} else {
 	} else {
 		p.unscan()
 		p.unscan()
 		return nil, nil
 		return nil, nil
 	}
 	}
-	return ss, nil
 }
 }
 
 
-func (p *Parser) parseDescribeStreamStmt() (*DescribeStreamStatement, error) {
-	dss := &DescribeStreamStatement{}
+func (p *Parser) parseDescribeStmt() (Statement, error) {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == DESCRIBE {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == DESCRIBE {
-		if tok1, lit1 := p.scanIgnoreWhitespace(); tok1 == STREAM {
+		tok1, lit1 := p.scanIgnoreWhitespace()
+		switch tok1 {
+		case STREAM:
+			dss := &DescribeStreamStatement{}
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
 				dss.Name = lit2
 				dss.Name = lit2
 				return dss, nil
 				return dss, nil
 			} else {
 			} else {
 				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
 				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
 			}
 			}
-		} else {
-			return nil, fmt.Errorf("found %q, expected keyword stream.", lit1)
+		case TABLE:
+			dss := &DescribeTableStatement{}
+			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
+				dss.Name = lit2
+				return dss, nil
+			} else {
+				return nil, fmt.Errorf("found %q, expected table name.", lit2)
+			}
+		default:
+			return nil, fmt.Errorf("found %q, expected keyword stream or table.", lit1)
 		}
 		}
 	} else {
 	} else {
 		p.unscan()
 		p.unscan()
@@ -942,18 +978,28 @@ func (p *Parser) parseDescribeStreamStmt() (*DescribeStreamStatement, error) {
 	}
 	}
 }
 }
 
 
-func (p *Parser) parseExplainStreamsStmt() (*ExplainStreamStatement, error) {
-	ess := &ExplainStreamStatement{}
+func (p *Parser) parseExplainStmt() (Statement, error) {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == EXPLAIN {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == EXPLAIN {
-		if tok1, lit1 := p.scanIgnoreWhitespace(); tok1 == STREAM {
+		tok1, lit1 := p.scanIgnoreWhitespace()
+		switch tok1 {
+		case STREAM:
+			ess := &ExplainStreamStatement{}
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
 				ess.Name = lit2
 				ess.Name = lit2
 				return ess, nil
 				return ess, nil
 			} else {
 			} else {
 				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
 				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
 			}
 			}
-		} else {
-			return nil, fmt.Errorf("found %q, expected keyword stream.", lit1)
+		case TABLE:
+			ess := &ExplainTableStatement{}
+			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
+				ess.Name = lit2
+				return ess, nil
+			} else {
+				return nil, fmt.Errorf("found %q, expected table name.", lit2)
+			}
+		default:
+			return nil, fmt.Errorf("found %q, expected keyword stream or table.", lit1)
 		}
 		}
 	} else {
 	} else {
 		p.unscan()
 		p.unscan()
@@ -961,18 +1007,28 @@ func (p *Parser) parseExplainStreamsStmt() (*ExplainStreamStatement, error) {
 	}
 	}
 }
 }
 
 
-func (p *Parser) parseDropStreamsStmt() (*DropStreamStatement, error) {
-	ess := &DropStreamStatement{}
+func (p *Parser) parseDropStmt() (Statement, error) {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == DROP {
 	if tok, _ := p.scanIgnoreWhitespace(); tok == DROP {
-		if tok1, lit1 := p.scanIgnoreWhitespace(); tok1 == STREAM {
+		tok1, lit1 := p.scanIgnoreWhitespace()
+		switch tok1 {
+		case STREAM:
+			ess := &DropStreamStatement{}
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
 			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
 				ess.Name = lit2
 				ess.Name = lit2
 				return ess, nil
 				return ess, nil
 			} else {
 			} else {
 				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
 				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
 			}
 			}
-		} else {
-			return nil, fmt.Errorf("found %q, expected keyword stream.", lit1)
+		case TABLE:
+			ess := &DropTableStatement{}
+			if tok2, lit2 := p.scanIgnoreWhitespace(); tok2 == IDENT {
+				ess.Name = lit2
+				return ess, nil
+			} else {
+				return nil, fmt.Errorf("found %q, expected stream name.", lit2)
+			}
+		default:
+			return nil, fmt.Errorf("found %q, expected keyword stream or table.", lit1)
 		}
 		}
 	} else {
 	} else {
 		p.unscan()
 		p.unscan()

+ 15 - 2
xsql/processors/common_test.go

@@ -1046,7 +1046,10 @@ func createStream(t *testing.T, tt ruleTest, j int, opt *api.RuleOption, sinkPro
 		} else {
 		} else {
 			streams := xsql.GetStreams(selectStmt)
 			streams := xsql.GetStreams(selectStmt)
 			for _, stream := range streams {
 			for _, stream := range streams {
-				data := testData[stream]
+				data, ok := testData[stream]
+				if !ok {
+					continue
+				}
 				dataLength = len(data)
 				dataLength = len(data)
 				datas = append(datas, data)
 				datas = append(datas, data)
 				source := nodes.NewSourceNodeWithSource(stream, test.NewMockSource(data), map[string]string{
 				source := nodes.NewSourceNodeWithSource(stream, test.NewMockSource(data), map[string]string{
@@ -1140,11 +1143,21 @@ func handleStream(createOrDrop bool, names []string, t *testing.T) {
 				sql = "CREATE STREAM text (slogan string, brand string) WITH (DATASOURCE=\"users\", FORMAT=\"JSON\")"
 				sql = "CREATE STREAM text (slogan string, brand string) WITH (DATASOURCE=\"users\", FORMAT=\"JSON\")"
 			case "binDemo":
 			case "binDemo":
 				sql = "CREATE STREAM binDemo () WITH (DATASOURCE=\"users\", FORMAT=\"BINARY\")"
 				sql = "CREATE STREAM binDemo () WITH (DATASOURCE=\"users\", FORMAT=\"BINARY\")"
+			case "table1":
+				sql = `CREATE TABLE table1 (
+					name STRING,
+					size BIGINT,
+					id BIGINT
+				) WITH (DATASOURCE="lookup.json", FORMAT="json", CONF_KEY="test");`
 			default:
 			default:
 				t.Errorf("create stream %s fail", name)
 				t.Errorf("create stream %s fail", name)
 			}
 			}
 		} else {
 		} else {
-			sql = `DROP STREAM ` + name
+			if strings.Index(name, "table") == 0 {
+				sql = `DROP TABLE ` + name
+			} else {
+				sql = `DROP STREAM ` + name
+			}
 		}
 		}
 
 
 		_, err := p.ExecStmt(sql)
 		_, err := p.ExecStmt(sql)

+ 59 - 1
xsql/processors/rule_test.go

@@ -9,7 +9,7 @@ import (
 
 
 func TestSingleSQL(t *testing.T) {
 func TestSingleSQL(t *testing.T) {
 	//Reset
 	//Reset
-	streamList := []string{"demo", "demoError", "demo1"}
+	streamList := []string{"demo", "demoError", "demo1", "table1"}
 	handleStream(false, streamList, t)
 	handleStream(false, streamList, t)
 	//Data setup
 	//Data setup
 	var tests = []ruleTest{
 	var tests = []ruleTest{
@@ -405,6 +405,64 @@ func TestSingleSQL(t *testing.T) {
 					"op_2_project":           {"sink_mockSink"},
 					"op_2_project":           {"sink_mockSink"},
 				},
 				},
 			},
 			},
+		}, {
+			name: `TestSingleSQLRule10`,
+			sql:  "SELECT * FROM demo INNER JOIN table1 on demo.ts = table1.id",
+			r: [][]map[string]interface{}{
+				{{
+					"id":    float64(1541152486013),
+					"name":  "name1",
+					"color": "red",
+					"size":  float64(3),
+					"ts":    float64(1541152486013),
+				}},
+				{{
+					"id":    float64(1541152487632),
+					"name":  "name2",
+					"color": "blue",
+					"size":  float64(2),
+					"ts":    float64(1541152487632),
+				}},
+				{{
+					"id":    float64(1541152489252),
+					"name":  "name3",
+					"color": "red",
+					"size":  float64(1),
+					"ts":    float64(1541152489252),
+				}},
+			},
+			m: map[string]interface{}{
+				"op_1_preprocessor_demo_0_exceptions_total":  int64(0),
+				"op_1_preprocessor_demo_0_records_in_total":  int64(5),
+				"op_1_preprocessor_demo_0_records_out_total": int64(5),
+
+				"op_2_tableprocessor_table1_0_exceptions_total":  int64(0),
+				"op_2_tableprocessor_table1_0_records_in_total":  int64(1),
+				"op_2_tableprocessor_table1_0_records_out_total": int64(1),
+
+				"op_3_join_aligner_0_records_in_total":  int64(6),
+				"op_3_join_aligner_0_records_out_total": int64(5),
+
+				"op_4_join_0_exceptions_total":  int64(0),
+				"op_4_join_0_records_in_total":  int64(5),
+				"op_4_join_0_records_out_total": int64(3),
+
+				"op_5_project_0_exceptions_total":  int64(0),
+				"op_5_project_0_records_in_total":  int64(3),
+				"op_5_project_0_records_out_total": int64(3),
+
+				"sink_mockSink_0_exceptions_total":  int64(0),
+				"sink_mockSink_0_records_in_total":  int64(3),
+				"sink_mockSink_0_records_out_total": int64(3),
+
+				"source_demo_0_exceptions_total":  int64(0),
+				"source_demo_0_records_in_total":  int64(5),
+				"source_demo_0_records_out_total": int64(5),
+
+				"source_table1_0_exceptions_total":  int64(0),
+				"source_table1_0_records_in_total":  int64(1),
+				"source_table1_0_records_out_total": int64(1),
+			},
 		},
 		},
 	}
 	}
 	handleStream(true, streamList, t)
 	handleStream(true, streamList, t)

+ 87 - 10
xsql/processors/stream_processor_test.go

@@ -2,7 +2,6 @@ package processors
 
 
 import (
 import (
 	"fmt"
 	"fmt"
-	"github.com/emqx/kuiper/xstream"
 	"path"
 	"path"
 	"reflect"
 	"reflect"
 	"testing"
 	"testing"
@@ -20,7 +19,7 @@ func TestStreamCreateProcessor(t *testing.T) {
 		},
 		},
 		{
 		{
 			s:   `EXPLAIN STREAM topic1;`,
 			s:   `EXPLAIN STREAM topic1;`,
-			err: "Stream topic1 is not found.",
+			err: "Explain stream fails, topic1 is not found.",
 		},
 		},
 		{
 		{
 			s: `CREATE STREAM topic1 (
 			s: `CREATE STREAM topic1 (
@@ -70,7 +69,7 @@ func TestStreamCreateProcessor(t *testing.T) {
 		},
 		},
 		{
 		{
 			s:   `DESCRIBE STREAM topic1;`,
 			s:   `DESCRIBE STREAM topic1;`,
-			err: "Stream topic1 is not found.",
+			err: "Describe stream fails, topic1 is not found.",
 		},
 		},
 		{
 		{
 			s:   `DROP STREAM topic1;`,
 			s:   `DROP STREAM topic1;`,
@@ -97,13 +96,91 @@ func TestStreamCreateProcessor(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func getMetric(tp *xstream.TopologyNew, name string) int {
-	keys, values := tp.GetMetrics()
-	for index, key := range keys {
-		if key == name {
-			return int(values[index].(int64))
+func TestTableProcessor(t *testing.T) {
+	var tests = []struct {
+		s   string
+		r   []string
+		err string
+	}{
+		{
+			s: `SHOW TABLES;`,
+			r: []string{"No table definitions are found."},
+		},
+		{
+			s:   `EXPLAIN TABLE topic1;`,
+			err: "Explain table fails, topic1 is not found.",
+		},
+		{
+			s: `CREATE TABLE topic1 (
+					USERID BIGINT,
+					FIRST_NAME STRING,
+					LAST_NAME STRING,
+					NICKNAMES ARRAY(STRING),
+					Gender BOOLEAN,
+					ADDRESS STRUCT(STREET_NAME STRING, NUMBER BIGINT),
+				) WITH (DATASOURCE="users", FORMAT="JSON", KEY="USERID");`,
+			r: []string{"Table topic1 is created."},
+		},
+		{
+			s: `CREATE TABLE ` + "`stream`" + ` (
+					USERID BIGINT,
+					FIRST_NAME STRING,
+					LAST_NAME STRING,
+					NICKNAMES ARRAY(STRING),
+					Gender BOOLEAN,
+					` + "`地址`" + ` STRUCT(STREET_NAME STRING, NUMBER BIGINT),
+				) WITH (DATASOURCE="users", FORMAT="JSON", KEY="USERID");`,
+			r: []string{"Table stream is created."},
+		},
+		{
+			s: `CREATE TABLE topic1 (
+					USERID BIGINT,
+				) WITH (DATASOURCE="users", FORMAT="JSON", KEY="USERID");`,
+			err: "Create table fails: Item topic1 already exists.",
+		},
+		{
+			s: `EXPLAIN TABLE topic1;`,
+			r: []string{"TO BE SUPPORTED"},
+		},
+		{
+			s: `DESCRIBE TABLE topic1;`,
+			r: []string{"Fields\n--------------------------------------------------------------------------------\nUSERID\tbigint\nFIRST_NAME\tstring\nLAST_NAME\tstring\nNICKNAMES\t" +
+				"array(string)\nGender\tboolean\nADDRESS\tstruct(STREET_NAME string, NUMBER bigint)\n\n" +
+				"DATASOURCE: users\nFORMAT: JSON\nKEY: USERID\n"},
+		},
+		{
+			s: `DROP TABLE topic1;`,
+			r: []string{"Table topic1 is dropped."},
+		},
+		{
+			s: `SHOW TABLES;`,
+			r: []string{"stream"},
+		},
+		{
+			s:   `DESCRIBE TABLE topic1;`,
+			err: "Describe table fails, topic1 is not found.",
+		},
+		{
+			s:   `DROP TABLE topic1;`,
+			err: "Drop table fails: topic1 is not found.",
+		},
+		{
+			s: "DROP TABLE `stream`;",
+			r: []string{"Table stream is dropped."},
+		},
+	}
+
+	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+
+	streamDB := path.Join(getDbDir(), "streamTest")
+	for i, tt := range tests {
+		results, err := NewStreamProcessor(streamDB).ExecStmt(tt.s)
+		if !reflect.DeepEqual(tt.err, errstring(err)) {
+			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
+		} else if tt.err == "" {
+			if !reflect.DeepEqual(tt.r, results) {
+				t.Errorf("%d. %q\n\nstmt mismatch:\nexp=%s\ngot=%#v\n\n", i, tt.s, tt.r, results)
+			}
 		}
 		}
 	}
 	}
-	fmt.Println("can't find " + name)
-	return 0
 }
 }

+ 61 - 1
xsql/processors/window_rule_test.go

@@ -8,7 +8,7 @@ import (
 
 
 func TestWindow(t *testing.T) {
 func TestWindow(t *testing.T) {
 	//Reset
 	//Reset
-	streamList := []string{"demo", "demoError", "demo1", "sessionDemo"}
+	streamList := []string{"demo", "demoError", "demo1", "sessionDemo", "table1"}
 	handleStream(false, streamList, t)
 	handleStream(false, streamList, t)
 	var tests = []ruleTest{
 	var tests = []ruleTest{
 		{
 		{
@@ -632,6 +632,66 @@ func TestWindow(t *testing.T) {
 				"op_2_window_0_records_in_total":   int64(5),
 				"op_2_window_0_records_in_total":   int64(5),
 				"op_2_window_0_records_out_total":  int64(5),
 				"op_2_window_0_records_out_total":  int64(5),
 			},
 			},
+		}, {
+			name: `TestWindowRule11`,
+			sql:  `SELECT color, name FROM demo INNER JOIN table1 on demo.ts = table1.id where demo.size > 2 and table1.size > 1 GROUP BY tumblingwindow(ss, 1)`,
+			r: [][]map[string]interface{}{
+				{{
+					"color": "red",
+					"name":  "name1",
+				}},
+			},
+			m: map[string]interface{}{
+				//"op_4_project_0_exceptions_total":   int64(0),
+				//"op_4_project_0_process_latency_us": int64(0),
+				//"op_4_project_0_records_in_total":   int64(2),
+				//"op_4_project_0_records_out_total":  int64(2),
+
+				"op_3_window_0_exceptions_total":   int64(0),
+				"op_3_window_0_process_latency_us": int64(0),
+				"op_3_window_0_records_in_total":   int64(3),
+				"op_3_window_0_records_out_total":  int64(2),
+
+				"op_2_filter_0_exceptions_total":   int64(0),
+				"op_2_filter_0_process_latency_us": int64(0),
+				"op_2_filter_0_records_in_total":   int64(5),
+				"op_2_filter_0_records_out_total":  int64(3),
+
+				"op_1_preprocessor_demo_0_exceptions_total":  int64(0),
+				"op_1_preprocessor_demo_0_records_in_total":  int64(5),
+				"op_1_preprocessor_demo_0_records_out_total": int64(5),
+
+				"op_4_tableprocessor_table1_0_exceptions_total":  int64(0),
+				"op_4_tableprocessor_table1_0_records_in_total":  int64(1),
+				"op_4_tableprocessor_table1_0_records_out_total": int64(1),
+
+				"op_5_filter_0_exceptions_total":  int64(0),
+				"op_5_filter_0_records_in_total":  int64(1),
+				"op_5_filter_0_records_out_total": int64(1),
+
+				"op_6_join_aligner_0_records_in_total":  int64(3),
+				"op_6_join_aligner_0_records_out_total": int64(2),
+
+				"op_7_join_0_exceptions_total":  int64(0),
+				"op_7_join_0_records_in_total":  int64(2),
+				"op_7_join_0_records_out_total": int64(1),
+
+				"op_8_project_0_exceptions_total":  int64(0),
+				"op_8_project_0_records_in_total":  int64(1),
+				"op_8_project_0_records_out_total": int64(1),
+
+				"sink_mockSink_0_exceptions_total":  int64(0),
+				"sink_mockSink_0_records_in_total":  int64(1),
+				"sink_mockSink_0_records_out_total": int64(1),
+
+				"source_demo_0_exceptions_total":  int64(0),
+				"source_demo_0_records_in_total":  int64(5),
+				"source_demo_0_records_out_total": int64(5),
+
+				"source_table1_0_exceptions_total":  int64(0),
+				"source_table1_0_records_in_total":  int64(1),
+				"source_table1_0_records_out_total": int64(1),
+			},
 		},
 		},
 	}
 	}
 	handleStream(true, streamList, t)
 	handleStream(true, streamList, t)

+ 121 - 72
xsql/processors/xsql_processor.go

@@ -16,7 +16,9 @@ import (
 	"strings"
 	"strings"
 )
 )
 
 
-var log = common.Log
+var (
+	log = common.Log
+)
 
 
 type StreamProcessor struct {
 type StreamProcessor struct {
 	db kv.KeyValue
 	db kv.KeyValue
@@ -37,23 +39,44 @@ func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error
 		return nil, err
 		return nil, err
 	}
 	}
 	switch s := stmt.(type) {
 	switch s := stmt.(type) {
-	case *xsql.StreamStmt:
+	case *xsql.StreamStmt: //Table is also StreamStmt
 		var r string
 		var r string
-		r, err = p.execCreateStream(s, statement)
+		err = p.execSave(s, statement, false)
+		stt := xsql.StreamTypeMap[s.StreamType]
+		if err != nil {
+			err = fmt.Errorf("Create %s fails: %v.", stt, err)
+		} else {
+			r = fmt.Sprintf("%s %s is created.", strings.Title(stt), s.Name)
+			log.Printf("%s", r)
+		}
 		result = append(result, r)
 		result = append(result, r)
 	case *xsql.ShowStreamsStatement:
 	case *xsql.ShowStreamsStatement:
-		result, err = p.execShowStream(s)
+		result, err = p.execShow(xsql.TypeStream)
+	case *xsql.ShowTablesStatement:
+		result, err = p.execShow(xsql.TypeTable)
 	case *xsql.DescribeStreamStatement:
 	case *xsql.DescribeStreamStatement:
 		var r string
 		var r string
-		r, err = p.execDescribeStream(s)
+		r, err = p.execDescribe(s, xsql.TypeStream)
+		result = append(result, r)
+	case *xsql.DescribeTableStatement:
+		var r string
+		r, err = p.execDescribe(s, xsql.TypeTable)
 		result = append(result, r)
 		result = append(result, r)
 	case *xsql.ExplainStreamStatement:
 	case *xsql.ExplainStreamStatement:
 		var r string
 		var r string
-		r, err = p.execExplainStream(s)
+		r, err = p.execExplain(s, xsql.TypeStream)
+		result = append(result, r)
+	case *xsql.ExplainTableStatement:
+		var r string
+		r, err = p.execExplain(s, xsql.TypeTable)
 		result = append(result, r)
 		result = append(result, r)
 	case *xsql.DropStreamStatement:
 	case *xsql.DropStreamStatement:
 		var r string
 		var r string
-		r, err = p.execDropStream(s)
+		r, err = p.execDrop(s, xsql.TypeStream)
+		result = append(result, r)
+	case *xsql.DropTableStatement:
+		var r string
+		r, err = p.execDrop(s, xsql.TypeTable)
 		result = append(result, r)
 		result = append(result, r)
 	default:
 	default:
 		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
 		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
@@ -62,47 +85,50 @@ func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error
 	return
 	return
 }
 }
 
 
-func (p *StreamProcessor) execCreateStream(stmt *xsql.StreamStmt, statement string) (string, error) {
+func (p *StreamProcessor) execSave(stmt *xsql.StreamStmt, statement string, replace bool) error {
 	err := p.db.Open()
 	err := p.db.Open()
 	if err != nil {
 	if err != nil {
-		return "", fmt.Errorf("Create stream fails, error when opening db: %v.", err)
+		return fmt.Errorf("error when opening db: %v.", err)
 	}
 	}
 	defer p.db.Close()
 	defer p.db.Close()
-	err = p.db.Setnx(string(stmt.Name), statement)
+	s, err := json.Marshal(xsql.StreamInfo{
+		StreamType: stmt.StreamType,
+		Statement:  statement,
+	})
 	if err != nil {
 	if err != nil {
-		return "", fmt.Errorf("Create stream fails: %v.", err)
+		return fmt.Errorf("error when saving to db: %v.", err)
+	}
+	if replace {
+		err = p.db.Set(string(stmt.Name), string(s))
 	} else {
 	} else {
-		info := fmt.Sprintf("Stream %s is created.", stmt.Name)
-		log.Printf("%s", info)
-		return info, nil
+		err = p.db.Setnx(string(stmt.Name), string(s))
 	}
 	}
+	return err
 }
 }
 
 
-func (p *StreamProcessor) ExecReplaceStream(statement string) (string, error) {
+func (p *StreamProcessor) ExecReplaceStream(statement string, st xsql.StreamType) (string, error) {
 	parser := xsql.NewParser(strings.NewReader(statement))
 	parser := xsql.NewParser(strings.NewReader(statement))
 	stmt, err := xsql.Language.Parse(parser)
 	stmt, err := xsql.Language.Parse(parser)
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
-
+	stt := xsql.StreamTypeMap[st]
 	switch s := stmt.(type) {
 	switch s := stmt.(type) {
 	case *xsql.StreamStmt:
 	case *xsql.StreamStmt:
-		if err = p.db.Open(); nil != err {
-			return "", fmt.Errorf("Replace stream fails, error when opening db: %v.", err)
+		if s.StreamType != st {
+			return "", common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("%s %s is not found", xsql.StreamTypeMap[st], s.Name))
 		}
 		}
-		defer p.db.Close()
-
-		if err = p.db.Set(string(s.Name), statement); nil != err {
-			return "", fmt.Errorf("Replace stream fails: %v.", err)
+		err = p.execSave(s, statement, true)
+		if err != nil {
+			return "", fmt.Errorf("Replace %s fails: %v.", stt, err)
 		} else {
 		} else {
-			info := fmt.Sprintf("Stream %s is replaced.", s.Name)
+			info := fmt.Sprintf("%s %s is replaced.", strings.Title(stt), s.Name)
 			log.Printf("%s", info)
 			log.Printf("%s", info)
 			return info, nil
 			return info, nil
 		}
 		}
 	default:
 	default:
-		return "", fmt.Errorf("Invalid stream statement: %s", statement)
+		return "", fmt.Errorf("Invalid %s statement: %s", stt, statement)
 	}
 	}
-	return "", nil
 }
 }
 
 
 func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
 func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
@@ -114,88 +140,111 @@ func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
 	}
 	}
 }
 }
 
 
-func (p *StreamProcessor) execShowStream(_ *xsql.ShowStreamsStatement) ([]string, error) {
-	keys, err := p.ShowStream()
+func (p *StreamProcessor) execShow(st xsql.StreamType) ([]string, error) {
+	keys, err := p.ShowStream(st)
 	if len(keys) == 0 {
 	if len(keys) == 0 {
-		keys = append(keys, "No stream definitions are found.")
+		keys = append(keys, fmt.Sprintf("No %s definitions are found.", xsql.StreamTypeMap[st]))
 	}
 	}
 	return keys, err
 	return keys, err
 }
 }
 
 
-func (p *StreamProcessor) ShowStream() ([]string, error) {
+func (p *StreamProcessor) ShowStream(st xsql.StreamType) ([]string, error) {
+	stt := xsql.StreamTypeMap[st]
 	err := p.db.Open()
 	err := p.db.Open()
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Show stream fails, error when opening db: %v.", err)
+		return nil, fmt.Errorf("Show %ss fails, error when opening db: %v.", stt, err)
 	}
 	}
 	defer p.db.Close()
 	defer p.db.Close()
-	return p.db.Keys()
+	keys, err := p.db.Keys()
+	if err != nil {
+		return nil, fmt.Errorf("Show %ss fails, error when loading data from db: %v.", stt, err)
+	}
+	var (
+		v      string
+		vs     = &xsql.StreamInfo{}
+		result = make([]string, 0)
+	)
+	for _, k := range keys {
+		if ok, _ := p.db.Get(k, &v); ok {
+			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == st {
+				result = append(result, k)
+			}
+		}
+	}
+	return result, nil
 }
 }
 
 
-func (p *StreamProcessor) execDescribeStream(stmt *xsql.DescribeStreamStatement) (string, error) {
-	streamStmt, err := p.DescStream(stmt.Name)
+func (p *StreamProcessor) getStream(name string, st xsql.StreamType) (string, error) {
+	vs, err := xsql.GetDataSourceStatement(p.db, name)
+	if vs != nil && vs.StreamType == st {
+		return vs.Statement, nil
+	}
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
-	var buff bytes.Buffer
-	buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
-	for _, f := range streamStmt.StreamFields {
-		buff.WriteString(f.Name + "\t")
-		buff.WriteString(xsql.PrintFieldType(f.FieldType))
-		buff.WriteString("\n")
-	}
-	buff.WriteString("\n")
-	common.PrintMap(streamStmt.Options, &buff)
-	return buff.String(), err
+	return "", common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("%s %s is not found", xsql.StreamTypeMap[st], name))
 }
 }
 
 
-func (p *StreamProcessor) DescStream(name string) (*xsql.StreamStmt, error) {
-	err := p.db.Open()
+func (p *StreamProcessor) execDescribe(stmt xsql.NameNode, st xsql.StreamType) (string, error) {
+	streamStmt, err := p.DescStream(stmt.GetName(), st)
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Describe stream fails, error when opening db: %v.", err)
+		return "", err
 	}
 	}
-	defer p.db.Close()
-	var s1 string
-	f, _ := p.db.Get(name, &s1)
-	if !f {
-		return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Stream %s is not found.", name))
+	switch s := streamStmt.(type) {
+	case *xsql.StreamStmt:
+		var buff bytes.Buffer
+		buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
+		for _, f := range s.StreamFields {
+			buff.WriteString(f.Name + "\t")
+			buff.WriteString(xsql.PrintFieldType(f.FieldType))
+			buff.WriteString("\n")
+		}
+		buff.WriteString("\n")
+		common.PrintMap(s.Options, &buff)
+		return buff.String(), err
+	default:
+		return "%s", fmt.Errorf("Error resolving the %s %s, the data in db may be corrupted.", xsql.StreamTypeMap[st], stmt.GetName())
 	}
 	}
 
 
-	parser := xsql.NewParser(strings.NewReader(s1))
+}
+
+func (p *StreamProcessor) DescStream(name string, st xsql.StreamType) (xsql.Statement, error) {
+	statement, err := p.getStream(name, st)
+	if err != nil {
+		return nil, fmt.Errorf("Describe %s fails, %s.", xsql.StreamTypeMap[st], err)
+	}
+	parser := xsql.NewParser(strings.NewReader(statement))
 	stream, err := xsql.Language.Parse(parser)
 	stream, err := xsql.Language.Parse(parser)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	streamStmt, ok := stream.(*xsql.StreamStmt)
-	if !ok {
-		return nil, fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
-	}
-	return streamStmt, nil
+	return stream, nil
 }
 }
 
 
-func (p *StreamProcessor) execExplainStream(stmt *xsql.ExplainStreamStatement) (string, error) {
-	err := p.db.Open()
+func (p *StreamProcessor) execExplain(stmt xsql.NameNode, st xsql.StreamType) (string, error) {
+	_, err := p.getStream(stmt.GetName(), st)
 	if err != nil {
 	if err != nil {
-		return "", fmt.Errorf("Explain stream fails, error when opening db: %v.", err)
-	}
-	defer p.db.Close()
-	var s string
-	f, _ := p.db.Get(stmt.Name, &s)
-	if !f {
-		return "", fmt.Errorf("Stream %s is not found.", stmt.Name)
+		return "", fmt.Errorf("Explain %s fails, %s.", xsql.StreamTypeMap[st], err)
 	}
 	}
 	return "TO BE SUPPORTED", nil
 	return "TO BE SUPPORTED", nil
 }
 }
 
 
-func (p *StreamProcessor) execDropStream(stmt *xsql.DropStreamStatement) (string, error) {
-	s, err := p.DropStream(stmt.Name)
+func (p *StreamProcessor) execDrop(stmt xsql.NameNode, st xsql.StreamType) (string, error) {
+	s, err := p.DropStream(stmt.GetName(), st)
 	if err != nil {
 	if err != nil {
-		return s, fmt.Errorf("Drop stream fails: %s.", err)
+		return s, fmt.Errorf("Drop %s fails: %s.", xsql.StreamTypeMap[st], err)
 	}
 	}
 	return s, nil
 	return s, nil
 }
 }
 
 
-func (p *StreamProcessor) DropStream(name string) (string, error) {
-	err := p.db.Open()
+func (p *StreamProcessor) DropStream(name string, st xsql.StreamType) (string, error) {
+	defer p.db.Close()
+	_, err := p.getStream(name, st)
+	if err != nil {
+		return "", err
+	}
+
+	err = p.db.Open()
 	if err != nil {
 	if err != nil {
 		return "", fmt.Errorf("error when opening db: %v", err)
 		return "", fmt.Errorf("error when opening db: %v", err)
 	}
 	}
@@ -204,7 +253,7 @@ func (p *StreamProcessor) DropStream(name string) (string, error) {
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	} else {
 	} else {
-		return fmt.Sprintf("Stream %s is dropped.", name), nil
+		return fmt.Sprintf("%s %s is dropped.", strings.Title(xsql.StreamTypeMap[st]), name), nil
 	}
 	}
 }
 }
 
 

+ 41 - 0
xsql/util.go

@@ -3,6 +3,8 @@ package xsql
 import (
 import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
+	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/common/kv"
 	"strings"
 	"strings"
 )
 )
 
 
@@ -124,3 +126,42 @@ func GetStatementFromSql(sql string) (*SelectStatement, error) {
 		}
 		}
 	}
 	}
 }
 }
+
+type StreamInfo struct {
+	StreamType StreamType `json:"streamType"`
+	Statement  string     `json:"statement"`
+}
+
+func GetDataSourceStatement(m kv.KeyValue, name string) (*StreamInfo, error) {
+	var (
+		v  string
+		vs = &StreamInfo{}
+	)
+	err := m.Open()
+	if err != nil {
+		return nil, fmt.Errorf("error when opening db: %v", err)
+	}
+	defer m.Close()
+	if ok, _ := m.Get(name, &v); ok {
+		if err := json.Unmarshal([]byte(v), vs); err != nil {
+			return nil, fmt.Errorf("error unmarshall %s, the data in db may be corrupted", name)
+		} else {
+			return vs, nil
+		}
+	}
+	return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("%s is not found", name))
+}
+
+func GetDataSource(m kv.KeyValue, name string) (stmt *StreamStmt, err error) {
+	info, err := GetDataSourceStatement(m, name)
+	if err != nil {
+		return nil, err
+	}
+	parser := NewParser(strings.NewReader(info.Statement))
+	stream, err := Language.Parse(parser)
+	stmt, ok := stream.(*StreamStmt)
+	if !ok {
+		err = fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
+	}
+	return
+}

+ 5 - 5
xsql/xsql_manager.go

@@ -42,22 +42,22 @@ func init() {
 	})
 	})
 
 
 	Language.Handle(CREATE, func(p *Parser) (statement Statement, e error) {
 	Language.Handle(CREATE, func(p *Parser) (statement Statement, e error) {
-		return p.ParseCreateStreamStmt()
+		return p.ParseCreateStmt()
 	})
 	})
 
 
 	Language.Handle(SHOW, func(p *Parser) (statement Statement, e error) {
 	Language.Handle(SHOW, func(p *Parser) (statement Statement, e error) {
-		return p.parseShowStreamsStmt()
+		return p.parseShowStmt()
 	})
 	})
 
 
 	Language.Handle(EXPLAIN, func(p *Parser) (statement Statement, e error) {
 	Language.Handle(EXPLAIN, func(p *Parser) (statement Statement, e error) {
-		return p.parseExplainStreamsStmt()
+		return p.parseExplainStmt()
 	})
 	})
 
 
 	Language.Handle(DESCRIBE, func(p *Parser) (statement Statement, e error) {
 	Language.Handle(DESCRIBE, func(p *Parser) (statement Statement, e error) {
-		return p.parseDescribeStreamStmt()
+		return p.parseDescribeStmt()
 	})
 	})
 
 
 	Language.Handle(DROP, func(p *Parser) (statement Statement, e error) {
 	Language.Handle(DROP, func(p *Parser) (statement Statement, e error) {
-		return p.parseDropStreamsStmt()
+		return p.parseDropStmt()
 	})
 	})
 }
 }

+ 45 - 1
xsql/xsql_parser_tree_test.go

@@ -29,16 +29,37 @@ func TestParser_ParseTree(t *testing.T) {
 				},
 				},
 			},
 			},
 		},
 		},
+		{
+			s: `CREATE TABLE demo (
+					USERID BIGINT,
+				) WITH (DATASOURCE="users", FORMAT="JSON", KEY="USERID");`,
+			stmt: &StreamStmt{
+				Name: StreamName("demo"),
+				StreamFields: []StreamField{
+					{Name: "USERID", FieldType: &BasicType{Type: BIGINT}},
+				},
+				Options: map[string]string{
+					"DATASOURCE": "users",
+					"FORMAT":     "JSON",
+					"KEY":        "USERID",
+				},
+				StreamType: TypeTable,
+			},
+		},
 
 
 		{
 		{
 			s:    `SHOW STREAMS`,
 			s:    `SHOW STREAMS`,
 			stmt: &ShowStreamsStatement{},
 			stmt: &ShowStreamsStatement{},
 		},
 		},
+		{
+			s:    `SHOW TABLES`,
+			stmt: &ShowTablesStatement{},
+		},
 
 
 		{
 		{
 			s:    `SHOW STREAMSf`,
 			s:    `SHOW STREAMSf`,
 			stmt: nil,
 			stmt: nil,
-			err:  `found "STREAMSf", expected keyword streams.`,
+			err:  `found "STREAMSf", expected keyword streams or tables.`,
 		},
 		},
 
 
 		{
 		{
@@ -70,6 +91,29 @@ func TestParser_ParseTree(t *testing.T) {
 			},
 			},
 			err: ``,
 			err: ``,
 		},
 		},
+		{
+			s: `DESCRIBE TABLE demo`,
+			stmt: &DescribeTableStatement{
+				Name: "demo",
+			},
+			err: ``,
+		},
+
+		{
+			s: `EXPLAIN TABLE demo1`,
+			stmt: &ExplainTableStatement{
+				Name: "demo1",
+			},
+			err: ``,
+		},
+
+		{
+			s: `DROP TABLE demo1`,
+			stmt: &DropTableStatement{
+				Name: "demo1",
+			},
+			err: ``,
+		},
 	}
 	}
 
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))

+ 1 - 1
xsql/xsql_stream_test.go

@@ -398,7 +398,7 @@ func TestParser_ParseCreateStream(t *testing.T) {
 
 
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
 	for i, tt := range tests {
 	for i, tt := range tests {
-		stmt, err := NewParser(strings.NewReader(tt.s)).ParseCreateStreamStmt()
+		stmt, err := NewParser(strings.NewReader(tt.s)).ParseCreateStmt()
 		if !reflect.DeepEqual(tt.err, errstring(err)) {
 		if !reflect.DeepEqual(tt.err, errstring(err)) {
 			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
 			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.s, tt.err, err)
 		} else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) {
 		} else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) {

+ 8 - 0
xstream/api/stream.go

@@ -63,6 +63,14 @@ type Source interface {
 	Closable
 	Closable
 }
 }
 
 
+type TableSource interface {
+	// Load the data at batch
+	Load(ctx StreamContext) ([]SourceTuple, error)
+	//Called during initialization. Configure the source with the data source(e.g. topic for mqtt) and the properties
+	//read from the yaml
+	Configure(datasource string, props map[string]interface{}) error
+}
+
 type Sink interface {
 type Sink interface {
 	//Should be sync function for normal case. The container will run it in go func
 	//Should be sync function for normal case. The container will run it in go func
 	Open(ctx StreamContext) error
 	Open(ctx StreamContext) error

+ 57 - 4
xstream/cli/main.go

@@ -134,7 +134,7 @@ func main() {
 		{
 		{
 			Name:    "create",
 			Name:    "create",
 			Aliases: []string{"create"},
 			Aliases: []string{"create"},
-			Usage:   "create stream $stream_name | create stream $stream_name -f $stream_def_file | create rule $rule_name $rule_json | create rule $rule_name -f $rule_def_file | create plugin $plugin_type $plugin_name $plugin_json | create plugin $plugin_type $plugin_name -f $plugin_def_file",
+			Usage:   "create stream $stream_name | create stream $stream_name -f $stream_def_file | create table $table_name | create table $table_name -f $table_def_file| create rule $rule_name $rule_json | create rule $rule_name -f $rule_def_file | create plugin $plugin_type $plugin_name $plugin_json | create plugin $plugin_type $plugin_name -f $plugin_def_file",
 
 
 			Subcommands: []cli.Command{
 			Subcommands: []cli.Command{
 				{
 				{
@@ -165,6 +165,33 @@ func main() {
 					},
 					},
 				},
 				},
 				{
 				{
+					Name:  "table",
+					Usage: "create table $table_name [-f table_def_file]",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name:     "file, f",
+							Usage:    "the location of table definition file",
+							FilePath: "/home/mytable.txt",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						sfile := c.String("file")
+						if sfile != "" {
+							if stream, err := readDef(sfile, "table"); err != nil {
+								fmt.Printf("%s", err)
+								return nil
+							} else {
+								args := strings.Join([]string{"CREATE TABLE ", string(stream)}, " ")
+								streamProcess(client, args)
+								return nil
+							}
+						} else {
+							streamProcess(client, "")
+							return nil
+						}
+					},
+				},
+				{
 					Name:  "rule",
 					Name:  "rule",
 					Usage: "create rule $rule_name [$rule_json | -f rule_def_file]",
 					Usage: "create rule $rule_name [$rule_json | -f rule_def_file]",
 					Flags: []cli.Flag{
 					Flags: []cli.Flag{
@@ -276,7 +303,7 @@ func main() {
 		{
 		{
 			Name:    "describe",
 			Name:    "describe",
 			Aliases: []string{"describe"},
 			Aliases: []string{"describe"},
-			Usage:   "describe stream $stream_name | describe rule $rule_name | describe plugin $plugin_type $plugin_name",
+			Usage:   "describe stream $stream_name | describe table $table_name | describe rule $rule_name | describe plugin $plugin_type $plugin_name",
 			Subcommands: []cli.Command{
 			Subcommands: []cli.Command{
 				{
 				{
 					Name:  "stream",
 					Name:  "stream",
@@ -288,6 +315,15 @@ func main() {
 					},
 					},
 				},
 				},
 				{
 				{
+					Name:  "table",
+					Usage: "describe table $table_name",
+					//Flags: nflag,
+					Action: func(c *cli.Context) error {
+						streamProcess(client, "")
+						return nil
+					},
+				},
+				{
 					Name:  "rule",
 					Name:  "rule",
 					Usage: "describe rule $rule_name",
 					Usage: "describe rule $rule_name",
 					Action: func(c *cli.Context) error {
 					Action: func(c *cli.Context) error {
@@ -364,7 +400,7 @@ func main() {
 		{
 		{
 			Name:    "drop",
 			Name:    "drop",
 			Aliases: []string{"drop"},
 			Aliases: []string{"drop"},
-			Usage:   "drop stream $stream_name | drop rule $rule_name | drop plugin $plugin_type $plugin_name -r $stop",
+			Usage:   "drop stream $stream_name | drop table $table_name |drop rule $rule_name | drop plugin $plugin_type $plugin_name -r $stop",
 			Subcommands: []cli.Command{
 			Subcommands: []cli.Command{
 				{
 				{
 					Name:  "stream",
 					Name:  "stream",
@@ -376,6 +412,15 @@ func main() {
 					},
 					},
 				},
 				},
 				{
 				{
+					Name:  "table",
+					Usage: "drop table $table_name",
+					//Flags: nflag,
+					Action: func(c *cli.Context) error {
+						streamProcess(client, "")
+						return nil
+					},
+				},
+				{
 					Name:  "rule",
 					Name:  "rule",
 					Usage: "drop rule $rule_name",
 					Usage: "drop rule $rule_name",
 					//Flags: nflag,
 					//Flags: nflag,
@@ -444,7 +489,7 @@ func main() {
 		{
 		{
 			Name:    "show",
 			Name:    "show",
 			Aliases: []string{"show"},
 			Aliases: []string{"show"},
-			Usage:   "show streams | show rules | show plugins $plugin_type",
+			Usage:   "show streams | show tables | show rules | show plugins $plugin_type",
 
 
 			Subcommands: []cli.Command{
 			Subcommands: []cli.Command{
 				{
 				{
@@ -456,6 +501,14 @@ func main() {
 					},
 					},
 				},
 				},
 				{
 				{
+					Name:  "tables",
+					Usage: "show tables",
+					Action: func(c *cli.Context) error {
+						streamProcess(client, "")
+						return nil
+					},
+				},
+				{
 					Name:  "rules",
 					Name:  "rules",
 					Usage: "show rules",
 					Usage: "show rules",
 					Action: func(c *cli.Context) error {
 					Action: func(c *cli.Context) error {

+ 85 - 0
xstream/extensions/file_source.go

@@ -0,0 +1,85 @@
+package extensions
+
+import (
+	"errors"
+	"fmt"
+	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/xstream/api"
+	"os"
+	"path"
+	"path/filepath"
+)
+
+type FileType string
+
+const (
+	JSON_TYPE FileType = "json"
+)
+
+var fileTypes = map[FileType]bool{
+	JSON_TYPE: true,
+}
+
+type FileSourceConfig struct {
+	FileType FileType `json:"fileType"`
+	Path     string   `json:"Path"`
+}
+
+// The BATCH to load data from file at once
+type FileSource struct {
+	file   string
+	config *FileSourceConfig
+}
+
+func (fs *FileSource) Configure(fileName string, props map[string]interface{}) error {
+	cfg := &FileSourceConfig{}
+	err := common.MapToStruct(props, cfg)
+	if err != nil {
+		return fmt.Errorf("read properties %v fail with error: %v", props, err)
+	}
+	if cfg.FileType == "" {
+		return errors.New("missing or invalid property fileType, must be 'json'")
+	}
+	if _, ok := fileTypes[cfg.FileType]; !ok {
+		return fmt.Errorf("invalid property fileType: %s", cfg.FileType)
+	}
+	if cfg.Path == "" {
+		return errors.New("missing property Path")
+	}
+	if fileName == "" {
+		return errors.New("source must be specified")
+	}
+	if !filepath.IsAbs(cfg.Path) {
+		cfg.Path, err = common.GetLoc("/" + cfg.Path)
+		if err != nil {
+			return fmt.Errorf("invalid path %s", cfg.Path)
+		}
+	}
+	fs.file = path.Join(cfg.Path, fileName)
+
+	if fi, err := os.Stat(fs.file); err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("file %s not exist", fs.file)
+		} else if !fi.Mode().IsRegular() {
+			return fmt.Errorf("file %s is not a regular file", fs.file)
+		}
+	}
+	fs.config = cfg
+	return nil
+}
+
+func (fs *FileSource) Load(ctx api.StreamContext) ([]api.SourceTuple, error) {
+	switch fs.config.FileType {
+	case JSON_TYPE:
+		ctx.GetLogger().Debugf("Start to load from file %s", fs.file)
+		resultMap := make([]map[string]interface{}, 0)
+		err := common.ReadJsonUnmarshal(fs.file, &resultMap)
+		result := make([]api.SourceTuple, len(resultMap))
+		for i, m := range resultMap {
+			result[i] = api.NewDefaultSourceTuple(m, nil)
+		}
+		ctx.GetLogger().Debugf("loaded %s, check error %s", fs.file, err)
+		return result, err
+	}
+	return nil, fmt.Errorf("invalid file type %s", fs.config.FileType)
+}

+ 141 - 0
xstream/nodes/join_align_node.go

@@ -0,0 +1,141 @@
+package nodes
+
+import (
+	"errors"
+	"fmt"
+	"github.com/emqx/kuiper/xsql"
+	"github.com/emqx/kuiper/xstream/api"
+)
+
+/*
+ *  This node will block the stream and buffer all the table tuples. Once buffered, it will combine the later input with the buffer
+ *  The input for batch table MUST be *WindowTuples
+ */
+type JoinAlignNode struct {
+	*defaultSinkNode
+	statManager StatManager
+	emitters    []string
+	// states
+	batch xsql.WindowTuplesSet
+}
+
+const StreamInputsKey = "$$streamInputs"
+
+func NewJoinAlignNode(name string, emitters []string, options *api.RuleOption) (*JoinAlignNode, error) {
+	n := &JoinAlignNode{
+		emitters: emitters,
+		batch:    make([]xsql.WindowTuples, len(emitters)),
+	}
+	n.defaultSinkNode = &defaultSinkNode{
+		input: make(chan interface{}, options.BufferLength),
+		defaultNode: &defaultNode{
+			outputs:   make(map[string]chan<- interface{}),
+			name:      name,
+			sendError: options.SendError,
+		},
+	}
+	return n, nil
+}
+
+func (n *JoinAlignNode) Exec(ctx api.StreamContext, errCh chan<- error) {
+	n.ctx = ctx
+	log := ctx.GetLogger()
+	log.Debugf("JoinAlignNode %s is started", n.name)
+
+	if len(n.outputs) <= 0 {
+		go func() { errCh <- fmt.Errorf("no output channel found") }()
+		return
+	}
+	stats, err := NewStatManager("op", ctx)
+	if err != nil {
+		go func() { errCh <- err }()
+		return
+	}
+	n.statManager = stats
+	var inputs []xsql.WindowTuplesSet
+	batchLen := len(n.emitters)
+	go func() {
+		for {
+			log.Debugf("JoinAlignNode %s is looping", n.name)
+			select {
+			// process incoming item
+			case item, opened := <-n.input:
+				processed := false
+				if item, processed = n.preprocess(item); processed {
+					break
+				}
+				n.statManager.IncTotalRecordsIn()
+				n.statManager.ProcessTimeStart()
+				if !opened {
+					n.statManager.IncTotalExceptions()
+					break
+				}
+				switch d := item.(type) {
+				case error:
+					n.Broadcast(d)
+					n.statManager.IncTotalExceptions()
+				case *xsql.Tuple:
+					log.Debugf("JoinAlignNode receive tuple input %s", d)
+					var temp xsql.WindowTuplesSet = make([]xsql.WindowTuples, 0)
+					temp = temp.AddTuple(d)
+					if batchLen == 0 {
+						n.alignBatch(ctx, temp)
+					} else {
+						log.Debugf("JoinAlignNode buffer input")
+						inputs = append(inputs, temp)
+						ctx.PutState(StreamInputsKey, inputs)
+						n.statManager.SetBufferLength(int64(len(n.input)))
+					}
+				case xsql.WindowTuplesSet:
+					log.Debugf("JoinAlignNode receive window input %s", d)
+					if batchLen == 0 {
+						n.alignBatch(ctx, d)
+					} else {
+						log.Debugf("JoinAlignNode buffer input")
+						inputs = append(inputs, d)
+						ctx.PutState(StreamInputsKey, inputs)
+						n.statManager.SetBufferLength(int64(len(n.input)))
+					}
+				case xsql.WindowTuples:
+					log.Debugf("JoinAlignNode receive batch source %s", d)
+					if batchLen <= 0 {
+						errCh <- errors.New("Join receives too many table content")
+					}
+					n.batch[len(n.emitters)-batchLen] = d
+					batchLen -= 1
+					if batchLen == 0 {
+						for _, w := range inputs {
+							n.alignBatch(ctx, w)
+						}
+					}
+				default:
+					n.Broadcast(fmt.Errorf("run JoinAlignNode error: invalid input type but got %[1]T(%[1]v)", d))
+					n.statManager.IncTotalExceptions()
+				}
+			case <-ctx.Done():
+				log.Infoln("Cancelling join align node....")
+				return
+			}
+		}
+	}()
+}
+
+func (n *JoinAlignNode) alignBatch(ctx api.StreamContext, w xsql.WindowTuplesSet) {
+	n.statManager.ProcessTimeStart()
+	w = append(w, n.batch...)
+	n.Broadcast(w)
+	n.statManager.ProcessTimeEnd()
+	n.statManager.IncTotalRecordsOut()
+	n.statManager.SetBufferLength(int64(len(n.input)))
+	ctx.PutState(StreamInputsKey, nil)
+}
+
+func (n *JoinAlignNode) GetMetrics() [][]interface{} {
+	if n.statManager != nil {
+		return [][]interface{}{
+			n.statManager.GetMetrics(),
+		}
+	} else {
+		return nil
+	}
+}

+ 56 - 0
xstream/nodes/node.go

@@ -2,8 +2,11 @@ package nodes
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/checkpoints"
 	"github.com/emqx/kuiper/xstream/checkpoints"
+	"github.com/go-yaml/yaml"
+	"strings"
 	"sync"
 	"sync"
 )
 )
 
 
@@ -17,6 +20,16 @@ type OperatorNode interface {
 	SetBarrierHandler(checkpoints.BarrierHandler)
 	SetBarrierHandler(checkpoints.BarrierHandler)
 }
 }
 
 
+type DataSourceNode interface {
+	api.Emitter
+	Open(ctx api.StreamContext, errCh chan<- error)
+	GetName() string
+	GetMetrics() [][]interface{}
+	Broadcast(val interface{}) error
+	GetStreamContext() api.StreamContext
+	SetQos(api.Qos)
+}
+
 type defaultNode struct {
 type defaultNode struct {
 	name         string
 	name         string
 	outputs      map[string]chan<- interface{}
 	outputs      map[string]chan<- interface{}
@@ -142,3 +155,46 @@ func (o *defaultSinkNode) preprocess(data interface{}) (interface{}, bool) {
 	}
 	}
 	return data, false
 	return data, false
 }
 }
+
+func getSourceConf(ctx api.StreamContext, sourceType string, options map[string]string) map[string]interface{} {
+	confkey := options["CONF_KEY"]
+	logger := ctx.GetLogger()
+	confPath := "sources/" + sourceType + ".yaml"
+	if sourceType == "mqtt" {
+		confPath = "mqtt_source.yaml"
+	}
+	conf, err := common.LoadConf(confPath)
+	props := make(map[string]interface{})
+	if err == nil {
+		cfg := make(map[interface{}]interface{})
+		if err := yaml.Unmarshal(conf, &cfg); err != nil {
+			logger.Warnf("fail to parse yaml for source %s. Return an empty configuration", sourceType)
+		} else {
+			def, ok := cfg["default"]
+			if !ok {
+				logger.Warnf("default conf is not found", confkey)
+			} else {
+				if def1, ok1 := def.(map[interface{}]interface{}); ok1 {
+					props = common.ConvertMap(def1)
+				}
+				if c, ok := cfg[confkey]; ok {
+					if c1, ok := c.(map[interface{}]interface{}); ok {
+						c2 := common.ConvertMap(c1)
+						for k, v := range c2 {
+							props[k] = v
+						}
+					}
+				}
+			}
+		}
+	} else {
+		logger.Warnf("config file %s.yaml is not loaded properly. Return an empty configuration", sourceType)
+	}
+	f, ok := options["FORMAT"]
+	if !ok || f == "" {
+		f = "json"
+	}
+	props["format"] = strings.ToLower(f)
+	logger.Debugf("get conf for %s with conf key %s: %v", sourceType, confkey, props)
+	return props
+}

+ 1 - 46
xstream/nodes/source_node.go

@@ -6,8 +6,6 @@ import (
 	"github.com/emqx/kuiper/xsql"
 	"github.com/emqx/kuiper/xsql"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/extensions"
 	"github.com/emqx/kuiper/xstream/extensions"
-	"github.com/go-yaml/yaml"
-	"strings"
 	"sync"
 	"sync"
 )
 )
 
 
@@ -58,7 +56,7 @@ func (m *SourceNode) Open(ctx api.StreamContext, errCh chan<- error) {
 	logger := ctx.GetLogger()
 	logger := ctx.GetLogger()
 	logger.Infof("open source node %s with option %v", m.name, m.options)
 	logger.Infof("open source node %s with option %v", m.name, m.options)
 	go func() {
 	go func() {
-		props := m.getConf(ctx)
+		props := getSourceConf(ctx, m.sourceType, m.options)
 		if c, ok := props["concurrency"]; ok {
 		if c, ok := props["concurrency"]; ok {
 			if t, err := common.ToInt(c); err != nil || t <= 0 {
 			if t, err := common.ToInt(c); err != nil || t <= 0 {
 				logger.Warnf("invalid type for concurrency property, should be positive integer but found %t", c)
 				logger.Warnf("invalid type for concurrency property, should be positive integer but found %t", c)
@@ -206,46 +204,3 @@ func (m *SourceNode) close(ctx api.StreamContext, logger api.Logger) {
 		}
 		}
 	}
 	}
 }
 }
-
-func (m *SourceNode) getConf(ctx api.StreamContext) map[string]interface{} {
-	confkey := m.options["CONF_KEY"]
-	logger := ctx.GetLogger()
-	confPath := "sources/" + m.sourceType + ".yaml"
-	if m.sourceType == "mqtt" {
-		confPath = "mqtt_source.yaml"
-	}
-	conf, err := common.LoadConf(confPath)
-	props := make(map[string]interface{})
-	if err == nil {
-		cfg := make(map[interface{}]interface{})
-		if err := yaml.Unmarshal(conf, &cfg); err != nil {
-			logger.Warnf("fail to parse yaml for source %s. Return an empty configuration", m.sourceType)
-		} else {
-			def, ok := cfg["default"]
-			if !ok {
-				logger.Warnf("default conf is not found", confkey)
-			} else {
-				if def1, ok1 := def.(map[interface{}]interface{}); ok1 {
-					props = common.ConvertMap(def1)
-				}
-				if c, ok := cfg[confkey]; ok {
-					if c1, ok := c.(map[interface{}]interface{}); ok {
-						c2 := common.ConvertMap(c1)
-						for k, v := range c2 {
-							props[k] = v
-						}
-					}
-				}
-			}
-		}
-	} else {
-		logger.Warnf("config file %s.yaml is not loaded properly. Return an empty configuration", m.sourceType)
-	}
-	f, ok := m.options["FORMAT"]
-	if !ok || f == "" {
-		f = "json"
-	}
-	props["format"] = strings.ToLower(f)
-	logger.Debugf("get conf for %s with conf key %s: %v", m.sourceType, confkey, props)
-	return props
-}

+ 2 - 2
xstream/nodes/source_node_test.go

@@ -32,7 +32,7 @@ func TestGetConf_Apply(t *testing.T) {
 	})
 	})
 	contextLogger := common.Log.WithField("rule", "test")
 	contextLogger := common.Log.WithField("rule", "test")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
-	conf := n.getConf(ctx)
+	conf := getSourceConf(ctx, n.sourceType, n.options)
 	if !reflect.DeepEqual(result, conf) {
 	if !reflect.DeepEqual(result, conf) {
 		t.Errorf("result mismatch:\n\nexp=%s\n\ngot=%s\n\n", result, conf)
 		t.Errorf("result mismatch:\n\nexp=%s\n\ngot=%s\n\n", result, conf)
 	}
 	}
@@ -55,7 +55,7 @@ func TestGetConfAndConvert_Apply(t *testing.T) {
 	})
 	})
 	contextLogger := common.Log.WithField("rule", "test")
 	contextLogger := common.Log.WithField("rule", "test")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
-	conf := n.getConf(ctx)
+	conf := getSourceConf(ctx, n.sourceType, n.options)
 	if !reflect.DeepEqual(result, conf) {
 	if !reflect.DeepEqual(result, conf) {
 		t.Errorf("result mismatch:\n\nexp=%s\n\ngot=%s\n\n", result, conf)
 		t.Errorf("result mismatch:\n\nexp=%s\n\ngot=%s\n\n", result, conf)
 		return
 		return

+ 99 - 0
xstream/nodes/table_node.go

@@ -0,0 +1,99 @@
+package nodes
+
+import (
+	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/xsql"
+	"github.com/emqx/kuiper/xstream/api"
+	"github.com/emqx/kuiper/xstream/extensions"
+)
+
+// Node for table source
+type TableNode struct {
+	*defaultNode
+	sourceType string
+	options    map[string]string
+}
+
+func NewTableNode(name string, options map[string]string) *TableNode {
+	t, ok := options["TYPE"]
+	if !ok {
+		t = "file"
+	}
+	return &TableNode{
+		sourceType: t,
+		defaultNode: &defaultNode{
+			name:        name,
+			outputs:     make(map[string]chan<- interface{}),
+			concurrency: 1,
+		},
+		options: options,
+	}
+}
+
+func (m *TableNode) Open(ctx api.StreamContext, errCh chan<- error) {
+	m.ctx = ctx
+	logger := ctx.GetLogger()
+	logger.Infof("open table node %s with option %v", m.name, m.options)
+	go func() {
+		props := getSourceConf(ctx, m.sourceType, m.options)
+		//TODO apply properties like concurrency
+		source, err := doGetTableSource(m.sourceType)
+		if err != nil {
+			m.drainError(errCh, err, ctx)
+			return
+		}
+		err = source.Configure(m.options["DATASOURCE"], props)
+		if err != nil {
+			m.drainError(errCh, err, ctx)
+			return
+		}
+		stats, err := NewStatManager("source", ctx)
+		if err != nil {
+			m.drainError(errCh, err, ctx)
+			return
+		}
+		m.statManagers = append(m.statManagers, stats)
+		stats.ProcessTimeStart()
+		if data, err := source.Load(ctx); err != nil {
+			stats.IncTotalExceptions()
+			stats.ProcessTimeEnd()
+			m.drainError(errCh, err, ctx)
+			return
+		} else {
+			stats.IncTotalRecordsIn()
+			stats.ProcessTimeEnd()
+			logger.Debugf("table node %s is sending result", m.name)
+			result := make([]*xsql.Tuple, len(data))
+			for i, t := range data {
+				tuple := &xsql.Tuple{Emitter: m.name, Message: t.Message(), Metadata: t.Meta(), Timestamp: common.GetNowInMilli()}
+				result[i] = tuple
+			}
+			m.doBroadcast(result)
+			stats.IncTotalRecordsOut()
+			logger.Debugf("table node %s has consumed all data", m.name)
+		}
+	}()
+}
+
+func (m *TableNode) drainError(errCh chan<- error, err error, ctx api.StreamContext) {
+	select {
+	case errCh <- err:
+	case <-ctx.Done():
+
+	}
+	return
+}
+
+func doGetTableSource(t string) (api.TableSource, error) {
+	var s api.TableSource
+	switch t {
+	case "file":
+		s = &extensions.FileSource{}
+	default: //TODO table source plugin
+		//s, err = plugins.GetTableSource(t)
+		//if err != nil {
+		//	return nil, err
+		//}
+	}
+	return s, nil
+}

+ 81 - 0
xstream/nodes/table_node_test.go

@@ -0,0 +1,81 @@
+package nodes
+
+import (
+	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/xsql"
+	"github.com/emqx/kuiper/xstream/contexts"
+	"github.com/emqx/kuiper/xstream/test"
+	"reflect"
+	"testing"
+)
+
// TestTableNode verifies that a TableNode backed by the "file" source loads
// the lookup.json fixture (CONF_KEY "test" points the file source at
// fvt_scripts, per etc/sources/file.yaml) and broadcasts it as one batch of
// xsql.Tuple. The clock is mocked so the tuple timestamps are deterministic.
func TestTableNode(t *testing.T) {
	// Freeze the mocked clock; all tuples created by the node should carry this time.
	test.ResetClock(1541152486000)
	var tests = []struct {
		name    string
		options map[string]string
		result  []*xsql.Tuple
	}{
		{ //0
			name: "test0",
			options: map[string]string{
				"TYPE":       "file",
				"DATASOURCE": "lookup.json",
				"CONF_KEY":   "test",
			},
			result: []*xsql.Tuple{
				{
					Emitter: "test0",
					Message: map[string]interface{}{
						"id":   float64(1541152486013),
						"name": "name1",
						"size": float64(2),
					},
					Timestamp: common.GetNowInMilli(),
				},
				{
					Emitter: "test0",
					Message: map[string]interface{}{
						"id":   float64(1541152487632),
						"name": "name2",
						"size": float64(6),
					},
					Timestamp: common.GetNowInMilli(),
				},
				{
					Emitter: "test0",
					Message: map[string]interface{}{
						"id":   float64(1541152489252),
						"name": "name3",
						"size": float64(4),
					},
					Timestamp: common.GetNowInMilli(),
				},
			},
		},
	}

	t.Logf("The test bucket size is %d.\n\n", len(tests))
	for i, tt := range tests {
		n := NewTableNode(tt.name, tt.options)
		// NOTE(review): both channels are unbuffered; this relies on Open
		// emitting exactly one batch (or one error) — confirm if Open changes.
		resultCh := make(chan interface{})
		errCh := make(chan error)
		contextLogger := common.Log.WithField("test", "test")
		ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
		n.AddOutput(resultCh, "test")
		n.Open(ctx, errCh)
		select {
		case err := <-errCh:
			t.Error(err)
		case d := <-resultCh:
			r, ok := d.([]*xsql.Tuple)
			if !ok {
				t.Errorf("%d. \nresult is not tuple list:got=%#v\n\n", i, d)
				break
			}
			if !reflect.DeepEqual(tt.result, r) {
				t.Errorf("%d. \nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.result, r)
			}
		}
	}
}

+ 428 - 0
xstream/operators/field_processor.go

@@ -0,0 +1,428 @@
+package operators
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/xsql"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
// defaultFieldProcessor holds the field validation/conversion logic shared by
// the stream preprocessor and the table data processors.
type defaultFieldProcessor struct {
	streamFields    []interface{} // pruned stream fields; each is *xsql.StreamField (typed) or string (schemaless)
	aliasFields     xsql.Fields   // fields with alias names, evaluated eagerly in processField
	timestampFormat string        // optional TIMESTAMP_FORMAT layout; empty means the JSISO default
	isBinary        bool          // true for binary stream formats, which carry exactly one field
}
+
+func (p *defaultFieldProcessor) processField(tuple *xsql.Tuple, fv *xsql.FunctionValuer) (map[string]interface{}, error) {
+	result := make(map[string]interface{})
+	if p.streamFields != nil {
+		for _, f := range p.streamFields {
+			switch sf := f.(type) {
+			case *xsql.StreamField:
+				if p.isBinary {
+					tuple.Message[sf.Name] = tuple.Message[common.DEFAULT_FIELD]
+				}
+				if e := p.addRecField(sf.FieldType, result, tuple.Message, sf.Name); e != nil {
+					return nil, e
+				}
+			case string: //schemaless
+				if p.isBinary {
+					result = tuple.Message
+				} else {
+					if m, ok := tuple.Message.Value(sf); ok {
+						result[sf] = m
+					}
+				}
+			}
+			if p.isBinary {
+				break //binary format should only have ONE field
+			}
+		}
+	} else {
+		result = tuple.Message
+	}
+	//If the field has alias name, then evaluate the alias field before transfer it to proceeding operators, and put it into result.
+	//Otherwise, the GROUP BY, ORDER BY statement cannot get the value.
+	for _, f := range p.aliasFields {
+		ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(tuple, fv)}
+		v := ve.Eval(f.Expr)
+		if e, ok := v.(error); ok {
+			return nil, e
+		} else {
+			result[f.AName] = v
+		}
+	}
+	return result, nil
+}
+
+func (p *defaultFieldProcessor) addRecField(ft xsql.FieldType, r map[string]interface{}, j xsql.Message, n string) error {
+	if t, ok := j.Value(n); ok {
+		v := reflect.ValueOf(t)
+		jtype := v.Kind()
+		switch st := ft.(type) {
+		case *xsql.BasicType:
+			switch st.Type {
+			case xsql.UNKNOWN:
+				return fmt.Errorf("invalid data type unknown defined for %s, please check the stream definition", t)
+			case xsql.BIGINT:
+				if jtype == reflect.Int {
+					r[n] = t.(int)
+				} else if jtype == reflect.Float64 {
+					if tt, ok1 := t.(float64); ok1 {
+						if tt > math.MaxInt64 {
+							r[n] = uint64(tt)
+						} else {
+							r[n] = int(tt)
+						}
+					}
+				} else if jtype == reflect.String {
+					if i, err := strconv.Atoi(t.(string)); err != nil {
+						return fmt.Errorf("invalid data type for %s, expect bigint but found %[2]T(%[2]v)", n, t)
+					} else {
+						r[n] = i
+					}
+				} else if jtype == reflect.Uint64 {
+					r[n] = t.(uint64)
+				} else {
+					return fmt.Errorf("invalid data type for %s, expect bigint but found %[2]T(%[2]v)", n, t)
+				}
+			case xsql.FLOAT:
+				if jtype == reflect.Float64 {
+					r[n] = t.(float64)
+				} else if jtype == reflect.String {
+					if f, err := strconv.ParseFloat(t.(string), 64); err != nil {
+						return fmt.Errorf("invalid data type for %s, expect float but found %[2]T(%[2]v)", n, t)
+					} else {
+						r[n] = f
+					}
+				} else {
+					return fmt.Errorf("invalid data type for %s, expect float but found %[2]T(%[2]v)", n, t)
+				}
+			case xsql.STRINGS:
+				if jtype == reflect.String {
+					r[n] = t.(string)
+				} else {
+					return fmt.Errorf("invalid data type for %s, expect string but found %[2]T(%[2]v)", n, t)
+				}
+			case xsql.DATETIME:
+				switch jtype {
+				case reflect.Int:
+					ai := t.(int64)
+					r[n] = common.TimeFromUnixMilli(ai)
+				case reflect.Float64:
+					ai := int64(t.(float64))
+					r[n] = common.TimeFromUnixMilli(ai)
+				case reflect.String:
+					if t, err := p.parseTime(t.(string)); err != nil {
+						return fmt.Errorf("invalid data type for %s, cannot convert to datetime: %s", n, err)
+					} else {
+						r[n] = t
+					}
+				default:
+					return fmt.Errorf("invalid data type for %s, expect datatime but find %[2]T(%[2]v)", n, t)
+				}
+			case xsql.BOOLEAN:
+				if jtype == reflect.Bool {
+					r[n] = t.(bool)
+				} else if jtype == reflect.String {
+					if i, err := strconv.ParseBool(t.(string)); err != nil {
+						return fmt.Errorf("invalid data type for %s, expect boolean but found %[2]T(%[2]v)", n, t)
+					} else {
+						r[n] = i
+					}
+				} else {
+					return fmt.Errorf("invalid data type for %s, expect boolean but found %[2]T(%[2]v)", n, t)
+				}
+			case xsql.BYTEA:
+				if jtype == reflect.String {
+					if b, err := base64.StdEncoding.DecodeString(t.(string)); err != nil {
+						return fmt.Errorf("invalid data type for %s, expect bytea but found %[2]T(%[2]v) which cannot base64 decode", n, t)
+					} else {
+						r[n] = b
+					}
+				} else if jtype == reflect.Slice {
+					if b, ok := t.([]byte); ok {
+						r[n] = b
+					} else {
+						return fmt.Errorf("invalid data type for %s, expect bytea but found %[2]T(%[2]v)", n, t)
+					}
+				}
+			default:
+				return fmt.Errorf("invalid data type for %s, it is not supported yet", st)
+			}
+		case *xsql.ArrayType:
+			var s []interface{}
+			if t == nil {
+				s = nil
+			} else if jtype == reflect.Slice {
+				s = t.([]interface{})
+			} else if jtype == reflect.String {
+				err := json.Unmarshal([]byte(t.(string)), &s)
+				if err != nil {
+					return fmt.Errorf("invalid data type for %s, expect array but found %[2]T(%[2]v)", n, t)
+				}
+			} else {
+				return fmt.Errorf("invalid data type for %s, expect array but found %[2]T(%[2]v)", n, t)
+			}
+
+			if tempArr, err := p.addArrayField(st, s); err != nil {
+				return fmt.Errorf("fail to parse field %s: %s", n, err)
+			} else {
+				r[n] = tempArr
+			}
+		case *xsql.RecType:
+			nextJ := make(map[string]interface{})
+			if t == nil {
+				nextJ = nil
+				r[n] = nextJ
+				return nil
+			} else if jtype == reflect.Map {
+				nextJ, ok = t.(map[string]interface{})
+				if !ok {
+					return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
+				}
+			} else if jtype == reflect.String {
+				err := json.Unmarshal([]byte(t.(string)), &nextJ)
+				if err != nil {
+					return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
+				}
+			} else {
+				return fmt.Errorf("invalid data type for %s, expect struct but found %[2]T(%[2]v)", n, t)
+			}
+			nextR := make(map[string]interface{})
+			for _, nextF := range st.StreamFields {
+				nextP := strings.ToLower(nextF.Name)
+				if e := p.addRecField(nextF.FieldType, nextR, nextJ, nextP); e != nil {
+					return e
+				}
+			}
+			r[n] = nextR
+		default:
+			return fmt.Errorf("unsupported type %T", st)
+		}
+		return nil
+	} else {
+		return fmt.Errorf("invalid data %s, field %s not found", j, n)
+	}
+}
+
+//ft must be xsql.ArrayType
+//side effect: r[p] will be set to the new array
+func (p *defaultFieldProcessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{}) (interface{}, error) {
+	if ft.FieldType != nil { //complex type array or struct
+		switch st := ft.FieldType.(type) { //Only two complex types supported here
+		case *xsql.ArrayType: //TODO handle array of array. Now the type is treated as interface{}
+			if srcSlice == nil {
+				return [][]interface{}(nil), nil
+			}
+			var s []interface{}
+			var tempSlice reflect.Value
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				if t == nil {
+					s = nil
+				} else if jtype == reflect.Slice || jtype == reflect.Array {
+					s = t.([]interface{})
+				} else if jtype == reflect.String {
+					err := json.Unmarshal([]byte(t.(string)), &s)
+					if err != nil {
+						return nil, fmt.Errorf("invalid data type for [%d], expect array but found %[2]T(%[2]v)", i, t)
+					}
+				} else {
+					return nil, fmt.Errorf("invalid data type for [%d], expect array but found %[2]T(%[2]v)", i, t)
+				}
+				if tempArr, err := p.addArrayField(st, s); err != nil {
+					return nil, err
+				} else {
+					if !tempSlice.IsValid() {
+						s := reflect.TypeOf(tempArr)
+						tempSlice = reflect.MakeSlice(reflect.SliceOf(s), 0, 0)
+					}
+					tempSlice = reflect.Append(tempSlice, reflect.ValueOf(tempArr))
+				}
+			}
+			return tempSlice.Interface(), nil
+		case *xsql.RecType:
+			if srcSlice == nil {
+				return []map[string]interface{}(nil), nil
+			}
+			tempSlice := make([]map[string]interface{}, 0)
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				j := make(map[string]interface{})
+				var ok bool
+				if t == nil {
+					j = nil
+					tempSlice = append(tempSlice, j)
+					continue
+				} else if jtype == reflect.Map {
+					j, ok = t.(map[string]interface{})
+					if !ok {
+						return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
+					}
+
+				} else if jtype == reflect.String {
+					err := json.Unmarshal([]byte(t.(string)), &j)
+					if err != nil {
+						return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
+					}
+				} else {
+					return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
+				}
+				r := make(map[string]interface{})
+				for _, f := range st.StreamFields {
+					n := f.Name
+					if e := p.addRecField(f.FieldType, r, j, n); e != nil {
+						return nil, e
+					}
+				}
+				tempSlice = append(tempSlice, r)
+			}
+			return tempSlice, nil
+		default:
+			return nil, fmt.Errorf("unsupported type %T", st)
+		}
+	} else { //basic type
+		switch ft.Type {
+		case xsql.UNKNOWN:
+			return nil, fmt.Errorf("invalid data type unknown defined for %s, please checke the stream definition", srcSlice)
+		case xsql.BIGINT:
+			if srcSlice == nil {
+				return []int(nil), nil
+			}
+			tempSlice := make([]int, 0)
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				if jtype == reflect.Float64 {
+					tempSlice = append(tempSlice, int(t.(float64)))
+				} else if jtype == reflect.String {
+					if v, err := strconv.Atoi(t.(string)); err != nil {
+						return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
+					} else {
+						tempSlice = append(tempSlice, v)
+					}
+				} else {
+					return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
+				}
+			}
+			return tempSlice, nil
+		case xsql.FLOAT:
+			if srcSlice == nil {
+				return []float64(nil), nil
+			}
+			tempSlice := make([]float64, 0)
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				if jtype == reflect.Float64 {
+					tempSlice = append(tempSlice, t.(float64))
+				} else if jtype == reflect.String {
+					if f, err := strconv.ParseFloat(t.(string), 64); err != nil {
+						return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
+					} else {
+						tempSlice = append(tempSlice, f)
+					}
+				} else {
+					return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
+				}
+			}
+			return tempSlice, nil
+		case xsql.STRINGS:
+			if srcSlice == nil {
+				return []string(nil), nil
+			}
+			tempSlice := make([]string, 0)
+			for i, t := range srcSlice {
+				if reflect.ValueOf(t).Kind() == reflect.String {
+					tempSlice = append(tempSlice, t.(string))
+				} else {
+					return nil, fmt.Errorf("invalid data type for [%d], expect string but found %[2]T(%[2]v)", i, t)
+				}
+			}
+			return tempSlice, nil
+		case xsql.DATETIME:
+			if srcSlice == nil {
+				return []time.Time(nil), nil
+			}
+			tempSlice := make([]time.Time, 0)
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				switch jtype {
+				case reflect.Int:
+					ai := t.(int64)
+					tempSlice = append(tempSlice, common.TimeFromUnixMilli(ai))
+				case reflect.Float64:
+					ai := int64(t.(float64))
+					tempSlice = append(tempSlice, common.TimeFromUnixMilli(ai))
+				case reflect.String:
+					if ai, err := p.parseTime(t.(string)); err != nil {
+						return nil, fmt.Errorf("invalid data type for %s, cannot convert to datetime: %[2]T(%[2]v)", t, err)
+					} else {
+						tempSlice = append(tempSlice, ai)
+					}
+				default:
+					return nil, fmt.Errorf("invalid data type for [%d], expect datetime but found %[2]T(%[2]v)", i, t)
+				}
+			}
+			return tempSlice, nil
+		case xsql.BOOLEAN:
+			if srcSlice == nil {
+				return []bool(nil), nil
+			}
+			tempSlice := make([]bool, 0)
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				if jtype == reflect.Bool {
+					tempSlice = append(tempSlice, t.(bool))
+				} else if jtype == reflect.String {
+					if v, err := strconv.ParseBool(t.(string)); err != nil {
+						return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %[2]T(%[2]v)", i, t)
+					} else {
+						tempSlice = append(tempSlice, v)
+					}
+				} else {
+					return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %[2]T(%[2]v)", i, t)
+				}
+			}
+			return tempSlice, nil
+		case xsql.BYTEA:
+			if srcSlice == nil {
+				return [][]byte(nil), nil
+			}
+			tempSlice := make([][]byte, 0)
+			for i, t := range srcSlice {
+				jtype := reflect.ValueOf(t).Kind()
+				if jtype == reflect.String {
+					if b, err := base64.StdEncoding.DecodeString(t.(string)); err != nil {
+						return nil, fmt.Errorf("invalid data type for [%d], expect bytea but found %[2]T(%[2]v) which cannot base64 decode", i, t)
+					} else {
+						tempSlice = append(tempSlice, b)
+					}
+				} else if jtype == reflect.Slice {
+					if b, ok := t.([]byte); ok {
+						tempSlice = append(tempSlice, b)
+					} else {
+						return nil, fmt.Errorf("invalid data type for [%d], expect bytea but found %[2]T(%[2]v)", i, t)
+					}
+				}
+			}
+			return tempSlice, nil
+		default:
+			return nil, fmt.Errorf("invalid data type for %T", ft.Type)
+		}
+	}
+}
+
+func (p *defaultFieldProcessor) parseTime(s string) (time.Time, error) {
+	if p.timestampFormat != "" {
+		return common.ParseTime(s, p.timestampFormat)
+	} else {
+		return time.Parse(common.JSISO, s)
+	}
+}

+ 18 - 0
xstream/operators/filter_operator.go

@@ -33,6 +33,24 @@ func (p *FilterOp) Apply(ctx api.StreamContext, data interface{}, fv *xsql.Funct
 		default:
 		default:
 			return fmt.Errorf("run Where error: invalid condition that returns non-bool value %[1]T(%[1]v)", r)
 			return fmt.Errorf("run Where error: invalid condition that returns non-bool value %[1]T(%[1]v)", r)
 		}
 		}
+	case xsql.WindowTuples: // For batch table, will return the batch
+		var f []xsql.Tuple
+		for _, t := range input.Tuples {
+			ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(&t, fv)}
+			result := ve.Eval(p.Condition)
+			switch val := result.(type) {
+			case error:
+				return fmt.Errorf("run Where error: %s", val)
+			case bool:
+				if val {
+					f = append(f, t)
+				}
+			default:
+				return fmt.Errorf("run Where error: invalid condition that returns non-bool value %[1]T(%[1]v)", val)
+			}
+		}
+		input.Tuples = f
+		return input
 	case xsql.WindowTuplesSet:
 	case xsql.WindowTuplesSet:
 		if len(input) != 1 {
 		if len(input) != 1 {
 			return fmt.Errorf("run Where error: the input WindowTuplesSet with multiple tuples cannot be evaluated")
 			return fmt.Errorf("run Where error: the input WindowTuplesSet with multiple tuples cannot be evaluated")

+ 13 - 419
xstream/operators/preprocessor.go

@@ -1,33 +1,27 @@
 package operators
 package operators
 
 
 import (
 import (
-	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"fmt"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/xsql"
 	"github.com/emqx/kuiper/xsql"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/api"
-	"math"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
 )
 )
 
 
 type Preprocessor struct {
 type Preprocessor struct {
 	//Pruned stream fields. Could be streamField(with data type info) or string
 	//Pruned stream fields. Could be streamField(with data type info) or string
-	streamFields    []interface{}
-	aliasFields     xsql.Fields
-	allMeta         bool
-	metaFields      []string //only needed if not allMeta
-	isEventTime     bool
-	timestampField  string
-	timestampFormat string
-	isBinary        bool
+	defaultFieldProcessor
+	allMeta        bool
+	metaFields     []string //only needed if not allMeta
+	isEventTime    bool
+	timestampField string
 }
 }
 
 
 func NewPreprocessor(fields []interface{}, fs xsql.Fields, allMeta bool, metaFields []string, iet bool, timestampField string, timestampFormat string, isBinary bool) (*Preprocessor, error) {
 func NewPreprocessor(fields []interface{}, fs xsql.Fields, allMeta bool, metaFields []string, iet bool, timestampField string, timestampFormat string, isBinary bool) (*Preprocessor, error) {
-	p := &Preprocessor{streamFields: fields, aliasFields: fs, allMeta: allMeta, metaFields: metaFields, isEventTime: iet, isBinary: isBinary, timestampFormat: timestampFormat, timestampField: timestampField}
+	p := &Preprocessor{
+		allMeta: allMeta, metaFields: metaFields, isEventTime: iet, timestampField: timestampField}
+	p.defaultFieldProcessor = defaultFieldProcessor{
+		streamFields: fields, aliasFields: fs, isBinary: isBinary, timestampFormat: timestampFormat,
+	}
 	return p, nil
 	return p, nil
 }
 }
 
 
@@ -44,44 +38,9 @@ func (p *Preprocessor) Apply(ctx api.StreamContext, data interface{}, fv *xsql.F
 
 
 	log.Debugf("preprocessor receive %s", tuple.Message)
 	log.Debugf("preprocessor receive %s", tuple.Message)
 
 
-	result := make(map[string]interface{})
-	if p.streamFields != nil {
-		for _, f := range p.streamFields {
-			switch sf := f.(type) {
-			case *xsql.StreamField:
-				if p.isBinary {
-					tuple.Message[sf.Name] = tuple.Message[common.DEFAULT_FIELD]
-				}
-				if e := p.addRecField(sf.FieldType, result, tuple.Message, sf.Name); e != nil {
-					return fmt.Errorf("error in preprocessor: %s", e)
-				}
-			case string: //schemaless
-				if p.isBinary {
-					result = tuple.Message
-				} else {
-					if m, ok := tuple.Message.Value(sf); ok {
-						result[sf] = m
-					}
-				}
-			}
-			if p.isBinary {
-				break //binary format should only have ONE field
-			}
-		}
-	} else {
-		result = tuple.Message
-	}
-
-	//If the field has alias name, then evaluate the alias field before transfer it to proceeding operators, and put it into result.
-	//Otherwise, the GROUP BY, ORDER BY statement cannot get the value.
-	for _, f := range p.aliasFields {
-		ve := &xsql.ValuerEval{Valuer: xsql.MultiValuer(tuple, fv)}
-		v := ve.Eval(f.Expr)
-		if _, ok := v.(error); ok {
-			return v
-		} else {
-			result[f.AName] = v
-		}
+	result, err := p.processField(tuple, fv)
+	if err != nil {
+		return fmt.Errorf("error in preprocessor: %s", err)
 	}
 	}
 
 
 	tuple.Message = result
 	tuple.Message = result
@@ -106,368 +65,3 @@ func (p *Preprocessor) Apply(ctx api.StreamContext, data interface{}, fv *xsql.F
 	}
 	}
 	return tuple
 	return tuple
 }
 }
-
-func (p *Preprocessor) parseTime(s string) (time.Time, error) {
-	if p.timestampFormat != "" {
-		return common.ParseTime(s, p.timestampFormat)
-	} else {
-		return time.Parse(common.JSISO, s)
-	}
-}
-
-func (p *Preprocessor) addRecField(ft xsql.FieldType, r map[string]interface{}, j xsql.Message, n string) error {
-	if t, ok := j.Value(n); ok {
-		v := reflect.ValueOf(t)
-		jtype := v.Kind()
-		switch st := ft.(type) {
-		case *xsql.BasicType:
-			switch st.Type {
-			case xsql.UNKNOWN:
-				return fmt.Errorf("invalid data type unknown defined for %s, please check the stream definition", t)
-			case xsql.BIGINT:
-				if jtype == reflect.Int {
-					r[n] = t.(int)
-				} else if jtype == reflect.Float64 {
-					if tt, ok1 := t.(float64); ok1 {
-						if tt > math.MaxInt64 {
-							r[n] = uint64(tt)
-						} else {
-							r[n] = int(tt)
-						}
-					}
-				} else if jtype == reflect.String {
-					if i, err := strconv.Atoi(t.(string)); err != nil {
-						return fmt.Errorf("invalid data type for %s, expect bigint but found %[2]T(%[2]v)", n, t)
-					} else {
-						r[n] = i
-					}
-				} else if jtype == reflect.Uint64 {
-					r[n] = t.(uint64)
-				} else {
-					return fmt.Errorf("invalid data type for %s, expect bigint but found %[2]T(%[2]v)", n, t)
-				}
-			case xsql.FLOAT:
-				if jtype == reflect.Float64 {
-					r[n] = t.(float64)
-				} else if jtype == reflect.String {
-					if f, err := strconv.ParseFloat(t.(string), 64); err != nil {
-						return fmt.Errorf("invalid data type for %s, expect float but found %[2]T(%[2]v)", n, t)
-					} else {
-						r[n] = f
-					}
-				} else {
-					return fmt.Errorf("invalid data type for %s, expect float but found %[2]T(%[2]v)", n, t)
-				}
-			case xsql.STRINGS:
-				if jtype == reflect.String {
-					r[n] = t.(string)
-				} else {
-					return fmt.Errorf("invalid data type for %s, expect string but found %[2]T(%[2]v)", n, t)
-				}
-			case xsql.DATETIME:
-				switch jtype {
-				case reflect.Int:
-					ai := t.(int64)
-					r[n] = common.TimeFromUnixMilli(ai)
-				case reflect.Float64:
-					ai := int64(t.(float64))
-					r[n] = common.TimeFromUnixMilli(ai)
-				case reflect.String:
-					if t, err := p.parseTime(t.(string)); err != nil {
-						return fmt.Errorf("invalid data type for %s, cannot convert to datetime: %s", n, err)
-					} else {
-						r[n] = t
-					}
-				default:
-					return fmt.Errorf("invalid data type for %s, expect datatime but find %[2]T(%[2]v)", n, t)
-				}
-			case xsql.BOOLEAN:
-				if jtype == reflect.Bool {
-					r[n] = t.(bool)
-				} else if jtype == reflect.String {
-					if i, err := strconv.ParseBool(t.(string)); err != nil {
-						return fmt.Errorf("invalid data type for %s, expect boolean but found %[2]T(%[2]v)", n, t)
-					} else {
-						r[n] = i
-					}
-				} else {
-					return fmt.Errorf("invalid data type for %s, expect boolean but found %[2]T(%[2]v)", n, t)
-				}
-			case xsql.BYTEA:
-				if jtype == reflect.String {
-					if b, err := base64.StdEncoding.DecodeString(t.(string)); err != nil {
-						return fmt.Errorf("invalid data type for %s, expect bytea but found %[2]T(%[2]v) which cannot base64 decode", n, t)
-					} else {
-						r[n] = b
-					}
-				} else if jtype == reflect.Slice {
-					if b, ok := t.([]byte); ok {
-						r[n] = b
-					} else {
-						return fmt.Errorf("invalid data type for %s, expect bytea but found %[2]T(%[2]v)", n, t)
-					}
-				}
-			default:
-				return fmt.Errorf("invalid data type for %s, it is not supported yet", st)
-			}
-		case *xsql.ArrayType:
-			var s []interface{}
-			if t == nil {
-				s = nil
-			} else if jtype == reflect.Slice {
-				s = t.([]interface{})
-			} else if jtype == reflect.String {
-				err := json.Unmarshal([]byte(t.(string)), &s)
-				if err != nil {
-					return fmt.Errorf("invalid data type for %s, expect array but found %[2]T(%[2]v)", n, t)
-				}
-			} else {
-				return fmt.Errorf("invalid data type for %s, expect array but found %[2]T(%[2]v)", n, t)
-			}
-
-			if tempArr, err := p.addArrayField(st, s); err != nil {
-				return fmt.Errorf("fail to parse field %s: %s", n, err)
-			} else {
-				r[n] = tempArr
-			}
-		case *xsql.RecType:
-			nextJ := make(map[string]interface{})
-			if t == nil {
-				nextJ = nil
-				r[n] = nextJ
-				return nil
-			} else if jtype == reflect.Map {
-				nextJ, ok = t.(map[string]interface{})
-				if !ok {
-					return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
-				}
-			} else if jtype == reflect.String {
-				err := json.Unmarshal([]byte(t.(string)), &nextJ)
-				if err != nil {
-					return fmt.Errorf("invalid data type for %s, expect map but found %[2]T(%[2]v)", n, t)
-				}
-			} else {
-				return fmt.Errorf("invalid data type for %s, expect struct but found %[2]T(%[2]v)", n, t)
-			}
-			nextR := make(map[string]interface{})
-			for _, nextF := range st.StreamFields {
-				nextP := strings.ToLower(nextF.Name)
-				if e := p.addRecField(nextF.FieldType, nextR, nextJ, nextP); e != nil {
-					return e
-				}
-			}
-			r[n] = nextR
-		default:
-			return fmt.Errorf("unsupported type %T", st)
-		}
-		return nil
-	} else {
-		return fmt.Errorf("invalid data %s, field %s not found", j, n)
-	}
-}
-
-//ft must be xsql.ArrayType
-//side effect: r[p] will be set to the new array
-func (p *Preprocessor) addArrayField(ft *xsql.ArrayType, srcSlice []interface{}) (interface{}, error) {
-	if ft.FieldType != nil { //complex type array or struct
-		switch st := ft.FieldType.(type) { //Only two complex types supported here
-		case *xsql.ArrayType: //TODO handle array of array. Now the type is treated as interface{}
-			if srcSlice == nil {
-				return [][]interface{}(nil), nil
-			}
-			var s []interface{}
-			var tempSlice reflect.Value
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				if t == nil {
-					s = nil
-				} else if jtype == reflect.Slice || jtype == reflect.Array {
-					s = t.([]interface{})
-				} else if jtype == reflect.String {
-					err := json.Unmarshal([]byte(t.(string)), &s)
-					if err != nil {
-						return nil, fmt.Errorf("invalid data type for [%d], expect array but found %[2]T(%[2]v)", i, t)
-					}
-				} else {
-					return nil, fmt.Errorf("invalid data type for [%d], expect array but found %[2]T(%[2]v)", i, t)
-				}
-				if tempArr, err := p.addArrayField(st, s); err != nil {
-					return nil, err
-				} else {
-					if !tempSlice.IsValid() {
-						s := reflect.TypeOf(tempArr)
-						tempSlice = reflect.MakeSlice(reflect.SliceOf(s), 0, 0)
-					}
-					tempSlice = reflect.Append(tempSlice, reflect.ValueOf(tempArr))
-				}
-			}
-			return tempSlice.Interface(), nil
-		case *xsql.RecType:
-			if srcSlice == nil {
-				return []map[string]interface{}(nil), nil
-			}
-			tempSlice := make([]map[string]interface{}, 0)
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				j := make(map[string]interface{})
-				var ok bool
-				if t == nil {
-					j = nil
-					tempSlice = append(tempSlice, j)
-					continue
-				} else if jtype == reflect.Map {
-					j, ok = t.(map[string]interface{})
-					if !ok {
-						return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
-					}
-
-				} else if jtype == reflect.String {
-					err := json.Unmarshal([]byte(t.(string)), &j)
-					if err != nil {
-						return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
-					}
-				} else {
-					return nil, fmt.Errorf("invalid data type for [%d], expect map but found %[2]T(%[2]v)", i, t)
-				}
-				r := make(map[string]interface{})
-				for _, f := range st.StreamFields {
-					n := f.Name
-					if e := p.addRecField(f.FieldType, r, j, n); e != nil {
-						return nil, e
-					}
-				}
-				tempSlice = append(tempSlice, r)
-			}
-			return tempSlice, nil
-		default:
-			return nil, fmt.Errorf("unsupported type %T", st)
-		}
-	} else { //basic type
-		switch ft.Type {
-		case xsql.UNKNOWN:
-			return nil, fmt.Errorf("invalid data type unknown defined for %s, please checke the stream definition", srcSlice)
-		case xsql.BIGINT:
-			if srcSlice == nil {
-				return []int(nil), nil
-			}
-			tempSlice := make([]int, 0)
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				if jtype == reflect.Float64 {
-					tempSlice = append(tempSlice, int(t.(float64)))
-				} else if jtype == reflect.String {
-					if v, err := strconv.Atoi(t.(string)); err != nil {
-						return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
-					} else {
-						tempSlice = append(tempSlice, v)
-					}
-				} else {
-					return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
-				}
-			}
-			return tempSlice, nil
-		case xsql.FLOAT:
-			if srcSlice == nil {
-				return []float64(nil), nil
-			}
-			tempSlice := make([]float64, 0)
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				if jtype == reflect.Float64 {
-					tempSlice = append(tempSlice, t.(float64))
-				} else if jtype == reflect.String {
-					if f, err := strconv.ParseFloat(t.(string), 64); err != nil {
-						return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
-					} else {
-						tempSlice = append(tempSlice, f)
-					}
-				} else {
-					return nil, fmt.Errorf("invalid data type for [%d], expect float but found %[2]T(%[2]v)", i, t)
-				}
-			}
-			return tempSlice, nil
-		case xsql.STRINGS:
-			if srcSlice == nil {
-				return []string(nil), nil
-			}
-			tempSlice := make([]string, 0)
-			for i, t := range srcSlice {
-				if reflect.ValueOf(t).Kind() == reflect.String {
-					tempSlice = append(tempSlice, t.(string))
-				} else {
-					return nil, fmt.Errorf("invalid data type for [%d], expect string but found %[2]T(%[2]v)", i, t)
-				}
-			}
-			return tempSlice, nil
-		case xsql.DATETIME:
-			if srcSlice == nil {
-				return []time.Time(nil), nil
-			}
-			tempSlice := make([]time.Time, 0)
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				switch jtype {
-				case reflect.Int:
-					ai := t.(int64)
-					tempSlice = append(tempSlice, common.TimeFromUnixMilli(ai))
-				case reflect.Float64:
-					ai := int64(t.(float64))
-					tempSlice = append(tempSlice, common.TimeFromUnixMilli(ai))
-				case reflect.String:
-					if ai, err := p.parseTime(t.(string)); err != nil {
-						return nil, fmt.Errorf("invalid data type for %s, cannot convert to datetime: %[2]T(%[2]v)", t, err)
-					} else {
-						tempSlice = append(tempSlice, ai)
-					}
-				default:
-					return nil, fmt.Errorf("invalid data type for [%d], expect datetime but found %[2]T(%[2]v)", i, t)
-				}
-			}
-			return tempSlice, nil
-		case xsql.BOOLEAN:
-			if srcSlice == nil {
-				return []bool(nil), nil
-			}
-			tempSlice := make([]bool, 0)
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				if jtype == reflect.Bool {
-					tempSlice = append(tempSlice, t.(bool))
-				} else if jtype == reflect.String {
-					if v, err := strconv.ParseBool(t.(string)); err != nil {
-						return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %[2]T(%[2]v)", i, t)
-					} else {
-						tempSlice = append(tempSlice, v)
-					}
-				} else {
-					return nil, fmt.Errorf("invalid data type for [%d], expect boolean but found %[2]T(%[2]v)", i, t)
-				}
-			}
-			return tempSlice, nil
-		case xsql.BYTEA:
-			if srcSlice == nil {
-				return [][]byte(nil), nil
-			}
-			tempSlice := make([][]byte, 0)
-			for i, t := range srcSlice {
-				jtype := reflect.ValueOf(t).Kind()
-				if jtype == reflect.String {
-					if b, err := base64.StdEncoding.DecodeString(t.(string)); err != nil {
-						return nil, fmt.Errorf("invalid data type for [%d], expect bytea but found %[2]T(%[2]v) which cannot base64 decode", i, t)
-					} else {
-						tempSlice = append(tempSlice, b)
-					}
-				} else if jtype == reflect.Slice {
-					if b, ok := t.([]byte); ok {
-						tempSlice = append(tempSlice, b)
-					} else {
-						return nil, fmt.Errorf("invalid data type for [%d], expect bytea but found %[2]T(%[2]v)", i, t)
-					}
-				}
-			}
-			return tempSlice, nil
-		default:
-			return nil, fmt.Errorf("invalid data type for %T", ft.Type)
-		}
-	}
-}

+ 19 - 12
xstream/operators/preprocessor_test.go

@@ -525,7 +525,8 @@ func TestPreprocessor_Apply(t *testing.T) {
 	contextLogger := common.Log.WithField("rule", "TestPreprocessor_Apply")
 	contextLogger := common.Log.WithField("rule", "TestPreprocessor_Apply")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
 	for i, tt := range tests {
-		pp := &Preprocessor{streamFields: convertFields(tt.stmt.StreamFields)}
+		pp := &Preprocessor{}
+		pp.streamFields = convertFields(tt.stmt.StreamFields)
 
 
 		dm := make(map[string]interface{})
 		dm := make(map[string]interface{})
 		if e := json.Unmarshal(tt.data, &dm); e != nil {
 		if e := json.Unmarshal(tt.data, &dm); e != nil {
@@ -658,7 +659,9 @@ func TestPreprocessorTime_Apply(t *testing.T) {
 	contextLogger := common.Log.WithField("rule", "TestPreprocessorTime_Apply")
 	contextLogger := common.Log.WithField("rule", "TestPreprocessorTime_Apply")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
 	for i, tt := range tests {
-		pp := &Preprocessor{streamFields: convertFields(tt.stmt.StreamFields), timestampFormat: tt.stmt.Options["TIMESTAMP_FORMAT"]}
+		pp := &Preprocessor{}
+		pp.streamFields = convertFields(tt.stmt.StreamFields)
+		pp.timestampFormat = tt.stmt.Options["TIMESTAMP_FORMAT"]
 		dm := make(map[string]interface{})
 		dm := make(map[string]interface{})
 		if e := json.Unmarshal(tt.data, &dm); e != nil {
 		if e := json.Unmarshal(tt.data, &dm); e != nil {
 			log.Fatal(e)
 			log.Fatal(e)
@@ -840,12 +843,14 @@ func TestPreprocessorEventtime_Apply(t *testing.T) {
 	for i, tt := range tests {
 	for i, tt := range tests {
 
 
 		pp := &Preprocessor{
 		pp := &Preprocessor{
-			streamFields:    convertFields(tt.stmt.StreamFields),
-			aliasFields:     nil,
-			isEventTime:     true,
-			timestampField:  tt.stmt.Options["TIMESTAMP"],
-			timestampFormat: tt.stmt.Options["TIMESTAMP_FORMAT"],
-			isBinary:        false,
+			defaultFieldProcessor: defaultFieldProcessor{
+				streamFields:    convertFields(tt.stmt.StreamFields),
+				aliasFields:     nil,
+				isBinary:        false,
+				timestampFormat: tt.stmt.Options["TIMESTAMP_FORMAT"],
+			},
+			isEventTime:    true,
+			timestampField: tt.stmt.Options["TIMESTAMP"],
 		}
 		}
 
 
 		dm := make(map[string]interface{})
 		dm := make(map[string]interface{})
@@ -925,8 +930,8 @@ func TestPreprocessorError(t *testing.T) {
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
 	for i, tt := range tests {
 
 
-		pp := &Preprocessor{streamFields: convertFields(tt.stmt.StreamFields)}
-
+		pp := &Preprocessor{}
+		pp.streamFields = convertFields(tt.stmt.StreamFields)
 		dm := make(map[string]interface{})
 		dm := make(map[string]interface{})
 		if e := json.Unmarshal(tt.data, &dm); e != nil {
 		if e := json.Unmarshal(tt.data, &dm); e != nil {
 			log.Fatal(e)
 			log.Fatal(e)
@@ -1052,7 +1057,9 @@ func TestPreprocessorForBinary(t *testing.T) {
 	contextLogger := common.Log.WithField("rule", "TestPreprocessorForBinary")
 	contextLogger := common.Log.WithField("rule", "TestPreprocessorForBinary")
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
 	for i, tt := range tests {
 	for i, tt := range tests {
-		pp := &Preprocessor{streamFields: convertFields(tt.stmt.StreamFields), isBinary: tt.isBinary}
+		pp := &Preprocessor{}
+		pp.streamFields = convertFields(tt.stmt.StreamFields)
+		pp.isBinary = tt.isBinary
 		format := "json"
 		format := "json"
 		if tt.isBinary {
 		if tt.isBinary {
 			format = "binary"
 			format = "binary"
@@ -1065,7 +1072,7 @@ func TestPreprocessorForBinary(t *testing.T) {
 			fv, afv := xsql.NewFunctionValuersForOp(nil)
 			fv, afv := xsql.NewFunctionValuersForOp(nil)
 			result := pp.Apply(ctx, tuple, fv, afv)
 			result := pp.Apply(ctx, tuple, fv, afv)
 			if !reflect.DeepEqual(tt.result, result) {
 			if !reflect.DeepEqual(tt.result, result) {
-				t.Errorf("%d. %q\n\nresult mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tuple, tt.result, result)
+				t.Errorf("%d. %q\n\nresult mismatch", i, tuple)
 			}
 			}
 		}
 		}
 
 

+ 46 - 0
xstream/operators/table_processor.go

@@ -0,0 +1,46 @@
+package operators
+
+import (
+	"fmt"
+	"github.com/emqx/kuiper/xsql"
+	"github.com/emqx/kuiper/xstream/api"
+)
+
+type TableProcessor struct {
+	//Pruned stream fields. Could be streamField(with data type info) or string
+	defaultFieldProcessor
+}
+
+func NewTableProcessor(fields []interface{}, fs xsql.Fields, timestampFormat string) (*TableProcessor, error) {
+	p := &TableProcessor{}
+	p.defaultFieldProcessor = defaultFieldProcessor{
+		streamFields: fields, aliasFields: fs, isBinary: false, timestampFormat: timestampFormat,
+	}
+	return p, nil
+}
+
+/*
+ *	input: []*xsql.Tuple
+ *	output: WindowTuples
+ */
+func (p *TableProcessor) Apply(ctx api.StreamContext, data interface{}, fv *xsql.FunctionValuer, _ *xsql.AggregateFunctionValuer) interface{} {
+	logger := ctx.GetLogger()
+	tuples, ok := data.([]*xsql.Tuple)
+	if !ok {
+		return fmt.Errorf("expect []*xsql.Tuple data type")
+	}
+	logger.Debugf("Start to process table fields")
+	w := xsql.WindowTuples{
+		Emitter: tuples[0].Emitter,
+		Tuples:  make([]xsql.Tuple, len(tuples)),
+	}
+	for i, t := range tuples {
+		result, err := p.processField(t, fv)
+		if err != nil {
+			return fmt.Errorf("error in table processor: %s", err)
+		}
+		t.Message = result
+		w.Tuples[i] = *t
+	}
+	return w
+}

+ 131 - 0
xstream/operators/table_processor_test.go

@@ -0,0 +1,131 @@
+package operators
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/emqx/kuiper/common"
+	"github.com/emqx/kuiper/xsql"
+	"github.com/emqx/kuiper/xstream/contexts"
+	"reflect"
+	"testing"
+)
+
+func TestTableProcessor_Apply(t *testing.T) {
+
+	var tests = []struct {
+		stmt   *xsql.StreamStmt
+		data   []byte
+		result interface{}
+	}{
+		{
+			stmt: &xsql.StreamStmt{
+				Name: xsql.StreamName("demo"),
+				StreamFields: []xsql.StreamField{
+					{Name: "a", FieldType: &xsql.ArrayType{
+						Type: xsql.STRUCT,
+						FieldType: &xsql.RecType{
+							StreamFields: []xsql.StreamField{
+								{Name: "b", FieldType: &xsql.BasicType{Type: xsql.STRINGS}},
+							},
+						},
+					}},
+				},
+			},
+			data: []byte(`[{"a": [{"b" : "hello1"}, {"b" : "hello2"}]},{"a": [{"b" : "hello2"}, {"b" : "hello3"}]},{"a": [{"b" : "hello3"}, {"b" : "hello4"}]}]`),
+			result: xsql.WindowTuples{
+				Emitter: "demo",
+				Tuples: []xsql.Tuple{
+					{
+						Message: xsql.Message{
+							"a": []map[string]interface{}{
+								{"b": "hello1"},
+								{"b": "hello2"},
+							},
+						},
+						Emitter: "demo",
+					},
+					{
+						Message: xsql.Message{
+							"a": []map[string]interface{}{
+								{"b": "hello2"},
+								{"b": "hello3"},
+							},
+						},
+						Emitter: "demo",
+					},
+					{
+						Message: xsql.Message{
+							"a": []map[string]interface{}{
+								{"b": "hello3"},
+								{"b": "hello4"},
+							},
+						},
+						Emitter: "demo",
+					},
+				},
+			},
+		}, {
+			stmt: &xsql.StreamStmt{
+				Name:         xsql.StreamName("demo"),
+				StreamFields: nil,
+			},
+			data: []byte(`[{"a": {"b" : "hello", "c": {"d": 35.2}}},{"a": {"b" : "world", "c": {"d": 65.2}}}]`),
+			result: xsql.WindowTuples{
+				Emitter: "demo",
+				Tuples: []xsql.Tuple{
+					{
+						Message: xsql.Message{
+							"a": map[string]interface{}{
+								"b": "hello",
+								"c": map[string]interface{}{
+									"d": 35.2,
+								},
+							},
+						},
+						Emitter: "demo",
+					},
+					{
+						Message: xsql.Message{
+							"a": map[string]interface{}{
+								"b": "world",
+								"c": map[string]interface{}{
+									"d": 65.2,
+								},
+							},
+						},
+						Emitter: "demo",
+					},
+				},
+			},
+		},
+	}
+	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
+
+	defer common.CloseLogger()
+	contextLogger := common.Log.WithField("rule", "TestPreprocessor_Apply")
+	ctx := contexts.WithValue(contexts.Background(), contexts.LoggerKey, contextLogger)
+	for i, tt := range tests {
+		pp := &TableProcessor{}
+		pp.streamFields = convertFields(tt.stmt.StreamFields)
+
+		var dm []map[string]interface{}
+		if e := json.Unmarshal(tt.data, &dm); e != nil {
+			t.Log(e)
+			t.Fail()
+		} else {
+			tuples := make([]*xsql.Tuple, len(dm))
+			for i, m := range dm {
+				tuples[i] = &xsql.Tuple{
+					Emitter: "demo",
+					Message: m,
+				}
+			}
+			fv, afv := xsql.NewFunctionValuersForOp(nil)
+			result := pp.Apply(ctx, tuples, fv, afv)
+			if !reflect.DeepEqual(tt.result, result) {
+				t.Errorf("%d. result mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.result, result)
+			}
+		}
+
+	}
+}

+ 36 - 0
xstream/planner/joinAlignPlan.go

@@ -0,0 +1,36 @@
+package planner
+
+import "github.com/emqx/kuiper/xsql"
+
+type JoinAlignPlan struct {
+	baseLogicalPlan
+	Emitters []string
+}
+
+func (p JoinAlignPlan) Init() *JoinAlignPlan {
+	p.baseLogicalPlan.self = &p
+	return &p
+}
+
+// Push down to table first, then push to window
+func (p *JoinAlignPlan) PushDownPredicate(condition xsql.Expr) (xsql.Expr, LogicalPlan) {
+	if len(p.children) == 0 {
+		return condition, p.self
+	}
+	rest := condition
+	for i, child := range p.children {
+		if _, ok := child.(*DataSourcePlan); ok {
+			var newChild LogicalPlan
+			rest, newChild = child.PushDownPredicate(rest)
+			p.children[i] = newChild
+		}
+	}
+	for i, child := range p.children {
+		if _, ok := child.(*DataSourcePlan); !ok {
+			var newChild LogicalPlan
+			rest, newChild = child.PushDownPredicate(rest)
+			p.children[i] = newChild
+		}
+	}
+	return rest, p.self
+}

+ 64 - 44
xstream/planner/planner.go

@@ -1,6 +1,7 @@
 package planner
 package planner
 
 
 import (
 import (
+	"errors"
 	"fmt"
 	"fmt"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/common/kv"
 	"github.com/emqx/kuiper/common/kv"
@@ -10,7 +11,6 @@ import (
 	"github.com/emqx/kuiper/xstream/nodes"
 	"github.com/emqx/kuiper/xstream/nodes"
 	"github.com/emqx/kuiper/xstream/operators"
 	"github.com/emqx/kuiper/xstream/operators"
 	"path"
 	"path"
-	"strings"
 )
 )
 
 
 func Plan(rule *api.Rule, storePath string) (*xstream.TopologyNew, error) {
 func Plan(rule *api.Rule, storePath string) (*xstream.TopologyNew, error) {
@@ -28,9 +28,9 @@ func PlanWithSourcesAndSinks(rule *api.Rule, storePath string, sources []*nodes.
 	}
 	}
 	// validation
 	// validation
 	streamsFromStmt := xsql.GetStreams(stmt)
 	streamsFromStmt := xsql.GetStreams(stmt)
-	if len(sources) > 0 && len(sources) != len(streamsFromStmt) {
-		return nil, fmt.Errorf("Invalid parameter sources or streams, the length cannot match the statement, expect %d sources.", len(streamsFromStmt))
-	}
+	//if len(sources) > 0 && len(sources) != len(streamsFromStmt) {
+	//	return nil, fmt.Errorf("Invalid parameter sources or streams, the length cannot match the statement, expect %d sources.", len(streamsFromStmt))
+	//}
 	if rule.Options.SendMetaToSink && (len(streamsFromStmt) > 1 || stmt.Dimensions != nil) {
 	if rule.Options.SendMetaToSink && (len(streamsFromStmt) > 1 || stmt.Dimensions != nil) {
 		return nil, fmt.Errorf("Invalid option sendMetaToSink, it can not be applied to window")
 		return nil, fmt.Errorf("Invalid option sendMetaToSink, it can not be applied to window")
 	}
 	}
@@ -102,29 +102,41 @@ func buildOps(lp LogicalPlan, tp *xstream.TopologyNew, options *api.RuleOption,
 	)
 	)
 	switch t := lp.(type) {
 	switch t := lp.(type) {
 	case *DataSourcePlan:
 	case *DataSourcePlan:
-		pp, err := operators.NewPreprocessor(t.streamFields, t.alias, t.allMeta, t.metaFields, t.iet, t.timestampField, t.timestampFormat, t.isBinary)
-		if err != nil {
-			return nil, 0, err
-		}
-		var srcNode *nodes.SourceNode
-		if len(sources) == 0 {
-			node := nodes.NewSourceNode(t.name, t.streamStmt.Options)
-			srcNode = node
-		} else {
-			found := false
-			for _, source := range sources {
-				if t.name == source.GetName() {
-					srcNode = source
-					found = true
+		switch t.streamStmt.StreamType {
+		case xsql.TypeStream:
+			pp, err := operators.NewPreprocessor(t.streamFields, t.alias, t.allMeta, t.metaFields, t.iet, t.timestampField, t.timestampFormat, t.isBinary)
+			if err != nil {
+				return nil, 0, err
+			}
+			var srcNode *nodes.SourceNode
+			if len(sources) == 0 {
+				node := nodes.NewSourceNode(t.name, t.streamStmt.Options)
+				srcNode = node
+			} else {
+				found := false
+				for _, source := range sources {
+					if t.name == source.GetName() {
+						srcNode = source
+						found = true
+					}
+				}
+				if !found {
+					return nil, 0, fmt.Errorf("can't find predefined source %s", t.name)
 				}
 				}
 			}
 			}
-			if !found {
-				return nil, 0, fmt.Errorf("can't find predefined source %s", t.name)
+			tp.AddSrc(srcNode)
+			op = xstream.Transform(pp, fmt.Sprintf("%d_preprocessor_%s", newIndex, t.name), options)
+			inputs = []api.Emitter{srcNode}
+		case xsql.TypeTable:
+			pp, err := operators.NewTableProcessor(t.streamFields, t.alias, t.timestampFormat)
+			if err != nil {
+				return nil, 0, err
 			}
 			}
+			srcNode := nodes.NewTableNode(t.name, t.streamStmt.Options)
+			tp.AddSrc(srcNode)
+			op = xstream.Transform(pp, fmt.Sprintf("%d_tableprocessor_%s", newIndex, t.name), options)
+			inputs = []api.Emitter{srcNode}
 		}
 		}
-		tp.AddSrc(srcNode)
-		op = xstream.Transform(pp, fmt.Sprintf("%d_preprocessor_%s", newIndex, t.name), options)
-		inputs = []api.Emitter{srcNode}
 	case *WindowPlan:
 	case *WindowPlan:
 		if t.condition != nil {
 		if t.condition != nil {
 			wfilterOp := xstream.Transform(&operators.FilterOp{Condition: t.condition}, fmt.Sprintf("%d_windowFilter", newIndex), options)
 			wfilterOp := xstream.Transform(&operators.FilterOp{Condition: t.condition}, fmt.Sprintf("%d_windowFilter", newIndex), options)
@@ -141,6 +153,8 @@ func buildOps(lp LogicalPlan, tp *xstream.TopologyNew, options *api.RuleOption,
 		if err != nil {
 		if err != nil {
 			return nil, 0, err
 			return nil, 0, err
 		}
 		}
+	case *JoinAlignPlan:
+		op, err = nodes.NewJoinAlignNode(fmt.Sprintf("%d_join_aligner", newIndex), t.Emitters, options)
 	case *JoinPlan:
 	case *JoinPlan:
 		op = xstream.Transform(&operators.JoinOp{Joins: t.joins, From: t.from}, fmt.Sprintf("%d_join", newIndex), options)
 		op = xstream.Transform(&operators.JoinOp{Joins: t.joins, From: t.from}, fmt.Sprintf("%d_join", newIndex), options)
 	case *FilterPlan:
 	case *FilterPlan:
@@ -163,27 +177,15 @@ func buildOps(lp LogicalPlan, tp *xstream.TopologyNew, options *api.RuleOption,
 	return op, newIndex, nil
 	return op, newIndex, nil
 }
 }
 
 
-func getStream(m kv.KeyValue, name string) (stmt *xsql.StreamStmt, err error) {
-	var s string
-	f, err := m.Get(name, &s)
-	if !f || err != nil {
-		return nil, fmt.Errorf("Cannot find key %s. ", name)
-	}
-	parser := xsql.NewParser(strings.NewReader(s))
-	stream, err := xsql.Language.Parse(parser)
-	stmt, ok := stream.(*xsql.StreamStmt)
-	if !ok {
-		err = fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
-	}
-	return
-}
-
 func createLogicalPlan(stmt *xsql.SelectStatement, opt *api.RuleOption, store kv.KeyValue) (LogicalPlan, error) {
 func createLogicalPlan(stmt *xsql.SelectStatement, opt *api.RuleOption, store kv.KeyValue) (LogicalPlan, error) {
 	streamsFromStmt := xsql.GetStreams(stmt)
 	streamsFromStmt := xsql.GetStreams(stmt)
 	dimensions := stmt.Dimensions
 	dimensions := stmt.Dimensions
 	var (
 	var (
-		p                     LogicalPlan
-		children              []LogicalPlan
+		p        LogicalPlan
+		children []LogicalPlan
+		// If there are tables, the plan graph will be different for join/window
+		tableChildren         []LogicalPlan
+		tableEmitters         []string
 		w                     *xsql.Window
 		w                     *xsql.Window
 		ds                    xsql.Dimensions
 		ds                    xsql.Dimensions
 		alias, aggregateAlias xsql.Fields
 		alias, aggregateAlias xsql.Fields
@@ -197,8 +199,9 @@ func createLogicalPlan(stmt *xsql.SelectStatement, opt *api.RuleOption, store kv
 			}
 			}
 		}
 		}
 	}
 	}
+
 	for _, s := range streamsFromStmt {
 	for _, s := range streamsFromStmt {
-		streamStmt, err := getStream(store, s)
+		streamStmt, err := xsql.GetDataSource(store, s)
 		if err != nil {
 		if err != nil {
 			return nil, fmt.Errorf("fail to get stream %s, please check if stream is created", s)
 			return nil, fmt.Errorf("fail to get stream %s, please check if stream is created", s)
 		}
 		}
@@ -209,11 +212,20 @@ func createLogicalPlan(stmt *xsql.SelectStatement, opt *api.RuleOption, store kv
 			alias:      alias,
 			alias:      alias,
 			allMeta:    opt.SendMetaToSink,
 			allMeta:    opt.SendMetaToSink,
 		}.Init()
 		}.Init()
-		children = append(children, p)
+		if streamStmt.StreamType == xsql.TypeStream {
+			children = append(children, p)
+		} else {
+			tableChildren = append(tableChildren, p)
+			tableEmitters = append(tableEmitters, string(streamStmt.Name))
+		}
+
 	}
 	}
 	if dimensions != nil {
 	if dimensions != nil {
 		w = dimensions.GetWindow()
 		w = dimensions.GetWindow()
 		if w != nil {
 		if w != nil {
+			if len(children) == 0 {
+				return nil, errors.New("cannot run window for TABLE sources")
+			}
 			wp := WindowPlan{
 			wp := WindowPlan{
 				wtype:       w.WindowType,
 				wtype:       w.WindowType,
 				length:      w.Length.Val,
 				length:      w.Length.Val,
@@ -235,7 +247,16 @@ func createLogicalPlan(stmt *xsql.SelectStatement, opt *api.RuleOption, store kv
 			p = wp
 			p = wp
 		}
 		}
 	}
 	}
-	if w != nil && stmt.Joins != nil {
+	if stmt.Joins != nil {
+		if len(tableChildren) > 0 {
+			p = JoinAlignPlan{
+				Emitters: tableEmitters,
+			}.Init()
+			p.SetChildren(append(children, tableChildren...))
+			children = []LogicalPlan{p}
+		} else if w == nil {
+			return nil, errors.New("need to run stream join in windows")
+		}
 		// TODO extract on filter
 		// TODO extract on filter
 		p = JoinPlan{
 		p = JoinPlan{
 			from:  stmt.Sources[0].(*xsql.Table),
 			from:  stmt.Sources[0].(*xsql.Table),
@@ -287,7 +308,6 @@ func createLogicalPlan(stmt *xsql.SelectStatement, opt *api.RuleOption, store kv
 			sendMeta:    opt.SendMetaToSink,
 			sendMeta:    opt.SendMetaToSink,
 		}.Init()
 		}.Init()
 		p.SetChildren(children)
 		p.SetChildren(children)
-		children = []LogicalPlan{p}
 	}
 	}
 
 
 	return optimize(p)
 	return optimize(p)

+ 258 - 8
xstream/planner/planner_test.go

@@ -1,6 +1,7 @@
 package planner
 package planner
 
 
 import (
 import (
+	"encoding/json"
 	"fmt"
 	"fmt"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/common/kv"
 	"github.com/emqx/kuiper/common/kv"
@@ -40,17 +41,36 @@ func Test_createLogicalPlan(t *testing.T) {
 					temp BIGINT,
 					temp BIGINT,
 					name string
 					name string
 				) WITH (DATASOURCE="src1", FORMAT="json", KEY="ts");`,
 				) WITH (DATASOURCE="src1", FORMAT="json", KEY="ts");`,
-		"src2": `CREATE STREAM src1 (
+		"src2": `CREATE STREAM src2 (
 					id2 BIGINT,
 					id2 BIGINT,
 					hum BIGINT
 					hum BIGINT
-				) WITH (DATASOURCE="src1", FORMAT="json", KEY="ts");`,
+				) WITH (DATASOURCE="src2", FORMAT="json", KEY="ts");`,
+		"table1": `CREATE TABLE table1 (
+					id BIGINT,
+					name STRING,
+					value STRING,
+					hum BIGINT
+				) WITH (TYPE="file");`,
+	}
+	types := map[string]xsql.StreamType{
+		"src1":   xsql.TypeStream,
+		"src2":   xsql.TypeStream,
+		"table1": xsql.TypeTable,
 	}
 	}
 	for name, sql := range streamSqls {
 	for name, sql := range streamSqls {
-		store.Set(name, sql)
+		s, err := json.Marshal(&xsql.StreamInfo{
+			StreamType: types[name],
+			Statement:  sql,
+		})
+		if err != nil {
+			t.Error(err)
+			t.Fail()
+		}
+		store.Set(name, string(s))
 	}
 	}
 	streams := make(map[string]*xsql.StreamStmt)
 	streams := make(map[string]*xsql.StreamStmt)
 	for n, _ := range streamSqls {
 	for n, _ := range streamSqls {
-		streamStmt, err := getStream(store, n)
+		streamStmt, err := xsql.GetDataSource(store, n)
 		if err != nil {
 		if err != nil {
 			t.Errorf("fail to get stream %s, please check if stream is created", n)
 			t.Errorf("fail to get stream %s, please check if stream is created", n)
 			return
 			return
@@ -581,6 +601,229 @@ func Test_createLogicalPlan(t *testing.T) {
 				isAggregate: false,
 				isAggregate: false,
 				sendMeta:    false,
 				sendMeta:    false,
 			}.Init(),
 			}.Init(),
+		}, { // 7 window error for table
+			sql: `SELECT value FROM table1 WHERE name = "v1" GROUP BY TUMBLINGWINDOW(ss, 10) FILTER( WHERE temp > 2)`,
+			p:   nil,
+			err: "cannot run window for TABLE sources",
+		}, { // 8 join table without window
+			sql: `SELECT id1 FROM src1 INNER JOIN table1 on src1.id1 = table1.id and src1.temp > 20 and table1.hum < 60 WHERE src1.id1 > 111`,
+			p: ProjectPlan{
+				baseLogicalPlan: baseLogicalPlan{
+					children: []LogicalPlan{
+						JoinPlan{
+							baseLogicalPlan: baseLogicalPlan{
+								children: []LogicalPlan{
+									JoinAlignPlan{
+										baseLogicalPlan: baseLogicalPlan{
+											children: []LogicalPlan{
+												FilterPlan{
+													baseLogicalPlan: baseLogicalPlan{
+														children: []LogicalPlan{
+															DataSourcePlan{
+																name: "src1",
+																streamFields: []interface{}{
+																	&xsql.StreamField{
+																		Name:      "id1",
+																		FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																	},
+																	&xsql.StreamField{
+																		Name:      "temp",
+																		FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																	},
+																},
+																streamStmt: streams["src1"],
+																metaFields: []string{},
+															}.Init(),
+														},
+													},
+													condition: &xsql.BinaryExpr{
+														RHS: &xsql.BinaryExpr{
+															OP:  xsql.GT,
+															LHS: &xsql.FieldRef{Name: "temp", StreamName: "src1"},
+															RHS: &xsql.IntegerLiteral{Val: 20},
+														},
+														OP: xsql.AND,
+														LHS: &xsql.BinaryExpr{
+															OP:  xsql.GT,
+															LHS: &xsql.FieldRef{Name: "id1", StreamName: "src1"},
+															RHS: &xsql.IntegerLiteral{Val: 111},
+														},
+													},
+												}.Init(),
+												FilterPlan{
+													baseLogicalPlan: baseLogicalPlan{
+														children: []LogicalPlan{
+															DataSourcePlan{
+																name: "table1",
+																streamFields: []interface{}{
+																	&xsql.StreamField{
+																		Name:      "hum",
+																		FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																	},
+																	&xsql.StreamField{
+																		Name:      "id",
+																		FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																	},
+																},
+																streamStmt: streams["table1"],
+																metaFields: []string{},
+															}.Init(),
+														},
+													},
+													condition: &xsql.BinaryExpr{
+														OP:  xsql.LT,
+														LHS: &xsql.FieldRef{Name: "hum", StreamName: "table1"},
+														RHS: &xsql.IntegerLiteral{Val: 60},
+													},
+												}.Init(),
+											},
+										},
+										Emitters: []string{"table1"},
+									}.Init(),
+								},
+							},
+							from: &xsql.Table{
+								Name: "src1",
+							},
+							joins: []xsql.Join{
+								{
+									Name:     "table1",
+									Alias:    "",
+									JoinType: xsql.INNER_JOIN,
+									Expr: &xsql.BinaryExpr{
+										LHS: &xsql.FieldRef{Name: "id1", StreamName: "src1"},
+										OP:  xsql.EQ,
+										RHS: &xsql.FieldRef{Name: "id", StreamName: "table1"},
+									},
+								},
+							},
+						}.Init(),
+					},
+				},
+				fields: []xsql.Field{
+					{
+						Expr:  &xsql.FieldRef{Name: "id1"},
+						Name:  "id1",
+						AName: ""},
+				},
+				isAggregate: false,
+				sendMeta:    false,
+			}.Init(),
+		}, { // 8 join table with window
+			sql: `SELECT id1 FROM src1 INNER JOIN table1 on src1.id1 = table1.id and src1.temp > 20 and table1.hum < 60 WHERE src1.id1 > 111 GROUP BY TUMBLINGWINDOW(ss, 10)`,
+			p: ProjectPlan{
+				baseLogicalPlan: baseLogicalPlan{
+					children: []LogicalPlan{
+						JoinPlan{
+							baseLogicalPlan: baseLogicalPlan{
+								children: []LogicalPlan{
+									JoinAlignPlan{
+										baseLogicalPlan: baseLogicalPlan{
+											children: []LogicalPlan{
+												WindowPlan{
+													baseLogicalPlan: baseLogicalPlan{
+														children: []LogicalPlan{
+															FilterPlan{
+																baseLogicalPlan: baseLogicalPlan{
+																	children: []LogicalPlan{
+																		DataSourcePlan{
+																			name: "src1",
+																			streamFields: []interface{}{
+																				&xsql.StreamField{
+																					Name:      "id1",
+																					FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																				},
+																				&xsql.StreamField{
+																					Name:      "temp",
+																					FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																				},
+																			},
+																			streamStmt: streams["src1"],
+																			metaFields: []string{},
+																		}.Init(),
+																	},
+																},
+																condition: &xsql.BinaryExpr{
+																	RHS: &xsql.BinaryExpr{
+																		OP:  xsql.GT,
+																		LHS: &xsql.FieldRef{Name: "temp", StreamName: "src1"},
+																		RHS: &xsql.IntegerLiteral{Val: 20},
+																	},
+																	OP: xsql.AND,
+																	LHS: &xsql.BinaryExpr{
+																		OP:  xsql.GT,
+																		LHS: &xsql.FieldRef{Name: "id1", StreamName: "src1"},
+																		RHS: &xsql.IntegerLiteral{Val: 111},
+																	},
+																},
+															}.Init(),
+														},
+													},
+													condition: nil,
+													wtype:     xsql.TUMBLING_WINDOW,
+													length:    10000,
+													interval:  0,
+													limit:     0,
+												}.Init(),
+												FilterPlan{
+													baseLogicalPlan: baseLogicalPlan{
+														children: []LogicalPlan{
+															DataSourcePlan{
+																name: "table1",
+																streamFields: []interface{}{
+																	&xsql.StreamField{
+																		Name:      "hum",
+																		FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																	},
+																	&xsql.StreamField{
+																		Name:      "id",
+																		FieldType: &xsql.BasicType{Type: xsql.BIGINT},
+																	},
+																},
+																streamStmt: streams["table1"],
+																metaFields: []string{},
+															}.Init(),
+														},
+													},
+													condition: &xsql.BinaryExpr{
+														OP:  xsql.LT,
+														LHS: &xsql.FieldRef{Name: "hum", StreamName: "table1"},
+														RHS: &xsql.IntegerLiteral{Val: 60},
+													},
+												}.Init(),
+											},
+										},
+										Emitters: []string{"table1"},
+									}.Init(),
+								},
+							},
+							from: &xsql.Table{
+								Name: "src1",
+							},
+							joins: []xsql.Join{
+								{
+									Name:     "table1",
+									Alias:    "",
+									JoinType: xsql.INNER_JOIN,
+									Expr: &xsql.BinaryExpr{
+										LHS: &xsql.FieldRef{Name: "id1", StreamName: "src1"},
+										OP:  xsql.EQ,
+										RHS: &xsql.FieldRef{Name: "id", StreamName: "table1"},
+									},
+								},
+							},
+						}.Init(),
+					},
+				},
+				fields: []xsql.Field{
+					{
+						Expr:  &xsql.FieldRef{Name: "id1"},
+						Name:  "id1",
+						AName: ""},
+				},
+				isAggregate: false,
+				sendMeta:    false,
+			}.Init(),
 		},
 		},
 	}
 	}
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
 	fmt.Printf("The test bucket size is %d.\n\n", len(tests))
@@ -601,11 +844,18 @@ func Test_createLogicalPlan(t *testing.T) {
 			CheckpointInterval: 0,
 			CheckpointInterval: 0,
 			SendError:          true,
 			SendError:          true,
 		}, store)
 		}, store)
-		if err != nil {
-			t.Errorf("%d. %q\n\nerror:%v\n\n", i, tt.sql, err)
-		}
-		if !reflect.DeepEqual(tt.p, p) {
+		if !reflect.DeepEqual(tt.err, errstring(err)) {
+			t.Errorf("%d. %q: error mismatch:\n  exp=%s\n  got=%s\n\n", i, tt.sql, tt.err, err)
+		} else if !reflect.DeepEqual(tt.p, p) {
 			t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.p, p)
 			t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.sql, tt.p, p)
 		}
 		}
 	}
 	}
 }
 }
+
+// errstring returns the string representation of an error.
+func errstring(err error) string {
+	if err != nil {
+		return err.Error()
+	}
+	return ""
+}

+ 33 - 13
xstream/server/server/rest.go

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"fmt"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/common"
 	"github.com/emqx/kuiper/plugins"
 	"github.com/emqx/kuiper/plugins"
+	"github.com/emqx/kuiper/xsql"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/emqx/kuiper/xstream/api"
 	"github.com/gorilla/handlers"
 	"github.com/gorilla/handlers"
 	"github.com/gorilla/mux"
 	"github.com/gorilla/mux"
@@ -78,6 +79,8 @@ func createRestServer(ip string, port int) *http.Server {
 	r.HandleFunc("/ping", pingHandler).Methods(http.MethodGet)
 	r.HandleFunc("/ping", pingHandler).Methods(http.MethodGet)
 	r.HandleFunc("/streams", streamsHandler).Methods(http.MethodGet, http.MethodPost)
 	r.HandleFunc("/streams", streamsHandler).Methods(http.MethodGet, http.MethodPost)
 	r.HandleFunc("/streams/{name}", streamHandler).Methods(http.MethodGet, http.MethodDelete, http.MethodPut)
 	r.HandleFunc("/streams/{name}", streamHandler).Methods(http.MethodGet, http.MethodDelete, http.MethodPut)
+	r.HandleFunc("/tables", tablesHandler).Methods(http.MethodGet, http.MethodPost)
+	r.HandleFunc("/tables/{name}", tableHandler).Methods(http.MethodGet, http.MethodDelete, http.MethodPut)
 	r.HandleFunc("/rules", rulesHandler).Methods(http.MethodGet, http.MethodPost)
 	r.HandleFunc("/rules", rulesHandler).Methods(http.MethodGet, http.MethodPost)
 	r.HandleFunc("/rules/{name}", ruleHandler).Methods(http.MethodDelete, http.MethodGet, http.MethodPut)
 	r.HandleFunc("/rules/{name}", ruleHandler).Methods(http.MethodDelete, http.MethodGet, http.MethodPut)
 	r.HandleFunc("/rules/{name}/status", getStatusRuleHandler).Methods(http.MethodGet)
 	r.HandleFunc("/rules/{name}/status", getStatusRuleHandler).Methods(http.MethodGet)
@@ -150,14 +153,13 @@ func pingHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusOK)
 	w.WriteHeader(http.StatusOK)
 }
 }
 
 
-//list or create streams
-func streamsHandler(w http.ResponseWriter, r *http.Request) {
+func sourcesManageHandler(w http.ResponseWriter, r *http.Request, st xsql.StreamType) {
 	defer r.Body.Close()
 	defer r.Body.Close()
 	switch r.Method {
 	switch r.Method {
 	case http.MethodGet:
 	case http.MethodGet:
-		content, err := streamProcessor.ShowStream()
+		content, err := streamProcessor.ShowStream(st)
 		if err != nil {
 		if err != nil {
-			handleError(w, err, "Stream command error", logger)
+			handleError(w, err, fmt.Sprintf("%s command error", strings.Title(xsql.StreamTypeMap[st])), logger)
 			return
 			return
 		}
 		}
 		jsonResponse(content, w, logger)
 		jsonResponse(content, w, logger)
@@ -169,7 +171,7 @@ func streamsHandler(w http.ResponseWriter, r *http.Request) {
 		}
 		}
 		content, err := streamProcessor.ExecStreamSql(v.Sql)
 		content, err := streamProcessor.ExecStreamSql(v.Sql)
 		if err != nil {
 		if err != nil {
-			handleError(w, err, "Stream command error", logger)
+			handleError(w, err, fmt.Sprintf("%s command error", strings.Title(xsql.StreamTypeMap[st])), logger)
 			return
 			return
 		}
 		}
 		w.WriteHeader(http.StatusCreated)
 		w.WriteHeader(http.StatusCreated)
@@ -177,24 +179,23 @@ func streamsHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	}
 }
 }
 
 
-//describe or delete a stream
-func streamHandler(w http.ResponseWriter, r *http.Request) {
+func sourceManageHandler(w http.ResponseWriter, r *http.Request, st xsql.StreamType) {
 	defer r.Body.Close()
 	defer r.Body.Close()
 	vars := mux.Vars(r)
 	vars := mux.Vars(r)
 	name := vars["name"]
 	name := vars["name"]
 
 
 	switch r.Method {
 	switch r.Method {
 	case http.MethodGet:
 	case http.MethodGet:
-		content, err := streamProcessor.DescStream(name)
+		content, err := streamProcessor.DescStream(name, st)
 		if err != nil {
 		if err != nil {
-			handleError(w, err, "describe stream error", logger)
+			handleError(w, err, fmt.Sprintf("describe %s error", xsql.StreamTypeMap[st]), logger)
 			return
 			return
 		}
 		}
 		jsonResponse(content, w, logger)
 		jsonResponse(content, w, logger)
 	case http.MethodDelete:
 	case http.MethodDelete:
-		content, err := streamProcessor.DropStream(name)
+		content, err := streamProcessor.DropStream(name, st)
 		if err != nil {
 		if err != nil {
-			handleError(w, err, "delete stream error", logger)
+			handleError(w, err, fmt.Sprintf("delete %s error", xsql.StreamTypeMap[st]), logger)
 			return
 			return
 		}
 		}
 		w.WriteHeader(http.StatusOK)
 		w.WriteHeader(http.StatusOK)
@@ -205,9 +206,9 @@ func streamHandler(w http.ResponseWriter, r *http.Request) {
 			handleError(w, err, "Invalid body", logger)
 			handleError(w, err, "Invalid body", logger)
 			return
 			return
 		}
 		}
-		content, err := streamProcessor.ExecReplaceStream(v.Sql)
+		content, err := streamProcessor.ExecReplaceStream(v.Sql, st)
 		if err != nil {
 		if err != nil {
-			handleError(w, err, "Stream command error", logger)
+			handleError(w, err, fmt.Sprintf("%s command error", strings.Title(xsql.StreamTypeMap[st])), logger)
 			return
 			return
 		}
 		}
 		w.WriteHeader(http.StatusOK)
 		w.WriteHeader(http.StatusOK)
@@ -215,6 +216,25 @@ func streamHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	}
 }
 }
 
 
+//list or create streams
+func streamsHandler(w http.ResponseWriter, r *http.Request) {
+	sourcesManageHandler(w, r, xsql.TypeStream)
+}
+
+//describe or delete a stream
+func streamHandler(w http.ResponseWriter, r *http.Request) {
+	sourceManageHandler(w, r, xsql.TypeStream)
+}
+
+//list or create tables
+func tablesHandler(w http.ResponseWriter, r *http.Request) {
+	sourcesManageHandler(w, r, xsql.TypeTable)
+}
+
+func tableHandler(w http.ResponseWriter, r *http.Request) {
+	sourceManageHandler(w, r, xsql.TypeTable)
+}
+
 //list or create rules
 //list or create rules
 func rulesHandler(w http.ResponseWriter, r *http.Request) {
 func rulesHandler(w http.ResponseWriter, r *http.Request) {
 	defer r.Body.Close()
 	defer r.Body.Close()

+ 3 - 3
xstream/streams.go

@@ -18,7 +18,7 @@ type PrintableTopo struct {
 }
 }
 
 
 type TopologyNew struct {
 type TopologyNew struct {
-	sources            []*nodes.SourceNode
+	sources            []nodes.DataSourceNode
 	sinks              []*nodes.SinkNode
 	sinks              []*nodes.SinkNode
 	ctx                api.StreamContext
 	ctx                api.StreamContext
 	cancel             context.CancelFunc
 	cancel             context.CancelFunc
@@ -58,7 +58,7 @@ func (s *TopologyNew) Cancel() {
 	s.coordinator = nil
 	s.coordinator = nil
 }
 }
 
 
-func (s *TopologyNew) AddSrc(src *nodes.SourceNode) *TopologyNew {
+func (s *TopologyNew) AddSrc(src nodes.DataSourceNode) *TopologyNew {
 	s.sources = append(s.sources, src)
 	s.sources = append(s.sources, src)
 	s.topo.Sources = append(s.topo.Sources, fmt.Sprintf("source_%s", src.GetName()))
 	s.topo.Sources = append(s.topo.Sources, fmt.Sprintf("source_%s", src.GetName()))
 	return s
 	return s
@@ -86,7 +86,7 @@ func (s *TopologyNew) AddOperator(inputs []api.Emitter, operator nodes.OperatorN
 
 
 func (s *TopologyNew) addEdge(from api.TopNode, to api.TopNode, toType string) {
 func (s *TopologyNew) addEdge(from api.TopNode, to api.TopNode, toType string) {
 	fromType := "op"
 	fromType := "op"
-	if _, ok := from.(*nodes.SourceNode); ok {
+	if _, ok := from.(nodes.DataSourceNode); ok {
 		fromType = "source"
 		fromType = "source"
 	}
 	}
 	f := fmt.Sprintf("%s_%s", fromType, from.GetName())
 	f := fmt.Sprintf("%s_%s", fromType, from.GetName())