123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384 |
- // Copyright 2022 EMQ Technologies Co., Ltd.
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- package planner
- import (
- "errors"
- "fmt"
- "github.com/lf-edge/ekuiper/internal/conf"
- store2 "github.com/lf-edge/ekuiper/internal/pkg/store"
- "github.com/lf-edge/ekuiper/internal/topo"
- "github.com/lf-edge/ekuiper/internal/topo/node"
- "github.com/lf-edge/ekuiper/internal/topo/operator"
- "github.com/lf-edge/ekuiper/internal/xsql"
- "github.com/lf-edge/ekuiper/pkg/api"
- "github.com/lf-edge/ekuiper/pkg/ast"
- "github.com/lf-edge/ekuiper/pkg/kv"
- )
- func Plan(rule *api.Rule) (*topo.Topo, error) {
- if rule.Sql != "" {
- return PlanSQLWithSourcesAndSinks(rule, nil, nil)
- } else {
- return PlanByGraph(rule)
- }
- }
- // PlanSQLWithSourcesAndSinks For test only
- func PlanSQLWithSourcesAndSinks(rule *api.Rule, sources []*node.SourceNode, sinks []*node.SinkNode) (*topo.Topo, error) {
- sql := rule.Sql
- conf.Log.Infof("Init rule with options %+v", rule.Options)
- stmt, err := xsql.GetStatementFromSql(sql)
- if err != nil {
- return nil, err
- }
- // validation
- streamsFromStmt := xsql.GetStreams(stmt)
- //if len(sources) > 0 && len(sources) != len(streamsFromStmt) {
- // return nil, fmt.Errorf("Invalid parameter sources or streams, the length cannot match the statement, expect %d sources.", len(streamsFromStmt))
- //}
- if rule.Options.SendMetaToSink && (len(streamsFromStmt) > 1 || stmt.Dimensions != nil) {
- return nil, fmt.Errorf("Invalid option sendMetaToSink, it can not be applied to window")
- }
- err, store := store2.GetKV("stream")
- if err != nil {
- return nil, err
- }
- // Create logical plan and optimize. Logical plans are a linked list
- lp, err := createLogicalPlan(stmt, rule.Options, store)
- if err != nil {
- return nil, err
- }
- tp, err := createTopo(rule, lp, sources, sinks, streamsFromStmt)
- if err != nil {
- return nil, err
- }
- return tp, nil
- }
- func createTopo(rule *api.Rule, lp LogicalPlan, sources []*node.SourceNode, sinks []*node.SinkNode, streamsFromStmt []string) (*topo.Topo, error) {
- // Create topology
- tp, err := topo.NewWithNameAndQos(rule.Id, rule.Options.Qos, rule.Options.CheckpointInterval)
- if err != nil {
- return nil, err
- }
- input, _, err := buildOps(lp, tp, rule.Options, sources, streamsFromStmt, 0)
- if err != nil {
- return nil, err
- }
- inputs := []api.Emitter{input}
- // Add actions
- if len(sinks) > 0 { // For use of mock sink in testing
- for _, sink := range sinks {
- tp.AddSink(inputs, sink)
- }
- } else {
- for i, m := range rule.Actions {
- for name, action := range m {
- props, ok := action.(map[string]interface{})
- if !ok {
- return nil, fmt.Errorf("expect map[string]interface{} type for the action properties, but found %v", action)
- }
- tp.AddSink(inputs, node.NewSinkNode(fmt.Sprintf("%s_%d", name, i), name, props))
- }
- }
- }
- return tp, nil
- }
- func buildOps(lp LogicalPlan, tp *topo.Topo, options *api.RuleOption, sources []*node.SourceNode, streamsFromStmt []string, index int) (api.Emitter, int, error) {
- var inputs []api.Emitter
- newIndex := index
- for _, c := range lp.Children() {
- input, ni, err := buildOps(c, tp, options, sources, streamsFromStmt, newIndex)
- if err != nil {
- return nil, 0, err
- }
- newIndex = ni
- inputs = append(inputs, input)
- }
- newIndex++
- var (
- op api.Emitter
- err error
- )
- switch t := lp.(type) {
- case *DataSourcePlan:
- isSchemaless := t.streamStmt.StreamFields == nil
- switch t.streamStmt.StreamType {
- case ast.TypeStream:
- pp, err := operator.NewPreprocessor(isSchemaless, t.streamFields, t.allMeta, t.metaFields, t.iet, t.timestampField, t.timestampFormat, t.isBinary, t.streamStmt.Options.STRICT_VALIDATION)
- if err != nil {
- return nil, 0, err
- }
- var srcNode *node.SourceNode
- if len(sources) == 0 {
- sourceNode := node.NewSourceNode(string(t.name), t.streamStmt.StreamType, pp, t.streamStmt.Options, options.SendError)
- srcNode = sourceNode
- } else {
- srcNode = getMockSource(sources, string(t.name))
- if srcNode == nil {
- return nil, 0, fmt.Errorf("can't find predefined source %s", t.name)
- }
- }
- tp.AddSrc(srcNode)
- inputs = []api.Emitter{srcNode}
- op = srcNode
- case ast.TypeTable:
- pp, err := operator.NewTableProcessor(isSchemaless, string(t.name), t.streamFields, t.streamStmt.Options)
- if err != nil {
- return nil, 0, err
- }
- var srcNode *node.SourceNode
- if len(sources) > 0 {
- srcNode = getMockSource(sources, string(t.name))
- }
- if srcNode == nil {
- srcNode = node.NewSourceNode(string(t.name), t.streamStmt.StreamType, pp, t.streamStmt.Options, options.SendError)
- }
- tp.AddSrc(srcNode)
- inputs = []api.Emitter{srcNode}
- op = srcNode
- }
- case *AnalyticFuncsPlan:
- op = Transform(&operator.AnalyticFuncsOp{Funcs: t.funcs}, fmt.Sprintf("%d_analytic", newIndex), options)
- case *WindowPlan:
- if t.condition != nil {
- wfilterOp := Transform(&operator.FilterOp{Condition: t.condition}, fmt.Sprintf("%d_windowFilter", newIndex), options)
- wfilterOp.SetConcurrency(options.Concurrency)
- tp.AddOperator(inputs, wfilterOp)
- inputs = []api.Emitter{wfilterOp}
- }
- op, err = node.NewWindowOp(fmt.Sprintf("%d_window", newIndex), node.WindowConfig{
- Type: t.wtype,
- Length: t.length,
- Interval: t.interval,
- }, streamsFromStmt, options)
- if err != nil {
- return nil, 0, err
- }
- case *LookupPlan:
- op, err = node.NewLookupNode(t.joinExpr.Name, t.fields, t.keys, t.joinExpr.JoinType, t.valvars, t.options, options)
- case *JoinAlignPlan:
- op, err = node.NewJoinAlignNode(fmt.Sprintf("%d_join_aligner", newIndex), t.Emitters, options)
- case *JoinPlan:
- op = Transform(&operator.JoinOp{Joins: t.joins, From: t.from}, fmt.Sprintf("%d_join", newIndex), options)
- case *FilterPlan:
- op = Transform(&operator.FilterOp{Condition: t.condition}, fmt.Sprintf("%d_filter", newIndex), options)
- case *AggregatePlan:
- op = Transform(&operator.AggregateOp{Dimensions: t.dimensions}, fmt.Sprintf("%d_aggregate", newIndex), options)
- case *HavingPlan:
- op = Transform(&operator.HavingOp{Condition: t.condition}, fmt.Sprintf("%d_having", newIndex), options)
- case *OrderPlan:
- op = Transform(&operator.OrderOp{SortFields: t.SortFields}, fmt.Sprintf("%d_order", newIndex), options)
- case *ProjectPlan:
- op = Transform(&operator.ProjectOp{ColNames: t.colNames, AliasNames: t.aliasNames, AliasFields: t.aliasFields, ExprFields: t.exprFields, IsAggregate: t.isAggregate, AllWildcard: t.allWildcard, WildcardEmitters: t.wildcardEmitters, ExprNames: t.exprNames, SendMeta: t.sendMeta}, fmt.Sprintf("%d_project", newIndex), options)
- default:
- return nil, 0, fmt.Errorf("unknown logical plan %v", t)
- }
- if uop, ok := op.(*node.UnaryOperator); ok {
- uop.SetConcurrency(options.Concurrency)
- }
- if onode, ok := op.(node.OperatorNode); ok {
- tp.AddOperator(inputs, onode)
- }
- return op, newIndex, nil
- }
- func getMockSource(sources []*node.SourceNode, name string) *node.SourceNode {
- for _, source := range sources {
- if name == source.GetName() {
- return source
- }
- }
- return nil
- }
// createLogicalPlan builds the logical plan linked list for a decorated
// SELECT statement and hands it to the optimizer. The plan is assembled
// bottom-up in SQL evaluation order: sources -> analytic funcs -> window ->
// joins (lookup/align/join) -> filter -> aggregate -> having -> order ->
// project. The store is used by decorateStmt to resolve stream/table schemas.
func createLogicalPlan(stmt *ast.SelectStatement, opt *api.RuleOption, store kv.KeyValue) (LogicalPlan, error) {
	dimensions := stmt.Dimensions
	var (
		p        LogicalPlan
		children []LogicalPlan
		// If there are tables, the plan graph will be different for join/window
		lookupTableChildren map[string]*ast.Options
		scanTableChildren   []LogicalPlan
		scanTableEmitters   []string
		w                   *ast.Window
		ds                  ast.Dimensions
	)
	streamStmts, analyticFuncs, err := decorateStmt(stmt, store)
	if err != nil {
		return nil, err
	}
	for _, streamStmt := range streamStmts {
		if streamStmt.StreamType == ast.TypeTable && streamStmt.Options.KIND == ast.StreamKindLookup {
			// Lookup tables are not scanned as plan children; they are joined
			// on demand later via LookupPlan, so only record their options.
			if lookupTableChildren == nil {
				lookupTableChildren = make(map[string]*ast.Options)
			}
			lookupTableChildren[string(streamStmt.Name)] = streamStmt.Options
		} else {
			p = DataSourcePlan{
				name:       streamStmt.Name,
				streamStmt: streamStmt,
				iet:        opt.IsEventTime,
				allMeta:    opt.SendMetaToSink,
			}.Init()
			if streamStmt.StreamType == ast.TypeStream {
				children = append(children, p)
			} else {
				// Scan tables are kept separate: they join via JoinAlignPlan
				// rather than flowing through the window.
				scanTableChildren = append(scanTableChildren, p)
				scanTableEmitters = append(scanTableEmitters, string(streamStmt.Name))
			}
		}
	}
	if len(analyticFuncs) > 0 {
		// Analytic (stateful) functions are evaluated right after the sources,
		// before windowing, so their state advances per incoming tuple.
		p = AnalyticFuncsPlan{
			funcs: analyticFuncs,
		}.Init()
		p.SetChildren(children)
		children = []LogicalPlan{p}
	}
	if dimensions != nil {
		w = dimensions.GetWindow()
		if w != nil {
			if len(children) == 0 {
				return nil, errors.New("cannot run window for TABLE sources")
			}
			wp := WindowPlan{
				wtype:       w.WindowType,
				length:      w.Length.Val,
				isEventTime: opt.IsEventTime,
			}.Init()
			if w.Interval != nil {
				wp.interval = w.Interval.Val
			} else if w.WindowType == ast.COUNT_WINDOW {
				//if no interval value is set, and it's count window, then set interval to length value.
				wp.interval = w.Length.Val
			}
			if w.Filter != nil {
				// Window filter condition; materialized as a pre-window
				// FilterOp when physical ops are built.
				wp.condition = w.Filter
			}
			// TODO calculate limit
			// TODO incremental aggregate
			wp.SetChildren(children)
			children = []LogicalPlan{wp}
			p = wp
		}
	}
	if stmt.Joins != nil {
		// Joining unbounded streams needs either a window or a table side to
		// bound the join state.
		if len(lookupTableChildren) == 0 && len(scanTableChildren) == 0 && w == nil {
			return nil, errors.New("a time window or count window is required to join multiple streams")
		}
		if len(lookupTableChildren) > 0 {
			// Split joins: those against a lookup table become LookupPlans,
			// the rest are kept for the regular JoinPlan below.
			var joins []ast.Join
			for _, join := range stmt.Joins {
				if streamOpt, ok := lookupTableChildren[join.Name]; ok {
					lookupPlan := LookupPlan{
						joinExpr: join,
						options:  streamOpt,
					}
					if !lookupPlan.validateAndExtractCondition() {
						return nil, fmt.Errorf("join condition %s is invalid, at least one equi-join predicate is required", join.Expr)
					}
					p = lookupPlan.Init()
					p.SetChildren(children)
					children = []LogicalPlan{p}
					delete(lookupTableChildren, join.Name)
				} else {
					joins = append(joins, join)
				}
			}
			// Any lookup table not consumed by a join is a statement error.
			if len(lookupTableChildren) > 0 {
				return nil, fmt.Errorf("cannot find lookup table %v in any join", lookupTableChildren)
			}
			stmt.Joins = joins
		}
		// Not all joins are lookup joins, so we need to create a join plan for the remaining joins
		if len(stmt.Joins) > 0 {
			if len(scanTableChildren) > 0 {
				// Align scan-table tuples with the stream batches before joining.
				p = JoinAlignPlan{
					Emitters: scanTableEmitters,
				}.Init()
				p.SetChildren(append(children, scanTableChildren...))
				children = []LogicalPlan{p}
			}
			// TODO extract on filter
			p = JoinPlan{
				from:  stmt.Sources[0].(*ast.Table),
				joins: stmt.Joins,
			}.Init()
			p.SetChildren(children)
			children = []LogicalPlan{p}
		}
	}
	if stmt.Condition != nil {
		// WHERE clause.
		p = FilterPlan{
			condition: stmt.Condition,
		}.Init()
		p.SetChildren(children)
		children = []LogicalPlan{p}
	}
	// TODO handle aggregateAlias in optimization as it does not only happen in select fields
	if dimensions != nil {
		// GROUP BY (window specs were already extracted via GetWindow above).
		ds = dimensions.GetGroups()
		if ds != nil && len(ds) > 0 {
			p = AggregatePlan{
				dimensions: ds,
			}.Init()
			p.SetChildren(children)
			children = []LogicalPlan{p}
		}
	}
	if stmt.Having != nil {
		p = HavingPlan{
			condition: stmt.Having,
		}.Init()
		p.SetChildren(children)
		children = []LogicalPlan{p}
	}
	if stmt.SortFields != nil {
		p = OrderPlan{
			SortFields: stmt.SortFields,
		}.Init()
		p.SetChildren(children)
		children = []LogicalPlan{p}
	}
	if stmt.Fields != nil {
		// SELECT projection is the plan root before optimization.
		p = ProjectPlan{
			fields:      stmt.Fields,
			isAggregate: xsql.IsAggStatement(stmt),
			sendMeta:    opt.SendMetaToSink,
		}.Init()
		p.SetChildren(children)
	}
	return optimize(p)
}
- func Transform(op node.UnOperation, name string, options *api.RuleOption) *node.UnaryOperator {
- unaryOperator := node.New(name, options)
- unaryOperator.SetOperation(op)
- return unaryOperator
- }
|