xsql_processor.go

package processors

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/emqx/kuiper/common"
	"github.com/emqx/kuiper/xsql"
	"github.com/emqx/kuiper/xsql/plans"
	"github.com/emqx/kuiper/xstream"
	"github.com/emqx/kuiper/xstream/api"
	"github.com/emqx/kuiper/xstream/nodes"
	"os"
	"path"
	"strings"
)

var log = common.Log

type StreamProcessor struct {
	db common.KeyValue
}

//@params d : the directory of the DB to save the stream info
func NewStreamProcessor(d string) *StreamProcessor {
	processor := &StreamProcessor{
		db: common.GetSimpleKVStore(d),
	}
	return processor
}

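// ExecStmt parses a single stream SQL statement (CREATE/SHOW/DESCRIBE/EXPLAIN/DROP STREAM)
// and executes it, returning the textual result lines.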
func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	switch s := stmt.(type) {
	case *xsql.StreamStmt:
		var r string
		r, err = p.execCreateStream(s, statement)
		result = append(result, r)
	case *xsql.ShowStreamsStatement:
		result, err = p.execShowStream(s)
	case *xsql.DescribeStreamStatement:
		var r string
		r, err = p.execDescribeStream(s)
		result = append(result, r)
	case *xsql.ExplainStreamStatement:
		var r string
		r, err = p.execExplainStream(s)
		result = append(result, r)
	case *xsql.DropStreamStatement:
		var r string
		r, err = p.execDropStream(s)
		result = append(result, r)
	default:
		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
	}
	return
}

func (p *StreamProcessor) execCreateStream(stmt *xsql.StreamStmt, statement string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", fmt.Errorf("Create stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	err = p.db.Set(string(stmt.Name), statement)
	if err != nil {
		return "", fmt.Errorf("Create stream fails: %v.", err)
	} else {
		info := fmt.Sprintf("Stream %s is created.", stmt.Name)
		log.Printf("%s", info)
		return info, nil
	}
}

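// ExecReplaceStream parses a stream definition statement and overwrites any existing definition with the same name.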
func (p *StreamProcessor) ExecReplaceStream(statement string) (string, error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return "", err
	}
	switch s := stmt.(type) {
	case *xsql.StreamStmt:
		if err = p.db.Open(); nil != err {
			return "", fmt.Errorf("Replace stream fails, error when opening db: %v.", err)
		}
		defer p.db.Close()
		if err = p.db.Replace(string(s.Name), statement); nil != err {
			return "", fmt.Errorf("Replace stream fails: %v.", err)
		} else {
			info := fmt.Sprintf("Stream %s is replaced.", s.Name)
			log.Printf("%s", info)
			return info, nil
		}
	default:
		return "", fmt.Errorf("Invalid stream statement: %s", statement)
	}
	return "", nil
}

func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
	r, err := p.ExecStmt(statement)
	if err != nil {
		return "", err
	} else {
		return strings.Join(r, "\n"), err
	}
}

func (p *StreamProcessor) execShowStream(_ *xsql.ShowStreamsStatement) ([]string, error) {
	keys, err := p.ShowStream()
	if len(keys) == 0 {
		keys = append(keys, "No stream definitions are found.")
	}
	return keys, err
}

func (p *StreamProcessor) ShowStream() ([]string, error) {
	err := p.db.Open()
	if err != nil {
		return nil, fmt.Errorf("Show stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	return p.db.Keys()
}

func (p *StreamProcessor) execDescribeStream(stmt *xsql.DescribeStreamStatement) (string, error) {
	streamStmt, err := p.DescStream(stmt.Name)
	if err != nil {
		return "", err
	}
	var buff bytes.Buffer
	buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
	for _, f := range streamStmt.StreamFields {
		buff.WriteString(f.Name + "\t")
		buff.WriteString(xsql.PrintFieldType(f.FieldType))
		buff.WriteString("\n")
	}
	buff.WriteString("\n")
	common.PrintMap(streamStmt.Options, &buff)
	return buff.String(), err
}

func (p *StreamProcessor) DescStream(name string) (*xsql.StreamStmt, error) {
	err := p.db.Open()
	if err != nil {
		return nil, fmt.Errorf("Describe stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	s, f := p.db.Get(name)
	if !f {
		return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Stream %s is not found.", name))
	}
	s1 := s.(string)
	parser := xsql.NewParser(strings.NewReader(s1))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	streamStmt, ok := stream.(*xsql.StreamStmt)
	if !ok {
		return nil, fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
	}
	return streamStmt, nil
}

func (p *StreamProcessor) execExplainStream(stmt *xsql.ExplainStreamStatement) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", fmt.Errorf("Explain stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	_, f := p.db.Get(stmt.Name)
	if !f {
		return "", fmt.Errorf("Stream %s is not found.", stmt.Name)
	}
	return "TO BE SUPPORTED", nil
}

func (p *StreamProcessor) execDropStream(stmt *xsql.DropStreamStatement) (string, error) {
	s, err := p.DropStream(stmt.Name)
	if err != nil {
		return s, fmt.Errorf("Drop stream fails: %s.", err)
	}
	return s, nil
}

func (p *StreamProcessor) DropStream(name string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", fmt.Errorf("error when opening db: %v", err)
	}
	defer p.db.Close()
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	} else {
		return fmt.Sprintf("Stream %s is dropped.", name), nil
	}
}

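// GetStream loads a stream definition from the given store and parses it back into a StreamStmt.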
func GetStream(m *common.SimpleKVStore, name string) (stmt *xsql.StreamStmt, err error) {
	s, f := m.Get(name)
	if !f {
		return nil, fmt.Errorf("Cannot find key %s. ", name)
	}
	s1, _ := s.(string)
	parser := xsql.NewParser(strings.NewReader(s1))
	stream, err := xsql.Language.Parse(parser)
	stmt, ok := stream.(*xsql.StreamStmt)
	if !ok {
		err = fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
	}
	return
}

type RuleProcessor struct {
	db        common.KeyValue
	rootDbDir string
}

func NewRuleProcessor(d string) *RuleProcessor {
	processor := &RuleProcessor{
		db:        common.GetSimpleKVStore(path.Join(d, "rule")),
		rootDbDir: d,
	}
	return processor
}

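// ExecCreate validates the rule JSON and persists it keyed by the rule id; name is used as the id when the JSON omits one.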
func (p *RuleProcessor) ExecCreate(name, ruleJson string) (*api.Rule, error) {
	rule, err := p.getRuleByJson(name, ruleJson)
	if err != nil {
		return nil, err
	}
	err = p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	err = p.db.Set(rule.Id, ruleJson)
	if err != nil {
		return nil, err
	} else {
		log.Infof("Rule %s is created.", rule.Id)
	}
	return rule, nil
}

func (p *RuleProcessor) ExecUpdate(name, ruleJson string) (*api.Rule, error) {
	rule, err := p.getRuleByJson(name, ruleJson)
	if err != nil {
		return nil, err
	}
	err = p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	err = p.db.Replace(rule.Id, ruleJson)
	if err != nil {
		return nil, err
	} else {
		log.Infof("Rule %s is updated.", rule.Id)
	}
	return rule, nil
}

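// ExecReplaceRuleState sets the Triggered flag of a stored rule and writes the updated JSON back to the store.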
func (p *RuleProcessor) ExecReplaceRuleState(name string, triggered bool) (err error) {
	rule, err := p.GetRuleByName(name)
	if err != nil {
		return err
	}
	rule.Triggered = triggered
	ruleJson, err := json.Marshal(rule)
	if err != nil {
		return fmt.Errorf("Marshal rule %s error : %s.", name, err)
	}
	err = p.db.Open()
	if err != nil {
		return err
	}
	defer p.db.Close()
	err = p.db.Replace(name, string(ruleJson))
	if err != nil {
		return err
	} else {
		log.Infof("Rule %s is replaced.", name)
	}
	return err
}

func (p *RuleProcessor) GetRuleByName(name string) (*api.Rule, error) {
	err := p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	s, f := p.db.Get(name)
	if !f {
		return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Rule %s is not found.", name))
	}
	s1, _ := s.(string)
	return p.getRuleByJson(name, s1)
}

func (p *RuleProcessor) getDefaultRule(name, sql string) *api.Rule {
	return &api.Rule{
		Id:  name,
		Sql: sql,
		Options: &api.RuleOption{
			IsEventTime:        false,
			LateTol:            1000,
			Concurrency:        1,
			BufferLength:       1024,
			SendMetaToSink:     false,
			Qos:                api.AtMostOnce,
			CheckpointInterval: 300000,
		},
	}
}

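// getStatementFromSql parses the rule SQL and ensures it is a SELECT statement.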
func getStatementFromSql(sql string) (*xsql.SelectStatement, error) {
	parser := xsql.NewParser(strings.NewReader(sql))
	if stmt, err := xsql.Language.Parse(parser); err != nil {
		return nil, fmt.Errorf("Parse SQL %s error: %s.", sql, err)
	} else {
		if r, ok := stmt.(*xsql.SelectStatement); !ok {
			return nil, fmt.Errorf("SQL %s is not a select statement.", sql)
		} else {
			return r, nil
		}
	}
}

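// getRuleByJson unmarshals the rule JSON on top of the configured default options,
// then validates the id, SQL, actions and option values.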
func (p *RuleProcessor) getRuleByJson(name, ruleJson string) (*api.Rule, error) {
	opt := common.Config.Rule
	//set default rule options
	rule := &api.Rule{
		Options: &opt,
	}
	if err := json.Unmarshal([]byte(ruleJson), &rule); err != nil {
		return nil, fmt.Errorf("Parse rule %s error : %s.", ruleJson, err)
	}
	//validation
	if rule.Id == "" && name == "" {
		return nil, fmt.Errorf("Missing rule id.")
	}
	if name != "" && rule.Id != "" && name != rule.Id {
		return nil, fmt.Errorf("Name is not consistent with rule id.")
	}
	if rule.Id == "" {
		rule.Id = name
	}
	if rule.Sql == "" {
		return nil, fmt.Errorf("Missing rule SQL.")
	}
	if _, err := getStatementFromSql(rule.Sql); err != nil {
		return nil, err
	}
	if rule.Actions == nil || len(rule.Actions) == 0 {
		return nil, fmt.Errorf("Missing rule actions.")
	}
	if rule.Options == nil {
		rule.Options = &api.RuleOption{}
	}
	//validate option values
	if rule.Options.CheckpointInterval < 0 {
		return nil, fmt.Errorf("rule option checkpointInterval %d is invalid, require a positive integer", rule.Options.CheckpointInterval)
	}
	if rule.Options.Concurrency < 0 {
		return nil, fmt.Errorf("rule option concurrency %d is invalid, require a positive integer", rule.Options.Concurrency)
	}
	if rule.Options.BufferLength < 0 {
		return nil, fmt.Errorf("rule option bufferLength %d is invalid, require a positive integer", rule.Options.BufferLength)
	}
	if rule.Options.LateTol < 0 {
		return nil, fmt.Errorf("rule option lateTolerance %d is invalid, require a positive integer", rule.Options.LateTol)
	}
	return rule, nil
}

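// ExecInitRule builds the topology for a rule and attaches one sink node per configured action.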
func (p *RuleProcessor) ExecInitRule(rule *api.Rule) (*xstream.TopologyNew, error) {
	if tp, inputs, err := p.createTopo(rule); err != nil {
		return nil, err
	} else {
		for i, m := range rule.Actions {
			for name, action := range m {
				props, ok := action.(map[string]interface{})
				if !ok {
					return nil, fmt.Errorf("expect map[string]interface{} type for the action properties, but found %v", action)
				}
				tp.AddSink(inputs, nodes.NewSinkNode(fmt.Sprintf("%s_%d", name, i), name, props))
			}
		}
		return tp, nil
	}
}

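// ExecQuery runs an ad-hoc query with default rule options and sends the results to the in-memory log sink.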
func (p *RuleProcessor) ExecQuery(ruleid, sql string) (*xstream.TopologyNew, error) {
	if tp, inputs, err := p.createTopo(p.getDefaultRule(ruleid, sql)); err != nil {
		return nil, err
	} else {
		tp.AddSink(inputs, nodes.NewSinkNode("sink_memory_log", "logToMemory", nil))
		go func() {
			select {
			case err := <-tp.Open():
				log.Infof("closing query for error: %v", err)
				tp.GetContext().SetError(err)
				tp.Cancel()
			}
		}()
		return tp, nil
	}
}

func (p *RuleProcessor) ExecDesc(name string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", err
	}
	defer p.db.Close()
	s, f := p.db.Get(name)
	if !f {
		return "", fmt.Errorf("Rule %s is not found.", name)
	}
	s1, _ := s.(string)
	dst := &bytes.Buffer{}
	if err := json.Indent(dst, []byte(s1), "", " "); err != nil {
		return "", err
	}
	return fmt.Sprintln(dst.String()), nil
}

func (p *RuleProcessor) GetAllRules() ([]string, error) {
	err := p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	return p.db.Keys()
}

func (p *RuleProcessor) ExecDrop(name string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", err
	}
	defer p.db.Close()
	result := fmt.Sprintf("Rule %s is dropped.", name)
	if ruleJson, ok := p.db.Get(name); ok {
		rule, err := p.getRuleByJson(name, ruleJson.(string))
		if err != nil {
			return "", err
		}
		if err := cleanSinkCache(rule); err != nil {
			result = fmt.Sprintf("%s. Clean sink cache failed: %s.", result, err)
		}
		if err := cleanCheckpoint(name); err != nil {
			result = fmt.Sprintf("%s. Clean checkpoint cache failed: %s.", result, err)
		}
	}
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	} else {
		return result, nil
	}
}

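// cleanCheckpoint removes the checkpoint directory of a rule under the data location.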
func cleanCheckpoint(name string) error {
	dbDir, _ := common.GetDataLoc()
	c := path.Join(dbDir, "checkpoints", name)
	return os.RemoveAll(c)
}

func cleanSinkCache(rule *api.Rule) error {
	dbDir, err := common.GetDataLoc()
	if err != nil {
		return err
	}
	store := common.GetSimpleKVStore(path.Join(dbDir, "sink"))
	err = store.Open()
	if err != nil {
		return err
	}
	defer store.Close()
	for d, m := range rule.Actions {
		con := 1
		for name, action := range m {
			props, _ := action.(map[string]interface{})
			if c, ok := props["concurrency"]; ok {
				if t, err := common.ToInt(c); err == nil && t > 0 {
					con = t
				}
			}
			for i := 0; i < con; i++ {
				key := fmt.Sprintf("%s%s_%d%d", rule.Id, name, d, i)
				common.Log.Debugf("delete cache key %s", key)
				store.Delete(key)
			}
		}
	}
	return nil
}

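// createTopo builds the rule topology, creating source nodes from the stored stream definitions.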
func (p *RuleProcessor) createTopo(rule *api.Rule) (*xstream.TopologyNew, []api.Emitter, error) {
	return p.createTopoWithSources(rule, nil)
}

// For tests to mock the sources. If sources is nil, source nodes are created from the stored stream definitions.
func (p *RuleProcessor) createTopoWithSources(rule *api.Rule, sources []*nodes.SourceNode) (*xstream.TopologyNew, []api.Emitter, error) {
	name := rule.Id
	sql := rule.Sql
	log.Infof("Init rule with options %+v", rule.Options)
	shouldCreateSource := sources == nil
	if selectStmt, err := getStatementFromSql(sql); err != nil {
		return nil, nil, err
	} else {
		tp, err := xstream.NewWithNameAndQos(name, rule.Options.Qos, rule.Options.CheckpointInterval)
		if err != nil {
			return nil, nil, err
		}
		var inputs []api.Emitter
		streamsFromStmt := xsql.GetStreams(selectStmt)
		dimensions := selectStmt.Dimensions
		if !shouldCreateSource && len(streamsFromStmt) != len(sources) {
			return nil, nil, fmt.Errorf("Invalid parameter sources or streams, the length cannot match the statement, expect %d sources.", len(streamsFromStmt))
		}
		if rule.Options.SendMetaToSink && (len(streamsFromStmt) > 1 || dimensions != nil) {
			return nil, nil, fmt.Errorf("Invalid option sendMetaToSink, it can not be applied to window")
		}
		store := common.GetSimpleKVStore(path.Join(p.rootDbDir, "stream"))
		err = store.Open()
		if err != nil {
			return nil, nil, err
		}
		defer store.Close()
		var alias, aggregateAlias xsql.Fields
		for _, f := range selectStmt.Fields {
			if f.AName != "" {
				if !xsql.HasAggFuncs(f.Expr) {
					alias = append(alias, f)
				} else {
					aggregateAlias = append(aggregateAlias, f)
				}
			}
		}
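		// For every stream referenced in the statement, add a source node (or the injected mock)
		// followed by a concurrent preprocessor operator.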
		for i, s := range streamsFromStmt {
			streamStmt, err := GetStream(store, s)
			if err != nil {
				return nil, nil, fmt.Errorf("fail to get stream %s, please check if stream is created", s)
			}
			pp, err := plans.NewPreprocessor(streamStmt, alias, rule.Options.IsEventTime)
			if err != nil {
				return nil, nil, err
			}
			var srcNode *nodes.SourceNode
			if shouldCreateSource {
				node := nodes.NewSourceNode(s, streamStmt.Options)
				srcNode = node
			} else {
				srcNode = sources[i]
			}
			tp.AddSrc(srcNode)
			preprocessorOp := xstream.Transform(pp, "preprocessor_"+s, rule.Options.BufferLength)
			preprocessorOp.SetConcurrency(rule.Options.Concurrency)
			tp.AddOperator([]api.Emitter{srcNode}, preprocessorOp)
			inputs = append(inputs, preprocessorOp)
		}
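		// Chain the remaining operators in query order: optional pre-window filter and window,
		// then join, filter, aggregate, having, order and project; each stage feeds the next via inputs.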
		var w *xsql.Window
		if dimensions != nil {
			w = dimensions.GetWindow()
			if w != nil {
				if w.Filter != nil {
					wfilterOp := xstream.Transform(&plans.FilterPlan{Condition: w.Filter}, "windowFilter", rule.Options.BufferLength)
					wfilterOp.SetConcurrency(rule.Options.Concurrency)
					tp.AddOperator(inputs, wfilterOp)
					inputs = []api.Emitter{wfilterOp}
				}
				wop, err := nodes.NewWindowOp("window", w, rule.Options.IsEventTime, rule.Options.LateTol, streamsFromStmt, rule.Options.BufferLength)
				if err != nil {
					return nil, nil, err
				}
				tp.AddOperator(inputs, wop)
				inputs = []api.Emitter{wop}
			}
		}
		if w != nil && selectStmt.Joins != nil {
			joinOp := xstream.Transform(&plans.JoinPlan{Joins: selectStmt.Joins, From: selectStmt.Sources[0].(*xsql.Table)}, "join", rule.Options.BufferLength)
			joinOp.SetConcurrency(rule.Options.Concurrency)
			tp.AddOperator(inputs, joinOp)
			inputs = []api.Emitter{joinOp}
		}
		if selectStmt.Condition != nil {
			filterOp := xstream.Transform(&plans.FilterPlan{Condition: selectStmt.Condition}, "filter", rule.Options.BufferLength)
			filterOp.SetConcurrency(rule.Options.Concurrency)
			tp.AddOperator(inputs, filterOp)
			inputs = []api.Emitter{filterOp}
		}
		var ds xsql.Dimensions
		if dimensions != nil || len(aggregateAlias) > 0 {
			ds = dimensions.GetGroups()
			if (ds != nil && len(ds) > 0) || len(aggregateAlias) > 0 {
				aggregateOp := xstream.Transform(&plans.AggregatePlan{Dimensions: ds, Alias: aggregateAlias}, "aggregate", rule.Options.BufferLength)
				aggregateOp.SetConcurrency(rule.Options.Concurrency)
				tp.AddOperator(inputs, aggregateOp)
				inputs = []api.Emitter{aggregateOp}
			}
		}
		if selectStmt.Having != nil {
			havingOp := xstream.Transform(&plans.HavingPlan{selectStmt.Having}, "having", rule.Options.BufferLength)
			havingOp.SetConcurrency(rule.Options.Concurrency)
			tp.AddOperator(inputs, havingOp)
			inputs = []api.Emitter{havingOp}
		}
		if selectStmt.SortFields != nil {
			orderOp := xstream.Transform(&plans.OrderPlan{SortFields: selectStmt.SortFields}, "order", rule.Options.BufferLength)
			orderOp.SetConcurrency(rule.Options.Concurrency)
			tp.AddOperator(inputs, orderOp)
			inputs = []api.Emitter{orderOp}
		}
		if selectStmt.Fields != nil {
			projectOp := xstream.Transform(&plans.ProjectPlan{Fields: selectStmt.Fields, IsAggregate: xsql.IsAggStatement(selectStmt), SendMeta: rule.Options.SendMetaToSink}, "project", rule.Options.BufferLength)
			projectOp.SetConcurrency(rule.Options.Concurrency)
			tp.AddOperator(inputs, projectOp)
			inputs = []api.Emitter{projectOp}
		}
		return tp, inputs, nil
	}
}