// xsql_processor.go — stream and rule processors (listing header and extraction line-number residue removed)
  1. package processors
  2. import (
  3. "bytes"
  4. "encoding/json"
  5. "fmt"
  6. "github.com/emqx/kuiper/common"
  7. "github.com/emqx/kuiper/xsql"
  8. "github.com/emqx/kuiper/xsql/plans"
  9. "github.com/emqx/kuiper/xstream"
  10. "github.com/emqx/kuiper/xstream/api"
  11. "github.com/emqx/kuiper/xstream/nodes"
  12. "path"
  13. "strings"
  14. )
// log is the package-wide logger shared by all processors in this file.
var log = common.Log

// StreamProcessor executes stream DDL statements (create/show/describe/
// explain/drop) against a key-value store of stream definitions.
type StreamProcessor struct {
	db common.KeyValue // maps stream name -> original CREATE STREAM statement text
}
  19. //@params d : the directory of the DB to save the stream info
  20. func NewStreamProcessor(d string) *StreamProcessor {
  21. processor := &StreamProcessor{
  22. db: common.GetSimpleKVStore(d),
  23. }
  24. return processor
  25. }
  26. func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error) {
  27. parser := xsql.NewParser(strings.NewReader(statement))
  28. stmt, err := xsql.Language.Parse(parser)
  29. if err != nil {
  30. return nil, err
  31. }
  32. switch s := stmt.(type) {
  33. case *xsql.StreamStmt:
  34. var r string
  35. r, err = p.execCreateStream(s, statement)
  36. result = append(result, r)
  37. case *xsql.ShowStreamsStatement:
  38. result, err = p.execShowStream(s)
  39. case *xsql.DescribeStreamStatement:
  40. var r string
  41. r, err = p.execDescribeStream(s)
  42. result = append(result, r)
  43. case *xsql.ExplainStreamStatement:
  44. var r string
  45. r, err = p.execExplainStream(s)
  46. result = append(result, r)
  47. case *xsql.DropStreamStatement:
  48. var r string
  49. r, err = p.execDropStream(s)
  50. result = append(result, r)
  51. default:
  52. return nil, fmt.Errorf("Invalid stream statement: %s", statement)
  53. }
  54. return
  55. }
  56. func (p *StreamProcessor) execCreateStream(stmt *xsql.StreamStmt, statement string) (string, error) {
  57. err := p.db.Open()
  58. if err != nil {
  59. return "", fmt.Errorf("Create stream fails, error when opening db: %v.", err)
  60. }
  61. defer p.db.Close()
  62. err = p.db.Set(string(stmt.Name), statement)
  63. if err != nil {
  64. return "", fmt.Errorf("Create stream fails: %v.", err)
  65. } else {
  66. info := fmt.Sprintf("Stream %s is created.", stmt.Name)
  67. log.Printf("%s", info)
  68. return info, nil
  69. }
  70. }
  71. func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
  72. r, err := p.ExecStmt(statement)
  73. if err != nil {
  74. return "", err
  75. } else {
  76. return strings.Join(r, "\n"), err
  77. }
  78. }
  79. func (p *StreamProcessor) execShowStream(stmt *xsql.ShowStreamsStatement) ([]string, error) {
  80. keys, err := p.ShowStream()
  81. if len(keys) == 0 {
  82. keys = append(keys, "No stream definitions are found.")
  83. }
  84. return keys, err
  85. }
  86. func (p *StreamProcessor) ShowStream() ([]string, error) {
  87. err := p.db.Open()
  88. if err != nil {
  89. return nil, fmt.Errorf("Show stream fails, error when opening db: %v.", err)
  90. }
  91. defer p.db.Close()
  92. return p.db.Keys()
  93. }
  94. func (p *StreamProcessor) execDescribeStream(stmt *xsql.DescribeStreamStatement) (string, error) {
  95. streamStmt, err := p.DescStream(stmt.Name)
  96. if err != nil {
  97. return "", err
  98. }
  99. var buff bytes.Buffer
  100. buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
  101. for _, f := range streamStmt.StreamFields {
  102. buff.WriteString(f.Name + "\t")
  103. buff.WriteString(xsql.PrintFieldType(f.FieldType))
  104. buff.WriteString("\n")
  105. }
  106. buff.WriteString("\n")
  107. common.PrintMap(streamStmt.Options, &buff)
  108. return buff.String(), err
  109. }
  110. func (p *StreamProcessor) DescStream(name string) (*xsql.StreamStmt, error) {
  111. err := p.db.Open()
  112. if err != nil {
  113. return nil, fmt.Errorf("Describe stream fails, error when opening db: %v.", err)
  114. }
  115. defer p.db.Close()
  116. s, f := p.db.Get(name)
  117. if !f {
  118. return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Stream %s is not found.", name))
  119. }
  120. s1 := s.(string)
  121. parser := xsql.NewParser(strings.NewReader(s1))
  122. stream, err := xsql.Language.Parse(parser)
  123. if err != nil {
  124. return nil, err
  125. }
  126. streamStmt, ok := stream.(*xsql.StreamStmt)
  127. if !ok {
  128. return nil, fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
  129. }
  130. return streamStmt, nil
  131. }
  132. func (p *StreamProcessor) execExplainStream(stmt *xsql.ExplainStreamStatement) (string, error) {
  133. err := p.db.Open()
  134. if err != nil {
  135. return "", fmt.Errorf("Explain stream fails, error when opening db: %v.", err)
  136. }
  137. defer p.db.Close()
  138. _, f := p.db.Get(stmt.Name)
  139. if !f {
  140. return "", fmt.Errorf("Stream %s is not found.", stmt.Name)
  141. }
  142. return "TO BE SUPPORTED", nil
  143. }
  144. func (p *StreamProcessor) execDropStream(stmt *xsql.DropStreamStatement) (string, error) {
  145. s, err := p.DropStream(stmt.Name)
  146. if err != nil {
  147. return s, fmt.Errorf("Drop stream fails: %s.", err)
  148. }
  149. return s, nil
  150. }
  151. func (p *StreamProcessor) DropStream(name string) (string, error) {
  152. err := p.db.Open()
  153. if err != nil {
  154. return "", fmt.Errorf("error when opening db: %v", err)
  155. }
  156. defer p.db.Close()
  157. err = p.db.Delete(name)
  158. if err != nil {
  159. return "", err
  160. } else {
  161. return fmt.Sprintf("Stream %s is dropped.", name), nil
  162. }
  163. }
  164. func GetStream(m *common.SimpleKVStore, name string) (stmt *xsql.StreamStmt, err error) {
  165. s, f := m.Get(name)
  166. if !f {
  167. return nil, fmt.Errorf("Cannot find key %s. ", name)
  168. }
  169. s1, _ := s.(string)
  170. parser := xsql.NewParser(strings.NewReader(s1))
  171. stream, err := xsql.Language.Parse(parser)
  172. stmt, ok := stream.(*xsql.StreamStmt)
  173. if !ok {
  174. err = fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
  175. }
  176. return
  177. }
// RuleProcessor maintains rule definitions in a key-value store and builds
// executable stream topologies from them.
type RuleProcessor struct {
	db        common.KeyValue // rule id -> rule JSON text, stored under <rootDbDir>/rule
	rootDbDir string          // root db directory; the stream store lives under <rootDbDir>/stream
}
  182. func NewRuleProcessor(d string) *RuleProcessor {
  183. processor := &RuleProcessor{
  184. db: common.GetSimpleKVStore(path.Join(d, "rule")),
  185. rootDbDir: d,
  186. }
  187. return processor
  188. }
  189. func (p *RuleProcessor) ExecCreate(name, ruleJson string) (*api.Rule, error) {
  190. rule, err := p.getRuleByJson(name, ruleJson)
  191. if err != nil {
  192. return nil, err
  193. }
  194. err = p.db.Open()
  195. if err != nil {
  196. return nil, err
  197. }
  198. defer p.db.Close()
  199. err = p.db.Set(rule.Id, ruleJson)
  200. if err != nil {
  201. return nil, err
  202. } else {
  203. log.Infof("Rule %s is created.", rule.Id)
  204. }
  205. return rule, nil
  206. }
  207. func (p *RuleProcessor) ExecReplaceRuleState(name string, triggered bool) (err error) {
  208. rule, err := p.GetRuleByName(name)
  209. if err != nil {
  210. return err
  211. }
  212. rule.Triggered = triggered
  213. ruleJson, err := json.Marshal(rule)
  214. if err != nil {
  215. return fmt.Errorf("Marshal rule %s error : %s.", name, err)
  216. }
  217. err = p.db.Open()
  218. if err != nil {
  219. return err
  220. }
  221. defer p.db.Close()
  222. err = p.db.Replace(name, string(ruleJson))
  223. if err != nil {
  224. return err
  225. } else {
  226. log.Infof("Rule %s is replaced.", name)
  227. }
  228. return err
  229. }
  230. func (p *RuleProcessor) GetRuleByName(name string) (*api.Rule, error) {
  231. err := p.db.Open()
  232. if err != nil {
  233. return nil, err
  234. }
  235. defer p.db.Close()
  236. s, f := p.db.Get(name)
  237. if !f {
  238. return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Rule %s is not found.", name))
  239. }
  240. s1, _ := s.(string)
  241. return p.getRuleByJson(name, s1)
  242. }
  243. func (p *RuleProcessor) getRuleByJson(name, ruleJson string) (*api.Rule, error) {
  244. //set default rule options
  245. rule := &api.Rule{
  246. Options: &api.RuleOption{
  247. LateTol: 1000,
  248. Concurrency: 1,
  249. BufferLength: 1024,
  250. CheckpointInterval: 300000, //5 minutes
  251. },
  252. }
  253. if err := json.Unmarshal([]byte(ruleJson), &rule); err != nil {
  254. return nil, fmt.Errorf("Parse rule %s error : %s.", ruleJson, err)
  255. }
  256. //validation
  257. if rule.Id == "" && name == "" {
  258. return nil, fmt.Errorf("Missing rule id.")
  259. }
  260. if name != "" && rule.Id != "" && name != rule.Id {
  261. return nil, fmt.Errorf("Name is not consistent with rule id.")
  262. }
  263. if rule.Id == "" {
  264. rule.Id = name
  265. }
  266. if rule.Sql == "" {
  267. return nil, fmt.Errorf("Missing rule SQL.")
  268. }
  269. if rule.Actions == nil || len(rule.Actions) == 0 {
  270. return nil, fmt.Errorf("Missing rule actions.")
  271. }
  272. if rule.Options == nil {
  273. rule.Options = &api.RuleOption{}
  274. }
  275. //Set default options
  276. if rule.Options.CheckpointInterval < 0 {
  277. return nil, fmt.Errorf("rule option checkpointInterval %d is invalid, require a positive integer", rule.Options.CheckpointInterval)
  278. }
  279. if rule.Options.Concurrency < 0 {
  280. return nil, fmt.Errorf("rule option concurrency %d is invalid, require a positive integer", rule.Options.Concurrency)
  281. }
  282. if rule.Options.BufferLength < 0 {
  283. return nil, fmt.Errorf("rule option bufferLength %d is invalid, require a positive integer", rule.Options.BufferLength)
  284. }
  285. if rule.Options.LateTol < 0 {
  286. return nil, fmt.Errorf("rule option lateTolerance %d is invalid, require a positive integer", rule.Options.LateTol)
  287. }
  288. return rule, nil
  289. }
  290. func (p *RuleProcessor) ExecInitRule(rule *api.Rule) (*xstream.TopologyNew, error) {
  291. if tp, inputs, err := p.createTopo(rule); err != nil {
  292. return nil, err
  293. } else {
  294. for i, m := range rule.Actions {
  295. for name, action := range m {
  296. props, ok := action.(map[string]interface{})
  297. if !ok {
  298. return nil, fmt.Errorf("expect map[string]interface{} type for the action properties, but found %v", action)
  299. }
  300. tp.AddSink(inputs, nodes.NewSinkNode(fmt.Sprintf("%s_%d", name, i), name, props))
  301. }
  302. }
  303. return tp, nil
  304. }
  305. }
  306. func (p *RuleProcessor) ExecQuery(ruleid, sql string) (*xstream.TopologyNew, error) {
  307. if tp, inputs, err := p.createTopo(&api.Rule{Id: ruleid, Sql: sql}); err != nil {
  308. return nil, err
  309. } else {
  310. tp.AddSink(inputs, nodes.NewSinkNode("sink_memory_log", "logToMemory", nil))
  311. go func() {
  312. select {
  313. case err := <-tp.Open():
  314. log.Infof("closing query for error: %v", err)
  315. tp.GetContext().SetError(err)
  316. tp.Cancel()
  317. }
  318. }()
  319. return tp, nil
  320. }
  321. }
  322. func (p *RuleProcessor) ExecDesc(name string) (string, error) {
  323. err := p.db.Open()
  324. if err != nil {
  325. return "", err
  326. }
  327. defer p.db.Close()
  328. s, f := p.db.Get(name)
  329. if !f {
  330. return "", fmt.Errorf("Rule %s is not found.", name)
  331. }
  332. s1, _ := s.(string)
  333. dst := &bytes.Buffer{}
  334. if err := json.Indent(dst, []byte(s1), "", " "); err != nil {
  335. return "", err
  336. }
  337. return fmt.Sprintln(dst.String()), nil
  338. }
  339. func (p *RuleProcessor) GetAllRules() ([]string, error) {
  340. err := p.db.Open()
  341. if err != nil {
  342. return nil, err
  343. }
  344. defer p.db.Close()
  345. return p.db.Keys()
  346. }
  347. func (p *RuleProcessor) ExecDrop(name string) (string, error) {
  348. err := p.db.Open()
  349. if err != nil {
  350. return "", err
  351. }
  352. defer p.db.Close()
  353. err = p.db.Delete(name)
  354. if err != nil {
  355. return "", err
  356. } else {
  357. return fmt.Sprintf("Rule %s is dropped.", name), nil
  358. }
  359. }
// createTopo builds the rule's topology with real sources created from the
// stored stream definitions (sources == nil in createTopoWithSources).
func (p *RuleProcessor) createTopo(rule *api.Rule) (*xstream.TopologyNew, []api.Emitter, error) {
	return p.createTopoWithSources(rule, nil)
}
//For test to mock source
// createTopoWithSources parses rule.Sql, requires it to be a SELECT statement,
// and assembles the processing topology:
//
//	sources -> preprocessor(s) -> [window] -> [join] -> [filter]
//	        -> [aggregate] -> [having] -> [order] -> [project]
//
// When sources is nil, source nodes are created from the stored stream
// definitions; otherwise the supplied sources are used one-per-stream (test
// hook). It returns the topology plus the emitters a sink should attach to.
func (p *RuleProcessor) createTopoWithSources(rule *api.Rule, sources []*nodes.SourceNode) (*xstream.TopologyNew, []api.Emitter, error) {
	name := rule.Id
	sql := rule.Sql
	log.Infof("Init rule with options %+v", rule.Options)
	shouldCreateSource := sources == nil
	parser := xsql.NewParser(strings.NewReader(sql))
	if stmt, err := xsql.Language.Parse(parser); err != nil {
		return nil, nil, fmt.Errorf("Parse SQL %s error: %s.", sql, err)
	} else {
		if selectStmt, ok := stmt.(*xsql.SelectStatement); !ok {
			return nil, nil, fmt.Errorf("SQL %s is not a select statement.", sql)
		} else {
			tp, err := xstream.NewWithNameAndQos(name, rule.Options.Qos, rule.Options.CheckpointInterval)
			if err != nil {
				return nil, nil, err
			}
			var inputs []api.Emitter
			streamsFromStmt := xsql.GetStreams(selectStmt)
			dimensions := selectStmt.Dimensions
			// With mocked sources, the caller must supply exactly one source
			// per stream referenced in the statement.
			if !shouldCreateSource && len(streamsFromStmt) != len(sources) {
				return nil, nil, fmt.Errorf("Invalid parameter sources or streams, the length cannot match the statement, expect %d sources.", len(streamsFromStmt))
			}
			// sendMetaToSink is rejected for windowed queries (multiple
			// streams or GROUP BY dimensions).
			if rule.Options.SendMetaToSink && (len(streamsFromStmt) > 1 || dimensions != nil) {
				return nil, nil, fmt.Errorf("Invalid option sendMetaToSink, it can not be applied to window")
			}
			// Stream definitions are kept in a sibling store under <root>/stream.
			store := common.GetSimpleKVStore(path.Join(p.rootDbDir, "stream"))
			err = store.Open()
			if err != nil {
				return nil, nil, err
			}
			defer store.Close()
			// Split aliased fields: non-aggregate aliases are resolved in the
			// preprocessor, aggregate aliases later in the aggregate operator.
			var alias, aggregateAlias xsql.Fields
			for _, f := range selectStmt.Fields {
				if f.AName != "" {
					if !xsql.HasAggFuncs(f.Expr) {
						alias = append(alias, f)
					} else {
						aggregateAlias = append(aggregateAlias, f)
					}
				}
			}
			// One source + preprocessor chain per referenced stream.
			for i, s := range streamsFromStmt {
				streamStmt, err := GetStream(store, s)
				if err != nil {
					return nil, nil, fmt.Errorf("fail to get stream %s, please check if stream is created", s)
				}
				pp, err := plans.NewPreprocessor(streamStmt, alias, rule.Options.IsEventTime)
				if err != nil {
					return nil, nil, err
				}
				if shouldCreateSource {
					// Real source built from the stream's stored options.
					node := nodes.NewSourceNode(s, streamStmt.Options)
					tp.AddSrc(node)
					preprocessorOp := xstream.Transform(pp, "preprocessor_"+s, rule.Options.BufferLength)
					preprocessorOp.SetConcurrency(rule.Options.Concurrency)
					tp.AddOperator([]api.Emitter{node}, preprocessorOp)
					inputs = append(inputs, preprocessorOp)
				} else {
					// Mocked source supplied by the caller (tests).
					tp.AddSrc(sources[i])
					preprocessorOp := xstream.Transform(pp, "preprocessor_"+s, rule.Options.BufferLength)
					preprocessorOp.SetConcurrency(rule.Options.Concurrency)
					tp.AddOperator([]api.Emitter{sources[i]}, preprocessorOp)
					inputs = append(inputs, preprocessorOp)
				}
			}
			// Optional window operator: merges all stream inputs into one.
			var w *xsql.Window
			if dimensions != nil {
				w = dimensions.GetWindow()
				if w != nil {
					wop, err := nodes.NewWindowOp("window", w, rule.Options.IsEventTime, rule.Options.LateTol, streamsFromStmt, rule.Options.BufferLength)
					if err != nil {
						return nil, nil, err
					}
					tp.AddOperator(inputs, wop)
					inputs = []api.Emitter{wop}
				}
			}
			// Joins are only planned when a window exists to pair tuples on.
			if w != nil && selectStmt.Joins != nil {
				joinOp := xstream.Transform(&plans.JoinPlan{Joins: selectStmt.Joins, From: selectStmt.Sources[0].(*xsql.Table)}, "join", rule.Options.BufferLength)
				joinOp.SetConcurrency(rule.Options.Concurrency)
				tp.AddOperator(inputs, joinOp)
				inputs = []api.Emitter{joinOp}
			}
			// WHERE clause.
			if selectStmt.Condition != nil {
				filterOp := xstream.Transform(&plans.FilterPlan{Condition: selectStmt.Condition}, "filter", rule.Options.BufferLength)
				filterOp.SetConcurrency(rule.Options.Concurrency)
				tp.AddOperator(inputs, filterOp)
				inputs = []api.Emitter{filterOp}
			}
			// GROUP BY and/or aggregate aliases require an aggregate operator.
			var ds xsql.Dimensions
			if dimensions != nil || len(aggregateAlias) > 0 {
				ds = dimensions.GetGroups()
				if (ds != nil && len(ds) > 0) || len(aggregateAlias) > 0 {
					aggregateOp := xstream.Transform(&plans.AggregatePlan{Dimensions: ds, Alias: aggregateAlias}, "aggregate", rule.Options.BufferLength)
					aggregateOp.SetConcurrency(rule.Options.Concurrency)
					tp.AddOperator(inputs, aggregateOp)
					inputs = []api.Emitter{aggregateOp}
				}
			}
			// HAVING clause.
			if selectStmt.Having != nil {
				havingOp := xstream.Transform(&plans.HavingPlan{selectStmt.Having}, "having", rule.Options.BufferLength)
				havingOp.SetConcurrency(rule.Options.Concurrency)
				tp.AddOperator(inputs, havingOp)
				inputs = []api.Emitter{havingOp}
			}
			// ORDER BY clause.
			if selectStmt.SortFields != nil {
				orderOp := xstream.Transform(&plans.OrderPlan{SortFields: selectStmt.SortFields}, "order", rule.Options.BufferLength)
				orderOp.SetConcurrency(rule.Options.Concurrency)
				tp.AddOperator(inputs, orderOp)
				inputs = []api.Emitter{orderOp}
			}
			// Projection of the SELECT field list.
			if selectStmt.Fields != nil {
				projectOp := xstream.Transform(&plans.ProjectPlan{Fields: selectStmt.Fields, IsAggregate: xsql.IsAggStatement(selectStmt), SendMeta: rule.Options.SendMetaToSink}, "project", rule.Options.BufferLength)
				projectOp.SetConcurrency(rule.Options.Concurrency)
				tp.AddOperator(inputs, projectOp)
				inputs = []api.Emitter{projectOp}
			}
			return tp, inputs, nil
		}
	}
}