xsql_processor.go

package processors

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/emqx/kuiper/common"
	"github.com/emqx/kuiper/xsql"
	"github.com/emqx/kuiper/xsql/plans"
	"github.com/emqx/kuiper/xstream"
	"github.com/emqx/kuiper/xstream/api"
	"github.com/emqx/kuiper/xstream/nodes"
	"github.com/emqx/kuiper/xstream/operators"
	"path"
	"strings"
)

var log = common.Log
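
// StreamProcessor parses stream DDL statements (CREATE/SHOW/DESCRIBE/EXPLAIN/DROP
// STREAM) and persists the stream definitions in a key-value store.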
type StreamProcessor struct {
	db common.KeyValue
}

//@params d : the directory of the DB to save the stream info
func NewStreamProcessor(d string) *StreamProcessor {
	processor := &StreamProcessor{
		db: common.GetSimpleKVStore(d),
	}
	return processor
}

func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	switch s := stmt.(type) {
	case *xsql.StreamStmt:
		var r string
		r, err = p.execCreateStream(s, statement)
		result = append(result, r)
	case *xsql.ShowStreamsStatement:
		result, err = p.execShowStream(s)
	case *xsql.DescribeStreamStatement:
		var r string
		r, err = p.execDescribeStream(s)
		result = append(result, r)
	case *xsql.ExplainStreamStatement:
		var r string
		r, err = p.execExplainStream(s)
		result = append(result, r)
	case *xsql.DropStreamStatement:
		var r string
		r, err = p.execDropStream(s)
		result = append(result, r)
	default:
		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
	}
	return
}

func (p *StreamProcessor) execCreateStream(stmt *xsql.StreamStmt, statement string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", fmt.Errorf("Create stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	err = p.db.Set(string(stmt.Name), statement)
	if err != nil {
		return "", fmt.Errorf("Create stream fails: %v.", err)
	} else {
		info := fmt.Sprintf("Stream %s is created.", stmt.Name)
		log.Printf("%s", info)
		return info, nil
	}
}
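
// ExecStreamSql executes a single stream statement and joins the results into one
// string. A usage sketch (illustrative, not part of the original file; the data
// directory and the stream definition are made up):
//
//	sp := NewStreamProcessor("data/streams")
//	out, err := sp.ExecStreamSql(`CREATE STREAM demo (temperature FLOAT, ts BIGINT) WITH (DATASOURCE="demo", FORMAT="JSON")`)
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(out) // "Stream demo is created."
//	list, _ := sp.ExecStreamSql("SHOW STREAMS")
//	fmt.Println(list)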
func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
	r, err := p.ExecStmt(statement)
	if err != nil {
		return "", err
	} else {
		return strings.Join(r, "\n"), err
	}
}

func (p *StreamProcessor) execShowStream(stmt *xsql.ShowStreamsStatement) ([]string, error) {
	keys, err := p.ShowStream()
	if len(keys) == 0 {
		keys = append(keys, "No stream definitions are found.")
	}
	return keys, err
}

func (p *StreamProcessor) ShowStream() ([]string, error) {
	err := p.db.Open()
	if err != nil {
		return nil, fmt.Errorf("Show stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	return p.db.Keys()
}

func (p *StreamProcessor) execDescribeStream(stmt *xsql.DescribeStreamStatement) (string, error) {
	streamStmt, err := p.DescStream(stmt.Name)
	if err != nil {
		return "", err
	}
	var buff bytes.Buffer
	buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
	for _, f := range streamStmt.StreamFields {
		buff.WriteString(f.Name + "\t")
		buff.WriteString(xsql.PrintFieldType(f.FieldType))
		buff.WriteString("\n")
	}
	buff.WriteString("\n")
	common.PrintMap(streamStmt.Options, &buff)
	return buff.String(), err
}

func (p *StreamProcessor) DescStream(name string) (*xsql.StreamStmt, error) {
	err := p.db.Open()
	if err != nil {
		return nil, fmt.Errorf("Describe stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	s, f := p.db.Get(name)
	if !f {
		return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Stream %s is not found.", name))
	}
	s1 := s.(string)
	parser := xsql.NewParser(strings.NewReader(s1))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	streamStmt, ok := stream.(*xsql.StreamStmt)
	if !ok {
		return nil, fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
	}
	return streamStmt, nil
}

func (p *StreamProcessor) execExplainStream(stmt *xsql.ExplainStreamStatement) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", fmt.Errorf("Explain stream fails, error when opening db: %v.", err)
	}
	defer p.db.Close()
	_, f := p.db.Get(stmt.Name)
	if !f {
		return "", fmt.Errorf("Stream %s is not found.", stmt.Name)
	}
	return "TO BE SUPPORTED", nil
}

func (p *StreamProcessor) execDropStream(stmt *xsql.DropStreamStatement) (string, error) {
	s, err := p.DropStream(stmt.Name)
	if err != nil {
		return s, fmt.Errorf("Drop stream fails: %s.", err)
	}
	return s, nil
}

func (p *StreamProcessor) DropStream(name string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", fmt.Errorf("error when opening db: %v", err)
	}
	defer p.db.Close()
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	} else {
		return fmt.Sprintf("Stream %s is dropped.", name), nil
	}
}

func GetStream(m *common.SimpleKVStore, name string) (stmt *xsql.StreamStmt, err error) {
	s, f := m.Get(name)
	if !f {
		return nil, fmt.Errorf("Cannot find key %s. ", name)
	}
	s1, _ := s.(string)
	parser := xsql.NewParser(strings.NewReader(s1))
	stream, err := xsql.Language.Parse(parser)
	stmt, ok := stream.(*xsql.StreamStmt)
	if !ok {
		err = fmt.Errorf("Error resolving the stream %s, the data in db may be corrupted.", name)
	}
	return
}
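
// RuleProcessor stores rule definitions as JSON in a key-value store under the
// "rule" sub-directory and builds the runtime topology for a rule from its SQL
// and actions.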
type RuleProcessor struct {
	db        common.KeyValue
	rootDbDir string
}

func NewRuleProcessor(d string) *RuleProcessor {
	processor := &RuleProcessor{
		db:        common.GetSimpleKVStore(path.Join(d, "rule")),
		rootDbDir: d,
	}
	return processor
}

func (p *RuleProcessor) ExecCreate(name, ruleJson string) (*api.Rule, error) {
	rule, err := p.getRuleByJson(name, ruleJson)
	if err != nil {
		return nil, err
	}
	err = p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	err = p.db.Set(rule.Id, ruleJson)
	if err != nil {
		return nil, err
	} else {
		log.Infof("Rule %s is created.", rule.Id)
	}
	return rule, nil
}

func (p *RuleProcessor) GetRuleByName(name string) (*api.Rule, error) {
	err := p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	s, f := p.db.Get(name)
	if !f {
		return nil, common.NewErrorWithCode(common.NOT_FOUND, fmt.Sprintf("Rule %s is not found.", name))
	}
	s1, _ := s.(string)
	return p.getRuleByJson(name, s1)
}

func (p *RuleProcessor) getRuleByJson(name, ruleJson string) (*api.Rule, error) {
	var rule api.Rule
	if err := json.Unmarshal([]byte(ruleJson), &rule); err != nil {
		return nil, fmt.Errorf("Parse rule %s error : %s.", ruleJson, err)
	}
	//validation
	if rule.Id == "" && name == "" {
		return nil, fmt.Errorf("Missing rule id.")
	}
	if name != "" && rule.Id != "" && name != rule.Id {
		return nil, fmt.Errorf("Name is not consistent with rule id.")
	}
	if rule.Id == "" {
		rule.Id = name
	}
	if rule.Sql == "" {
		return nil, fmt.Errorf("Missing rule SQL.")
	}
	if rule.Actions == nil || len(rule.Actions) == 0 {
		return nil, fmt.Errorf("Missing rule actions.")
	}
	return &rule, nil
}
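
// ExecInitRule builds the topology for a rule and attaches one sink per action.
// Below is an illustrative rule JSON that passes the validation above, followed by
// a usage sketch; both are made up and not part of the original file, and ruleJson
// is assumed to hold that JSON document:
//
//	{
//	  "id": "rule1",
//	  "sql": "SELECT temperature FROM demo WHERE temperature > 30",
//	  "actions": [{"log": {}}]
//	}
//
//	rp := NewRuleProcessor("data")
//	rule, err := rp.ExecCreate("rule1", ruleJson)
//	if err != nil {
//		// handle the error
//	}
//	tp, err := rp.ExecInitRule(rule)
//	if err != nil {
//		// handle the error
//	}
//	if err := <-tp.Open(); err != nil {
//		log.Errorf("rule %s stopped: %v", rule.Id, err)
//	}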
func (p *RuleProcessor) ExecInitRule(rule *api.Rule) (*xstream.TopologyNew, error) {
	if tp, inputs, err := p.createTopo(rule); err != nil {
		return nil, err
	} else {
		for i, m := range rule.Actions {
			for name, action := range m {
				props, ok := action.(map[string]interface{})
				if !ok {
					return nil, fmt.Errorf("expect map[string]interface{} type for the action properties, but found %v", action)
				}
				tp.AddSink(inputs, nodes.NewSinkNode(fmt.Sprintf("%s_%d", name, i), name, props))
			}
		}
		return tp, nil
	}
}
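
// ExecQuery builds a temporary topology for an ad-hoc query, wiring its output to
// the in-memory log sink; the topology cancels itself once Open reports an error.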
func (p *RuleProcessor) ExecQuery(ruleid, sql string) (*xstream.TopologyNew, error) {
	if tp, inputs, err := p.createTopo(&api.Rule{Id: ruleid, Sql: sql}); err != nil {
		return nil, err
	} else {
		tp.AddSink(inputs, nodes.NewSinkNode("sink_memory_log", "logToMemory", nil))
		go func() {
			select {
			case err := <-tp.Open():
				log.Infof("closing query for error: %v", err)
				tp.GetContext().SetError(err)
				tp.Cancel()
			}
		}()
		return tp, nil
	}
}

func (p *RuleProcessor) ExecDesc(name string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", err
	}
	defer p.db.Close()
	s, f := p.db.Get(name)
	if !f {
		return "", fmt.Errorf("Rule %s is not found.", name)
	}
	s1, _ := s.(string)
	dst := &bytes.Buffer{}
	if err := json.Indent(dst, []byte(s1), "", " "); err != nil {
		return "", err
	}
	return fmt.Sprintln(dst.String()), nil
}

func (p *RuleProcessor) GetAllRules() ([]string, error) {
	err := p.db.Open()
	if err != nil {
		return nil, err
	}
	defer p.db.Close()
	return p.db.Keys()
}

func (p *RuleProcessor) ExecDrop(name string) (string, error) {
	err := p.db.Open()
	if err != nil {
		return "", err
	}
	defer p.db.Close()
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	} else {
		return fmt.Sprintf("Rule %s is dropped.", name), nil
	}
}

func (p *RuleProcessor) createTopo(rule *api.Rule) (*xstream.TopologyNew, []api.Emitter, error) {
	return p.createTopoWithSources(rule, nil)
}

// createTopoWithSources builds the rule topology; the sources parameter lets tests
// inject mock source nodes instead of creating real ones.
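// The operator chain it builds is: source -> preprocessor -> [window] -> [join] ->
// [filter] -> [aggregate] -> [having] -> [order] -> project, where bracketed stages
// are added only when the statement requires them; sinks are attached by the callers.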
func (p *RuleProcessor) createTopoWithSources(rule *api.Rule, sources []*nodes.SourceNode) (*xstream.TopologyNew, []api.Emitter, error) {
	name := rule.Id
	sql := rule.Sql
	var (
		isEventTime    bool
		lateTol        int64
		concurrency    = 1
		bufferLength   = 1024
		sendMetaToSink = false
	)
	if iet, ok := rule.Options["isEventTime"]; ok {
		isEventTime, ok = iet.(bool)
		if !ok {
			return nil, nil, fmt.Errorf("Invalid rule option isEventTime %v, bool type is required.", iet)
		}
	}
	if isEventTime {
		if l, ok := rule.Options["lateTolerance"]; ok {
			if fl, ok := l.(float64); ok {
				lateTol = int64(fl)
			} else {
				return nil, nil, fmt.Errorf("Invalid rule option lateTolerance %v, int type is required.", l)
			}
		}
	}
	if l, ok := rule.Options["concurrency"]; ok {
		if fl, ok := l.(float64); ok {
			concurrency = int(fl)
		} else {
			return nil, nil, fmt.Errorf("Invalid rule option concurrency %v, int type is required.", l)
		}
	}
	if l, ok := rule.Options["bufferLength"]; ok {
		if fl, ok := l.(float64); ok {
			bufferLength = int(fl)
		} else {
			return nil, nil, fmt.Errorf("Invalid rule option bufferLength %v, int type is required.", l)
		}
	}
	if l, ok := rule.Options["sendMetaToSink"]; ok {
		if fl, ok := l.(bool); ok {
			sendMetaToSink = fl
		} else {
			return nil, nil, fmt.Errorf("Invalid rule option sendMetaToSink %v, bool type is required.", l)
		}
	}
	log.Infof("Init rule with options {isEventTime: %v, lateTolerance: %d, concurrency: %d, bufferLength: %d}", isEventTime, lateTol, concurrency, bufferLength)
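	// Illustrative rule "options" object (not part of the original file) showing the
	// keys read above; JSON numbers decode to float64, hence the float64 assertions:
	//
	//	"options": {
	//	  "isEventTime": true,
	//	  "lateTolerance": 1000,
	//	  "concurrency": 2,
	//	  "bufferLength": 2048,
	//	  "sendMetaToSink": false
	//	}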
	shouldCreateSource := sources == nil
	parser := xsql.NewParser(strings.NewReader(sql))
	if stmt, err := xsql.Language.Parse(parser); err != nil {
		return nil, nil, fmt.Errorf("Parse SQL %s error: %s.", sql, err)
	} else {
		if selectStmt, ok := stmt.(*xsql.SelectStatement); !ok {
			return nil, nil, fmt.Errorf("SQL %s is not a select statement.", sql)
		} else {
			tp := xstream.NewWithName(name)
			var inputs []api.Emitter
			streamsFromStmt := xsql.GetStreams(selectStmt)
			dimensions := selectStmt.Dimensions
			if !shouldCreateSource && len(streamsFromStmt) != len(sources) {
				return nil, nil, fmt.Errorf("Invalid parameter sources or streams, the length cannot match the statement, expect %d sources.", len(streamsFromStmt))
			}
			if sendMetaToSink && (len(streamsFromStmt) > 1 || dimensions != nil) {
				return nil, nil, fmt.Errorf("Invalid option sendMetaToSink, it can not be applied to window")
			}
			store := common.GetSimpleKVStore(path.Join(p.rootDbDir, "stream"))
			err := store.Open()
			if err != nil {
				return nil, nil, err
			}
			defer store.Close()
			var alias, aggregateAlias xsql.Fields
			for _, f := range selectStmt.Fields {
				if f.AName != "" {
					if !xsql.HasAggFuncs(f.Expr) {
						alias = append(alias, f)
					} else {
						aggregateAlias = append(aggregateAlias, f)
					}
				}
			}
			for i, s := range streamsFromStmt {
				streamStmt, err := GetStream(store, s)
				if err != nil {
					return nil, nil, fmt.Errorf("fail to get stream %s, please check if stream is created", s)
				}
				pp, err := plans.NewPreprocessor(streamStmt, alias, isEventTime)
				if err != nil {
					return nil, nil, err
				}
				if shouldCreateSource {
					node := nodes.NewSourceNode(s, streamStmt.Options)
					tp.AddSrc(node)
					preprocessorOp := xstream.Transform(pp, "preprocessor_"+s, bufferLength)
					preprocessorOp.SetConcurrency(concurrency)
					tp.AddOperator([]api.Emitter{node}, preprocessorOp)
					inputs = append(inputs, preprocessorOp)
				} else {
					tp.AddSrc(sources[i])
					preprocessorOp := xstream.Transform(pp, "preprocessor_"+s, bufferLength)
					preprocessorOp.SetConcurrency(concurrency)
					tp.AddOperator([]api.Emitter{sources[i]}, preprocessorOp)
					inputs = append(inputs, preprocessorOp)
				}
			}
			var w *xsql.Window
			if dimensions != nil {
				w = dimensions.GetWindow()
				if w != nil {
					wop, err := operators.NewWindowOp("window", w, isEventTime, lateTol, streamsFromStmt, bufferLength)
					if err != nil {
						return nil, nil, err
					}
					tp.AddOperator(inputs, wop)
					inputs = []api.Emitter{wop}
				}
			}
			if w != nil && selectStmt.Joins != nil {
				joinOp := xstream.Transform(&plans.JoinPlan{Joins: selectStmt.Joins, From: selectStmt.Sources[0].(*xsql.Table)}, "join", bufferLength)
				joinOp.SetConcurrency(concurrency)
				tp.AddOperator(inputs, joinOp)
				inputs = []api.Emitter{joinOp}
			}
			if selectStmt.Condition != nil {
				filterOp := xstream.Transform(&plans.FilterPlan{Condition: selectStmt.Condition}, "filter", bufferLength)
				filterOp.SetConcurrency(concurrency)
				tp.AddOperator(inputs, filterOp)
				inputs = []api.Emitter{filterOp}
			}
			var ds xsql.Dimensions
			if dimensions != nil || len(aggregateAlias) > 0 {
				ds = dimensions.GetGroups()
				if (ds != nil && len(ds) > 0) || len(aggregateAlias) > 0 {
					aggregateOp := xstream.Transform(&plans.AggregatePlan{Dimensions: ds, Alias: aggregateAlias}, "aggregate", bufferLength)
					aggregateOp.SetConcurrency(concurrency)
					tp.AddOperator(inputs, aggregateOp)
					inputs = []api.Emitter{aggregateOp}
				}
			}
			if selectStmt.Having != nil {
				havingOp := xstream.Transform(&plans.HavingPlan{selectStmt.Having}, "having", bufferLength)
				havingOp.SetConcurrency(concurrency)
				tp.AddOperator(inputs, havingOp)
				inputs = []api.Emitter{havingOp}
			}
			if selectStmt.SortFields != nil {
				orderOp := xstream.Transform(&plans.OrderPlan{SortFields: selectStmt.SortFields}, "order", bufferLength)
				orderOp.SetConcurrency(concurrency)
				tp.AddOperator(inputs, orderOp)
				inputs = []api.Emitter{orderOp}
			}
			if selectStmt.Fields != nil {
				projectOp := xstream.Transform(&plans.ProjectPlan{Fields: selectStmt.Fields, IsAggregate: xsql.IsAggStatement(selectStmt), SendMeta: sendMetaToSink}, "project", bufferLength)
				projectOp.SetConcurrency(concurrency)
				tp.AddOperator(inputs, projectOp)
				inputs = []api.Emitter{projectOp}
			}
			return tp, inputs, nil
		}
	}
}