stream.go

// Copyright 2021-2022 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package processor

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/lf-edge/ekuiper/internal/conf"
	"github.com/lf-edge/ekuiper/internal/pkg/store"
	"github.com/lf-edge/ekuiper/internal/topo/lookup"
	"github.com/lf-edge/ekuiper/internal/xsql"
	"github.com/lf-edge/ekuiper/pkg/ast"
	"github.com/lf-edge/ekuiper/pkg/errorx"
	"github.com/lf-edge/ekuiper/pkg/kv"
)

var log = conf.Log

type StreamProcessor struct {
	db kv.KeyValue
}

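// NewStreamProcessor creates a StreamProcessor backed by the "stream" KV store.
// It panics if the store cannot be initialized, since no stream or table
// definition can be persisted without it.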
func NewStreamProcessor() *StreamProcessor {
	err, db := store.GetKV("stream")
	if err != nil {
		panic(fmt.Sprintf("Cannot initialize store for the stream processor at path 'stream': %v", err))
	}
	processor := &StreamProcessor{
		db: db,
	}
	return processor
}

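// ExecStmt parses and executes a single stream/table SQL statement, such as
// CREATE STREAM/TABLE, SHOW STREAMS/TABLES, DESCRIBE, EXPLAIN or DROP, and
// returns the human-readable result lines.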
func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	switch s := stmt.(type) {
	case *ast.StreamStmt: // Table is also StreamStmt
		var r string
		err = p.execSave(s, statement, false)
		stt := ast.StreamTypeMap[s.StreamType]
		if err != nil {
			err = fmt.Errorf("Create %s fails: %v.", stt, err)
		} else {
			r = fmt.Sprintf("%s %s is created.", strings.Title(stt), s.Name)
			log.Printf("%s", r)
		}
		result = append(result, r)
	case *ast.ShowStreamsStatement:
		result, err = p.execShow(ast.TypeStream)
	case *ast.ShowTablesStatement:
		result, err = p.execShow(ast.TypeTable)
	case *ast.DescribeStreamStatement:
		var r string
		r, err = p.execDescribe(s, ast.TypeStream)
		result = append(result, r)
	case *ast.DescribeTableStatement:
		var r string
		r, err = p.execDescribe(s, ast.TypeTable)
		result = append(result, r)
	case *ast.ExplainStreamStatement:
		var r string
		r, err = p.execExplain(s, ast.TypeStream)
		result = append(result, r)
	case *ast.ExplainTableStatement:
		var r string
		r, err = p.execExplain(s, ast.TypeTable)
		result = append(result, r)
	case *ast.DropStreamStatement:
		var r string
		r, err = p.execDrop(s, ast.TypeStream)
		result = append(result, r)
	case *ast.DropTableStatement:
		var r string
		r, err = p.execDrop(s, ast.TypeTable)
		result = append(result, r)
	default:
		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
	}
	return
}

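// RecoverLookupTable re-parses the table definitions persisted in the store
// and recreates their lookup instances so that they survive a restart.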
func (p *StreamProcessor) RecoverLookupTable() error {
	keys, err := p.db.Keys()
	if err != nil {
		return fmt.Errorf("error loading data from db: %v.", err)
	}
	var (
		v  string
		vs = &xsql.StreamInfo{}
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == ast.TypeTable {
				parser := xsql.NewParser(strings.NewReader(vs.Statement))
				stmt, e := xsql.Language.Parse(parser)
				if e != nil {
					log.Error(e)
				}
				switch s := stmt.(type) {
				case *ast.StreamStmt:
					log.Infof("Starting lookup table %s", s.Name)
					e = lookup.CreateInstance(string(s.Name), s.Options.TYPE, s.Options)
					if e != nil {
						log.Errorf("%s", e.Error())
						return e
					}
				default:
					log.Errorf("Invalid lookup table statement: %s", vs.Statement)
				}
			}
		}
	}
	return nil
}

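// execSave persists a stream or table definition as JSON-encoded
// xsql.StreamInfo under its name. For lookup tables it creates the lookup
// source instance first. With replace set, an existing definition of the same
// name is overwritten; otherwise Setnx rejects duplicates.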
func (p *StreamProcessor) execSave(stmt *ast.StreamStmt, statement string, replace bool) error {
	if stmt.StreamType == ast.TypeTable && stmt.Options.KIND == ast.StreamKindLookup {
		log.Infof("Creating lookup table %s", stmt.Name)
		err := lookup.CreateInstance(string(stmt.Name), stmt.Options.TYPE, stmt.Options)
		if err != nil {
			return err
		}
	}
	s, err := json.Marshal(xsql.StreamInfo{
		StreamType: stmt.StreamType,
		Statement:  statement,
		StreamKind: stmt.Options.KIND,
	})
	if err != nil {
		return fmt.Errorf("error when saving to db: %v.", err)
	}
	if replace {
		err = p.db.Set(string(stmt.Name), string(s))
	} else {
		err = p.db.Setnx(string(stmt.Name), string(s))
	}
	return err
}

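// ExecReplaceStream validates that the given statement defines the same kind
// and name as the existing source, then overwrites the stored definition.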
func (p *StreamProcessor) ExecReplaceStream(name string, statement string, st ast.StreamType) (string, error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return "", err
	}
	stt := ast.StreamTypeMap[st]
	switch s := stmt.(type) {
	case *ast.StreamStmt:
		if s.StreamType != st {
			return "", errorx.NewWithCode(errorx.NOT_FOUND, fmt.Sprintf("%s %s is not found", ast.StreamTypeMap[st], s.Name))
		}
		if string(s.Name) != name {
			return "", fmt.Errorf("Replace %s fails: the sql statement must update the %s source.", name, name)
		}
		err = p.execSave(s, statement, true)
		if err != nil {
			return "", fmt.Errorf("Replace %s fails: %v.", stt, err)
		} else {
			info := fmt.Sprintf("%s %s is replaced.", strings.Title(stt), s.Name)
			log.Printf("%s", info)
			return info, nil
		}
	default:
		return "", fmt.Errorf("Invalid %s statement: %s", stt, statement)
	}
}

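// ExecStreamSql is a convenience wrapper around ExecStmt that joins the
// result lines into a single string.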
func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
	r, err := p.ExecStmt(statement)
	if err != nil {
		return "", err
	} else {
		return strings.Join(r, "\n"), err
	}
}

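// execShow lists the names of all definitions of the given type, or a
// placeholder message when none exist.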
func (p *StreamProcessor) execShow(st ast.StreamType) ([]string, error) {
	keys, err := p.ShowStream(st)
	if len(keys) == 0 {
		keys = append(keys, fmt.Sprintf("No %s definitions are found.", ast.StreamTypeMap[st]))
	}
	return keys, err
}

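// ShowStream returns the names of all stored definitions whose type matches st.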
func (p *StreamProcessor) ShowStream(st ast.StreamType) ([]string, error) {
	stt := ast.StreamTypeMap[st]
	keys, err := p.db.Keys()
	if err != nil {
		return nil, fmt.Errorf("Show %ss fails, error when loading data from db: %v.", stt, err)
	}
	var (
		v      string
		vs     = &xsql.StreamInfo{}
		result = make([]string, 0)
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == st {
				result = append(result, k)
			}
		}
	}
	return result, nil
}

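// ShowTable returns the names of stored tables filtered by kind ("scan" or
// "lookup"); an empty kind returns every table.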
func (p *StreamProcessor) ShowTable(kind string) ([]string, error) {
	if kind == "" {
		return p.ShowStream(ast.TypeTable)
	}
	keys, err := p.db.Keys()
	if err != nil {
		return nil, fmt.Errorf("Show tables fails, error when loading data from db: %v.", err)
	}
	var (
		v      string
		vs     = &xsql.StreamInfo{}
		result = make([]string, 0)
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == ast.TypeTable {
				if kind == "scan" && (vs.StreamKind == ast.StreamKindScan || vs.StreamKind == "") {
					result = append(result, k)
				} else if kind == "lookup" && vs.StreamKind == ast.StreamKindLookup {
					result = append(result, k)
				}
			}
		}
	}
	return result, nil
}

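// getStream loads the original SQL statement of the named definition,
// returning a NOT_FOUND error if it does not exist or is of a different type.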
func (p *StreamProcessor) getStream(name string, st ast.StreamType) (string, error) {
	vs, err := xsql.GetDataSourceStatement(p.db, name)
	if vs != nil && vs.StreamType == st {
		return vs.Statement, nil
	}
	if err != nil {
		return "", err
	}
	return "", errorx.NewWithCode(errorx.NOT_FOUND, fmt.Sprintf("%s %s is not found", ast.StreamTypeMap[st], name))
}

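// execDescribe renders the field list and options of a stream or table as a
// plain-text description.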
func (p *StreamProcessor) execDescribe(stmt ast.NameNode, st ast.StreamType) (string, error) {
	streamStmt, err := p.DescStream(stmt.GetName(), st)
	if err != nil {
		return "", err
	}
	switch s := streamStmt.(type) {
	case *ast.StreamStmt:
		var buff bytes.Buffer
		buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
		for _, f := range s.StreamFields {
			buff.WriteString(f.Name + "\t")
			buff.WriteString(printFieldType(f.FieldType))
			buff.WriteString("\n")
		}
		buff.WriteString("\n")
		printOptions(s.Options, &buff)
		return buff.String(), err
	default:
		return "", fmt.Errorf("Error resolving the %s %s, the data in db may be corrupted.", ast.StreamTypeMap[st], stmt.GetName())
	}
}

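// printOptions appends the non-empty stream options to the description buffer,
// one "NAME: value" line per option.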
func printOptions(opts *ast.Options, buff *bytes.Buffer) {
	if opts.CONF_KEY != "" {
		buff.WriteString(fmt.Sprintf("CONF_KEY: %s\n", opts.CONF_KEY))
	}
	if opts.DATASOURCE != "" {
		buff.WriteString(fmt.Sprintf("DATASOURCE: %s\n", opts.DATASOURCE))
	}
	if opts.FORMAT != "" {
		buff.WriteString(fmt.Sprintf("FORMAT: %s\n", opts.FORMAT))
	}
	if opts.SCHEMAID != "" {
		buff.WriteString(fmt.Sprintf("SCHEMAID: %s\n", opts.SCHEMAID))
	}
	if opts.KEY != "" {
		buff.WriteString(fmt.Sprintf("KEY: %s\n", opts.KEY))
	}
	if opts.RETAIN_SIZE != 0 {
		buff.WriteString(fmt.Sprintf("RETAIN_SIZE: %d\n", opts.RETAIN_SIZE))
	}
	if opts.SHARED {
		buff.WriteString(fmt.Sprintf("SHARED: %v\n", opts.SHARED))
	}
	if opts.STRICT_VALIDATION {
		buff.WriteString(fmt.Sprintf("STRICT_VALIDATION: %v\n", opts.STRICT_VALIDATION))
	}
	if opts.TIMESTAMP != "" {
		buff.WriteString(fmt.Sprintf("TIMESTAMP: %s\n", opts.TIMESTAMP))
	}
	if opts.TIMESTAMP_FORMAT != "" {
		buff.WriteString(fmt.Sprintf("TIMESTAMP_FORMAT: %s\n", opts.TIMESTAMP_FORMAT))
	}
	if opts.TYPE != "" {
		buff.WriteString(fmt.Sprintf("TYPE: %s\n", opts.TYPE))
	}
}

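// DescStream loads and re-parses the named definition, returning its AST
// statement for callers that need structured access.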
func (p *StreamProcessor) DescStream(name string, st ast.StreamType) (ast.Statement, error) {
	statement, err := p.getStream(name, st)
	if err != nil {
		return nil, fmt.Errorf("Describe %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	parser := xsql.NewParser(strings.NewReader(statement))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	return stream, nil
}

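// execExplain only verifies that the source exists; explain output is not
// implemented yet.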
func (p *StreamProcessor) execExplain(stmt ast.NameNode, st ast.StreamType) (string, error) {
	_, err := p.getStream(stmt.GetName(), st)
	if err != nil {
		return "", fmt.Errorf("Explain %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	return "TO BE SUPPORTED", nil
}

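// execDrop removes the named stream or table and wraps any failure in a
// user-facing error message.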
func (p *StreamProcessor) execDrop(stmt ast.NameNode, st ast.StreamType) (string, error) {
	s, err := p.DropStream(stmt.GetName(), st)
	if err != nil {
		return s, fmt.Errorf("Drop %s fails: %s.", ast.StreamTypeMap[st], err)
	}
	return s, nil
}

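// DropStream deletes the stored definition; for tables it also drops the
// associated lookup instance before removing the entry from the store.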
func (p *StreamProcessor) DropStream(name string, st ast.StreamType) (string, error) {
	if st == ast.TypeTable {
		err := lookup.DropInstance(name)
		if err != nil {
			return "", err
		}
	}
	_, err := p.getStream(name, st)
	if err != nil {
		return "", err
	}
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	} else {
		return fmt.Sprintf("%s %s is dropped.", strings.Title(ast.StreamTypeMap[st]), name), nil
	}
}

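// printFieldType renders a field type as SQL-like text, recursing into array
// and struct types.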
func printFieldType(ft ast.FieldType) (result string) {
	switch t := ft.(type) {
	case *ast.BasicType:
		result = t.Type.String()
	case *ast.ArrayType:
		result = "array("
		if t.FieldType != nil {
			result += printFieldType(t.FieldType)
		} else {
			result += t.Type.String()
		}
		result += ")"
	case *ast.RecType:
		result = "struct("
		isFirst := true
		for _, f := range t.StreamFields {
			if isFirst {
				isFirst = false
			} else {
				result += ", "
			}
			result = result + f.Name + " " + printFieldType(f.FieldType)
		}
		result += ")"
	}
	return
}

// GetAll returns all defined streams and tables, keyed by name, for export.
func (p *StreamProcessor) GetAll() (result map[string]map[string]string, e error) {
	defs, err := p.db.All()
	if err != nil {
		e = err
		return
	}
	vs := &xsql.StreamInfo{}
	result = map[string]map[string]string{
		"streams": make(map[string]string),
		"tables":  make(map[string]string),
	}
	for k, v := range defs {
		if err := json.Unmarshal([]byte(v), vs); err == nil {
			switch vs.StreamType {
			case ast.TypeStream:
				result["streams"][k] = vs.Statement
			case ast.TypeTable:
				result["tables"][k] = vs.Statement
			}
		}
	}
	return
}