stream.go

// Copyright 2021-2022 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package processor

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/lf-edge/ekuiper/internal/conf"
	"github.com/lf-edge/ekuiper/internal/pkg/store"
	"github.com/lf-edge/ekuiper/internal/schema"
	"github.com/lf-edge/ekuiper/internal/topo/lookup"
	"github.com/lf-edge/ekuiper/internal/xsql"
	"github.com/lf-edge/ekuiper/pkg/ast"
	"github.com/lf-edge/ekuiper/pkg/errorx"
	"github.com/lf-edge/ekuiper/pkg/kv"
)

var (
	log = conf.Log
)

type StreamProcessor struct {
	db kv.KeyValue
}

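// NewStreamProcessor opens the "stream" definition store and returns a processor backed
// by it. It panics if the store cannot be initialized, because no stream or table
// operation can work without it.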
func NewStreamProcessor() *StreamProcessor {
	err, db := store.GetKV("stream")
	if err != nil {
		panic(fmt.Sprintf("Cannot initialize store for the stream processor at path 'stream': %v", err))
	}
	processor := &StreamProcessor{
		db: db,
	}
	return processor
}

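// ExecStmt parses a single stream/table SQL statement (CREATE, SHOW, DESCRIBE, EXPLAIN
// or DROP) and executes it, returning the textual result lines.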
func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	switch s := stmt.(type) {
	case *ast.StreamStmt: // Table is also StreamStmt
		var r string
		err = p.execSave(s, statement, false)
		stt := ast.StreamTypeMap[s.StreamType]
		if err != nil {
			err = fmt.Errorf("Create %s fails: %v.", stt, err)
		} else {
			r = fmt.Sprintf("%s %s is created.", strings.Title(stt), s.Name)
			log.Printf("%s", r)
		}
		result = append(result, r)
	case *ast.ShowStreamsStatement:
		result, err = p.execShow(ast.TypeStream)
	case *ast.ShowTablesStatement:
		result, err = p.execShow(ast.TypeTable)
	case *ast.DescribeStreamStatement:
		var r string
		r, err = p.execDescribe(s, ast.TypeStream)
		result = append(result, r)
	case *ast.DescribeTableStatement:
		var r string
		r, err = p.execDescribe(s, ast.TypeTable)
		result = append(result, r)
	case *ast.ExplainStreamStatement:
		var r string
		r, err = p.execExplain(s, ast.TypeStream)
		result = append(result, r)
	case *ast.ExplainTableStatement:
		var r string
		r, err = p.execExplain(s, ast.TypeTable)
		result = append(result, r)
	case *ast.DropStreamStatement:
		var r string
		r, err = p.execDrop(s, ast.TypeStream)
		result = append(result, r)
	case *ast.DropTableStatement:
		var r string
		r, err = p.execDrop(s, ast.TypeTable)
		result = append(result, r)
	default:
		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
	}
	return
}

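// RecoverLookupTable walks the stored table definitions and re-creates their lookup
// instances so that lookup tables are available again after a restart.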
func (p *StreamProcessor) RecoverLookupTable() error {
	keys, err := p.db.Keys()
	if err != nil {
		return fmt.Errorf("error loading data from db: %v.", err)
	}
	var (
		v  string
		vs = &xsql.StreamInfo{}
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == ast.TypeTable {
				parser := xsql.NewParser(strings.NewReader(vs.Statement))
				stmt, e := xsql.Language.Parse(parser)
				if e != nil {
					log.Error(e)
					continue
				}
				switch s := stmt.(type) {
				case *ast.StreamStmt:
					log.Infof("Starting lookup table %s", s.Name)
					e = lookup.CreateInstance(string(s.Name), s.Options.TYPE, s.Options)
					if e != nil {
						log.Errorf("%s", e.Error())
						return e
					}
				default:
					log.Errorf("Invalid lookup table statement: %s", vs.Statement)
				}
			}
		}
	}
	return nil
}

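// execSave persists a stream or table definition. For lookup tables the runtime lookup
// instance is created first. When replace is false, saving fails if a definition with
// the same name already exists.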
func (p *StreamProcessor) execSave(stmt *ast.StreamStmt, statement string, replace bool) error {
	if stmt.StreamType == ast.TypeTable && stmt.Options.KIND == ast.StreamKindLookup {
		log.Infof("Creating lookup table %s", stmt.Name)
		err := lookup.CreateInstance(string(stmt.Name), stmt.Options.TYPE, stmt.Options)
		if err != nil {
			return err
		}
	}
	s, err := json.Marshal(xsql.StreamInfo{
		StreamType: stmt.StreamType,
		Statement:  statement,
		StreamKind: stmt.Options.KIND,
	})
	if err != nil {
		return fmt.Errorf("error when saving to db: %v.", err)
	}
	if replace {
		err = p.db.Set(string(stmt.Name), string(s))
	} else {
		err = p.db.Setnx(string(stmt.Name), string(s))
	}
	return err
}

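// ExecReplaceStream verifies that the given statement redefines the named source of the
// expected type and overwrites the stored definition.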
func (p *StreamProcessor) ExecReplaceStream(name string, statement string, st ast.StreamType) (string, error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return "", err
	}
	stt := ast.StreamTypeMap[st]
	switch s := stmt.(type) {
	case *ast.StreamStmt:
		if s.StreamType != st {
			return "", errorx.NewWithCode(errorx.NOT_FOUND, fmt.Sprintf("%s %s is not found", ast.StreamTypeMap[st], s.Name))
		}
		if string(s.Name) != name {
			return "", fmt.Errorf("Replace %s fails: the sql statement must update the %s source.", name, name)
		}
		err = p.execSave(s, statement, true)
		if err != nil {
			return "", fmt.Errorf("Replace %s fails: %v.", stt, err)
		} else {
			info := fmt.Sprintf("%s %s is replaced.", strings.Title(stt), s.Name)
			log.Printf("%s", info)
			return info, nil
		}
	default:
		return "", fmt.Errorf("Invalid %s statement: %s", stt, statement)
	}
}

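// ExecStreamSql is a convenience wrapper around ExecStmt that joins the result lines
// into a single string.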
func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
	r, err := p.ExecStmt(statement)
	if err != nil {
		return "", err
	} else {
		return strings.Join(r, "\n"), err
	}
}

func (p *StreamProcessor) execShow(st ast.StreamType) ([]string, error) {
	keys, err := p.ShowStream(st)
	if len(keys) == 0 {
		keys = append(keys, fmt.Sprintf("No %s definitions are found.", ast.StreamTypeMap[st]))
	}
	return keys, err
}

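// ShowStream lists the names of all definitions of the given stream type.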
func (p *StreamProcessor) ShowStream(st ast.StreamType) ([]string, error) {
	stt := ast.StreamTypeMap[st]
	keys, err := p.db.Keys()
	if err != nil {
		return nil, fmt.Errorf("Show %ss fails, error when loading data from db: %v.", stt, err)
	}
	var (
		v      string
		vs     = &xsql.StreamInfo{}
		result = make([]string, 0)
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == st {
				result = append(result, k)
			}
		}
	}
	return result, nil
}

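// ShowTable lists table definitions filtered by kind ("scan" or "lookup"); an empty
// kind returns all tables.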
func (p *StreamProcessor) ShowTable(kind string) ([]string, error) {
	if kind == "" {
		return p.ShowStream(ast.TypeTable)
	}
	keys, err := p.db.Keys()
	if err != nil {
		return nil, fmt.Errorf("Show tables fails, error when loading data from db: %v.", err)
	}
	var (
		v      string
		vs     = &xsql.StreamInfo{}
		result = make([]string, 0)
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == ast.TypeTable {
				if kind == "scan" && (vs.StreamKind == ast.StreamKindScan || vs.StreamKind == "") {
					result = append(result, k)
				} else if kind == "lookup" && vs.StreamKind == ast.StreamKindLookup {
					result = append(result, k)
				}
			}
		}
	}
	return result, nil
}

func (p *StreamProcessor) getStream(name string, st ast.StreamType) (string, error) {
	vs, err := xsql.GetDataSourceStatement(p.db, name)
	if vs != nil && vs.StreamType == st {
		return vs.Statement, nil
	}
	if err != nil {
		return "", err
	}
	return "", errorx.NewWithCode(errorx.NOT_FOUND, fmt.Sprintf("%s %s is not found", ast.StreamTypeMap[st], name))
}

func (p *StreamProcessor) execDescribe(stmt ast.NameNode, st ast.StreamType) (string, error) {
	streamStmt, err := p.DescStream(stmt.GetName(), st)
	if err != nil {
		return "", err
	}
	switch s := streamStmt.(type) {
	case *ast.StreamStmt:
		var buff bytes.Buffer
		buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
		for _, f := range s.StreamFields {
			buff.WriteString(f.Name + "\t")
			buff.WriteString(printFieldType(f.FieldType))
			buff.WriteString("\n")
		}
		buff.WriteString("\n")
		printOptions(s.Options, &buff)
		return buff.String(), err
	default:
		return "", fmt.Errorf("Error resolving the %s %s, the data in db may be corrupted.", ast.StreamTypeMap[st], stmt.GetName())
	}
}

func printOptions(opts *ast.Options, buff *bytes.Buffer) {
	if opts.CONF_KEY != "" {
		buff.WriteString(fmt.Sprintf("CONF_KEY: %s\n", opts.CONF_KEY))
	}
	if opts.DATASOURCE != "" {
		buff.WriteString(fmt.Sprintf("DATASOURCE: %s\n", opts.DATASOURCE))
	}
	if opts.FORMAT != "" {
		buff.WriteString(fmt.Sprintf("FORMAT: %s\n", opts.FORMAT))
	}
	if opts.SCHEMAID != "" {
		buff.WriteString(fmt.Sprintf("SCHEMAID: %s\n", opts.SCHEMAID))
	}
	if opts.KEY != "" {
		buff.WriteString(fmt.Sprintf("KEY: %s\n", opts.KEY))
	}
	if opts.RETAIN_SIZE != 0 {
		buff.WriteString(fmt.Sprintf("RETAIN_SIZE: %d\n", opts.RETAIN_SIZE))
	}
	if opts.SHARED {
		buff.WriteString(fmt.Sprintf("SHARED: %v\n", opts.SHARED))
	}
	if opts.STRICT_VALIDATION {
		buff.WriteString(fmt.Sprintf("STRICT_VALIDATION: %v\n", opts.STRICT_VALIDATION))
	}
	if opts.TIMESTAMP != "" {
		buff.WriteString(fmt.Sprintf("TIMESTAMP: %s\n", opts.TIMESTAMP))
	}
	if opts.TIMESTAMP_FORMAT != "" {
		buff.WriteString(fmt.Sprintf("TIMESTAMP_FORMAT: %s\n", opts.TIMESTAMP_FORMAT))
	}
	if opts.TYPE != "" {
		buff.WriteString(fmt.Sprintf("TYPE: %s\n", opts.TYPE))
	}
}

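// DescStream loads the stored definition of the named stream or table and returns the
// parsed statement.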
func (p *StreamProcessor) DescStream(name string, st ast.StreamType) (ast.Statement, error) {
	statement, err := p.getStream(name, st)
	if err != nil {
		return nil, fmt.Errorf("Describe %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	parser := xsql.NewParser(strings.NewReader(statement))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	return stream, nil
}

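// GetInferredSchema returns the stream fields inferred from the schema file referenced
// by SCHEMAID, or nil if the definition does not reference one.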
func (p *StreamProcessor) GetInferredSchema(name string, st ast.StreamType) (ast.StreamFields, error) {
	statement, err := p.getStream(name, st)
	if err != nil {
		return nil, fmt.Errorf("Describe %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	parser := xsql.NewParser(strings.NewReader(statement))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	stmt, ok := stream.(*ast.StreamStmt)
	if !ok {
		return nil, fmt.Errorf("Describe %s fails, cannot parse the data \"%s\" to a stream statement", ast.StreamTypeMap[st], statement)
	}
	if stmt.Options.SCHEMAID != "" {
		return schema.InferFromSchemaFile(stmt.Options.FORMAT, stmt.Options.SCHEMAID)
	}
	return nil, nil
}

// GetInferredJsonSchema returns the inferred schema as a map of JSON stream fields.
func (p *StreamProcessor) GetInferredJsonSchema(name string, st ast.StreamType) (map[string]*ast.JsonStreamField, error) {
	statement, err := p.getStream(name, st)
	if err != nil {
		return nil, fmt.Errorf("Describe %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	parser := xsql.NewParser(strings.NewReader(statement))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	stmt, ok := stream.(*ast.StreamStmt)
	if !ok {
		return nil, fmt.Errorf("Describe %s fails, cannot parse the data \"%s\" to a stream statement", ast.StreamTypeMap[st], statement)
	}
	sfs := stmt.StreamFields
	if stmt.Options.SCHEMAID != "" {
		sfs, err = schema.InferFromSchemaFile(stmt.Options.FORMAT, stmt.Options.SCHEMAID)
		if err != nil {
			return nil, err
		}
	}
	return convertSchema(sfs), nil
}

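// convertSchema maps parsed stream fields to their JSON stream field representation,
// descending into array and struct types recursively.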
func convertSchema(sfs ast.StreamFields) map[string]*ast.JsonStreamField {
	result := make(map[string]*ast.JsonStreamField, len(sfs))
	for _, sf := range sfs {
		result[sf.Name] = convertFieldType(sf.FieldType)
	}
	return result
}

func convertFieldType(sf ast.FieldType) *ast.JsonStreamField {
	switch t := sf.(type) {
	case *ast.BasicType:
		return &ast.JsonStreamField{
			Type: t.Type.String(),
		}
	case *ast.ArrayType:
		var items *ast.JsonStreamField
		switch t.Type {
		case ast.ARRAY, ast.STRUCT:
			items = convertFieldType(t.FieldType)
		default:
			items = &ast.JsonStreamField{
				Type: t.Type.String(),
			}
		}
		return &ast.JsonStreamField{
			Type:  "array",
			Items: items,
		}
	case *ast.RecType:
		return &ast.JsonStreamField{
			Type:       "struct",
			Properties: convertSchema(t.StreamFields),
		}
	default: // should never happen
		return nil
	}
}

func (p *StreamProcessor) execExplain(stmt ast.NameNode, st ast.StreamType) (string, error) {
	_, err := p.getStream(stmt.GetName(), st)
	if err != nil {
		return "", fmt.Errorf("Explain %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	return "TO BE SUPPORTED", nil
}

func (p *StreamProcessor) execDrop(stmt ast.NameNode, st ast.StreamType) (string, error) {
	s, err := p.DropStream(stmt.GetName(), st)
	if err != nil {
		return s, fmt.Errorf("Drop %s fails: %s.", ast.StreamTypeMap[st], err)
	}
	return s, nil
}

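// DropStream removes the named stream or table definition from the store; for tables,
// the associated lookup instance is dropped first.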
func (p *StreamProcessor) DropStream(name string, st ast.StreamType) (string, error) {
	if st == ast.TypeTable {
		err := lookup.DropInstance(name)
		if err != nil {
			return "", err
		}
	}
	_, err := p.getStream(name, st)
	if err != nil {
		return "", err
	}
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	} else {
		return fmt.Sprintf("%s %s is dropped.", strings.Title(ast.StreamTypeMap[st]), name), nil
	}
}

func printFieldType(ft ast.FieldType) (result string) {
	switch t := ft.(type) {
	case *ast.BasicType:
		result = t.Type.String()
	case *ast.ArrayType:
		result = "array("
		if t.FieldType != nil {
			result += printFieldType(t.FieldType)
		} else {
			result += t.Type.String()
		}
		result += ")"
	case *ast.RecType:
		result = "struct("
		isFirst := true
		for _, f := range t.StreamFields {
			if isFirst {
				isFirst = false
			} else {
				result += ", "
			}
			result = result + f.Name + " " + printFieldType(f.FieldType)
		}
		result += ")"
	}
	return
}

// GetAll returns all defined streams and tables for export.
func (p *StreamProcessor) GetAll() (result map[string]map[string]string, e error) {
	defs, err := p.db.All()
	if err != nil {
		e = err
		return
	}
	var (
		vs = &xsql.StreamInfo{}
	)
	result = map[string]map[string]string{
		"streams": make(map[string]string),
		"tables":  make(map[string]string),
	}
	for k, v := range defs {
		if err := json.Unmarshal([]byte(v), vs); err == nil {
			switch vs.StreamType {
			case ast.TypeStream:
				result["streams"][k] = vs.Statement
			case ast.TypeTable:
				result["tables"][k] = vs.Statement
			}
		}
	}
	return
}