// stream.go

// Copyright 2021-2022 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package processor

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/lf-edge/ekuiper/internal/conf"
	"github.com/lf-edge/ekuiper/internal/pkg/store"
	"github.com/lf-edge/ekuiper/internal/topo/lookup"
	"github.com/lf-edge/ekuiper/internal/xsql"
	"github.com/lf-edge/ekuiper/pkg/ast"
	"github.com/lf-edge/ekuiper/pkg/errorx"
	"github.com/lf-edge/ekuiper/pkg/kv"
)

var (
	log = conf.Log
)
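
// StreamProcessor handles stream and table DDL statements and persists their definitions in a KV store.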
type StreamProcessor struct {
	db kv.KeyValue
}
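
// NewStreamProcessor opens the "stream" KV store and returns a processor bound to it.
// It panics if the store cannot be initialized.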
func NewStreamProcessor() *StreamProcessor {
	err, db := store.GetKV("stream")
	if err != nil {
		panic(fmt.Sprintf("Can not initialize store for the stream processor at path 'stream': %v", err))
	}
	processor := &StreamProcessor{
		db: db,
	}
	return processor
}
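
// ExecStmt parses a single stream/table SQL statement and dispatches it to the matching
// handler (create, show, describe, explain or drop), returning the textual results.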
func (p *StreamProcessor) ExecStmt(statement string) (result []string, err error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	switch s := stmt.(type) {
	case *ast.StreamStmt: // a table is also a StreamStmt
		var r string
		err = p.execSave(s, statement, false)
		stt := ast.StreamTypeMap[s.StreamType]
		if err != nil {
			err = fmt.Errorf("Create %s fails: %v.", stt, err)
		} else {
			r = fmt.Sprintf("%s %s is created.", strings.Title(stt), s.Name)
			log.Printf("%s", r)
		}
		result = append(result, r)
	case *ast.ShowStreamsStatement:
		result, err = p.execShow(ast.TypeStream)
	case *ast.ShowTablesStatement:
		result, err = p.execShow(ast.TypeTable)
	case *ast.DescribeStreamStatement:
		var r string
		r, err = p.execDescribe(s, ast.TypeStream)
		result = append(result, r)
	case *ast.DescribeTableStatement:
		var r string
		r, err = p.execDescribe(s, ast.TypeTable)
		result = append(result, r)
	case *ast.ExplainStreamStatement:
		var r string
		r, err = p.execExplain(s, ast.TypeStream)
		result = append(result, r)
	case *ast.ExplainTableStatement:
		var r string
		r, err = p.execExplain(s, ast.TypeTable)
		result = append(result, r)
	case *ast.DropStreamStatement:
		var r string
		r, err = p.execDrop(s, ast.TypeStream)
		result = append(result, r)
	case *ast.DropTableStatement:
		var r string
		r, err = p.execDrop(s, ast.TypeTable)
		result = append(result, r)
	default:
		return nil, fmt.Errorf("Invalid stream statement: %s", statement)
	}
	return
}
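
// RecoverLookupTable re-parses the saved table definitions on startup and recreates their lookup instances.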
func (p *StreamProcessor) RecoverLookupTable() error {
	keys, err := p.db.Keys()
	if err != nil {
		return fmt.Errorf("error loading data from db: %v.", err)
	}
	var (
		v  string
		vs = &xsql.StreamInfo{}
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == ast.TypeTable {
				parser := xsql.NewParser(strings.NewReader(vs.Statement))
				stmt, e := xsql.Language.Parse(parser)
				if e != nil {
					// skip entries whose saved statement no longer parses
					log.Error(e)
					continue
				}
				switch s := stmt.(type) {
				case *ast.StreamStmt:
					log.Infof("Starting lookup table %s", s.Name)
					e = lookup.CreateInstance(string(s.Name), s.Options.TYPE, s.Options)
					if e != nil {
						log.Errorf("%s", e.Error())
						return e
					}
				default:
					log.Errorf("Invalid lookup table statement: %s", vs.Statement)
				}
			}
		}
	}
	return nil
}
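
// execSave persists the statement together with its stream type. Lookup tables additionally get
// a lookup instance created before saving. With replace set, an existing definition of the same
// name is overwritten (Set); otherwise Setnx is used so an existing definition is not silently replaced.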
func (p *StreamProcessor) execSave(stmt *ast.StreamStmt, statement string, replace bool) error {
	if stmt.StreamType == ast.TypeTable && stmt.Options.KIND == ast.StreamKindLookup {
		log.Infof("Creating lookup table %s", stmt.Name)
		err := lookup.CreateInstance(string(stmt.Name), stmt.Options.TYPE, stmt.Options)
		if err != nil {
			return err
		}
	}
	s, err := json.Marshal(xsql.StreamInfo{
		StreamType: stmt.StreamType,
		Statement:  statement,
	})
	if err != nil {
		return fmt.Errorf("error when saving to db: %v.", err)
	}
	if replace {
		err = p.db.Set(string(stmt.Name), string(s))
	} else {
		err = p.db.Setnx(string(stmt.Name), string(s))
	}
	return err
}
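
// ExecReplaceStream replaces an existing stream or table definition. The new statement must
// parse to the same name and the same stream type as the original.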
func (p *StreamProcessor) ExecReplaceStream(name string, statement string, st ast.StreamType) (string, error) {
	parser := xsql.NewParser(strings.NewReader(statement))
	stmt, err := xsql.Language.Parse(parser)
	if err != nil {
		return "", err
	}
	stt := ast.StreamTypeMap[st]
	switch s := stmt.(type) {
	case *ast.StreamStmt:
		if s.StreamType != st {
			return "", errorx.NewWithCode(errorx.NOT_FOUND, fmt.Sprintf("%s %s is not found", ast.StreamTypeMap[st], s.Name))
		}
		if string(s.Name) != name {
			return "", fmt.Errorf("Replace %s fails: the sql statement must update the %s source.", name, name)
		}
		err = p.execSave(s, statement, true)
		if err != nil {
			return "", fmt.Errorf("Replace %s fails: %v.", stt, err)
		}
		info := fmt.Sprintf("%s %s is replaced.", strings.Title(stt), s.Name)
		log.Printf("%s", info)
		return info, nil
	default:
		return "", fmt.Errorf("Invalid %s statement: %s", stt, statement)
	}
}
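
// ExecStreamSql executes a statement via ExecStmt and joins the result lines into a single string.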
func (p *StreamProcessor) ExecStreamSql(statement string) (string, error) {
	r, err := p.ExecStmt(statement)
	if err != nil {
		return "", err
	}
	return strings.Join(r, "\n"), nil
}
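
// execShow lists definitions of the given type, substituting a placeholder message when none are found.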
func (p *StreamProcessor) execShow(st ast.StreamType) ([]string, error) {
	keys, err := p.ShowStream(st)
	if len(keys) == 0 {
		keys = append(keys, fmt.Sprintf("No %s definitions are found.", ast.StreamTypeMap[st]))
	}
	return keys, err
}
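
// ShowStream returns the names of all stored definitions whose type matches st.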
func (p *StreamProcessor) ShowStream(st ast.StreamType) ([]string, error) {
	stt := ast.StreamTypeMap[st]
	keys, err := p.db.Keys()
	if err != nil {
		return nil, fmt.Errorf("Show %ss fails, error when loading data from db: %v.", stt, err)
	}
	var (
		v      string
		vs     = &xsql.StreamInfo{}
		result = make([]string, 0)
	)
	for _, k := range keys {
		if ok, _ := p.db.Get(k, &v); ok {
			if err := json.Unmarshal([]byte(v), vs); err == nil && vs.StreamType == st {
				result = append(result, k)
			}
		}
	}
	return result, nil
}
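
// getStream fetches the saved statement for name, returning a NOT_FOUND error
// if it does not exist or is of a different type.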
func (p *StreamProcessor) getStream(name string, st ast.StreamType) (string, error) {
	vs, err := xsql.GetDataSourceStatement(p.db, name)
	if vs != nil && vs.StreamType == st {
		return vs.Statement, nil
	}
	if err != nil {
		return "", err
	}
	return "", errorx.NewWithCode(errorx.NOT_FOUND, fmt.Sprintf("%s %s is not found", ast.StreamTypeMap[st], name))
}
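
// execDescribe renders the fields and options of a stream or table as human-readable text.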
func (p *StreamProcessor) execDescribe(stmt ast.NameNode, st ast.StreamType) (string, error) {
	streamStmt, err := p.DescStream(stmt.GetName(), st)
	if err != nil {
		return "", err
	}
	switch s := streamStmt.(type) {
	case *ast.StreamStmt:
		var buff bytes.Buffer
		buff.WriteString("Fields\n--------------------------------------------------------------------------------\n")
		for _, f := range s.StreamFields {
			buff.WriteString(f.Name + "\t")
			buff.WriteString(printFieldType(f.FieldType))
			buff.WriteString("\n")
		}
		buff.WriteString("\n")
		printOptions(s.Options, &buff)
		return buff.String(), err
	default:
		return "", fmt.Errorf("Error resolving the %s %s, the data in db may be corrupted.", ast.StreamTypeMap[st], stmt.GetName())
	}
}
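
// printOptions appends every option that differs from its zero value to buff, one "NAME: value" line per option.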
func printOptions(opts *ast.Options, buff *bytes.Buffer) {
	if opts.CONF_KEY != "" {
		buff.WriteString(fmt.Sprintf("CONF_KEY: %s\n", opts.CONF_KEY))
	}
	if opts.DATASOURCE != "" {
		buff.WriteString(fmt.Sprintf("DATASOURCE: %s\n", opts.DATASOURCE))
	}
	if opts.FORMAT != "" {
		buff.WriteString(fmt.Sprintf("FORMAT: %s\n", opts.FORMAT))
	}
	if opts.SCHEMAID != "" {
		buff.WriteString(fmt.Sprintf("SCHEMAID: %s\n", opts.SCHEMAID))
	}
	if opts.KEY != "" {
		buff.WriteString(fmt.Sprintf("KEY: %s\n", opts.KEY))
	}
	if opts.RETAIN_SIZE != 0 {
		buff.WriteString(fmt.Sprintf("RETAIN_SIZE: %d\n", opts.RETAIN_SIZE))
	}
	if opts.SHARED {
		buff.WriteString(fmt.Sprintf("SHARED: %v\n", opts.SHARED))
	}
	if opts.STRICT_VALIDATION {
		buff.WriteString(fmt.Sprintf("STRICT_VALIDATION: %v\n", opts.STRICT_VALIDATION))
	}
	if opts.TIMESTAMP != "" {
		buff.WriteString(fmt.Sprintf("TIMESTAMP: %s\n", opts.TIMESTAMP))
	}
	if opts.TIMESTAMP_FORMAT != "" {
		buff.WriteString(fmt.Sprintf("TIMESTAMP_FORMAT: %s\n", opts.TIMESTAMP_FORMAT))
	}
	if opts.TYPE != "" {
		buff.WriteString(fmt.Sprintf("TYPE: %s\n", opts.TYPE))
	}
}
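
// DescStream loads the stored statement for name and returns its parsed AST.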
func (p *StreamProcessor) DescStream(name string, st ast.StreamType) (ast.Statement, error) {
	statement, err := p.getStream(name, st)
	if err != nil {
		return nil, fmt.Errorf("Describe %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	parser := xsql.NewParser(strings.NewReader(statement))
	stream, err := xsql.Language.Parse(parser)
	if err != nil {
		return nil, err
	}
	return stream, nil
}
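
// execExplain only verifies that the definition exists; real EXPLAIN output is not implemented yet.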
func (p *StreamProcessor) execExplain(stmt ast.NameNode, st ast.StreamType) (string, error) {
	_, err := p.getStream(stmt.GetName(), st)
	if err != nil {
		return "", fmt.Errorf("Explain %s fails, %s.", ast.StreamTypeMap[st], err)
	}
	return "TO BE SUPPORTED", nil
}
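
// execDrop drops the named definition and wraps any failure with the statement type.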
func (p *StreamProcessor) execDrop(stmt ast.NameNode, st ast.StreamType) (string, error) {
	s, err := p.DropStream(stmt.GetName(), st)
	if err != nil {
		return s, fmt.Errorf("Drop %s fails: %s.", ast.StreamTypeMap[st], err)
	}
	return s, nil
}
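
// DropStream deletes a stream or table definition from the store; for tables the
// associated lookup instance is dropped first.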
func (p *StreamProcessor) DropStream(name string, st ast.StreamType) (string, error) {
	if st == ast.TypeTable {
		err := lookup.DropInstance(name)
		if err != nil {
			return "", err
		}
	}
	_, err := p.getStream(name, st)
	if err != nil {
		return "", err
	}
	err = p.db.Delete(name)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s %s is dropped.", strings.Title(ast.StreamTypeMap[st]), name), nil
}
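
// printFieldType formats a field type, recursing into array element types and struct fields.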
func printFieldType(ft ast.FieldType) (result string) {
	switch t := ft.(type) {
	case *ast.BasicType:
		result = t.Type.String()
	case *ast.ArrayType:
		result = "array("
		if t.FieldType != nil {
			result += printFieldType(t.FieldType)
		} else {
			result += t.Type.String()
		}
		result += ")"
	case *ast.RecType:
		result = "struct("
		isFirst := true
		for _, f := range t.StreamFields {
			if isFirst {
				isFirst = false
			} else {
				result += ", "
			}
			result = result + f.Name + " " + printFieldType(f.FieldType)
		}
		result += ")"
	}
	return
}