analyzer.go

// Copyright 2022-2023 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package planner

import (
	"fmt"
	"strings"

	"github.com/lf-edge/ekuiper/internal/binder/function"
	"github.com/lf-edge/ekuiper/internal/schema"
	"github.com/lf-edge/ekuiper/internal/xsql"
	"github.com/lf-edge/ekuiper/pkg/ast"
	"github.com/lf-edge/ekuiper/pkg/kv"
)
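
// streamInfo couples a stream statement with its schema; a nil schema marks a
// schemaless stream.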
type streamInfo struct {
	stmt   *ast.StreamStmt
	schema ast.StreamFields
}

// decorateStmt analyzes the select statement by decorating it with information
// from the stream statements. Typically, it sets the correct stream name for
// each field reference.
func decorateStmt(s *ast.SelectStatement, store kv.KeyValue) ([]*streamInfo, []*ast.Call, error) {
	streamsFromStmt := xsql.GetStreams(s)
	streamStmts := make([]*streamInfo, len(streamsFromStmt))
	isSchemaless := false
	for i, s := range streamsFromStmt {
		streamStmt, err := xsql.GetDataSource(store, s)
		if err != nil {
			return nil, nil, fmt.Errorf("fail to get stream %s, please check if stream is created", s)
		}
		si, err := convertStreamInfo(streamStmt)
		if err != nil {
			return nil, nil, err
		}
		streamStmts[i] = si
		if si.schema == nil {
			isSchemaless = true
		}
	}
	dsn := ast.DefaultStream
	if len(streamsFromStmt) == 1 {
		dsn = streamStmts[0].stmt.Name
	}
	// fieldsMap maps [fieldName][streamName] -> *aliasRef, with the special keys
	// alias/default for alias fields. Each key has exactly one value.
	fieldsMap := newFieldsMap(isSchemaless, dsn)
	if !isSchemaless {
		for _, streamStmt := range streamStmts {
			for _, field := range streamStmt.schema {
				fieldsMap.reserve(field.Name, streamStmt.stmt.Name)
			}
		}
	}
	var (
		walkErr       error
		aliasFields   []*ast.Field
		analyticFuncs []*ast.Call
	)
	// Scan the select fields: bind all field refs and collect the alias fields.
	for i, f := range s.Fields {
		ast.WalkFunc(f.Expr, func(n ast.Node) bool {
			switch f := n.(type) {
			case *ast.FieldRef:
				walkErr = fieldsMap.bind(f)
			}
			return true
		})
		if walkErr != nil {
			return nil, nil, walkErr
		}
		if f.AName != "" {
			aliasFields = append(aliasFields, &s.Fields[i])
		}
	}
	// bind alias field expressions
	for _, f := range aliasFields {
		ar, err := ast.NewAliasRef(f.Expr)
		if err != nil {
			walkErr = err
		} else {
			f.Expr = &ast.FieldRef{
				StreamName: ast.AliasStream,
				Name:       f.AName,
				AliasRef:   ar,
			}
			walkErr = fieldsMap.save(f.AName, ast.AliasStream, ar)
		}
	}
	// Bind the field refs for aliases AND set the stream name for all field refs.
	ast.WalkFunc(s, func(n ast.Node) bool {
		switch f := n.(type) {
		case ast.Fields: // do not bind the selection fields; that was done above
			return false
		case *ast.FieldRef:
			if f.StreamName != "" && f.StreamName != ast.DefaultStream {
				// check if the stream exists
				found := false
				for _, sn := range streamsFromStmt {
					if sn == string(f.StreamName) {
						found = true
						break
					}
				}
				if !found {
					walkErr = fmt.Errorf("stream %s not found", f.StreamName)
					return true
				}
			}
			walkErr = fieldsMap.bind(f)
		}
		return true
	})
	if walkErr != nil {
		return nil, nil, walkErr
	}
	walkErr = validate(s)
	// Collect all analytic function calls so that they can be run first.
	ast.WalkFunc(s, func(n ast.Node) bool {
		switch f := n.(type) {
		case ast.Fields:
			return false
		case *ast.Call:
			if function.IsAnalyticFunc(f.Name) {
				f.CachedField = fmt.Sprintf("%s_%s_%d", function.AnalyticPrefix, f.Name, f.FuncId)
				f.Cached = true
				analyticFuncs = append(analyticFuncs, &ast.Call{
					Name:        f.Name,
					FuncId:      f.FuncId,
					FuncType:    f.FuncType,
					Args:        f.Args,
					CachedField: f.CachedField,
					Partition:   f.Partition,
					WhenExpr:    f.WhenExpr,
				})
			}
		}
		return true
	})
	if walkErr != nil {
		return nil, nil, walkErr
	}
	// Walk the select fields last so that they run first, because other clauses
	// may depend on the aliases defined here.
	ast.WalkFunc(s.Fields, func(n ast.Node) bool {
		switch f := n.(type) {
		case *ast.Call:
			if function.IsAnalyticFunc(f.Name) {
				f.CachedField = fmt.Sprintf("%s_%s_%d", function.AnalyticPrefix, f.Name, f.FuncId)
				f.Cached = true
				analyticFuncs = append(analyticFuncs, &ast.Call{
					Name:        f.Name,
					FuncId:      f.FuncId,
					FuncType:    f.FuncType,
					Args:        f.Args,
					CachedField: f.CachedField,
					Partition:   f.Partition,
					WhenExpr:    f.WhenExpr,
				})
			}
		}
		return true
	})
	if walkErr != nil {
		return nil, nil, walkErr
	}
	return streamStmts, analyticFuncs, walkErr
}
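
// validate checks the aggregate rules of the statement: no aggregate calls in
// WHERE or GROUP BY, only aggregates in HAVING, and no aggregate arguments
// inside aggregate calls.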
func validate(s *ast.SelectStatement) (err error) {
	if xsql.IsAggregate(s.Condition) {
		return fmt.Errorf("Not allowed to call aggregate functions in WHERE clause.")
	}
	if !allAggregate(s.Having) {
		return fmt.Errorf("Not allowed to call non-aggregate functions in HAVING clause.")
	}
	for _, d := range s.Dimensions {
		if xsql.IsAggregate(d.Expr) {
			return fmt.Errorf("Not allowed to call aggregate functions in GROUP BY clause.")
		}
	}
	ast.WalkFunc(s, func(n ast.Node) bool {
		switch f := n.(type) {
		case *ast.Call:
			// aggregate call should not have any aggregate arg
			if function.IsAggFunc(f.Name) {
				for _, arg := range f.Args {
					tr := xsql.IsAggregate(arg)
					if tr {
						err = fmt.Errorf("invalid argument for func %s: aggregate argument is not allowed", f.Name)
						return false
					}
				}
			}
		}
		return true
	})
	return
}

// file-private functions below

// allAggregate checks whether all expressions of a binary expression are aggregate.
func allAggregate(expr ast.Expr) (r bool) {
	r = true
	ast.WalkFunc(expr, func(n ast.Node) bool {
		switch f := expr.(type) {
		case *ast.BinaryExpr:
			switch f.OP {
			case ast.SUBSET, ast.ARROW:
				// do nothing
			default:
				r = allAggregate(f.LHS) && allAggregate(f.RHS)
				return false
			}
		case *ast.Call, *ast.FieldRef:
			if !xsql.IsAggregate(f) {
				r = false
				return false
			}
		}
		return true
	})
	return
}
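
// convertStreamInfo builds the streamInfo for a stream statement. When the
// stream declares a SCHEMAID, the schema is inferred from the schema file;
// otherwise the fields from the stream statement are used as-is.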
func convertStreamInfo(streamStmt *ast.StreamStmt) (*streamInfo, error) {
	ss := streamStmt.StreamFields
	var err error
	if streamStmt.Options.SCHEMAID != "" {
		ss, err = schema.InferFromSchemaFile(streamStmt.Options.FORMAT, streamStmt.Options.SCHEMAID)
		if err != nil {
			return nil, err
		}
	}
	return &streamInfo{
		stmt:   streamStmt,
		schema: ss,
	}, nil
}
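
// fieldsMap indexes every known field by its lowercase name and records, per
// field, which streams provide it and which alias reference it resolves to.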
type fieldsMap struct {
	content       map[string]streamFieldStore
	isSchemaless  bool
	defaultStream ast.StreamName
}

func newFieldsMap(isSchemaless bool, defaultStream ast.StreamName) *fieldsMap {
	return &fieldsMap{content: make(map[string]streamFieldStore), isSchemaless: isSchemaless, defaultStream: defaultStream}
}
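
// reserve registers a field name found in a stream schema for the given stream.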
func (f *fieldsMap) reserve(fieldName string, streamName ast.StreamName) {
	lname := strings.ToLower(fieldName)
	if fm, ok := f.content[lname]; ok {
		fm.add(streamName)
	} else {
		fm := newStreamFieldStore(f.isSchemaless, f.defaultStream)
		fm.add(streamName)
		f.content[lname] = fm
	}
}
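
// save stores an alias reference under the given field name, creating the
// entry on demand for alias fields or schemaless streams.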
func (f *fieldsMap) save(fieldName string, streamName ast.StreamName, field *ast.AliasRef) error {
	lname := strings.ToLower(fieldName)
	fm, ok := f.content[lname]
	if !ok {
		if streamName == ast.AliasStream || f.isSchemaless {
			fm = newStreamFieldStore(f.isSchemaless, f.defaultStream)
			f.content[lname] = fm
		} else {
			return fmt.Errorf("unknown field %s", fieldName)
		}
	}
	err := fm.ref(streamName, field)
	if err != nil {
		return fmt.Errorf("%s%s", err, fieldName)
	}
	return nil
}
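
// bind resolves a field reference to its source stream (and alias reference,
// if any). In schemaless mode unknown fields are created on the fly.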
func (f *fieldsMap) bind(fr *ast.FieldRef) error {
	lname := strings.ToLower(fr.Name)
	fm, ok := f.content[lname]
	if !ok {
		if f.isSchemaless && fr.Name != "" {
			fm = newStreamFieldStore(f.isSchemaless, f.defaultStream)
			f.content[lname] = fm
		} else {
			return fmt.Errorf("unknown field %s", fr.Name)
		}
	}
	err := fm.bindRef(fr)
	if err != nil {
		return fmt.Errorf("%s%s", err, fr.Name)
	}
	return nil
}
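
// streamFieldStore tracks, for a single field name, the streams that define it
// and the alias reference bound to it.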
type streamFieldStore interface {
	add(k ast.StreamName)
	ref(k ast.StreamName, v *ast.AliasRef) error
	bindRef(f *ast.FieldRef) error
}

func newStreamFieldStore(isSchemaless bool, defaultStream ast.StreamName) streamFieldStore {
	if !isSchemaless {
		return &streamFieldMap{content: make(map[ast.StreamName]*ast.AliasRef)}
	} else {
		return &streamFieldMapSchemaless{content: make(map[ast.StreamName]*ast.AliasRef), defaultStream: defaultStream}
	}
}
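
// streamFieldMap is the streamFieldStore used when all streams have a schema.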
type streamFieldMap struct {
	content map[ast.StreamName]*ast.AliasRef
}

// add registers the field for a stream; the stream name must not be the
// default stream. It is called while traversing the stream schemas.
func (s *streamFieldMap) add(k ast.StreamName) {
	s.content[k] = nil
}

// ref binds an alias or a column for a schema field. All keys must have been
// created before ref runs. For an alias, the entry is stored under the alias
// stream; for a column, the field must be a schema column.
func (s *streamFieldMap) ref(k ast.StreamName, v *ast.AliasRef) error {
	if k == ast.AliasStream { // must not exist yet; save the alias ref for the alias
		_, ok := s.content[k]
		if ok {
			return fmt.Errorf("duplicate alias ")
		}
		s.content[k] = v
	} else { // the key must exist after the schema traversal, so validate it
		if k == ast.DefaultStream { // in schema mode, the default stream is never a key
			l := len(s.content)
			if l == 0 {
				return fmt.Errorf("unknown field ")
			} else if l == 1 {
				// valid, do nothing
			} else {
				return fmt.Errorf("ambiguous field ")
			}
		} else {
			_, ok := s.content[k]
			if !ok {
				return fmt.Errorf("unknown field %s.", k)
			}
		}
	}
	return nil
}
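
// bindRef resolves a field reference against the streams that define the field
// and reports unknown or ambiguous references.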
func (s *streamFieldMap) bindRef(fr *ast.FieldRef) error {
	l := len(s.content)
	if fr.StreamName == "" {
		fr.StreamName = ast.DefaultStream
	}
	k := fr.StreamName
	if k == ast.DefaultStream {
		switch l {
		case 0:
			return fmt.Errorf("unknown field ")
		case 1: // a single entry (column or alias): bind to it
			for sk, sv := range s.content {
				fr.RefSelection(sv)
				fr.StreamName = sk
			}
			return nil
		default:
			r, ok := s.content[ast.AliasStream] // prefer the alias if it exists
			if ok {
				fr.RefSelection(r)
				fr.StreamName = ast.AliasStream
				return nil
			} else {
				return fmt.Errorf("ambiguous field ")
			}
		}
	} else {
		r, ok := s.content[k]
		if ok {
			fr.RefSelection(r)
			return nil
		} else {
			return fmt.Errorf("unknown field %s.", k)
		}
	}
}
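
// streamFieldMapSchemaless is the streamFieldStore used when any stream is
// schemaless; unseen columns are created on demand.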
type streamFieldMapSchemaless struct {
	content       map[ast.StreamName]*ast.AliasRef
	defaultStream ast.StreamName
}

// add should not be called in schemaless mode.
func (s *streamFieldMapSchemaless) add(k ast.StreamName) {
	s.content[k] = nil
}

// ref binds an alias or a column for a schemaless field, creating the column
// if it does not exist. It can bind both aliases and columns.
func (s *streamFieldMapSchemaless) ref(k ast.StreamName, v *ast.AliasRef) error {
	if k == ast.AliasStream { // must not exist yet
		_, ok := s.content[k]
		if ok {
			return fmt.Errorf("duplicate alias ")
		}
		s.content[k] = v
	} else { // the key may or may not exist, but there is always at most one default stream field;
		// it is replaced by the stream name once another stream is found, so keys can repeat
		l := len(s.content)
		if k == ast.DefaultStream { // in schemaless mode, the default stream can only exist when the length is 1
			if l < 1 {
				// valid, do nothing
			} else {
				return fmt.Errorf("ambiguous field ")
			}
		} else {
			if l == 1 {
				for sk := range s.content {
					if sk == ast.DefaultStream {
						delete(s.content, k)
					}
				}
			}
		}
	}
	return nil
}
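
// bindRef resolves a field reference in schemaless mode. Unknown columns are
// reserved as holes (nil entries) under the chosen stream.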
func (s *streamFieldMapSchemaless) bindRef(fr *ast.FieldRef) error {
	l := len(s.content)
	if fr.StreamName == "" || fr.StreamName == ast.DefaultStream {
		if l == 1 {
			for sk := range s.content {
				fr.StreamName = sk
			}
		}
	}
	k := fr.StreamName
	if k == ast.DefaultStream {
		switch l {
		case 0: // must be a column, because aliases are fields and have already been traversed
			// reserve a hole and do nothing
			fr.StreamName = s.defaultStream
			s.content[s.defaultStream] = nil
			return nil
		case 1: // a single alias or column: bind to it
			for sk, sv := range s.content {
				fr.RefSelection(sv)
				fr.StreamName = sk
			}
			return nil
		default:
			r, ok := s.content[ast.AliasStream] // prefer the alias if it exists
			if ok {
				fr.RefSelection(r)
				fr.StreamName = ast.AliasStream
				return nil
			} else {
				fr.StreamName = s.defaultStream
			}
		}
	}
	if fr.StreamName != ast.DefaultStream {
		r, ok := s.content[k]
		if !ok { // reserve a hole
			s.content[k] = nil
		} else {
			fr.RefSelection(r)
		}
		return nil
	}
	return fmt.Errorf("ambiguous field ")
}