table_processor.go 3.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104
  1. // Copyright 2021-2022 EMQ Technologies Co., Ltd.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package operator
  15. import (
  16. "fmt"
  17. "github.com/lf-edge/ekuiper/internal/xsql"
  18. "github.com/lf-edge/ekuiper/pkg/api"
  19. "github.com/lf-edge/ekuiper/pkg/ast"
  20. )
// TableProcessor prepares tuples for a table source: it optionally
// validates/converts each incoming message against the stream schema and
// accumulates tuples into a WindowTuples collection, governed either by a
// retain-size window or by batch (EOF-driven) semantics.
type TableProcessor struct {
	// Pruned stream fields. Could be streamField(with data type info) or string
	defaultFieldProcessor
	checkSchema  bool // run schema validation/conversion on each tuple (set when the stream has a schema and strict validation is on)
	isBatchInput bool // whether the inputs are batched, such as file which sends multiple messages at a batch. If batch input, only fires when EOF is received. This is mutual exclusive with retainSize.
	retainSize   int  // how many(maximum) messages to be retained for each output
	emitterName  string // name of the emitter (table/stream) this processor serves, set at construction
	// States
	output       *xsql.WindowTuples // current batched message collection
	batchEmitted bool               // if batch input, this is the signal for whether the last batch has emitted. If true, reinitialize.
}
  32. func NewTableProcessor(isSchemaless bool, name string, fields map[string]*ast.JsonStreamField, options *ast.Options) (*TableProcessor, error) {
  33. p := &TableProcessor{emitterName: name, batchEmitted: true, retainSize: 1}
  34. if !isSchemaless && options.STRICT_VALIDATION {
  35. p.defaultFieldProcessor = defaultFieldProcessor{
  36. streamFields: fields, timestampFormat: options.TIMESTAMP_FORMAT,
  37. }
  38. p.checkSchema = true
  39. }
  40. if options.RETAIN_SIZE > 0 {
  41. p.retainSize = options.RETAIN_SIZE
  42. p.isBatchInput = false
  43. } else if isBatch(options.TYPE) {
  44. p.isBatchInput = true
  45. p.retainSize = 0
  46. }
  47. return p, nil
  48. }
  49. // Apply
  50. //
  51. // input: *xsql.Tuple or BatchCount
  52. // output: WindowTuples
  53. func (p *TableProcessor) Apply(ctx api.StreamContext, data interface{}, fv *xsql.FunctionValuer, _ *xsql.AggregateFunctionValuer) interface{} {
  54. logger := ctx.GetLogger()
  55. tuple, ok := data.(*xsql.Tuple)
  56. if !ok {
  57. return fmt.Errorf("expect *xsql.Tuple data type")
  58. }
  59. logger.Debugf("preprocessor receive %v", tuple)
  60. if p.batchEmitted {
  61. p.output = &xsql.WindowTuples{
  62. Content: make([]xsql.TupleRow, 0),
  63. }
  64. p.batchEmitted = false
  65. }
  66. if tuple.Message != nil {
  67. if p.checkSchema {
  68. err := p.validateAndConvert(tuple)
  69. if err != nil {
  70. return fmt.Errorf("error in preprocessor: %s", err)
  71. }
  72. }
  73. var newTuples []xsql.TupleRow
  74. _ = p.output.Range(func(i int, r xsql.ReadonlyRow) (bool, error) {
  75. if p.retainSize > 0 && p.output.Len() == p.retainSize && i == 0 {
  76. return true, nil
  77. }
  78. newTuples = append(newTuples, r.(xsql.TupleRow))
  79. return true, nil
  80. })
  81. newTuples = append(newTuples, tuple)
  82. p.output = &xsql.WindowTuples{
  83. Content: newTuples,
  84. }
  85. if !p.isBatchInput {
  86. return p.output
  87. }
  88. } else if p.isBatchInput { // EOF
  89. p.batchEmitted = true
  90. return p.output
  91. }
  92. return nil
  93. }
  94. func isBatch(t string) bool {
  95. return t == "file" || t == ""
  96. }