// Copyright 2021-2022 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package operator

import (
	"fmt"

	"github.com/lf-edge/ekuiper/internal/xsql"
	"github.com/lf-edge/ekuiper/pkg/api"
	"github.com/lf-edge/ekuiper/pkg/ast"
)
// TableProcessor accumulates incoming tuples for a table emitter into a
// window-style collection and controls when that collection is emitted
// (either on every tuple, or only at EOF for batch inputs).
type TableProcessor struct {
	// Pruned stream fields. Could be streamField(with data type info) or string.
	defaultFieldProcessor

	checkSchema  bool // if true, each tuple is validated/converted against the stream fields
	isBatchInput bool // whether the inputs are batched, such as file which sends multiple messages at a batch. If batch input, only fires when EOF is received. This is mutual exclusive with retainSize.
	retainSize   int  // how many(maximum) messages to be retained for each output
	emitterName  string

	// States
	output       *xsql.WindowTuples // current batched message collection
	batchEmitted bool               // if batch input, this is the signal for whether the last batch has emitted. If true, reinitialize.
}
  32. func NewTableProcessor(isSchemaless bool, name string, fields map[string]*ast.JsonStreamField, options *ast.Options) (*TableProcessor, error) {
  33. p := &TableProcessor{emitterName: name, batchEmitted: true, retainSize: 1}
  34. if !isSchemaless && options.STRICT_VALIDATION {
  35. p.defaultFieldProcessor = defaultFieldProcessor{
  36. streamFields: fields, timestampFormat: options.TIMESTAMP_FORMAT,
  37. }
  38. p.checkSchema = true
  39. }
  40. if options.RETAIN_SIZE > 0 {
  41. p.retainSize = options.RETAIN_SIZE
  42. p.isBatchInput = false
  43. } else if isBatch(options.TYPE) {
  44. p.isBatchInput = true
  45. p.retainSize = 0
  46. }
  47. return p, nil
  48. }
// Apply folds one incoming tuple into the processor's current output
// collection and returns that collection when it is ready to emit.
//
// input: *xsql.Tuple or BatchCount
// output: WindowTuples
//
// For non-batch input the (possibly retain-size-capped) collection is
// returned on every tuple; for batch input nothing is returned until a
// tuple with a nil Message arrives, which is treated as the EOF signal.
// Returns an error value (not nil) on type or validation failures.
func (p *TableProcessor) Apply(ctx api.StreamContext, data interface{}, fv *xsql.FunctionValuer, _ *xsql.AggregateFunctionValuer) interface{} {
	logger := ctx.GetLogger()
	tuple, ok := data.(*xsql.Tuple)
	if !ok {
		return fmt.Errorf("expect *xsql.Tuple data type")
	}
	logger.Debugf("preprocessor receive %v", tuple)
	// The previous batch has been emitted (or this is the first tuple):
	// start a fresh collection.
	if p.batchEmitted {
		p.output = &xsql.WindowTuples{
			Content: make([]xsql.TupleRow, 0),
		}
		p.batchEmitted = false
	}
	if tuple.Message != nil {
		if p.checkSchema {
			err := p.validateAndConvert(tuple)
			if err != nil {
				return fmt.Errorf("error in preprocessor: %s", err)
			}
		}
		// Rebuild the collection: when retainSize is set and the collection
		// is already full, skip the oldest row (index 0) so the append below
		// keeps the size at retainSize.
		var newTuples []xsql.TupleRow
		_ = p.output.Range(func(i int, r xsql.ReadonlyRow) (bool, error) {
			if p.retainSize > 0 && p.output.Len() == p.retainSize && i == 0 {
				return true, nil
			}
			newTuples = append(newTuples, r.(xsql.TupleRow))
			return true, nil
		})
		newTuples = append(newTuples, tuple)
		p.output = &xsql.WindowTuples{
			Content: newTuples,
		}
		// Non-batch inputs emit on every tuple; batch inputs wait for EOF.
		if !p.isBatchInput {
			return p.output
		}
	} else if p.isBatchInput { // EOF
		// A nil Message on a batch input marks end-of-batch: emit what was
		// accumulated and flag the collection for reinitialization.
		p.batchEmitted = true
		return p.output
	}
	return nil
}
  93. func isBatch(t string) bool {
  94. return t == "file" || t == ""
  95. }