source_node.go

// Copyright 2021-2022 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package node

import (
	"fmt"
	"github.com/lf-edge/ekuiper/internal/conf"
	"github.com/lf-edge/ekuiper/internal/converter"
	"github.com/lf-edge/ekuiper/internal/topo/context"
	nodeConf "github.com/lf-edge/ekuiper/internal/topo/node/conf"
	"github.com/lf-edge/ekuiper/internal/topo/node/metric"
	"github.com/lf-edge/ekuiper/internal/xsql"
	"github.com/lf-edge/ekuiper/pkg/api"
	"github.com/lf-edge/ekuiper/pkg/ast"
	"github.com/lf-edge/ekuiper/pkg/cast"
	"github.com/lf-edge/ekuiper/pkg/infra"
	"strings"
	"sync"
)
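
// SourceNode is the runtime node that pulls data from an external source
// (by default MQTT for streams and file for tables), decodes it with the
// configured format converter and broadcasts the resulting tuples to the
// downstream operators.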
type SourceNode struct {
	*defaultNode
	streamType   ast.StreamType
	sourceType   string
	options      *ast.Options
	bufferLength int
	props        map[string]interface{}
	mutex        sync.RWMutex
	sources      []api.Source
	preprocessOp UnOperation
}
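
// NewSourceNode creates a source node for a stream or table definition. When
// the options carry no TYPE, streams fall back to the "mqtt" source and
// tables to the "file" source. A minimal, illustrative call (the name and
// option values below are assumptions, not taken from this file):
//
//	node := NewSourceNode("demo", ast.TypeStream, nil, &ast.Options{TYPE: "mqtt"}, false)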
func NewSourceNode(name string, st ast.StreamType, op UnOperation, options *ast.Options, sendError bool) *SourceNode {
	t := options.TYPE
	if t == "" {
		if st == ast.TypeStream {
			t = "mqtt"
		} else if st == ast.TypeTable {
			t = "file"
		}
	}
	return &SourceNode{
		streamType: st,
		sourceType: t,
		defaultNode: &defaultNode{
			name:        name,
			outputs:     make(map[string]chan<- interface{}),
			concurrency: 1,
			sendError:   sendError,
		},
		preprocessOp: op,
		options:      options,
	}
}
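
// OffsetKey is the state key under which a rewindable source's offset is saved.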
const OffsetKey = "$$offset"
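
// Open starts the source node: it resolves the source configuration, spawns
// m.concurrency source instances in their own goroutines and forwards the
// decoded (and optionally preprocessed) data downstream. Errors that escape
// the instances are drained to errCh.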
func (m *SourceNode) Open(ctx api.StreamContext, errCh chan<- error) {
	m.ctx = ctx
	logger := ctx.GetLogger()
	logger.Infof("open source node %s with option %v", m.name, m.options)
	go func() {
		panicOrError := infra.SafeRun(func() error {
			props := nodeConf.GetSourceConf(m.sourceType, m.options)
			m.props = props
			if c, ok := props["concurrency"]; ok {
				if t, err := cast.ToInt(c, cast.STRICT); err != nil || t <= 0 {
					logger.Warnf("invalid value for concurrency property, should be a positive integer but found %v", c)
				} else {
					m.concurrency = t
				}
			}
			bl := 102400
			if c, ok := props["bufferLength"]; ok {
				if t, err := cast.ToInt(c, cast.STRICT); err != nil || t <= 0 {
					logger.Warnf("invalid value for bufferLength property, should be a positive integer but found %v", c)
				} else {
					bl = t
				}
			}
			m.bufferLength = bl
			// Set retain size for table type
			if m.options.RETAIN_SIZE > 0 && m.streamType == ast.TypeTable {
				props["$retainSize"] = m.options.RETAIN_SIZE
			}
			format := fmt.Sprintf("%v", props["format"])
			schemaFile := ""
			schemaName := m.options.SCHEMAID
			if schemaName != "" {
				r := strings.Split(schemaName, ".")
				if len(r) != 2 {
					return fmt.Errorf("invalid schemaId: %s", schemaName)
				}
				schemaFile = r[0]
				schemaName = r[1]
			}
			converter, err := converter.GetOrCreateConverter(format, schemaFile, schemaName)
			if err != nil {
				msg := fmt.Sprintf("cannot get converter from format %s, schemaId %s: %v", format, m.options.SCHEMAID, err)
				logger.Warnf(msg)
				return fmt.Errorf(msg)
			}
			ctx = context.WithValue(ctx.(*context.DefaultContext), context.DecodeKey, converter)
			m.reset()
			logger.Infof("open source node with props %v, concurrency: %d, bufferLength: %d", conf.Printable(m.props), m.concurrency, m.bufferLength)
			for i := 0; i < m.concurrency; i++ { // workers
				go func(instance int) {
					poe := infra.SafeRun(func() error {
						// Do open source instances
						var (
							si     *sourceInstance
							buffer *DynamicChannelBuffer
							err    error
						)
						si, err = getSourceInstance(m, instance)
						if err != nil {
							return err
						}
						m.mutex.Lock()
						m.sources = append(m.sources, si.source)
						m.mutex.Unlock()
						buffer = si.dataCh
						defer func() {
							logger.Infof("source %s done", m.name)
							m.close()
							buffer.Close()
						}()
						stats, err := metric.NewStatManager(ctx, "source")
						if err != nil {
							return err
						}
						m.mutex.Lock()
						m.statManagers = append(m.statManagers, stats)
						m.mutex.Unlock()
						logger.Infof("Start source %s instance %d successfully", m.name, instance)
						for {
							select {
							case <-ctx.Done():
								return nil
							case err := <-si.errorCh:
								return err
							case data := <-buffer.Out:
								if t, ok := data.(*xsql.ErrorSourceTuple); ok {
									logger.Errorf("Source %s error: %v", ctx.GetOpId(), t.Error)
									stats.IncTotalExceptions(t.Error.Error())
									continue
								}
								stats.IncTotalRecordsIn()
								stats.ProcessTimeStart()
								tuple := &xsql.Tuple{Emitter: m.name, Message: data.Message(), Timestamp: conf.GetNowInMilli(), Metadata: data.Meta()}
								var processedData interface{}
								if m.preprocessOp != nil {
									processedData = m.preprocessOp.Apply(ctx, tuple, nil, nil)
								} else {
									processedData = tuple
								}
								stats.ProcessTimeEnd()
								// blocking
								switch val := processedData.(type) {
								case nil:
									continue
								case error:
									logger.Errorf("Source %s preprocess error: %s", ctx.GetOpId(), val)
									m.Broadcast(val)
									stats.IncTotalExceptions(val.Error())
								default:
									m.Broadcast(val)
								}
								stats.IncTotalRecordsOut()
								stats.SetBufferLength(int64(buffer.GetLength()))
								if rw, ok := si.source.(api.Rewindable); ok {
									if offset, err := rw.GetOffset(); err != nil {
										infra.DrainError(ctx, err, errCh)
									} else {
										err = ctx.PutState(OffsetKey, offset)
										if err != nil {
											return err
										}
										logger.Debugf("Source save offset %v", offset)
									}
								}
							}
						}
					})
					if poe != nil {
						infra.DrainError(ctx, poe, errCh)
					}
				}(i)
			}
			return nil
		})
		if panicOrError != nil {
			infra.DrainError(ctx, panicOrError, errCh)
		}
	}()
}
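
// reset clears the per-run statistic managers so that a fresh set is
// collected when the source instances are started.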
func (m *SourceNode) reset() {
	m.statManagers = nil
}
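
// close removes this node's reference to the shared source instance when the
// stream is declared with the SHARED option.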
func (m *SourceNode) close() {
	if m.options.SHARED {
		removeSourceInstance(m)
	}
}