// Copyright 2021 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package node

import (
	"context"
	"fmt"
	"sync"

	"github.com/lf-edge/ekuiper/internal/conf"
	kctx "github.com/lf-edge/ekuiper/internal/topo/context"
	"github.com/lf-edge/ekuiper/pkg/api"
)

//// Package vars and funcs

var (
	pool = &sourcePool{
		registry: make(map[string]*sourceSingleton),
	}
)

// getSourceInstance returns a source instance for the given node.
// The node is read-only here.
func getSourceInstance(node *SourceNode, index int) (*sourceInstance, error) {
	var si *sourceInstance
	if node.options.SHARED {
		rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
		s, ok := pool.load(rkey)
		if !ok {
			ns, err := getSource(node.sourceType)
			if err != nil {
				return nil, err
			}
			s, err = pool.addInstance(rkey, node, ns, index)
			if err != nil {
				return nil, err
			}
		}
		// attach the rule's own set of channels to the shared singleton
		instanceKey := fmt.Sprintf("%s.%s.%d", rkey, node.ctx.GetRuleId(), index)
		err := s.attach(instanceKey, node.bufferLength)
		if err != nil {
			return nil, err
		}
		si = &sourceInstance{
			source:                 s.source,
			ctx:                    s.ctx,
			sourceInstanceChannels: s.outputs[instanceKey],
		}
	} else {
		ns, err := getSource(node.sourceType)
		if err != nil {
			return nil, err
		}
		si, err = start(nil, node, ns, index)
		if err != nil {
			return nil, err
		}
	}
	return si, nil
}
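
// Illustrative usage sketch (not part of the original code): a rule's source
// node would typically obtain one instance per concurrency index and read
// from its per-rule channels. The variable names here are hypothetical.
//
//	si, err := getSourceInstance(node, 0)
//	if err != nil {
//		// handle the error
//	}
//	for tuple := range si.dataCh.Out {
//		_ = tuple // process the api.SourceTuple
//	}
//	// when the rule closes and the source is shared:
//	removeSourceInstance(node)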

// removeSourceInstance removes an attachment from the sourceSingleton.
// If all attachments are removed, close the sourceSingleton and remove it
// from the pool registry.
// ONLY applies to shared instances.
func removeSourceInstance(node *SourceNode) {
	for i := 0; i < node.concurrency; i++ {
		rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
		pool.deleteInstance(rkey, node, i)
	}
}

//// data types

/*
 * Pool for all keyed source instances.
 * Create an instance and start the source goroutine when the key is hit for
 * the first time. For later hits, create a new set of channels and attach
 * them to the instance.
 * On delete (when closing a rule), remove the attached channels. If all
 * channels are removed, remove the instance from the pool.
 * For performance reasons, the pool only holds the shared instances.
 * Rule-specific instances are held by the rule's source node itself.
 */
type sourcePool struct {
	registry map[string]*sourceSingleton
	sync.RWMutex
}
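
// Key scheme, as constructed in getSourceInstance and deleteInstance. The
// example values are illustrative only:
//
//	registry key:  "<sourceType>.<name>"             e.g. "mqtt.demo"
//	instance key:  "<registryKey>.<ruleId>.<index>"  e.g. "mqtt.demo.rule1.0"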

func (p *sourcePool) load(k string) (*sourceSingleton, bool) {
	p.RLock()
	defer p.RUnlock()
	s, ok := p.registry[k]
	return s, ok
}

func (p *sourcePool) addInstance(k string, node *SourceNode, source api.Source, index int) (*sourceSingleton, error) {
	p.Lock()
	defer p.Unlock()
	s, ok := p.registry[k]
	if !ok {
		contextLogger := conf.Log.WithField("source_pool", k)
		ctx := kctx.WithValue(kctx.Background(), kctx.LoggerKey, contextLogger)
		// TODO cancel
		sctx, cancel := ctx.WithCancel()
		si, err := start(sctx, node, source, index)
		if err != nil {
			return nil, err
		}
		newS := &sourceSingleton{
			sourceInstance: si,
			outputs:        make(map[string]*sourceInstanceChannels),
			cancel:         cancel,
		}
		p.registry[k] = newS
		go newS.run(node.sourceType, node.name)
		s = newS
	}
	return s, nil
}

func (p *sourcePool) deleteInstance(k string, node *SourceNode, index int) {
	p.Lock()
	defer p.Unlock()
	s, ok := p.registry[k]
	if ok {
		instanceKey := fmt.Sprintf("%s.%s.%d", k, node.ctx.GetRuleId(), index)
		end := s.detach(instanceKey)
		if end {
			s.cancel()
			_ = s.source.Close(s.ctx)
			s.dataCh.Close()
			delete(p.registry, k)
		}
	}
}

type sourceInstance struct {
	source api.Source
	ctx    api.StreamContext
	*sourceInstanceChannels
}

// sourceSingleton holds the only running instance of a shared source, plus
// the references to all of its attached channels. Access to outputs must be
// synchronized.
type sourceSingleton struct {
	*sourceInstance                    // immutable
	cancel  context.CancelFunc         // immutable
	outputs map[string]*sourceInstanceChannels // guarded by the read-write lock
	sync.RWMutex
}

type sourceInstanceChannels struct {
	dataCh  *DynamicChannelBuffer
	errorCh chan error
}

func newSourceInstanceChannels(bl int) *sourceInstanceChannels {
	buffer := NewDynamicChannelBuffer()
	buffer.SetLimit(bl)
	errorOutput := make(chan error)
	return &sourceInstanceChannels{
		dataCh:  buffer,
		errorCh: errorOutput,
	}
}

func (ss *sourceSingleton) run(name, key string) {
	logger := ss.ctx.GetLogger()
	logger.Infof("Start source %s shared instance %s successfully", name, key)
	for {
		select {
		case <-ss.ctx.Done():
			logger.Infof("source %s shared instance %s done", name, key)
			return
		case err := <-ss.errorCh:
			ss.broadcastError(err)
			return
		case data := <-ss.dataCh.Out:
			logger.Debugf("broadcast data %v from source pool %s:%s", data, name, key)
			ss.broadcast(data)
		}
	}
}

func (ss *sourceSingleton) broadcast(val api.SourceTuple) {
	logger := ss.ctx.GetLogger()
	var wg sync.WaitGroup
	ss.RLock()
	wg.Add(len(ss.outputs))
	for n, out := range ss.outputs {
		go func(name string, output chan<- api.SourceTuple) {
			select {
			case output <- val:
				logger.Debugf("broadcast from source pool to %s done", name)
			case <-ss.ctx.Done():
				// rule stopped so stop waiting
			}
			wg.Done()
		}(n, out.dataCh.In) // send into the per-rule buffer so its limit applies
	}
	ss.RUnlock()
	wg.Wait()
}

func (ss *sourceSingleton) broadcastError(err error) {
	logger := ss.ctx.GetLogger()
	var wg sync.WaitGroup
	ss.RLock()
	wg.Add(len(ss.outputs))
	for n, out := range ss.outputs {
		go func(name string, output chan<- error) {
			select {
			case output <- err:
				logger.Debugf("broadcast error from source pool to %s done", name)
			case <-ss.ctx.Done():
				// rule stopped so stop waiting
			}
			wg.Done()
		}(n, out.errorCh)
	}
	ss.RUnlock()
	logger.Debugf("broadcasting from source pool")
	wg.Wait()
}

func (ss *sourceSingleton) attach(instanceKey string, bl int) error {
	ss.Lock()
	defer ss.Unlock()
	if _, ok := ss.outputs[instanceKey]; !ok {
		ss.outputs[instanceKey] = newSourceInstanceChannels(bl)
	} else {
		// should not happen
		return fmt.Errorf("fail to attach source instance, already has an output of the same key %s", instanceKey)
	}
	return nil
}

// detach detaches an instance and reports whether the singleton has ended
// (i.e. no outputs remain attached).
func (ss *sourceSingleton) detach(instanceKey string) bool {
	ss.Lock()
	defer ss.Unlock()
	if chs, ok := ss.outputs[instanceKey]; ok {
		chs.dataCh.Close()
	} else {
		// should not happen
		ss.ctx.GetLogger().Warnf("detach source instance %s, not found", instanceKey)
	}
	delete(ss.outputs, instanceKey)
	if len(ss.outputs) == 0 {
		ss.cancel()
		return true
	}
	return false
}

// start configures and opens the source. A nil poolCtx means the instance is
// rule-specific (non-shared), so it runs with the node's own context and may
// rewind to a saved offset.
func start(poolCtx api.StreamContext, node *SourceNode, s api.Source, instanceIndex int) (*sourceInstance, error) {
	err := s.Configure(node.options.DATASOURCE, node.props)
	if err != nil {
		return nil, err
	}
	ctx := poolCtx
	if poolCtx == nil {
		ctx = node.ctx
		if rw, ok := s.(api.Rewindable); ok {
			if offset, err := ctx.GetState(OffsetKey); err != nil {
				return nil, err
			} else if offset != nil {
				ctx.GetLogger().Infof("Source rewind from %v", offset)
				err = rw.Rewind(offset)
				if err != nil {
					return nil, err
				}
			}
		}
	}
	chs := newSourceInstanceChannels(node.bufferLength)
	go s.Open(ctx.WithInstance(instanceIndex), chs.dataCh.In, chs.errorCh)
	return &sourceInstance{
		source:                 s,
		sourceInstanceChannels: chs,
		ctx:                    ctx,
	}, nil
}
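
// Data flow of a shared source, sketched from the functions above (an
// illustration, not code):
//
//	s.Open -> singleton.dataCh.In -> singleton.dataCh.Out -> run -> broadcast
//	broadcast -> outputs[instanceKey].dataCh (one per rule) -> the rule's source node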