source_pool.go 7.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304
  1. // Copyright 2021 EMQ Technologies Co., Ltd.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package node
  15. import (
  16. "context"
  17. "fmt"
  18. "github.com/lf-edge/ekuiper/internal/binder/io"
  19. "github.com/lf-edge/ekuiper/internal/conf"
  20. kctx "github.com/lf-edge/ekuiper/internal/topo/context"
  21. "github.com/lf-edge/ekuiper/pkg/api"
  22. "sync"
  23. )
//// Package vars and funcs

var (
	// pool is the process-wide registry of shared source singletons,
	// keyed by "<sourceType>.<sourceName>" (see getSourceInstance).
	pool = &sourcePool{
		registry: make(map[string]*sourceSingleton),
	}
)
  30. // node is readonly
  31. func getSourceInstance(node *SourceNode, index int) (*sourceInstance, error) {
  32. var si *sourceInstance
  33. if node.options.SHARED {
  34. rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
  35. s, ok := pool.load(rkey)
  36. if !ok {
  37. ns, err := io.Source(node.sourceType)
  38. if ns != nil {
  39. s, err = pool.addInstance(rkey, node, ns, index)
  40. if err != nil {
  41. return nil, err
  42. }
  43. } else {
  44. if err != nil {
  45. return nil, err
  46. } else {
  47. return nil, fmt.Errorf("source %s not found", node.sourceType)
  48. }
  49. }
  50. }
  51. // attach
  52. instanceKey := fmt.Sprintf("%s.%s.%d", rkey, node.ctx.GetRuleId(), index)
  53. err := s.attach(instanceKey, node.bufferLength)
  54. if err != nil {
  55. return nil, err
  56. }
  57. si = &sourceInstance{
  58. source: s.source,
  59. ctx: s.ctx,
  60. sourceInstanceChannels: s.outputs[instanceKey],
  61. }
  62. } else {
  63. ns, err := io.Source(node.sourceType)
  64. if ns != nil {
  65. si, err = start(nil, node, ns, index)
  66. if err != nil {
  67. return nil, err
  68. }
  69. } else {
  70. if err != nil {
  71. return nil, err
  72. } else {
  73. return nil, fmt.Errorf("source %s not found", node.sourceType)
  74. }
  75. }
  76. }
  77. return si, nil
  78. }
  79. // removeSourceInstance remove an attach from the sourceSingleton
  80. // If all attaches are removed, close the sourceSingleton and remove it from the pool registry
  81. // ONLY apply to shared instance
  82. func removeSourceInstance(node *SourceNode) {
  83. for i := 0; i < node.concurrency; i++ {
  84. rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
  85. pool.deleteInstance(rkey, node, i)
  86. }
  87. }
//// data types

/*
 * Pool for all keyed source instance.
 * Create an instance, and start the source go routine when the keyed was hit the first time.
 * For later hit, create the new set of channels and attach to the instance
 * When hit a delete (when close a rule), remove the attached channels. If all channels removed, remove the instance from the pool
 * For performance reason, the pool only holds the shared instance. Rule specific instance are holden by rule source node itself
 */

// sourcePool maps a registry key ("<sourceType>.<name>") to the running
// shared singleton for that source. The embedded RWMutex guards registry.
type sourcePool struct {
	registry map[string]*sourceSingleton
	sync.RWMutex
}
  100. func (p *sourcePool) load(k string) (*sourceSingleton, bool) {
  101. p.RLock()
  102. defer p.RUnlock()
  103. s, ok := p.registry[k]
  104. return s, ok
  105. }
  106. func (p *sourcePool) addInstance(k string, node *SourceNode, source api.Source, index int) (*sourceSingleton, error) {
  107. p.Lock()
  108. defer p.Unlock()
  109. s, ok := p.registry[k]
  110. if !ok {
  111. contextLogger := conf.Log.WithField("source_pool", k)
  112. ctx := kctx.WithValue(kctx.Background(), kctx.LoggerKey, contextLogger)
  113. // TODO cancel
  114. sctx, cancel := ctx.WithCancel()
  115. si, err := start(sctx, node, source, index)
  116. if err != nil {
  117. return nil, err
  118. }
  119. newS := &sourceSingleton{
  120. sourceInstance: si,
  121. outputs: make(map[string]*sourceInstanceChannels),
  122. cancel: cancel,
  123. }
  124. p.registry[k] = newS
  125. go newS.run(node.sourceType, node.name)
  126. s = newS
  127. }
  128. return s, nil
  129. }
  130. func (p *sourcePool) deleteInstance(k string, node *SourceNode, index int) {
  131. p.Lock()
  132. defer p.Unlock()
  133. s, ok := p.registry[k]
  134. if ok {
  135. instanceKey := fmt.Sprintf("%s.%s.%d", k, node.ctx.GetRuleId(), index)
  136. end := s.detach(instanceKey)
  137. if end {
  138. s.cancel()
  139. _ = s.source.Close(s.ctx)
  140. s.dataCh.Close()
  141. delete(p.registry, k)
  142. }
  143. }
  144. }
// sourceInstance is what a rule's source node consumes: the underlying
// source, the stream context it runs under, and the channel set (data +
// error) the node reads from.
type sourceInstance struct {
	source api.Source
	ctx    api.StreamContext
	*sourceInstanceChannels
}
// Hold the only instance for all shared source
// And hold the reference to all shared source input channels. Must be sync when dealing with outputs
type sourceSingleton struct {
	*sourceInstance                    // immutable
	cancel context.CancelFunc          // immutable
	// outputs maps an instanceKey ("<rkey>.<ruleId>.<index>") to that
	// attachment's channel set; guarded by the embedded RWMutex.
	outputs map[string]*sourceInstanceChannels // read-write lock
	sync.RWMutex
}
// sourceInstanceChannels bundles the per-attachment output channels:
// dataCh carries source tuples, errorCh carries source errors.
type sourceInstanceChannels struct {
	dataCh  *DynamicChannelBuffer
	errorCh chan error
}
  162. func newSourceInstanceChannels(bl int) *sourceInstanceChannels {
  163. buffer := NewDynamicChannelBuffer()
  164. buffer.SetLimit(bl)
  165. errorOutput := make(chan error)
  166. return &sourceInstanceChannels{
  167. dataCh: buffer,
  168. errorCh: errorOutput,
  169. }
  170. }
// run is the singleton's pump loop: it forwards every tuple read from the
// shared source to all attached outputs until the context is cancelled or
// the source reports an error. Intended to run in its own goroutine.
func (ss *sourceSingleton) run(name, key string) {
	logger := ss.ctx.GetLogger()
	logger.Infof("Start source %s shared instance %s successfully", name, key)
	for {
		select {
		case <-ss.ctx.Done():
			logger.Infof("source %s shared instance %s done", name, key)
			return
		case err := <-ss.errorCh:
			// A source error is terminal for the shared instance:
			// fan it out to all attachments, then stop pumping.
			ss.broadcastError(err)
			return
		case data := <-ss.dataCh.Out:
			logger.Debugf("broadcast data %v from source pool %s:%s", data, name, key)
			ss.broadcast(data)
		}
	}
}
// broadcast fans val out to every attached data channel. Each send runs in
// its own goroutine so one slow consumer cannot block the others; a pending
// send is abandoned when the singleton's context ends or the target
// detaches (its done channel closes).
// NOTE(review): spawning a goroutine per tuple means per-consumer delivery
// order is not guaranteed under load — confirm downstream tolerates this.
func (ss *sourceSingleton) broadcast(val api.SourceTuple) {
	logger := ss.ctx.GetLogger()
	ss.RLock()
	for n, out := range ss.outputs {
		// Loop variables are passed as arguments so each goroutine
		// targets its own channel.
		go func(name string, dataCh *DynamicChannelBuffer) {
			select {
			case dataCh.Out <- val:
				logger.Debugf("broadcast from source pool to %s done", name)
			case <-ss.ctx.Done():
			case <-dataCh.done:
				// detached
			}
		}(n, out.dataCh)
	}
	ss.RUnlock()
}
// broadcastError fans err out to every attached error channel and blocks
// until each consumer has received it or the singleton's context is done.
func (ss *sourceSingleton) broadcastError(err error) {
	logger := ss.ctx.GetLogger()
	var wg sync.WaitGroup
	ss.RLock()
	wg.Add(len(ss.outputs))
	for n, out := range ss.outputs {
		// Loop variables are passed as arguments so each goroutine
		// targets its own channel.
		go func(name string, output chan<- error) {
			select {
			case output <- err:
				logger.Debugf("broadcast error from source pool to %s done", name)
			case <-ss.ctx.Done():
				// rule stop so stop waiting
			}
			wg.Done()
		}(n, out.errorCh)
	}
	ss.RUnlock()
	logger.Debugf("broadcasting from source pool")
	wg.Wait()
}
  224. func (ss *sourceSingleton) attach(instanceKey string, bl int) error {
  225. ss.Lock()
  226. defer ss.Unlock()
  227. if _, ok := ss.outputs[instanceKey]; !ok {
  228. ss.outputs[instanceKey] = newSourceInstanceChannels(bl)
  229. } else {
  230. // should not happen
  231. return fmt.Errorf("fail to attach source instance, already has an output of the same key %s", instanceKey)
  232. }
  233. return nil
  234. }
  235. // detach Detach an instance and return if the singleton is ended
  236. func (ss *sourceSingleton) detach(instanceKey string) bool {
  237. ss.Lock()
  238. defer ss.Unlock()
  239. if chs, ok := ss.outputs[instanceKey]; ok {
  240. chs.dataCh.Close()
  241. } else {
  242. // should not happen
  243. ss.ctx.GetLogger().Warnf("detach source instance %s, not found", instanceKey)
  244. return false
  245. }
  246. delete(ss.outputs, instanceKey)
  247. if len(ss.outputs) == 0 {
  248. ss.cancel()
  249. return true
  250. }
  251. return false
  252. }
// start configures and opens a source, returning a ready sourceInstance whose
// channels are fed by the source's Open goroutine.
// poolCtx is the pool-created context for shared sources. When it is nil the
// source is rule-specific and runs under node.ctx; in that case a Rewindable
// source is rewound to the offset persisted in the rule's state, if any.
func start(poolCtx api.StreamContext, node *SourceNode, s api.Source, instanceIndex int) (*sourceInstance, error) {
	err := s.Configure(node.options.DATASOURCE, node.props)
	if err != nil {
		return nil, err
	}
	ctx := poolCtx
	if poolCtx == nil {
		// Rule-specific source: use the node's own context and resume from
		// any saved offset so the rule continues where it left off.
		ctx = node.ctx
		if rw, ok := s.(api.Rewindable); ok {
			if offset, err := ctx.GetState(OffsetKey); err != nil {
				return nil, err
			} else if offset != nil {
				ctx.GetLogger().Infof("Source rewind from %v", offset)
				err = rw.Rewind(offset)
				if err != nil {
					return nil, err
				}
			}
		}
	}
	chs := newSourceInstanceChannels(node.bufferLength)
	// Open runs in its own goroutine; data and errors flow back via chs.
	go s.Open(ctx.WithInstance(instanceIndex), chs.dataCh.In, chs.errorCh)
	return &sourceInstance{
		source:                 s,
		sourceInstanceChannels: chs,
		ctx:                    ctx,
	}, nil
}