  1. // Copyright 2021-2022 EMQ Technologies Co., Ltd.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package node
  15. import (
  16. "context"
  17. "fmt"
  18. "github.com/lf-edge/ekuiper/internal/binder/io"
  19. "github.com/lf-edge/ekuiper/internal/conf"
  20. kctx "github.com/lf-edge/ekuiper/internal/topo/context"
  21. "github.com/lf-edge/ekuiper/internal/topo/state"
  22. "github.com/lf-edge/ekuiper/pkg/api"
  23. "github.com/lf-edge/ekuiper/pkg/infra"
  24. "sync"
  25. )
//// Package vars and funcs

// pool is the package-level registry of shared source singletons, keyed by
// "<sourceType>.<name>". It only holds SHARED sources; rule-private
// instances are held by their own source node.
var (
	pool = &sourcePool{
		registry: make(map[string]*sourceSingleton),
	}
)
  32. // node is readonly
  33. func getSourceInstance(node *SourceNode, index int) (*sourceInstance, error) {
  34. var si *sourceInstance
  35. if node.options.SHARED {
  36. rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
  37. s, ok := pool.load(rkey)
  38. if !ok {
  39. ns, err := io.Source(node.sourceType)
  40. if ns != nil {
  41. s, err = pool.addInstance(rkey, node, ns)
  42. if err != nil {
  43. return nil, err
  44. }
  45. } else {
  46. if err != nil {
  47. return nil, err
  48. } else {
  49. return nil, fmt.Errorf("source %s not found", node.sourceType)
  50. }
  51. }
  52. }
  53. // attach
  54. instanceKey := fmt.Sprintf("%s.%s.%d", rkey, node.ctx.GetRuleId(), index)
  55. err := s.attach(instanceKey, node.bufferLength)
  56. if err != nil {
  57. return nil, err
  58. }
  59. si = &sourceInstance{
  60. source: s.source,
  61. ctx: s.ctx,
  62. sourceInstanceChannels: s.outputs[instanceKey],
  63. }
  64. } else {
  65. ns, err := io.Source(node.sourceType)
  66. if ns != nil {
  67. si, err = start(nil, node, ns)
  68. if err != nil {
  69. return nil, err
  70. }
  71. go func() {
  72. err := infra.SafeRun(func() error {
  73. nctx := node.ctx.WithInstance(index)
  74. defer si.source.Close(nctx)
  75. si.source.Open(nctx, si.dataCh.In, si.errorCh)
  76. return nil
  77. })
  78. if err != nil {
  79. infra.DrainError(node.ctx, err, si.errorCh)
  80. }
  81. }()
  82. } else {
  83. if err != nil {
  84. return nil, err
  85. } else {
  86. return nil, fmt.Errorf("source %s not found", node.sourceType)
  87. }
  88. }
  89. }
  90. return si, nil
  91. }
  92. // removeSourceInstance remove an attach from the sourceSingleton
  93. // If all attaches are removed, close the sourceSingleton and remove it from the pool registry
  94. // ONLY apply to shared instance
  95. func removeSourceInstance(node *SourceNode) {
  96. for i := 0; i < node.concurrency; i++ {
  97. rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
  98. pool.deleteInstance(rkey, node, i)
  99. }
  100. }
//// data types

/*
 * Pool for all keyed source instance.
 * Create an instance, and start the source go routine when the keyed was hit the first time.
 * For later hit, create the new set of channels and attach to the instance
 * When hit a delete (when close a rule), remove the attached channels. If all channels removed, remove the instance from the pool
 * For performance reason, the pool only holds the shared instance. Rule specific instance are holden by rule source node itself
 */

// sourcePool maps registry keys ("<sourceType>.<name>") to their shared
// singleton. All registry access is guarded by the embedded RWMutex.
type sourcePool struct {
	registry map[string]*sourceSingleton
	sync.RWMutex
}
  113. func (p *sourcePool) load(k string) (*sourceSingleton, bool) {
  114. p.RLock()
  115. defer p.RUnlock()
  116. s, ok := p.registry[k]
  117. return s, ok
  118. }
  119. func (p *sourcePool) addInstance(k string, node *SourceNode, source api.Source) (*sourceSingleton, error) {
  120. p.Lock()
  121. defer p.Unlock()
  122. s, ok := p.registry[k]
  123. if !ok {
  124. contextLogger := conf.Log.WithField("source_pool", k)
  125. ctx := kctx.WithValue(kctx.Background(), kctx.LoggerKey, contextLogger)
  126. ruleId := "$$source_pool_" + k
  127. opId := "source_pool_" + k
  128. store, err := state.CreateStore("source_pool_"+k, 0)
  129. if err != nil {
  130. ctx.GetLogger().Errorf("source pool %s create store error %v", k, err)
  131. return nil, err
  132. }
  133. sctx, cancel := ctx.WithMeta(ruleId, opId, store).WithCancel()
  134. si, err := start(sctx, node, source)
  135. if err != nil {
  136. return nil, err
  137. }
  138. newS := &sourceSingleton{
  139. sourceInstance: si,
  140. outputs: make(map[string]*sourceInstanceChannels),
  141. cancel: cancel,
  142. }
  143. p.registry[k] = newS
  144. go func() {
  145. err := infra.SafeRun(func() error {
  146. defer si.source.Close(sctx)
  147. si.source.Open(sctx, si.dataCh.In, si.errorCh)
  148. return nil
  149. })
  150. if err != nil {
  151. newS.broadcastError(err)
  152. }
  153. }()
  154. go func() {
  155. err := infra.SafeRun(func() error {
  156. newS.run(node.sourceType, node.name)
  157. return nil
  158. })
  159. if err != nil {
  160. newS.broadcastError(err)
  161. }
  162. }()
  163. s = newS
  164. }
  165. return s, nil
  166. }
// deleteInstance detaches the rule instance identified by (rule id, index)
// from the singleton registered under k, if any. When that was the last
// attachment, the singleton is torn down: its context is cancelled, its data
// buffer closed, and the registry entry removed.
func (p *sourcePool) deleteInstance(k string, node *SourceNode, index int) {
	p.Lock()
	defer p.Unlock()
	s, ok := p.registry[k]
	if ok {
		instanceKey := fmt.Sprintf("%s.%s.%d", k, node.ctx.GetRuleId(), index)
		end := s.detach(instanceKey)
		if end {
			// NOTE(review): detach already cancelled the context when it
			// returned true; this second cancel is redundant but harmless
			// (context.CancelFunc is idempotent).
			s.cancel()
			s.dataCh.Close()
			delete(p.registry, k)
		}
	}
}
// sourceInstance is the handle a source node works with: the source itself,
// the stream context it runs under, and (embedded) the channels carrying its
// data and errors.
type sourceInstance struct {
	source api.Source
	ctx    api.StreamContext
	*sourceInstanceChannels
}
// Hold the only instance for all shared source
// And hold the reference to all shared source input channels. Must be sync when dealing with outputs
type sourceSingleton struct {
	*sourceInstance                    // immutable
	cancel context.CancelFunc          // immutable; stops the pool-owned context
	// outputs maps an instance key ("<rkey>.<ruleId>.<index>") to the channel
	// set of one attached rule instance; guarded by the embedded RWMutex.
	outputs map[string]*sourceInstanceChannels // read-write lock
	sync.RWMutex
}
// sourceInstanceChannels bundles the pair of channels through which a source
// instance delivers data tuples and errors downstream.
type sourceInstanceChannels struct {
	dataCh  *DynamicChannelBuffer
	errorCh chan error
}
  198. func newSourceInstanceChannels(bl int) *sourceInstanceChannels {
  199. buffer := NewDynamicChannelBuffer()
  200. buffer.SetLimit(bl)
  201. errorOutput := make(chan error)
  202. return &sourceInstanceChannels{
  203. dataCh: buffer,
  204. errorCh: errorOutput,
  205. }
  206. }
  207. func (ss *sourceSingleton) run(name, key string) {
  208. logger := ss.ctx.GetLogger()
  209. logger.Infof("Start source %s shared instance %s successfully", name, key)
  210. for {
  211. select {
  212. case <-ss.ctx.Done():
  213. logger.Infof("source %s shared instance %s done", name, key)
  214. return
  215. case err := <-ss.errorCh:
  216. ss.broadcastError(err)
  217. return
  218. case data := <-ss.dataCh.Out:
  219. logger.Debugf("broadcast data %v from source pool %s:%s", data, name, key)
  220. ss.broadcast(data)
  221. }
  222. }
  223. }
  224. func (ss *sourceSingleton) broadcast(val api.SourceTuple) {
  225. ss.RLock()
  226. defer ss.RUnlock()
  227. for name, out := range ss.outputs {
  228. select {
  229. case out.dataCh.In <- val:
  230. case <-ss.ctx.Done():
  231. case <-out.dataCh.done:
  232. // detached
  233. default:
  234. ss.ctx.GetLogger().Errorf("share source drop message to %s", name)
  235. }
  236. }
  237. }
  238. func (ss *sourceSingleton) broadcastError(err error) {
  239. logger := ss.ctx.GetLogger()
  240. var wg sync.WaitGroup
  241. ss.RLock()
  242. wg.Add(len(ss.outputs))
  243. for n, out := range ss.outputs {
  244. go func(name string, output chan<- error) {
  245. infra.DrainError(ss.ctx, err, output)
  246. wg.Done()
  247. }(n, out.errorCh)
  248. }
  249. ss.RUnlock()
  250. logger.Debugf("broadcasting from source pool")
  251. wg.Wait()
  252. }
  253. func (ss *sourceSingleton) attach(instanceKey string, bl int) error {
  254. ss.Lock()
  255. defer ss.Unlock()
  256. if _, ok := ss.outputs[instanceKey]; !ok {
  257. ss.outputs[instanceKey] = newSourceInstanceChannels(bl)
  258. } else {
  259. // should not happen
  260. return fmt.Errorf("fail to attach source instance, already has an output of the same key %s", instanceKey)
  261. }
  262. return nil
  263. }
  264. // detach Detach an instance and return if the singleton is ended
  265. func (ss *sourceSingleton) detach(instanceKey string) bool {
  266. ss.Lock()
  267. defer ss.Unlock()
  268. if chs, ok := ss.outputs[instanceKey]; ok {
  269. chs.dataCh.Close()
  270. } else {
  271. // should not happen
  272. ss.ctx.GetLogger().Warnf("detach source instance %s, not found", instanceKey)
  273. return false
  274. }
  275. delete(ss.outputs, instanceKey)
  276. if len(ss.outputs) == 0 {
  277. ss.cancel()
  278. return true
  279. }
  280. return false
  281. }
  282. func start(poolCtx api.StreamContext, node *SourceNode, s api.Source) (*sourceInstance, error) {
  283. err := s.Configure(node.options.DATASOURCE, node.props)
  284. if err != nil {
  285. return nil, err
  286. }
  287. ctx := poolCtx
  288. if poolCtx == nil {
  289. ctx = node.ctx
  290. if rw, ok := s.(api.Rewindable); ok {
  291. if offset, err := ctx.GetState(OffsetKey); err != nil {
  292. return nil, err
  293. } else if offset != nil {
  294. ctx.GetLogger().Infof("Source rewind from %v", offset)
  295. err = rw.Rewind(offset)
  296. if err != nil {
  297. return nil, err
  298. }
  299. }
  300. }
  301. }
  302. chs := newSourceInstanceChannels(node.bufferLength)
  303. return &sourceInstance{
  304. source: s,
  305. sourceInstanceChannels: chs,
  306. ctx: ctx,
  307. }, nil
  308. }