source_pool.go

// Copyright 2021-2022 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package node

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/lf-edge/ekuiper/internal/binder/io"
	"github.com/lf-edge/ekuiper/internal/conf"
	kctx "github.com/lf-edge/ekuiper/internal/topo/context"
	"github.com/lf-edge/ekuiper/internal/topo/state"
	"github.com/lf-edge/ekuiper/pkg/api"
	"github.com/lf-edge/ekuiper/pkg/infra"
)

//// Package vars and funcs

var (
	pool = &sourcePool{
		registry: make(map[string]*sourceSingleton),
	}
)

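// getSourceInstance returns the source instance for the given node and concurrency index.
// For shared sources it loads (or lazily creates) the pooled singleton and attaches a new
// set of channels to it; for non-shared sources it starts a dedicated instance and opens
// the source in its own goroutine.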
// node is readonly
func getSourceInstance(node *SourceNode, index int) (*sourceInstance, error) {
	var si *sourceInstance
	if node.options.SHARED {
		rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
		s, ok := pool.load(rkey)
		if !ok {
			ns, err := io.Source(node.sourceType)
			if ns != nil {
				s, err = pool.addInstance(rkey, node, ns)
				if err != nil {
					return nil, err
				}
			} else {
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("source %s not found", node.sourceType)
			}
		}
		// attach
		instanceKey := fmt.Sprintf("%s.%s.%d", rkey, node.ctx.GetRuleId(), index)
		err := s.attach(instanceKey, node.bufferLength)
		if err != nil {
			return nil, err
		}
		si = &sourceInstance{
			source:                 s.source,
			ctx:                    s.ctx,
			sourceInstanceChannels: s.outputs[instanceKey],
		}
	} else {
		ns, err := io.Source(node.sourceType)
		if ns != nil {
			si, err = start(nil, node, ns)
			if err != nil {
				return nil, err
			}
			go func() {
				err := infra.SafeRun(func() error {
					nctx := node.ctx.WithInstance(index)
					defer si.source.Close(nctx)
					si.source.Open(nctx, si.dataCh.In, si.errorCh)
					return nil
				})
				if err != nil {
					infra.DrainError(node.ctx, err, si.errorCh)
				}
			}()
		} else {
			if err != nil {
				return nil, err
			}
			return nil, fmt.Errorf("source %s not found", node.sourceType)
		}
	}
	return si, nil
}

// removeSourceInstance removes an attached instance from the sourceSingleton.
// If all attached instances are removed, the sourceSingleton is closed and removed from the pool registry.
// ONLY applies to shared instances.
func removeSourceInstance(node *SourceNode) {
	for i := 0; i < node.concurrency; i++ {
		rkey := fmt.Sprintf("%s.%s", node.sourceType, node.name)
		pool.deleteInstance(rkey, node, i)
	}
}

//// data types

/*
 * Pool for all keyed source instances.
 * Creates an instance and starts the source goroutine when a key is hit for the first time.
 * For later hits, creates a new set of channels and attaches them to the instance.
 * On delete (when a rule is closed), removes the attached channels. If all channels are removed, removes the instance from the pool.
 * For performance reasons, the pool only holds shared instances. Rule-specific instances are held by the rule's source node itself.
 */
type sourcePool struct {
	registry map[string]*sourceSingleton
	sync.RWMutex
}

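// load looks up the shared singleton for the given key under a read lock.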
func (p *sourcePool) load(k string) (*sourceSingleton, bool) {
	p.RLock()
	defer p.RUnlock()
	s, ok := p.registry[k]
	return s, ok
}

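// addInstance creates and registers the shared singleton for the given key if it does not
// exist yet: it builds a pool-scoped context and state store, starts the source, and spawns
// two goroutines, one running the source's Open loop and one fanning data out to the
// attached outputs. If the key is already registered, the existing singleton is returned.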
func (p *sourcePool) addInstance(k string, node *SourceNode, source api.Source) (*sourceSingleton, error) {
	p.Lock()
	defer p.Unlock()
	s, ok := p.registry[k]
	if !ok {
		contextLogger := conf.Log.WithField("source_pool", k)
		ctx := kctx.WithValue(kctx.Background(), kctx.LoggerKey, contextLogger)
		ruleId := "$$source_pool_" + k
		opId := "source_pool_" + k
		store, err := state.CreateStore("source_pool_"+k, 0)
		if err != nil {
			ctx.GetLogger().Errorf("source pool %s create store error %v", k, err)
			return nil, err
		}
		sctx, cancel := ctx.WithMeta(ruleId, opId, store).WithCancel()
		sctx = kctx.WithValue(sctx.(*kctx.DefaultContext), kctx.DecodeKey, node.ctx.Value(kctx.DecodeKey))
		si, err := start(sctx, node, source)
		if err != nil {
			return nil, err
		}
		newS := &sourceSingleton{
			sourceInstance: si,
			outputs:        make(map[string]*sourceInstanceChannels),
			cancel:         cancel,
		}
		p.registry[k] = newS
		go func() {
			err := infra.SafeRun(func() error {
				defer si.source.Close(sctx)
				si.source.Open(sctx, si.dataCh.In, si.errorCh)
				return nil
			})
			if err != nil {
				newS.broadcastError(err)
			}
		}()
		go func() {
			err := infra.SafeRun(func() error {
				newS.run(node.sourceType, node.name)
				return nil
			})
			if err != nil {
				newS.broadcastError(err)
			}
		}()
		s = newS
	}
	return s, nil
}

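// deleteInstance detaches one rule instance from the shared singleton. When the last
// instance is detached, it cancels the singleton, closes its data channel and removes
// it from the registry.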
func (p *sourcePool) deleteInstance(k string, node *SourceNode, index int) {
	p.Lock()
	defer p.Unlock()
	s, ok := p.registry[k]
	if ok {
		instanceKey := fmt.Sprintf("%s.%s.%d", k, node.ctx.GetRuleId(), index)
		end := s.detach(instanceKey)
		if end {
			s.cancel()
			s.dataCh.Close()
			delete(p.registry, k)
		}
	}
}

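// sourceInstance bundles a source with its context and the channels a consumer reads from.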
type sourceInstance struct {
	source api.Source
	ctx    api.StreamContext
	*sourceInstanceChannels
}

// sourceSingleton holds the only instance of a shared source,
// together with references to all of its attached output channels.
// Access to outputs must be synchronized.
type sourceSingleton struct {
	*sourceInstance                    // immutable
	cancel context.CancelFunc          // immutable
	outputs map[string]*sourceInstanceChannels // guarded by the read-write lock
	sync.RWMutex
}

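// sourceInstanceChannels holds the per-consumer data and error channels.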
type sourceInstanceChannels struct {
	dataCh  *DynamicChannelBuffer
	errorCh chan error
}

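// newSourceInstanceChannels creates a channel set whose data buffer is limited to bl items.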
func newSourceInstanceChannels(bl int) *sourceInstanceChannels {
	buffer := NewDynamicChannelBuffer()
	buffer.SetLimit(bl)
	errorOutput := make(chan error)
	return &sourceInstanceChannels{
		dataCh:  buffer,
		errorCh: errorOutput,
	}
}

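// run is the fan-out loop of a shared singleton: it reads tuples from the source's data
// channel and broadcasts them to all attached outputs until the context is done or an
// error arrives.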
func (ss *sourceSingleton) run(name, key string) {
	logger := ss.ctx.GetLogger()
	logger.Infof("Start source %s shared instance %s successfully", name, key)
	for {
		select {
		case <-ss.ctx.Done():
			logger.Infof("source %s shared instance %s done", name, key)
			return
		case err := <-ss.errorCh:
			ss.broadcastError(err)
			return
		case data := <-ss.dataCh.Out:
			logger.Debugf("broadcast data %v from source pool %s:%s", data, name, key)
			ss.broadcast(data)
		}
	}
}

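// broadcast delivers a tuple to every attached output without blocking: if an output
// cannot accept the tuple immediately, the message is dropped and an error is logged.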
func (ss *sourceSingleton) broadcast(val api.SourceTuple) {
	ss.RLock()
	defer ss.RUnlock()
	for name, out := range ss.outputs {
		select {
		case out.dataCh.In <- val:
		case <-ss.ctx.Done():
		case <-out.dataCh.done:
			// detached
		default:
			ss.ctx.GetLogger().Errorf("share source drop message to %s", name)
		}
	}
}

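// broadcastError forwards an error to every attached output concurrently and waits until
// all deliveries are done.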
func (ss *sourceSingleton) broadcastError(err error) {
	logger := ss.ctx.GetLogger()
	var wg sync.WaitGroup
	ss.RLock()
	wg.Add(len(ss.outputs))
	for n, out := range ss.outputs {
		go func(name string, output chan<- error) {
			infra.DrainError(ss.ctx, err, output)
			wg.Done()
		}(n, out.errorCh)
	}
	ss.RUnlock()
	logger.Debugf("broadcasting from source pool")
	wg.Wait()
}

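// attach registers a new output channel set under instanceKey.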
func (ss *sourceSingleton) attach(instanceKey string, bl int) error {
	retry := 10
	var err error
	// retry multiple times in case the detach is still in progress
	for i := 0; i < retry; i++ {
		err = func() error {
			ss.Lock()
			defer ss.Unlock()
			if _, ok := ss.outputs[instanceKey]; !ok {
				ss.outputs[instanceKey] = newSourceInstanceChannels(bl)
			} else {
				// should not happen
				return fmt.Errorf("fail to attach source instance, already has an output of the same key %s", instanceKey)
			}
			return nil
		}()
		if err == nil {
			return nil
		}
		time.Sleep(time.Millisecond * 100)
	}
	return err
}

// detach removes an attached instance and reports whether the singleton has ended (no outputs remain)
func (ss *sourceSingleton) detach(instanceKey string) bool {
	ss.Lock()
	defer ss.Unlock()
	if chs, ok := ss.outputs[instanceKey]; ok {
		chs.dataCh.Close()
	} else {
		// should not happen
		ss.ctx.GetLogger().Warnf("detach source instance %s, not found", instanceKey)
		return false
	}
	delete(ss.outputs, instanceKey)
	if len(ss.outputs) == 0 {
		ss.cancel()
		return true
	}
	return false
}

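// start configures the source and prepares its instance. poolCtx is non-nil only for
// shared sources; for rule-specific sources the node's own context is used, and the
// source is rewound to any offset saved in the rule state.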
func start(poolCtx api.StreamContext, node *SourceNode, s api.Source) (*sourceInstance, error) {
	err := s.Configure(node.options.DATASOURCE, node.props)
	if err != nil {
		return nil, err
	}
	ctx := poolCtx
	if poolCtx == nil {
		ctx = node.ctx
		if rw, ok := s.(api.Rewindable); ok {
			if offset, err := ctx.GetState(OffsetKey); err != nil {
				return nil, err
			} else if offset != nil {
				ctx.GetLogger().Infof("Source rewind from %v", offset)
				err = rw.Rewind(offset)
				if err != nil {
					return nil, err
				}
			}
		}
	}
	chs := newSourceInstanceChannels(node.bufferLength)
	return &sourceInstance{
		source:                 s,
		sourceInstanceChannels: chs,
		ctx:                    ctx,
	}, nil
}