sink_cache.go

// Copyright 2021 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package node

import (
	"encoding/gob"
	"fmt"
	"io"
	"path"
	"sort"
	"strconv"

	"github.com/lf-edge/ekuiper/internal/conf"
	"github.com/lf-edge/ekuiper/internal/topo/checkpoint"
	"github.com/lf-edge/ekuiper/pkg/api"
	"github.com/lf-edge/ekuiper/pkg/kv"
)
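
// CacheTuple is the unit buffered by the sink cache: the data to be sent out by
// the sink plus the index it occupies in the pending queue. The sink reports the
// index back on the Complete channel to acknowledge delivery.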
type CacheTuple struct {
	index int
	data  interface{}
}
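
// LinkedQueue keeps the pending tuples keyed by a monotonically increasing index.
// Acknowledged tuples are deleted by index, so Data may become sparse while Tail
// keeps growing. The fields are exported so the queue can be gob-encoded when it
// is persisted to the KV store.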
type LinkedQueue struct {
	Data map[int]interface{}
	Tail int
}

func (l *LinkedQueue) append(item interface{}) {
	l.Data[l.Tail] = item
	l.Tail++
}

func (l *LinkedQueue) delete(index int) {
	delete(l.Data, index)
}

func (l *LinkedQueue) reset() {
	l.Tail = 0
}

func (l *LinkedQueue) length() int {
	return len(l.Data)
}
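
// clone returns a shallow copy of the queue so that a snapshot can be persisted
// in a goroutine while the original keeps changing.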
func (l *LinkedQueue) clone() *LinkedQueue {
	result := &LinkedQueue{
		Data: make(map[int]interface{}),
		Tail: l.Tail,
	}
	for k, v := range l.Data {
		result.Data[k] = v
	}
	return result
}

func (l *LinkedQueue) String() string {
	return fmt.Sprintf("tail: %d, data: %v", l.Tail, l.Data)
}
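
// Cache buffers the tuples a sink has not yet confirmed as delivered and persists
// them to the per-rule KV store so they survive a restart. Tuples are read from
// in, forwarded on Out, and removed from the pending queue when the sink sends
// their index on Complete. Persistence is triggered either by a timer
// (NewTimebasedCache) or by checkpoint signals (NewCheckpointbasedCache).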
type Cache struct {
	// Data and control channels
	in       <-chan interface{}
	Out      chan *CacheTuple
	Complete chan int
	errorCh  chan<- error
	// states
	pending *LinkedQueue
	changed bool
	// serialize
	key   string // the key for current cache
	store kv.KeyValue
}
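
// NewTimebasedCache creates a Cache whose pending queue is snapshotted to the KV
// store on a timer; saveInterval is the ticker interval and limit is the buffer
// size of the Out channel.
//
// A minimal wiring sketch (the channel sizes and interval below are illustrative,
// not taken from this file):
//
//	in := make(chan interface{}, 1024)
//	errCh := make(chan error, 1)
//	cache := NewTimebasedCache(in, 1024, 1000, errCh, ctx)
//	go func() {
//		for t := range cache.Out {
//			// ... send t.data to the external system ...
//			cache.Complete <- t.index // acknowledge so the tuple is dropped from the cache
//		}
//	}()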
func NewTimebasedCache(in <-chan interface{}, limit int, saveInterval int, errCh chan<- error, ctx api.StreamContext) *Cache {
	c := &Cache{
		in:       in,
		Out:      make(chan *CacheTuple, limit),
		Complete: make(chan int),
		errorCh:  errCh,
	}
	go c.timebasedRun(ctx, saveInterval)
	return c
}
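
// initStore builds the in-memory pending queue, opens the per-rule KV store under
// <dataDir>/sink/<ruleId> keyed by operator id plus instance id, and reloads any
// cache left over from a previous run.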
func (c *Cache) initStore(ctx api.StreamContext) {
	logger := ctx.GetLogger()
	c.pending = &LinkedQueue{
		Data: make(map[int]interface{}),
		Tail: 0,
	}
	dbDir, err := conf.GetDataLoc()
	logger.Debugf("cache saved to %s", dbDir)
	if err != nil {
		c.drainError(err)
	}
	c.store = kv.GetDefaultKVStore(path.Join(dbDir, "sink", ctx.GetRuleId()))
	c.key = ctx.GetOpId() + strconv.Itoa(ctx.GetInstanceId())
	logger.Debugf("cache saved to key %s", c.key)
	// load cache
	if err := c.loadCache(); err != nil {
		go c.drainError(err)
		return
	}
}
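
// timebasedRun is the event loop of the time-based cache. It buffers incoming
// tuples, drops acknowledged ones, and on every tick decides whether the pending
// queue has changed enough, or gone unsaved long enough, to be written to the KV
// store. On shutdown it saves whatever is still pending.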
func (c *Cache) timebasedRun(ctx api.StreamContext, saveInterval int) {
	logger := ctx.GetLogger()
	c.initStore(ctx)
	ticker := conf.GetTicker(saveInterval)
	defer ticker.Stop()
	var tcount = 0
	for {
		select {
		case item := <-c.in:
			index := c.pending.Tail
			c.pending.append(item)
			// non-blocking until the limit is exceeded
			c.Out <- &CacheTuple{
				index: index,
				data:  item,
			}
			c.changed = true
		case index := <-c.Complete:
			c.pending.delete(index)
			c.changed = true
		case <-ticker.C:
			tcount++
			l := c.pending.length()
			if l == 0 {
				c.pending.reset()
			}
			// If the data is still changing, only save when the cache holds more than the threshold,
			// to prevent too much file IO.
			// If the data has not changed in this time slot and has not been saved yet, save it anyway,
			// so that the data is not lost just because the cache never passes the threshold.
			//logger.Infof("ticker %t, l=%d\n", c.changed, l)
			if (c.changed && l > conf.Config.Sink.CacheThreshold) || (tcount == conf.Config.Sink.CacheTriggerCount && c.changed) {
				logger.Infof("save cache for rule %s, %s", ctx.GetRuleId(), c.pending.String())
				clone := c.pending.clone()
				c.changed = false
				go func() {
					if err := c.saveCache(logger, clone); err != nil {
						logger.Debugf("%v", err)
						c.drainError(err)
					}
				}()
			}
			if tcount >= conf.Config.Sink.CacheThreshold {
				tcount = 0
			}
		case <-ctx.Done():
			err := c.saveCache(logger, c.pending)
			if err != nil {
				logger.Warnf("Error found during saving cache: %s \n ", err)
			}
			logger.Infof("sink node %s instance cache %d done", ctx.GetOpId(), ctx.GetInstanceId())
			return
		}
	}
}
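
// loadCache restores the pending queue persisted by a previous run, if any, and
// replays the restored tuples to the Out channel in index order so the sink can
// resend them.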
func (c *Cache) loadCache() error {
	gob.Register(c.pending)
	err := c.store.Open()
	if err != nil && err != io.EOF {
		return err
	}
	defer c.store.Close()
	if err == nil {
		mt := new(LinkedQueue)
		if f, err := c.store.Get(c.key, &mt); f {
			if nil != err {
				return fmt.Errorf("load malformed cache, found %v(%v)", c.key, mt)
			}
			c.pending = mt
			c.changed = true
			// store the keys in a slice in sorted order
			var keys []int
			for k := range mt.Data {
				keys = append(keys, k)
			}
			sort.Ints(keys)
			for _, k := range keys {
				t := &CacheTuple{
					index: k,
					data:  mt.Data[k],
				}
				c.Out <- t
			}
			return nil
		}
	}
	return nil
}
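
// saveCache persists a snapshot of the pending queue under the cache key. If the
// store cannot be opened, it is cleaned and reopened once before giving up.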
func (c *Cache) saveCache(logger api.Logger, p *LinkedQueue) error {
	err := c.store.Open()
	if err != nil {
		logger.Errorf("save cache error while opening cache store: %s", err)
		logger.Infof("clean the cache and reopen")
		c.store.Close()
		c.store.Clean()
		err = c.store.Open()
		if err != nil {
			logger.Errorf("save cache error after resetting the cache store: %s", err)
			return err
		}
	}
	defer c.store.Close()
	return c.store.Set(c.key, p)
}

func (c *Cache) drainError(err error) {
	c.errorCh <- err
}
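
// Length returns the number of tuples that are buffered but not yet acknowledged
// by the sink.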
func (c *Cache) Length() int {
	return c.pending.length()
}
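
// NewCheckpointbasedCache creates a Cache that persists its pending queue when a
// checkpoint signal arrives on tch, rather than on a timer, so the saved state
// stays aligned with the rule's checkpoints.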
func NewCheckpointbasedCache(in <-chan interface{}, limit int, tch <-chan struct{}, errCh chan<- error, ctx api.StreamContext) *Cache {
	c := &Cache{
		in:       in,
		Out:      make(chan *CacheTuple, limit),
		Complete: make(chan int),
		errorCh:  errCh,
	}
	go c.checkpointbasedRun(ctx, tch)
	return c
}
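
// checkpointbasedRun is the event loop of the checkpoint-based cache. Checkpoint
// barriers are forwarded on Out without being cached; regular tuples are buffered
// until acknowledged, and the pending queue is saved whenever a signal arrives on
// tch.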
func (c *Cache) checkpointbasedRun(ctx api.StreamContext, tch <-chan struct{}) {
	logger := ctx.GetLogger()
	c.initStore(ctx)
	for {
		select {
		case item := <-c.in:
			// a checkpoint barrier may arrive here; forward it without caching
			if boe, ok := item.(*checkpoint.BufferOrEvent); ok {
				if _, ok := boe.Data.(*checkpoint.Barrier); ok {
					c.Out <- &CacheTuple{
						data: item,
					}
					logger.Debugf("sink cache send out barrier %v", boe.Data)
					break
				}
			}
			index := c.pending.Tail
			c.pending.append(item)
			// non-blocking until the limit is exceeded
			c.Out <- &CacheTuple{
				index: index,
				data:  item,
			}
			logger.Debugf("sink cache send out tuple %v", item)
			c.changed = true
		case index := <-c.Complete:
			c.pending.delete(index)
			c.changed = true
		case <-tch:
			logger.Infof("save cache for rule %s, %s", ctx.GetRuleId(), c.pending.String())
			clone := c.pending.clone()
			if c.changed {
				go func() {
					if err := c.saveCache(logger, clone); err != nil {
						logger.Debugf("%v", err)
						c.drainError(err)
					}
				}()
			}
			c.changed = false
		case <-ctx.Done():
			logger.Infof("sink node %s instance cache %d done", ctx.GetOpId(), ctx.GetInstanceId())
			return
		}
	}
}