sql.go

// Copyright 2022-2023 EMQ Technologies Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"

	"github.com/lf-edge/ekuiper/extensions/sqldatabase"
	"github.com/lf-edge/ekuiper/extensions/sqldatabase/driver"
	"github.com/lf-edge/ekuiper/extensions/util"
	"github.com/lf-edge/ekuiper/internal/topo/transform"
	"github.com/lf-edge/ekuiper/pkg/api"
	"github.com/lf-edge/ekuiper/pkg/ast"
	"github.com/lf-edge/ekuiper/pkg/cast"
	"github.com/lf-edge/ekuiper/pkg/errorx"
)
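
// sqlConfig holds the sink properties. Url and Table are required; the other
// fields control how rows are extracted from incoming data and written out.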
type sqlConfig struct {
	Url            string   `json:"url"`
	Table          string   `json:"table"`
	Fields         []string `json:"fields"`
	DataTemplate   string   `json:"dataTemplate"`
	TableDataField string   `json:"tableDataField"`
	DataField      string   `json:"dataField"`
	RowkindField   string   `json:"rowkindField"`
	KeyField       string   `json:"keyField"`
}
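
// buildInsertSql returns the column names and a single "(v1,v2,...)" values
// tuple for one row. Column names are double-quoted for "dm://" URLs.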
func (t *sqlConfig) buildInsertSql(ctx api.StreamContext, mapData map[string]interface{}) ([]string, string, error) {
	keys, vals, err := t.getKeyValues(ctx, mapData)
	if err != nil {
		return keys, "", err
	}
	if strings.HasPrefix(strings.ToLower(t.Url), "dm://") {
		for i, key := range keys {
			keys[i] = fmt.Sprintf(`"%v"`, key)
		}
	}
	sqlStr := "(" + strings.Join(vals, ",") + ")"
	return keys, sqlStr, nil
}
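
// getKeyValues extracts column names and SQL literal values from one row.
// When Fields is configured, only those columns are used and missing fields
// become NULL; otherwise every key in the map becomes a column. String values
// are single-quoted.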
func (t *sqlConfig) getKeyValues(ctx api.StreamContext, mapData map[string]interface{}) ([]string, []string, error) {
	if len(mapData) == 0 {
		return nil, nil, fmt.Errorf("data is empty")
	}
	logger := ctx.GetLogger()
	var keys, vals []string
	if len(t.Fields) != 0 {
		for _, k := range t.Fields {
			keys = append(keys, k)
			if v, ok := mapData[k]; ok && v != nil {
				if reflect.String == reflect.TypeOf(v).Kind() {
					vals = append(vals, fmt.Sprintf("'%v'", v))
				} else {
					vals = append(vals, fmt.Sprintf(`%v`, v))
				}
			} else {
				logger.Warnln("field not found:", k)
				vals = append(vals, "NULL")
			}
		}
	} else {
		for k, v := range mapData {
			keys = append(keys, k)
			if reflect.String == reflect.TypeOf(v).Kind() {
				vals = append(vals, fmt.Sprintf("'%v'", v))
			} else {
				vals = append(vals, fmt.Sprintf(`%v`, v))
			}
		}
	}
	return keys, vals, nil
}
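
// sqlSink writes result rows into a relational database through a pooled
// connection.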
type sqlSink struct {
	conf *sqlConfig
	// The db connection instance
	db sqldatabase.DB
}
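
// Configure parses the sink properties into sqlConfig, lets DataField fall
// back to TableDataField, and validates that Url and Table are present and
// that KeyField is set whenever RowkindField is used.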
func (m *sqlSink) Configure(props map[string]interface{}) error {
	cfg := &sqlConfig{}
	err := cast.MapToStruct(props, cfg)
	if err != nil {
		return fmt.Errorf("fail to read properties %v with error: %v", props, err)
	}
	if cfg.DataField == "" {
		cfg.DataField = cfg.TableDataField
	}
	if cfg.Url == "" {
		return fmt.Errorf("property Url is required")
	}
	if cfg.Table == "" {
		return fmt.Errorf("property Table is required")
	}
	if cfg.RowkindField != "" && cfg.KeyField == "" {
		return fmt.Errorf("keyField is required when rowkindField is set")
	}
	m.conf = cfg
	return nil
}
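
// Open fetches a shared database connection for the configured url from the
// global connection pool.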
func (m *sqlSink) Open(ctx api.StreamContext) (err error) {
	logger := ctx.GetLogger()
	logger.Debugf("Opening sql sink")
	db, err := util.FetchDBToOneNode(util.GlobalPool, m.conf.Url)
	if err != nil {
		// List the known driver build tags to help diagnose the failure.
		logger.Errorf("supported build tags are %v", driver.KnownBuildTags())
		return err
	}
	m.db = db
	return
}
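
// writeToDB executes the given SQL statement, wrapping execution failures as
// IO errors, and logs the number of affected rows.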
func (m *sqlSink) writeToDB(ctx api.StreamContext, sqlStr *string) error {
	ctx.GetLogger().Debugf(*sqlStr)
	r, err := m.db.Exec(*sqlStr)
	if err != nil {
		return fmt.Errorf("%s: %s", errorx.IOErr, err.Error())
	}
	d, err := r.RowsAffected()
	if err != nil {
		ctx.GetLogger().Errorf("get rows affected error: %s", err.Error())
	}
	ctx.GetLogger().Debugf("Rows affected: %d", d)
	return nil
}
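
// Collect receives one sink item, applies the data template or the
// dataField/fields transformation, resolves the table name template, and then
// either batches all rows into a single INSERT statement or, when
// rowkindField is configured, saves each row according to its rowkind.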
func (m *sqlSink) Collect(ctx api.StreamContext, item interface{}) error {
	ctx.GetLogger().Debugf("sql sink receive %s", item)
	if m.conf.DataTemplate != "" {
		jsonBytes, _, err := ctx.TransformOutput(item)
		if err != nil {
			return err
		}
		tm := make(map[string]interface{})
		err = json.Unmarshal(jsonBytes, &tm)
		if err != nil {
			return fmt.Errorf("fail to decode data %s after applying dataTemplate for error %v", string(jsonBytes), err)
		}
		item = tm
	} else {
		tm, _, err := transform.TransItem(item, m.conf.DataField, m.conf.Fields)
		if err != nil {
			return fmt.Errorf("fail to transform data %v for error %v", item, err)
		}
		item = tm
	}
	var (
		table string
		err   error
	)
	// Resolve the table name template against the (first) row of data.
	switch v := item.(type) {
	case map[string]interface{}:
		table, err = ctx.ParseTemplate(m.conf.Table, v)
		if err != nil {
			ctx.GetLogger().Errorf("parse template for table %s error: %v", m.conf.Table, err)
			return err
		}
	case []map[string]interface{}:
		if len(v) == 0 {
			ctx.GetLogger().Warnf("empty data array")
			return nil
		}
		table, err = ctx.ParseTemplate(m.conf.Table, v[0])
		if err != nil {
			ctx.GetLogger().Errorf("parse template for table %s error: %v", m.conf.Table, err)
			return err
		}
	}
	var (
		keys   []string
		values []string
		vars   string
	)
	if m.conf.RowkindField == "" {
		// No rowkind: batch all rows into a single INSERT statement.
		switch v := item.(type) {
		case []map[string]interface{}:
			for _, mapData := range v {
				keys, vars, err = m.conf.buildInsertSql(ctx, mapData)
				if err != nil {
					return err
				}
				values = append(values, vars)
			}
			if keys != nil {
				sqlStr := fmt.Sprintf("INSERT INTO %s (%s) values ", table, strings.Join(keys, ",")) + strings.Join(values, ",") + ";"
				return m.writeToDB(ctx, &sqlStr)
			}
			return nil
		case map[string]interface{}:
			keys, vars, err = m.conf.buildInsertSql(ctx, v)
			if err != nil {
				return err
			}
			values = append(values, vars)
			if keys != nil {
				sqlStr := fmt.Sprintf("INSERT INTO %s (%s) values ", table, strings.Join(keys, ",")) + strings.Join(values, ",") + ";"
				return m.writeToDB(ctx, &sqlStr)
			}
			return nil
		case []interface{}:
			for _, data := range v {
				mapData, ok := data.(map[string]interface{})
				if !ok {
					ctx.GetLogger().Errorf("unsupported type: %T", data)
					return fmt.Errorf("unsupported type: %T", data)
				}
				keys, vars, err = m.conf.buildInsertSql(ctx, mapData)
				if err != nil {
					ctx.GetLogger().Errorf("sql sink build sql error %v for data %v", err, mapData)
					return err
				}
				values = append(values, vars)
			}
			if keys != nil {
				sqlStr := fmt.Sprintf("INSERT INTO %s (%s) values ", table, strings.Join(keys, ",")) + strings.Join(values, ",") + ";"
				return m.writeToDB(ctx, &sqlStr)
			}
			return nil
		default: // should never happen
			return fmt.Errorf("unsupported type: %T", item)
		}
	} else {
		// Rowkind is set: save each row individually as insert/update/delete.
		switch d := item.(type) {
		case []map[string]interface{}:
			for _, el := range d {
				err := m.save(ctx, table, el)
				if err != nil {
					ctx.GetLogger().Error(err)
				}
			}
		case map[string]interface{}:
			err := m.save(ctx, table, d)
			if err != nil {
				return err
			}
		case []interface{}:
			for _, vv := range d {
				el, ok := vv.(map[string]interface{})
				if !ok {
					ctx.GetLogger().Errorf("unsupported type: %T", vv)
					return fmt.Errorf("unsupported type: %T", vv)
				}
				err := m.save(ctx, table, el)
				if err != nil {
					ctx.GetLogger().Error(err)
				}
			}
		default:
			return fmt.Errorf("unrecognized format of %s", item)
		}
		return nil
	}
}
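
// Close returns the database connection to the global pool.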
func (m *sqlSink) Close(_ api.StreamContext) error {
	if m.db != nil {
		return util.ReturnDBFromOneNode(util.GlobalPool, m.conf.Url)
	}
	return nil
}

// save writes a single updatable row to the db. The row's rowkind field
// decides whether it is inserted, updated or deleted; rows without a rowkind
// default to insert.
func (m *sqlSink) save(ctx api.StreamContext, table string, data map[string]interface{}) error {
	rowkind := ast.RowkindInsert
	c, ok := data[m.conf.RowkindField]
	if ok {
		rowkind, ok = c.(string)
		if !ok {
			return fmt.Errorf("rowkind field %s is not a string in data %v", m.conf.RowkindField, data)
		}
		if rowkind != ast.RowkindInsert && rowkind != ast.RowkindUpdate && rowkind != ast.RowkindDelete {
			return fmt.Errorf("invalid rowkind %s", rowkind)
		}
	}
	var sqlStr string
	switch rowkind {
	case ast.RowkindInsert:
		keys, vars, err := m.conf.buildInsertSql(ctx, data)
		if err != nil {
			return err
		}
		values := []string{vars}
		if keys != nil {
			sqlStr = fmt.Sprintf("INSERT INTO %s (%s) values ", table, strings.Join(keys, ",")) + strings.Join(values, ",") + ";"
		}
	case ast.RowkindUpdate:
		keyval, ok := data[m.conf.KeyField]
		if !ok {
			return fmt.Errorf("field %s does not exist in data %v", m.conf.KeyField, data)
		}
		keys, vals, err := m.conf.getKeyValues(ctx, data)
		if err != nil {
			return err
		}
		sqlStr = fmt.Sprintf("UPDATE %s SET ", table)
		for i, key := range keys {
			if i != 0 {
				sqlStr += ","
			}
			sqlStr += fmt.Sprintf("%s=%s", key, vals[i])
		}
		if _, ok := keyval.(string); ok {
			sqlStr += fmt.Sprintf(" WHERE %s = '%s';", m.conf.KeyField, keyval)
		} else {
			sqlStr += fmt.Sprintf(" WHERE %s = %v;", m.conf.KeyField, keyval)
		}
	case ast.RowkindDelete:
		keyval, ok := data[m.conf.KeyField]
		if !ok {
			return fmt.Errorf("field %s does not exist in data %v", m.conf.KeyField, data)
		}
		if _, ok := keyval.(string); ok {
			sqlStr = fmt.Sprintf("DELETE FROM %s WHERE %s = '%s';", table, m.conf.KeyField, keyval)
		} else {
			sqlStr = fmt.Sprintf("DELETE FROM %s WHERE %s = %v;", table, m.conf.KeyField, keyval)
		}
	default:
		return fmt.Errorf("invalid rowkind %s", rowkind)
	}
	return m.writeToDB(ctx, &sqlStr)
}
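
// Sql is the exported constructor of this sink plugin; it returns a new,
// unconfigured sqlSink instance.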
func Sql() api.Sink {
	return &sqlSink{}
}