exporter.go

package exchange

import (
	"reflect"

	"github.com/spf13/cast"
	"gorm.io/gorm"
)

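// Exporter exports records of a GORM model through a tabular Writer, writing
// one column per configured Meta and optionally preloading associations first.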
type Exporter struct {
	resource     interface{}
	rtResource   reflect.Type
	metas        []*Meta
	pkMetas      []*Meta
	associations []string
}

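// NewExporter creates an Exporter for the given resource; chain Metas and
// Associations to configure it before calling Exec.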
func NewExporter(resource interface{}) *Exporter {
	return &Exporter{
		resource: resource,
	}
}

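// Metas sets the columns to export. Each Meta maps either a struct field or a
// custom valuer to a column header.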
func (ep *Exporter) Metas(ms ...*Meta) *Exporter {
	ep.metas = ms
	return ep
}

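// Associations sets the GORM associations to preload before exporting.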
func (ep *Exporter) Associations(ts ...string) *Exporter {
	ep.associations = ts
	return ep
}

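// Exec loads all records of the resource in batches, writes the header row
// followed by one row per record to w, and finally flushes the writer.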
func (ep *Exporter) Exec(db *gorm.DB, w Writer, opts ...ExporterExecOption) error {
	err := ep.validateAndInit()
	if err != nil {
		return err
	}

	maxParamsPerSQL := ep.parseOptions(opts...)

	// Load every record into memory, batch by batch, so that a single SELECT
	// never exceeds the allowed number of SQL parameters.
	records := reflect.New(reflect.SliceOf(ep.rtResource)).Elem()
	{
		// gorm already orders by the primary key (id) in FindInBatches,
		// so no explicit ORDER BY is needed here.
		// var orderBy string
		// for i, m := range ep.pkMetas {
		// 	if i > 0 {
		// 		orderBy += ", "
		// 	}
		// 	orderBy += fmt.Sprintf("%s asc", m.snakeField)
		// }
		chunkRecords := reflect.New(reflect.SliceOf(ep.rtResource)).Interface()
		batchSize := maxParamsPerSQL
		if len(ep.pkMetas) > 0 {
			batchSize /= len(ep.pkMetas)
		}
		err = preloadDB(db, ep.associations).
			Model(ep.resource).
			// Order(orderBy).
			FindInBatches(chunkRecords, batchSize, func(tx *gorm.DB, batch int) error {
				records = reflect.AppendSlice(records, reflect.ValueOf(chunkRecords).Elem())
				return nil
			}).Error
		if err != nil {
			return err
		}
	}

	// Write the header row from the configured column headers.
	headers := make([]string, 0, len(ep.metas))
	for _, m := range ep.metas {
		headers = append(headers, m.columnHeader)
	}
	err = w.WriteHeader(headers)
	if err != nil {
		return err
	}

	// Write one row per record: use the Meta's custom valuer when present,
	// otherwise stringify the struct field named by the Meta.
	vals := make([]string, len(ep.metas))
	for i := 0; i < records.Len(); i++ {
		record := records.Index(i)
		for j, m := range ep.metas {
			if m.valuer != nil {
				v, err := m.valuer(record.Interface())
				if err != nil {
					return err
				}
				vals[j] = v
				continue
			}
			vals[j] = cast.ToString(record.Elem().FieldByName(m.field).Interface())
		}
		err = w.WriteRow(vals)
		if err != nil {
			return err
		}
	}

	return w.Flush()
}

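// validateAndInit validates the resource/metas configuration, collects the
// primary-key metas, and caches the reflected resource type.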
func (ep *Exporter) validateAndInit() error {
	if err := validateResourceAndMetas(ep.resource, ep.metas); err != nil {
		return err
	}

	// Collect the primary-key metas; they determine the batch size in Exec.
	ep.pkMetas = []*Meta{}
	for i := range ep.metas {
		m := ep.metas[i]
		if m.primaryKey {
			ep.pkMetas = append(ep.pkMetas, m)
		}
	}

	ep.rtResource = reflect.TypeOf(ep.resource)
	return nil
}

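// parseOptions applies any ExporterExecOptions; maxParamsPerSQL defaults to
// 65000 so a single batched SELECT stays under common bind-parameter limits.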
func (ep *Exporter) parseOptions(opts ...ExporterExecOption) (maxParamsPerSQL int) {
	maxParamsPerSQL = 65000
	for _, opt := range opts {
		switch v := opt.(type) {
		case *maxParamsPerSQLOption:
			maxParamsPerSQL = v.v
		}
	}
	return maxParamsPerSQL
}
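
// Usage sketch (illustrative only, not part of this file): Product, db,
// productMetas, and csvWriter are assumed to be defined elsewhere, e.g. built
// from this package's Meta and Writer implementations.
//
//	err := NewExporter(&Product{}).
//		Metas(productMetas...).
//		Associations("Variants").
//		Exec(db, csvWriter)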