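// Package loader contains dataloader batch functions that resolve batched keys
// against the s3browser S3 service (object listings, bucket listings and
// object stats).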
package loader

import (
	"context"
	"fmt"

	"git.kapelle.org/niklas/s3browser/internal/s3"
	"git.kapelle.org/niklas/s3browser/internal/types"
	"github.com/graph-gophers/dataloader"
	log "github.com/sirupsen/logrus"
)

// listObjectsBatch batch func for calling s3.ListObjects()
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(s3.S3Service)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		objects, err := s3Client.ListObjects(c, id)
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
		} else {
			results = append(results, &dataloader.Result{
				Data:  objects,
				Error: nil,
			})
		}
	}

	return results
}

// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true
func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(s3.S3Service)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		objects, err := s3Client.ListObjectsRecursive(c, id)
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
		} else {
			results = append(results, &dataloader.Result{
				Data:  objects,
				Error: nil,
			})
		}
	}

	return results
}
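
// listBucketsBatch batch func for calling s3.ListBuckets(). The bucket list
// does not depend on the key, so every key in the batch receives the same
// shared result.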
func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(s3.S3Service)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}

	log.Debug("S3 'ListBuckets'")
	buckets, err := s3Client.ListBuckets(c)

	if err != nil {
		return handleLoaderError(k, err)
	}

	result := &dataloader.Result{
		Data:  buckets,
		Error: nil,
	}

	for range k {
		results = append(results, result)
	}

	return results
}
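
// statObjectBatch batch func for calling s3.StatObject() once per key.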
func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("statObjectBatch")

	var results []*dataloader.Result
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		log.Debug("S3 'StatObject': ", id)
		stat, err := s3Client.StatObject(ctx, id)
		results = append(results, &dataloader.Result{
			Data:  stat,
			Error: err,
		})
	}

	return results
}

// handleLoaderError helper func when the whole batch failed
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
	log.Error(err.Error())
	var results []*dataloader.Result
	for range k {
		results = append(results, &dataloader.Result{
			Data:  nil,
			Error: err,
		})
	}

	return results
}
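
// newLoaders is an illustrative sketch and not part of the original file: it
// shows one way the batch functions above could be wired into
// dataloader.Loader instances via dataloader.NewBatchedLoader. The function
// name and the map keys are assumptions made for demonstration only; the
// actual wiring used by the rest of this package may differ.
func newLoaders() map[string]*dataloader.Loader {
	return map[string]*dataloader.Loader{
		"listObjects":          dataloader.NewBatchedLoader(listObjectsBatch),
		"listObjectsRecursive": dataloader.NewBatchedLoader(listObjectsRecursiveBatch),
		"listBuckets":          dataloader.NewBatchedLoader(listBucketsBatch),
		"statObject":           dataloader.NewBatchedLoader(statObjectBatch),
	}
}

// A resolver would then call Load(ctx, key) on one of these loaders and invoke
// the returned thunk; the context passed to Load must carry the "s3Client"
// value that the batch functions expect.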