package s3browser

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"

	types "git.kapelle.org/niklas/s3browser/internal/types"
	"github.com/graph-gophers/dataloader"
	"github.com/minio/minio-go/v7"
	log "github.com/sirupsen/logrus"
)

// listObjectsBatch is the batch func for calling s3.ListObjects() for each key.
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("listObjectsBatch: ", k.Keys())
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(*minio.Client)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get s3Client from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		results = append(results, &dataloader.Result{
			Data:  listObjects(s3Client, id, false),
			Error: nil,
		})
	}

	return results
}

// listObjectsRecursiveBatch is like listObjectsBatch, but with recursive set to true.
func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("listObjectsRecursiveBatch: ", k.Keys())
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(*minio.Client)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get s3Client from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		results = append(results, &dataloader.Result{
			Data:  listObjects(s3Client, id, true),
			Error: nil,
		})
	}

	return results
}

// listObjects is a helper for the listObjects*Batch funcs. It calls
// s3.ListObjects and drains the result channel into a slice.
func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.ObjectInfo {
	log.Debug("S3 call 'ListObjects': ", id)
	objectCh := s3Client.ListObjects(context.Background(), id.Bucket, minio.ListObjectsOptions{
		Prefix:    id.Key,
		Recursive: recursive,
	})

	result := make([]minio.ObjectInfo, 0)
	for obj := range objectCh {
		result = append(result, obj)
	}

	return result
}
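
// Note on the listing semantics the funcs below rely on (based on minio-go
// behavior): with Recursive set to false, ListObjects returns the objects
// directly under the prefix plus one entry per common prefix, whose Key ends
// in "/". getFilesBatch and getDirsBatch use that trailing slash to tell
// files and directories apart. A rough sketch, with made-up bucket and keys:
//
//	listObjects(s3Client, types.ID{Bucket: "b", Key: "docs/"}, false)
//	// -> keys: "docs/a.txt" (file), "docs/img/" (directory entry)
//
//	listObjects(s3Client, types.ID{Bucket: "b", Key: "docs/"}, true)
//	// -> keys: "docs/a.txt", "docs/img/x.png" (fully expanded, no "/" entries)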

// getFilesBatch is the batch func for getting all files in a path. It uses the
// "listObjects" dataloader, so the underlying S3 call is shared and cached.
func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("getFilesBatch: ", k.Keys())
	var results []*dataloader.Result

	loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get loader from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		files := make([]types.File, 0)

		thunk := loader["listObjects"].Load(c, id)

		objects, err := thunk()
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
			continue
		}

		for _, obj := range objects.([]minio.ObjectInfo) {
			if obj.Err != nil {
				// Log per-object errors and skip the broken entry.
				log.Error("ListObjects entry error: ", obj.Err)
			} else if !strings.HasSuffix(obj.Key, "/") {
				resultID := types.ID{
					Bucket: id.Bucket,
					Key:    obj.Key,
				}

				resultID.Normalize()

				files = append(files, types.File{
					ID:           resultID,
					Name:         filepath.Base(obj.Key),
					Size:         obj.Size,
					ContentType:  obj.ContentType,
					ETag:         obj.ETag,
					LastModified: obj.LastModified,
				})
			}
		}

		results = append(results, &dataloader.Result{
			Data:  files,
			Error: nil,
		})
	}

	return results
}

// getFileBatch is the batch func for getting object info via s3.StatObject.
func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("getFileBatch: ", k.Keys())
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(*minio.Client)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get s3Client from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		log.Debug("S3 call 'StatObject': ", v.String())
		obj, err := s3Client.StatObject(context.Background(), id.Bucket, id.Key, minio.StatObjectOptions{})

		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
		} else {
			resultID := types.ID{
				Bucket: id.Bucket,
				Key:    obj.Key,
			}

			resultID.Normalize()

			results = append(results, &dataloader.Result{
				Data: &types.File{
					ID:           resultID,
					Size:         obj.Size,
					ContentType:  obj.ContentType,
					ETag:         obj.ETag,
					LastModified: obj.LastModified,
				},
				Error: nil,
			})
		}
	}

	return results
}

// getDirsBatch is the batch func for getting the directories in a path.
// Objects whose key ends in "/" are treated as directories.
func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("getDirsBatch: ", k.Keys())
	var results []*dataloader.Result

	loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get loader from context"))
	}

	for _, v := range k {
		id := v.Raw().(types.ID)
		dirs := make([]types.Directory, 0)

		thunk := loader["listObjects"].Load(c, id)

		objects, err := thunk()
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
			continue
		}

		for _, obj := range objects.([]minio.ObjectInfo) {
			if obj.Err != nil {
				// Log per-object errors and skip the broken entry.
				log.Error("ListObjects entry error: ", obj.Err)
			} else if strings.HasSuffix(obj.Key, "/") {
				resultID := types.ID{
					Bucket: id.Bucket,
					Key:    obj.Key,
				}
				resultID.Normalize()
				dirs = append(dirs, types.Directory{
					ID:   resultID,
					Name: filepath.Base(obj.Key),
				})
			}
		}

		results = append(results, &dataloader.Result{
			Data:  dirs,
			Error: nil,
		})
	}

	return results
}

// handleLoaderError is a helper for when a whole batch failed. The dataloader
// contract requires one result per key, so the same error is returned for
// every key in the batch.
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
	log.Error(err.Error())
	var results []*dataloader.Result
	for range k {
		results = append(results, &dataloader.Result{
			Data:  nil,
			Error: err,
		})
	}

	return results
}

// listBucketsBatch is the batch func for listing all buckets. The listing is
// independent of the key, so every key receives the same *dataloader.Result.
func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("listBucketsBatch")
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(*minio.Client)

	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get s3Client from context"))
	}

	buckets, err := s3Client.ListBuckets(c)

	if err != nil {
		return handleLoaderError(k, err)
	}

	var bucketStrings []string

	for _, v := range buckets {
		bucketStrings = append(bucketStrings, v.Name)
	}

	result := &dataloader.Result{
		Data:  bucketStrings,
		Error: nil,
	}

	for range k {
		results = append(results, result)
	}

	return results
}

// createDataloader creates all dataloaders and returns them as a map keyed by
// loader name. Each loader gets its own TTL cache.
func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
	loaderMap := make(map[string]*dataloader.Loader)

	loaderMap["getFiles"] = dataloader.NewBatchedLoader(
		getFilesBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["getFile"] = dataloader.NewBatchedLoader(
		getFileBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["listObjects"] = dataloader.NewBatchedLoader(
		listObjectsBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["listObjectsRecursive"] = dataloader.NewBatchedLoader(
		listObjectsRecursiveBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["getDirs"] = dataloader.NewBatchedLoader(
		getDirsBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["listBuckets"] = dataloader.NewBatchedLoader(
		listBucketsBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	return loaderMap
}
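
// exampleWiring is a minimal, illustrative sketch (not called anywhere) of the
// contract the batch funcs above assume: createDataloader builds the loader
// map, and both the minio client and that map must be put on the request
// context under the keys "s3Client" and "loader" before any resolver calls
// Load. The names here (ctx, s3Client, config, bucket, prefix) are
// placeholders, not part of the package API.
func exampleWiring(ctx context.Context, s3Client *minio.Client, config types.AppConfig) ([]types.File, error) {
	loaderMap := createDataloader(config)

	// The batch funcs read these two values back out of the context.
	ctx = context.WithValue(ctx, "s3Client", s3Client)
	ctx = context.WithValue(ctx, "loader", loaderMap)

	// Load returns a thunk; calling the thunk blocks until the batch has run.
	thunk := loaderMap["getFiles"].Load(ctx, types.ID{Bucket: "example-bucket", Key: "some/prefix/"})
	data, err := thunk()
	if err != nil {
		return nil, err
	}

	// getFilesBatch always stores []types.File in Data on success.
	return data.([]types.File), nil
}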