package s3browser

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"

	"github.com/graph-gophers/dataloader"
	"github.com/minio/minio-go/v7"
)

// listObjectsBatch is the batch func for calling s3.ListObjects()
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(*minio.Client)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get s3Client from context"))
	}

	for _, v := range k {
		results = append(results, &dataloader.Result{
			Data:  listObjects(s3Client, bucketName, v.String(), false),
			Error: nil,
		})
	}

	return results
}

// listObjects is a helper func for listObjectsBatch
func listObjects(s3Client *minio.Client, bucketName, path string, recursive bool) []minio.ObjectInfo {
	objectCh := s3Client.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{
		Prefix:    path,
		Recursive: recursive,
	})

	result := make([]minio.ObjectInfo, 0)
	for obj := range objectCh {
		result = append(result, obj)
	}

	return result
}

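// Note on the listing shape the two batch funcs below rely on: with
// Recursive set to false, minio-go's ListObjects performs a delimited ("/")
// listing, so the returned ObjectInfo slice contains both regular objects and
// common prefixes whose Key ends in "/". getFilesBatch and getDirsBatch split
// files from directories on that trailing slash.
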
// getFilesBatch batch func for getting all files in a path. Uses the "listObjects" dataloader
func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get loader from context"))
	}

	for _, v := range k {
		path := v.String()
		files := make([]File, 0)

		// objects are listed by prefix, so make sure the path ends with "/"
		if !strings.HasSuffix(path, "/") {
			path += "/"
		}

		thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))

		objects, err := thunk()
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
			continue
		}

		for _, obj := range objects.([]minio.ObjectInfo) {
			if obj.Err != nil {
				// TODO: how to handle?
			} else if !strings.HasSuffix(obj.Key, "/") {
				files = append(files, File{
					ID:           obj.Key,
					Name:         filepath.Base(obj.Key),
					Size:         obj.Size,
					ContentType:  obj.ContentType,
					ETag:         obj.ETag,
					LastModified: obj.LastModified,
				})
			}
		}

		results = append(results, &dataloader.Result{
			Data:  files,
			Error: nil,
		})
	}

	return results
}

// getFileBatch batch func for getting object info
func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	s3Client, ok := c.Value("s3Client").(*minio.Client)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get s3Client from context"))
	}

	for _, v := range k {
		obj, err := s3Client.StatObject(context.Background(), bucketName, v.String(), minio.StatObjectOptions{})
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
		} else {
			results = append(results, &dataloader.Result{
				Data: &File{
					ID:           obj.Key,
					Size:         obj.Size,
					ContentType:  obj.ContentType,
					ETag:         obj.ETag,
					LastModified: obj.LastModified,
				},
				Error: nil,
			})
		}
	}

	return results
}

// getDirsBatch batch func for getting the dirs in a path. Uses the "listObjects" dataloader
func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	var results []*dataloader.Result

	loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("failed to get loader from context"))
	}

	for _, v := range k {
		path := v.String()
		dirs := make([]Directory, 0)

		// objects are listed by prefix, so make sure the path ends with "/"
		if !strings.HasSuffix(path, "/") {
			path += "/"
		}

		thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))

		objects, err := thunk()
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
			continue
		}

		for _, obj := range objects.([]minio.ObjectInfo) {
			if obj.Err != nil {
				// TODO: how to handle?
			} else if strings.HasSuffix(obj.Key, "/") {
				dirs = append(dirs, Directory{
					ID:   obj.Key,
					Name: filepath.Base(obj.Key),
				})
			}
		}

		results = append(results, &dataloader.Result{
			Data:  dirs,
			Error: nil,
		})
	}

	return results
}

// handleLoaderError helper func when the whole batch failed
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
	var results []*dataloader.Result
	for range k {
		results = append(results, &dataloader.Result{
			Data:  nil,
			Error: err,
		})
	}

	return results
}

// createDataloader creates all dataloaders, each backed by its own cache, and returns them in a map
func createDataloader(config AppConfig) map[string]*dataloader.Loader {
	loaderMap := make(map[string]*dataloader.Loader)

	loaderMap["getFiles"] = dataloader.NewBatchedLoader(
		getFilesBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["getFile"] = dataloader.NewBatchedLoader(
		getFileBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["listObjects"] = dataloader.NewBatchedLoader(
		listObjectsBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	loaderMap["getDirs"] = dataloader.NewBatchedLoader(
		getDirsBatch,
		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
	)

	return loaderMap
}
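
// Example wiring (a sketch, not part of this file): the batch funcs above
// read the loader map and the minio client from the request context under the
// "loader" and "s3Client" keys, so the code handling a request is expected to
// do something along these lines before the loaders are used:
//
//	loaderMap := createDataloader(config)
//	ctx := context.WithValue(req.Context(), "loader", loaderMap)
//	ctx = context.WithValue(ctx, "s3Client", s3Client)
//	thunk := loaderMap["getFiles"].Load(ctx, dataloader.StringKey("some/path/"))
//	data, err := thunk() // data holds a []File on success
//
// Here req, config and s3Client stand in for values owned by the caller; the
// actual setup lives elsewhere in the package.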