diff --git a/internal/ttl_cache.go b/internal/cache/ttl_cache.go
similarity index 50%
rename from internal/ttl_cache.go
rename to internal/cache/ttl_cache.go
index f68b6a8..67a26e1 100644
--- a/internal/ttl_cache.go
+++ b/internal/cache/ttl_cache.go
@@ -1,4 +1,4 @@
-package s3browser
+package cache
 
 import (
 	"context"
@@ -8,17 +8,18 @@ import (
 	gocache "github.com/patrickmn/go-cache"
 )
 
-type cache struct {
+type TTLCache struct {
 	c *gocache.Cache
 }
 
-func newCache(ttl, cleanupInterval time.Duration) *cache {
-	return &cache{
+// NewTTLCache creates a new TTL cache with the given TTL and cleanup interval.
+func NewTTLCache(ttl, cleanupInterval time.Duration) *TTLCache {
+	return &TTLCache{
 		c: gocache.New(ttl, cleanupInterval),
 	}
 }
 
-func (c *cache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
+func (c *TTLCache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
 	v, ok := c.c.Get(key.String())
 	if ok {
 		return v.(dataloader.Thunk), ok
@@ -26,11 +27,11 @@ func (c *cache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
 	return nil, ok
 }
 
-func (c *cache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
+func (c *TTLCache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
 	c.c.Set(key.String(), value, 0)
 }
 
-func (c *cache) Delete(_ context.Context, key dataloader.Key) bool {
+func (c *TTLCache) Delete(_ context.Context, key dataloader.Key) bool {
 	if _, found := c.c.Get(key.String()); found {
 		c.c.Delete(key.String())
 		return true
@@ -38,6 +39,6 @@ func (c *cache) Delete(_ context.Context, key dataloader.Key) bool {
 	return false
 }
 
-func (c *cache) Clear() {
+func (c *TTLCache) Clear() {
 	c.c.Flush()
 }
diff --git a/internal/dataloader.go b/internal/dataloader.go
deleted file mode 100644
index 4d865c0..0000000
--- a/internal/dataloader.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package s3browser
-
-import (
-	"context"
-	"fmt"
-	"path/filepath"
-	"strings"
-
-	types "git.kapelle.org/niklas/s3browser/internal/types"
-	"github.com/graph-gophers/dataloader"
-	"github.com/minio/minio-go/v7"
-	log "github.com/sirupsen/logrus"
-)
-
-// listObjectsBatch batch func for calling s3.ListObjects()
-func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
-	log.Debug("listObjectsBatch: ", k.Keys())
-	var results []*dataloader.Result
-
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
-
-	if !ok {
-		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
-	}
-
-	for _, v := range k {
-		id := v.Raw().(types.ID)
-		results = append(results, &dataloader.Result{
-			Data:  listObjects(s3Client, id, false),
-			Error: nil,
-		})
-	}
-
-	return results
-}
-
-// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true
-func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
-	log.Debug("listObjectsRecursiveBatch: ", k.Keys())
-	var results []*dataloader.Result
-
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
-
-	if !ok {
-		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
-	}
-
-	for _, v := range k {
-		id := v.Raw().(types.ID)
-		results = append(results, &dataloader.Result{
-			Data:  listObjects(s3Client, id, true),
-			Error: nil,
-		})
-	}
-
-	return results
-}
-
-// listObjects helper func for listObjectsBatch
-func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.ObjectInfo {
-	log.Debug("S3 call 'ListObjects': ", id)
-	objectCh := s3Client.ListObjects(context.Background(), id.Bucket, minio.ListObjectsOptions{
-		Prefix:    id.Key,
-		Recursive: recursive,
-	})
-
-	result := make([]minio.ObjectInfo, 0)
-	for obj := range objectCh {
-		result = append(result, obj)
-	}
-
-	return result
-}
-
-// getFilesBatch batch func for getting all files in path. Uses "listObjects" dataloader
-func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
-	log.Debug("getFilesBatch: ", k.Keys())
-	var results []*dataloader.Result
-
-	loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
-	if !ok {
-		return handleLoaderError(k, fmt.Errorf("Failed to get loader from context"))
-	}
-
-	for _, v := range k {
-		id := v.Raw().(types.ID)
-		files := make([]types.File, 0)
-
-		thunk := loader["listObjects"].Load(c, id)
-
-		objects, _ := thunk()
-
-		// TODO: handle thunk error
-
-		for _, obj := range objects.([]minio.ObjectInfo) {
-			if obj.Err != nil {
-				// TODO: how to handle?
-			} else if !strings.HasSuffix(obj.Key, "/") {
-				resultID := types.ID{
-					Bucket: id.Bucket,
-					Key:    obj.Key,
-				}
-
-				resultID.Normalize()
-
-				files = append(files, types.File{
-					ID:           resultID,
-					Name:         filepath.Base(obj.Key),
-					Size:         obj.Size,
-					ContentType:  obj.ContentType,
-					ETag:         obj.ETag,
-					LastModified: obj.LastModified,
-				})
-			}
-		}
-
-		results = append(results, &dataloader.Result{
-			Data:  files,
-			Error: nil,
-		})
-	}
-
-	return results
-}
-
-// getFileBatch batch func for getting object info
-func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
-	log.Debug("getFileBatch: ", k.Keys())
-	var results []*dataloader.Result
-
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
-
-	if !ok {
-		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
-	}
-
-	for _, v := range k {
-		id := v.Raw().(types.ID)
-		log.Debug("S3 call 'StatObject': ", v.String())
-		obj, err := s3Client.StatObject(context.Background(), id.Bucket, id.Key, minio.StatObjectOptions{})
-
-		if err != nil {
-			results = append(results, &dataloader.Result{
-				Data:  nil,
-				Error: err,
-			})
-		} else {
-			resultID := types.ID{
-				Bucket: id.Bucket,
-				Key:    obj.Key,
-			}
-
-			resultID.Normalize()
-
-			results = append(results, &dataloader.Result{
-				Data: &types.File{
-					ID:           resultID,
-					Size:         obj.Size,
-					ContentType:  obj.ContentType,
-					ETag:         obj.ETag,
-					LastModified: obj.LastModified,
-				},
-				Error: nil,
-			})
-		}
-	}
-
-	return results
-}
-
-// getDirsBatch batch func for getting dirs in a path
-func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
-	log.Debug("getDirsBatch: ", k.Keys())
-	var results []*dataloader.Result
-
-	loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
-	if !ok {
-		return handleLoaderError(k, fmt.Errorf("Failed to get loader from context"))
-	}
-
-	for _, v := range k {
-		id := v.Raw().(types.ID)
-		dirs := make([]types.Directory, 0)
-
-		thunk := loader["listObjects"].Load(c, id)
-
-		objects, _ := thunk()
-
-		// TODO: handle thunk error
-
-		for _, obj := range objects.([]minio.ObjectInfo) {
-			if obj.Err != nil {
-				// TODO: how to handle?
-			} else if strings.HasSuffix(obj.Key, "/") {
-				resultID := types.ID{
-					Bucket: id.Bucket,
-					Key:    obj.Key,
-				}
-				resultID.Normalize()
-				dirs = append(dirs, types.Directory{
-					ID:   resultID,
-					Name: filepath.Base(obj.Key),
-				})
-			}
-		}
-
-		results = append(results, &dataloader.Result{
-			Data:  dirs,
-			Error: nil,
-		})
-	}
-
-	return results
-}
-
-// handleLoaderError helper func when the whole batch failed
-func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
-	log.Error(err.Error())
-	var results []*dataloader.Result
-	for range k {
-		results = append(results, &dataloader.Result{
-			Data:  nil,
-			Error: err,
-		})
-	}
-
-	return results
-}
-
-func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
-	log.Debug("listBucketsBatch")
-	var results []*dataloader.Result
-
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
-
-	if !ok {
-		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
-	}
-
-	buckets, err := s3Client.ListBuckets(c)
-
-	if err != nil {
-		return handleLoaderError(k, err)
-	}
-
-	var bucketStrings []string
-
-	for _, v := range buckets {
-		bucketStrings = append(bucketStrings, v.Name)
-	}
-
-	result := &dataloader.Result{
-		Data:  bucketStrings,
-		Error: nil,
-	}
-
-	for range k {
-		results = append(results, result)
-	}
-
-	return results
-}
-
-// createDataloader create all dataloaders and return a map of them plus a cache for objects
-func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
-	loaderMap := make(map[string]*dataloader.Loader, 0)
-
-	loaderMap["getFiles"] = dataloader.NewBatchedLoader(
-		getFilesBatch,
-		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
-	)
-
-	loaderMap["getFile"] = dataloader.NewBatchedLoader(
-		getFileBatch,
-		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
-	)
-
-	loaderMap["listObjects"] = dataloader.NewBatchedLoader(
-		listObjectsBatch,
-		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
-	)
-
-	loaderMap["listObjectsRecursive"] = dataloader.NewBatchedLoader(
-		listObjectsRecursiveBatch,
-		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
-	)
-
-	loaderMap["getDirs"] = dataloader.NewBatchedLoader(
-		getDirsBatch,
-		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
-	)
-
-	loaderMap["listBuckets"] = dataloader.NewBatchedLoader(
-		listBucketsBatch,
-		dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
-	)
-
-	return loaderMap
-}
diff --git a/internal/gql/graphqlTypes.go b/internal/gql/graphqlTypes.go
index ceb4b07..9d99cdb 100644
--- a/internal/gql/graphqlTypes.go
+++ b/internal/gql/graphqlTypes.go
@@ -5,11 +5,11 @@ import (
 	"path/filepath"
 	"time"
 
-	"github.com/graph-gophers/dataloader"
 	"github.com/graphql-go/graphql"
 	"github.com/graphql-go/graphql/language/ast"
 
 	helper "git.kapelle.org/niklas/s3browser/internal/helper"
+	"git.kapelle.org/niklas/s3browser/internal/loader"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
@@ -198,10 +198,9 @@ func GraphqlTypes() {
 				return nil, fmt.Errorf("Failed to parse Source for files resolve")
 			}
 
-			loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
+			loader := p.Context.Value("loader").(*loader.Loader)
 
-			thunk := loader["getFiles"].Load(p.Context, source.ID)
-			return thunk()
+			return loader.GetFiles(p.Context, source.ID)
 		},
 	})
 
@@ -213,10 +212,8 @@ func GraphqlTypes() {
 				return nil, fmt.Errorf("Failed to parse Source for directories resolve")
 			}
 
-			loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
p.Context.Value("loader").(map[string]*dataloader.Loader) - thunk := loader["getDirs"].Load(p.Context, source.ID) - - return thunk() + loader := p.Context.Value("loader").(*loader.Loader) + return loader.GetDirs(p.Context, source.ID) }, }) @@ -258,17 +255,14 @@ func loadFile(p graphql.ResolveParams) (*types.File, error) { return nil, fmt.Errorf("Failed to parse source for resolve") } - loader := p.Context.Value("loader").(map[string]*dataloader.Loader) + loader := p.Context.Value("loader").(*loader.Loader) - thunk := loader["getFile"].Load(p.Context, source.ID) - result, err := thunk() + file, err := loader.GetFile(p.Context, source.ID) if err != nil { return nil, err } - file, ok := result.(*types.File) - if !ok { return nil, fmt.Errorf("Failed to load file") } diff --git a/internal/gql/mutations.go b/internal/gql/mutations.go index c75d9a6..c5ce6ff 100644 --- a/internal/gql/mutations.go +++ b/internal/gql/mutations.go @@ -5,10 +5,10 @@ import ( "fmt" "strings" - "github.com/graph-gophers/dataloader" "github.com/minio/minio-go/v7" helper "git.kapelle.org/niklas/s3browser/internal/helper" + "git.kapelle.org/niklas/s3browser/internal/loader" types "git.kapelle.org/niklas/s3browser/internal/types" ) @@ -27,8 +27,9 @@ func deleteMutation(ctx context.Context, id types.ID) error { return err } - // Invalidate cache - return helper.InvalidateCache(ctx, id) + ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, id) + + return nil } func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) { @@ -57,19 +58,17 @@ func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) return nil, err } - newid := types.ID{ + newID := types.ID{ Bucket: info.Bucket, Key: info.Key, } - newid.Normalize() + newID.Normalize() - // Invalidate cache - // TODO: check error - helper.InvalidateCache(ctx, newid) + ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, newID) return &types.File{ - ID: newid, + ID: newID, }, nil } @@ -114,7 +113,7 @@ func moveMutation(ctx context.Context, src, dest types.ID) (*types.File, error) newId.Normalize() - helper.InvalidateCache(ctx, newId) + ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, newId) return &types.File{ ID: newId, @@ -137,19 +136,17 @@ func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) return nil, err } - newId := types.ID{ + newID := types.ID{ Bucket: info.Bucket, Key: info.Key, } - newId.Normalize() + newID.Normalize() - // Invalidate cache - // TODO: check error - helper.InvalidateCacheForDir(ctx, newId) + ctx.Value("loader").(*loader.Loader).InvalidateCacheForDir(ctx, newID) return &types.Directory{ - ID: newId, + ID: newID, }, nil } @@ -161,28 +158,26 @@ func deleteDirectory(ctx context.Context, id types.ID) error { return fmt.Errorf("Failed to get s3Client from context") } - loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader) + loader, ok := ctx.Value("loader").(*loader.Loader) if !ok { return fmt.Errorf("Failed to get dataloader from context") } // Get all files inside the directory - thunk := loader["listObjectsRecursive"].Load(ctx, id) - result, err := thunk() + files, err := loader.GetFilesRecursive(ctx, id) if err != nil { return err } - files, ok := result.([]minio.ObjectInfo) - if !ok { - return fmt.Errorf("Failed to get parse result from listObjects") - } - // Delete all child files - err = helper.DeleteMultiple(ctx, *s3Client, files) + var keysToDel []string + for _, file := range files { + keysToDel = append(keysToDel, file.ID.Key) 
+	}
+
+	err = helper.DeleteMultiple(ctx, *s3Client, id.Bucket, keysToDel)
 
 	if err != nil {
 		return err
@@ -200,8 +195,7 @@ func deleteDirectory(ctx context.Context, id types.ID) error {
 		}
 	}
 
-	//Invalidate cache
-	helper.InvalidateCacheForDir(ctx, id)
+	loader.InvalidateCacheForDir(ctx, id)
 
 	return nil
 }
diff --git a/internal/gql/schema.go b/internal/gql/schema.go
index 059f088..1cba1fe 100644
--- a/internal/gql/schema.go
+++ b/internal/gql/schema.go
@@ -3,11 +3,11 @@ package gql
 import (
 	"fmt"
 
-	"github.com/graph-gophers/dataloader"
 	"github.com/graphql-go/graphql"
 
 	s3errors "git.kapelle.org/niklas/s3browser/internal/errors"
 	helper "git.kapelle.org/niklas/s3browser/internal/helper"
+	"git.kapelle.org/niklas/s3browser/internal/loader"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 	log "github.com/sirupsen/logrus"
 )
@@ -36,9 +36,8 @@ func GraphqlSchema() (graphql.Schema, error) {
 
 				log.Debug("querry 'files': ", path)
 
-				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
-				thunk := loader["getFiles"].Load(p.Context, path)
-				return thunk()
+				loader := p.Context.Value("loader").(*loader.Loader)
+				return loader.GetFiles(p.Context, *path)
 			},
 		},
 		"directories": &graphql.Field{
@@ -61,9 +60,8 @@ func GraphqlSchema() (graphql.Schema, error) {
 
 				log.Debug("querry 'directorys': ", path)
 
-				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
-				thunk := loader["getDirs"].Load(p.Context, path)
-				return thunk()
+				loader := p.Context.Value("loader").(*loader.Loader)
+				return loader.GetDirs(p.Context, *path)
 			},
 		},
 		"file": &graphql.Field{
@@ -109,10 +107,8 @@ func GraphqlSchema() (graphql.Schema, error) {
 				return nil, s3errors.ErrNotAuthenticated
 			}
 
-			loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
-			// The only reason we use a dataloader with a empty key is that we want to cache the result
-			thunk := loader["listBuckets"].Load(p.Context, dataloader.StringKey(""))
-			return thunk()
+			loader := p.Context.Value("loader").(*loader.Loader)
+			return loader.GetBuckets(p.Context)
 		},
 	}
 
diff --git a/internal/helper/helper.go b/internal/helper/helper.go
index 9cf09c9..5ffbd64 100644
--- a/internal/helper/helper.go
+++ b/internal/helper/helper.go
@@ -2,75 +2,34 @@ package helper
 
 import (
 	"context"
-	"fmt"
 	"path/filepath"
 	"strings"
 	"time"
 
+	types "git.kapelle.org/niklas/s3browser/internal/types"
 	"github.com/golang-jwt/jwt"
-	"github.com/graph-gophers/dataloader"
 	"github.com/minio/minio-go/v7"
 	log "github.com/sirupsen/logrus"
-
-	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
-func InvalidateCache(ctx context.Context, id types.ID) error {
-	loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
-	if !ok {
-		return fmt.Errorf("Failed to get loader from context")
-	}
-
-	log.Debug("Invalidate cache for id: ", id)
-
-	parent := id.Parent()
-
-	loader["getFile"].Clear(ctx, id)
-	loader["getFiles"].Clear(ctx, parent)
-	loader["listObjects"].Clear(ctx, parent)
-	loader["listObjectsRecursive"].Clear(ctx, parent)
-	return nil
-}
-
 func GetFilenameFromKey(id string) string {
 	return filepath.Base(id)
 }
 
-func InvalidateCacheForDir(ctx context.Context, path types.ID) error {
-	loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
-	if !ok {
-		return fmt.Errorf("Failed to get loader from context")
-	}
-
-	log.Debug("Invalidate cache for dir: ", path)
-
-	parent := GetParentDir(path)
-
-	log.Debug("Cache clear dir: ", path, " parent: ", parent)
-
-	loader["getFile"].Clear(ctx, path)
-	loader["listObjects"].Clear(ctx, path)
loader["listObjectsRecursive"].Clear(ctx, path) - loader["getFiles"].Clear(ctx, path) - loader["getDirs"].Clear(ctx, parent) - loader["listObjects"].Clear(ctx, parent) - loader["listObjectsRecursive"].Clear(ctx, parent) - - return nil -} - -func DeleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.ObjectInfo) error { - log.Debug("Delte multiple") +func DeleteMultiple(ctx context.Context, s3Client minio.Client, bucket string, keys []string) error { + log.Debug("Remove multiple objects") objectsCh := make(chan minio.ObjectInfo, 1) go func() { defer close(objectsCh) - for _, id := range ids { - objectsCh <- id + for _, id := range keys { + objectsCh <- minio.ObjectInfo{ + Key: id, + } } }() - for err := range s3Client.RemoveObjects(ctx, "dev", objectsCh, minio.RemoveObjectsOptions{}) { + for err := range s3Client.RemoveObjects(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) { log.Error("Failed to delete object ", err.ObjectName, " because: ", err.Err.Error()) // TODO: error handel } @@ -98,6 +57,24 @@ func GetParentDir(id types.ID) types.ID { return parent } +func ObjInfoToFile(objInfo minio.ObjectInfo, bucket string) *types.File { + objID := types.ID{ + Bucket: bucket, + Key: objInfo.Key, + } + + objID.Normalize() + + return &types.File{ + ID: objID, + Name: GetFilenameFromKey(objID.Key), + Size: objInfo.Size, + ContentType: objInfo.ContentType, + ETag: objInfo.ETag, + LastModified: objInfo.LastModified, + } +} + func IsAuthenticated(ctx context.Context) bool { token, ok := ctx.Value("jwt").(*jwt.Token) return (ok && token.Valid) diff --git a/internal/httpserver/httpServer.go b/internal/httpserver/httpServer.go index 887c6ab..737822b 100644 --- a/internal/httpserver/httpServer.go +++ b/internal/httpserver/httpServer.go @@ -20,6 +20,7 @@ import ( log "github.com/sirupsen/logrus" helper "git.kapelle.org/niklas/s3browser/internal/helper" + "git.kapelle.org/niklas/s3browser/internal/loader" types "git.kapelle.org/niklas/s3browser/internal/types" ) @@ -195,7 +196,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) } // Invalidate cache - helper.InvalidateCache(ctx, *id) + ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, *id) rw.WriteHeader(http.StatusCreated) } diff --git a/internal/loader/batch.go b/internal/loader/batch.go new file mode 100644 index 0000000..72b2f93 --- /dev/null +++ b/internal/loader/batch.go @@ -0,0 +1,135 @@ +package loader + +import ( + "context" + "fmt" + + types "git.kapelle.org/niklas/s3browser/internal/types" + "github.com/graph-gophers/dataloader" + "github.com/minio/minio-go/v7" + log "github.com/sirupsen/logrus" +) + +// listObjectsBatch batch func for calling s3.ListObjects() +func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result { + log.Debug("listObjectsBatch: ", k.Keys()) + var results []*dataloader.Result + + s3Client, ok := c.Value("s3Client").(*minio.Client) + + if !ok { + return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context")) + } + + for _, v := range k { + id := v.Raw().(types.ID) + results = append(results, &dataloader.Result{ + Data: listObjects(s3Client, id, false), + Error: nil, + }) + } + + return results +} + +// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true +func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result { + log.Debug("listObjectsRecursiveBatch: ", k.Keys()) + var results []*dataloader.Result + + s3Client, ok := c.Value("s3Client").(*minio.Client) + 
+	if !ok {
+		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
+	}
+
+	for _, v := range k {
+		id := v.Raw().(types.ID)
+		results = append(results, &dataloader.Result{
+			Data:  listObjects(s3Client, id, true),
+			Error: nil,
+		})
+	}
+
+	return results
+}
+
+// listObjects helper func for listObjectsBatch
+func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.ObjectInfo {
+	log.Debug("S3 call 'ListObjects': ", id)
+	objectCh := s3Client.ListObjects(context.Background(), id.Bucket, minio.ListObjectsOptions{
+		Prefix:    id.Key,
+		Recursive: recursive,
+	})
+
+	result := make([]minio.ObjectInfo, 0)
+	for obj := range objectCh {
+		result = append(result, obj)
+	}
+
+	return result
+}
+
+func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
+	log.Debug("listBucketsBatch")
+	var results []*dataloader.Result
+
+	s3Client, ok := c.Value("s3Client").(*minio.Client)
+
+	if !ok {
+		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
+	}
+
+	buckets, err := s3Client.ListBuckets(c)
+
+	if err != nil {
+		return handleLoaderError(k, err)
+	}
+
+	result := &dataloader.Result{
+		Data:  buckets,
+		Error: nil,
+	}
+
+	for range k {
+		results = append(results, result)
+	}
+
+	return results
+}
+
+func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Result {
+	log.Debug("statObjectBatch")
+
+	var results []*dataloader.Result
+	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+
+	if !ok {
+		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
+	}
+
+	for _, v := range k {
+		id := v.Raw().(types.ID)
+		stat, err := s3Client.StatObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
+		results = append(results, &dataloader.Result{
+			Data:  stat,
+			Error: err,
+		})
+	}
+
+	return results
+}
+
+// handleLoaderError helper func when the whole batch failed
+func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
+	log.Error(err.Error())
+	var results []*dataloader.Result
+	for range k {
+		results = append(results, &dataloader.Result{
+			Data:  nil,
+			Error: err,
+		})
+	}
+
+	return results
+}
diff --git a/internal/loader/loader.go b/internal/loader/loader.go
new file mode 100644
index 0000000..14de716
--- /dev/null
+++ b/internal/loader/loader.go
@@ -0,0 +1,164 @@
+package loader
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"git.kapelle.org/niklas/s3browser/internal/cache"
+	"git.kapelle.org/niklas/s3browser/internal/helper"
+	types "git.kapelle.org/niklas/s3browser/internal/types"
+	"github.com/graph-gophers/dataloader"
+	"github.com/minio/minio-go/v7"
+	log "github.com/sirupsen/logrus"
+)
+
+type Loader struct {
+	listObjectsLoader          *dataloader.Loader
+	listObjectsRecursiveLoader *dataloader.Loader
+	statObjectLoader           *dataloader.Loader
+	listBucketsLoader          *dataloader.Loader
+}
+
+func NewLoader(config types.AppConfig) *Loader {
+	return &Loader{
+		listObjectsLoader: dataloader.NewBatchedLoader(
+			listObjectsBatch,
+			dataloader.WithCache(cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)),
+		),
+		listObjectsRecursiveLoader: dataloader.NewBatchedLoader(
+			listObjectsRecursiveBatch,
+			dataloader.WithCache(cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)),
+		),
+		statObjectLoader: dataloader.NewBatchedLoader(
+			statObjectBatch,
+			dataloader.WithCache(cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)),
+		),
+		listBucketsLoader: dataloader.NewBatchedLoader(
+			listBucketsBatch,
+			dataloader.WithCache(cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)),
+		),
+	}
+}
+
+func (l *Loader) GetFiles(ctx context.Context, path types.ID) ([]types.File, error) {
+	thunk := l.listObjectsLoader.Load(ctx, path)
+	objects, err := thunk()
+
+	if err != nil {
+		return nil, err
+	}
+
+	var files []types.File
+
+	for _, obj := range objects.([]minio.ObjectInfo) {
+		if obj.Err != nil {
+			return nil, obj.Err
+		} else if !strings.HasSuffix(obj.Key, "/") {
+			files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
+		}
+	}
+
+	return files, nil
+}
+
+func (l *Loader) GetFile(ctx context.Context, id types.ID) (*types.File, error) {
+	thunk := l.statObjectLoader.Load(ctx, id)
+
+	result, err := thunk()
+	if err != nil {
+		return nil, err
+	}
+	objInfo, ok := result.(minio.ObjectInfo)
+
+	if !ok {
+		return nil, fmt.Errorf("Failed to stat object")
+	}
+
+	return helper.ObjInfoToFile(objInfo, id.Bucket), nil
+}
+
+func (l *Loader) GetDirs(ctx context.Context, path types.ID) ([]types.Directory, error) {
+	thunk := l.listObjectsLoader.Load(ctx, path)
+
+	result, err := thunk()
+	if err != nil {
+		return nil, err
+	}
+
+	var dirs []types.Directory
+	for _, obj := range result.([]minio.ObjectInfo) {
+		if obj.Err != nil {
+			return nil, obj.Err
+		} else if strings.HasSuffix(obj.Key, "/") {
+			resultID := types.ID{
+				Bucket: path.Bucket,
+				Key:    obj.Key,
+			}
+
+			resultID.Normalize()
+
+			dirs = append(dirs, types.Directory{
+				ID:   resultID,
+				Name: filepath.Base(obj.Key),
+			})
+		}
+	}
+
+	return dirs, nil
+}
+
+func (l *Loader) GetBuckets(ctx context.Context) ([]string, error) {
+	thunk := l.listBucketsLoader.Load(ctx, dataloader.StringKey(""))
+
+	result, err := thunk()
+
+	if err != nil {
+		return nil, err
+	}
+
+	bucketsInfo := result.([]minio.BucketInfo)
+	var buckets []string
+	for _, i := range bucketsInfo {
+		buckets = append(buckets, i.Name)
+	}
+
+	return buckets, nil
+}
+
+func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.File, error) {
+	thunk := l.listObjectsRecursiveLoader.Load(ctx, path)
+	result, err := thunk()
+
+	if err != nil {
+		return nil, err
+	}
+
+	objects := result.([]minio.ObjectInfo)
+
+	var files []types.File
+	for _, obj := range objects {
+		files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
+	}
+
+	return files, nil
+}
+
+func (l *Loader) InvalidateCacheForFile(ctx context.Context, id types.ID) {
+	log.Debug("Clear cache for file: ", id.String())
+	parent := id.Parent()
+
+	// Clear the stat cache for the file itself and the listings of its parent;
+	// the listing loaders are keyed by directory IDs, not file IDs.
+	l.statObjectLoader.Clear(ctx, id)
+	l.listObjectsLoader.Clear(ctx, parent)
+	l.listObjectsRecursiveLoader.Clear(ctx, parent)
+}
+
+func (l *Loader) InvalidateCacheForDir(ctx context.Context, path types.ID) {
+	log.Debug("Clear cache for dir: ", path.String())
+	parent := helper.GetParentDir(path)
+
+	l.statObjectLoader.Clear(ctx, path)
+	l.listObjectsLoader.Clear(ctx, path).Clear(ctx, parent)
+	l.listObjectsRecursiveLoader.Clear(ctx, path).Clear(ctx, parent)
+}
diff --git a/internal/s3Broswer.go b/internal/s3Broswer.go
index 32bbd8f..ff36d8c 100644
--- a/internal/s3Broswer.go
+++ b/internal/s3Broswer.go
@@ -10,11 +10,10 @@ import (
 
 	gql "git.kapelle.org/niklas/s3browser/internal/gql"
 	httpserver "git.kapelle.org/niklas/s3browser/internal/httpserver"
+	"git.kapelle.org/niklas/s3browser/internal/loader"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
-var bucketName string
-
 // setupS3Client connect the s3Client
 func setupS3Client(config types.AppConfig) (*minio.Client, error) {
 	minioClient, err := minio.New(config.S3Endoint, &minio.Options{
@@ -36,8 +35,6 @@ func setupS3Client(config types.AppConfig) (*minio.Client, error) {
return nil, fmt.Errorf("Bucket '%s' does not exist", config.S3Bucket) } - bucketName = config.S3Bucket - return minioClient, nil } @@ -59,7 +56,7 @@ func Start(config types.AppConfig) { log.Info("s3 client connected") log.Debug("Creating dataloader") - loaderMap := createDataloader(config) + loader := loader.NewLoader(config) log.Debug("Generating graphq schema") gql.GraphqlTypes() @@ -71,7 +68,7 @@ func Start(config types.AppConfig) { } resolveContext := context.WithValue(context.Background(), "s3Client", s3Client) - resolveContext = context.WithValue(resolveContext, "loader", loaderMap) + resolveContext = context.WithValue(resolveContext, "loader", loader) log.Debug("Starting HTTP server") err = httpserver.InitHttp(resolveContext, schema, config.Address)