Compare commits 8725def3a1...c5ab0156fd

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | c5ab0156fd | |
| | 686630b2df | |
.env (1 changed line)

```diff
@@ -2,7 +2,6 @@
 S3_ENDPOINT=localhost:9000
 S3_ACCESS_KEY=testo
 S3_SECRET_KEY=testotesto
-S3_BUCKET=dev
 S3_DISABLE_SSL=true
 ADDRESS=:8080
 VERBOSE=true
```
```diff
@@ -12,7 +12,6 @@ type args struct {
     S3Endpoint   string `arg:"--s3-endpoint,required,env:S3_ENDPOINT" help:"host[:port]" placeholder:"ENDPOINT"`
     S3AccessKey  string `arg:"--s3-access-key,required,env:S3_ACCESS_KEY" placeholder:"ACCESS_KEY"`
     S3SecretKey  string `arg:"--s3-secret-key,required,env:S3_SECRET_KEY" placeholder:"SECRET_KEY"`
-    S3Bucket     string `arg:"--s3-bucket,required,env:S3_BUCKET" placeholder:"BUCKET"`
     S3DisableSSL bool   `arg:"--s3-disable-ssl,env:S3_DISABLE_SSL" default:"false"`
     Address      string `arg:"--address,env:ADDRESS" default:":3000" help:"what address to listen on" placeholder:"ADDRESS"`
     CacheTTL     int64  `arg:"--cache-ttl,env:CACHE_TTL" help:"Time in seconds" default:"30" placeholder:"TTL"`
@@ -35,7 +34,6 @@ func main() {
         S3SSL:        !args.S3DisableSSL,
         S3AccessKey:  args.S3AccessKey,
         S3SecretKey:  args.S3SecretKey,
-        S3Bucket:     args.S3Bucket,
         DSN:          args.DBConnection,
         CacheTTL:     time.Duration(args.CacheTTL) * time.Second,
         CacheCleanup: time.Duration(args.CacheCleanup) * time.Second,
```
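The struct tags in the hunk above look like `github.com/alexflint/go-arg` tags; the library itself is not named anywhere in this compare, so treat that as an assumption. If it is go-arg, a trimmed-down parse of the remaining options would look roughly like this (field set abbreviated, variable names invented):

```go
package main

import (
    "fmt"

    arg "github.com/alexflint/go-arg"
)

// Abbreviated stand-in for the args struct above; illustration only.
type args struct {
    S3Endpoint  string `arg:"--s3-endpoint,required,env:S3_ENDPOINT" help:"host[:port]" placeholder:"ENDPOINT"`
    S3AccessKey string `arg:"--s3-access-key,required,env:S3_ACCESS_KEY" placeholder:"ACCESS_KEY"`
    Address     string `arg:"--address,env:ADDRESS" default:":3000"`
}

func main() {
    var a args
    // Command-line flags take precedence over the env: variables,
    // which take precedence over default: values.
    arg.MustParse(&a)
    fmt.Println(a.S3Endpoint, a.Address)
}
```

With `S3Bucket` removed from the struct, neither `--s3-bucket` nor `S3_BUCKET` is read any more, which matches the `.env` change above.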
internal/cache/cache.go (new file, 9 added lines)

```diff
@@ -0,0 +1,9 @@
+package cache
+
+import (
+    "github.com/graph-gophers/dataloader"
+)
+
+type S3Cache interface {
+    dataloader.Cache
+}
```
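`S3Cache` only embeds `dataloader.Cache`; the concrete implementation used later in this compare, `cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)`, is not shown here. A minimal sketch of what such a TTL cache could look like, assuming the context-aware `Cache` interface of `github.com/graph-gophers/dataloader` (`Get`, `Set`, `Delete`, `Clear`) and a mutex-guarded map, is below. It is illustrative only, not the repository's actual code:

```go
package cache

import (
    "context"
    "sync"
    "time"

    "github.com/graph-gophers/dataloader"
)

// ttlEntry pairs a cached thunk with its expiry time.
type ttlEntry struct {
    thunk   dataloader.Thunk
    expires time.Time
}

// TTLCache is an in-memory dataloader cache whose entries expire after ttl.
// A background goroutine drops expired entries every cleanup interval.
type TTLCache struct {
    mu    sync.Mutex
    items map[string]ttlEntry
    ttl   time.Duration
}

// NewTTLCache assumes ttl and cleanup are both positive.
func NewTTLCache(ttl, cleanup time.Duration) *TTLCache {
    c := &TTLCache{items: make(map[string]ttlEntry), ttl: ttl}
    go func() {
        for range time.NewTicker(cleanup).C {
            now := time.Now()
            c.mu.Lock()
            for k, e := range c.items {
                if now.After(e.expires) {
                    delete(c.items, k)
                }
            }
            c.mu.Unlock()
        }
    }()
    return c
}

// Get returns the cached thunk for key if it has not expired yet.
func (c *TTLCache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    e, ok := c.items[key.String()]
    if !ok || time.Now().After(e.expires) {
        return nil, false
    }
    return e.thunk, true
}

// Set stores a thunk under key with a fresh expiry.
func (c *TTLCache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.items[key.String()] = ttlEntry{thunk: value, expires: time.Now().Add(c.ttl)}
}

// Delete removes key and reports whether it was present.
func (c *TTLCache) Delete(_ context.Context, key dataloader.Key) bool {
    c.mu.Lock()
    defer c.mu.Unlock()
    _, ok := c.items[key.String()]
    delete(c.items, key.String())
    return ok
}

// Clear drops every entry.
func (c *TTLCache) Clear() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.items = make(map[string]ttlEntry)
}
```

Anything satisfying `dataloader.Cache` can be swapped in through `dataloader.WithCache`, which is exactly what the loader changes further down do.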
```diff
@@ -30,7 +30,7 @@ func deleteMutation(ctx context.Context, id types.ID) error {
         return err
     }
 
-    ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, id)
+    ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, id)
 
     return nil
 }
@@ -69,7 +69,7 @@ func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error)
 
     newID.Normalize()
 
-    ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, newID)
+    ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, newID)
 
     return &types.File{
         ID: newID,
@@ -121,11 +121,16 @@ func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, er
 
         deleteMutation(ctx, file.ID)
 
+        loader.InvalidedCacheForId(ctx, newID)
+        loader.InvalidedCacheForId(ctx, file.ID)
+
         result = append(result, &types.File{
             ID: newID,
         })
     }
 
+    loader.InvalidedCacheForId(ctx, src)
+
     return result, nil
 }
 
@@ -170,7 +175,7 @@ func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, err
 
     newId.Normalize()
 
-    ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, newId)
+    ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, newId)
 
     return &types.File{
         ID: newId,
@@ -201,7 +206,7 @@ func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error)
 
     newID.Normalize()
 
-    ctx.Value("loader").(*loader.Loader).InvalidateCacheForDir(ctx, newID)
+    ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, newID)
 
     return &types.Directory{
         ID: newID,
@@ -254,7 +259,7 @@ func deleteDirectory(ctx context.Context, id types.ID) error {
         }
     }
 
-    loader.InvalidateCacheForDir(ctx, id)
+    loader.InvalidedCacheForId(ctx, id)
 
     return nil
 }
```
```diff
@@ -194,8 +194,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
     }
 
     loader := ctx.Value("loader").(*loader.Loader)
-    loader.InvalidateCacheForFile(ctx, *id)
-    loader.InvalidateCacheForDir(ctx, *id.Parent())
+    loader.InvalidedCacheForId(ctx, *id)
 
     rw.WriteHeader(http.StatusCreated)
 }
```
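Both the resolver hunks and the handler above read the loader with `ctx.Value("loader").(*loader.Loader)`; where that value is injected is not part of this compare. A hypothetical middleware that would put it there (function name and import path assumed, not taken from the repository) could look like:

```go
package s3browser

import (
    "context"
    "net/http"

    loader "git.kapelle.org/niklas/s3browser/internal/loader" // assumed path, by analogy with the other internal imports
)

// withLoader attaches a per-process Loader to every request context.
// Purely illustrative; the real injection point is not shown in this compare.
func withLoader(l *loader.Loader, next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // The string key matches ctx.Value("loader") used in the handlers above.
        ctx := context.WithValue(r.Context(), "loader", l)
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}
```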
```diff
@@ -6,6 +6,7 @@ import (
     "path/filepath"
     "strings"
 
+    "git.kapelle.org/niklas/s3browser/internal/cache"
     "git.kapelle.org/niklas/s3browser/internal/helper"
     types "git.kapelle.org/niklas/s3browser/internal/types"
     "github.com/graph-gophers/dataloader"
@@ -17,26 +18,43 @@ type Loader struct {
     listObjectsRecursiveLoader *dataloader.Loader
     statObjectLoader           *dataloader.Loader
     listBucketsLoader          *dataloader.Loader
+
+    listObjectsLoaderCache          cache.S3Cache
+    listObjectsRecursiveLoaderCache cache.S3Cache
+    statObjectLoaderCache           cache.S3Cache
+    listBucketsLoaderCache          cache.S3Cache
 }
 
 func NewLoader(config types.AppConfig) *Loader {
+    listObjectsLoaderCache := &dataloader.NoCache{}
+    listObjectsRecursiveLoaderCache := &dataloader.NoCache{}
+    statObjectLoaderCache := cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)
+    listBucketsLoaderCache := cache.NewTTLCache(config.CacheTTL, config.CacheCleanup)
+
     return &Loader{
         listObjectsLoader: dataloader.NewBatchedLoader(
             listObjectsBatch,
-            dataloader.WithCache(&dataloader.NoCache{}),
+            dataloader.WithCache(listObjectsLoaderCache),
         ),
+        listObjectsLoaderCache: listObjectsLoaderCache,
+
         listObjectsRecursiveLoader: dataloader.NewBatchedLoader(
             listObjectsRecursiveBatch,
-            dataloader.WithCache(&dataloader.NoCache{}),
+            dataloader.WithCache(listObjectsRecursiveLoaderCache),
         ),
+        listObjectsRecursiveLoaderCache: listObjectsRecursiveLoaderCache,
+
         statObjectLoader: dataloader.NewBatchedLoader(
             statObjectBatch,
-            dataloader.WithCache(&dataloader.NoCache{}),
+            dataloader.WithCache(statObjectLoaderCache),
         ),
+        statObjectLoaderCache: statObjectLoaderCache,
+
         listBucketsLoader: dataloader.NewBatchedLoader(
             listBucketsBatch,
-            dataloader.WithCache(&dataloader.NoCache{}),
+            dataloader.WithCache(listBucketsLoaderCache),
         ),
+        listBucketsLoaderCache: listBucketsLoaderCache,
     }
 }
 
@@ -143,16 +161,17 @@ func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.
     return files, nil
 }
 
-func (l *Loader) InvalidateCacheForFile(ctx context.Context, id types.ID) {
+func (l *Loader) InvalidedCacheForId(ctx context.Context, id types.ID) {
     parent := id.Parent()
 
     l.statObjectLoader.Clear(ctx, id)
 
+    // Code below is useless for now until we use a propper cache for "listObjectsLoader" and "listObjectsRecursiveLoader"
+    // TODO: implement cache invalidation for "listObjectsLoader" and "listObjectsRecursiveLoader"
     l.listObjectsLoader.Clear(ctx, id).Clear(ctx, parent)
-}
 
-func (l *Loader) InvalidateCacheForDir(ctx context.Context, path types.ID) {
-    parent := helper.GetParentDir(path)
-    l.listObjectsLoader.Clear(ctx, path).Clear(ctx, parent)
-    l.listObjectsRecursiveLoader.Clear(ctx, path).Clear(ctx, parent)
+    // Remove up from recursive list
+    for rParent := parent; rParent != nil; rParent = rParent.Parent() {
+        l.listObjectsRecursiveLoader.Clear(ctx, rParent)
+    }
 }
```
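`Clear(ctx, id)` and the batched `Load` calls take a `dataloader.Key`, so `types.ID` presumably implements that interface; its definition is not included in this compare. A self-contained, purely illustrative key type:

```go
package main

import (
    "fmt"

    "github.com/graph-gophers/dataloader"
)

// pathID is a hypothetical stand-in for types.ID. Anything with String() and
// Raw() satisfies dataloader.Key and can be passed to Load, Clear and a Cache.
type pathID struct {
    Bucket string
    Key    string
}

func (p pathID) String() string   { return p.Bucket + "/" + p.Key } // used as the cache identity
func (p pathID) Raw() interface{} { return p }

func main() {
    var k dataloader.Key = pathID{Bucket: "dev", Key: "dir/file.txt"}
    fmt.Println(k.String())
}
```

Because `String()` is the cache identity, the `Clear` calls above (and any cache plugged in via `dataloader.WithCache`) key off whatever `types.ID.String()` returns.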
```diff
@@ -2,7 +2,6 @@ package s3browser
 
 import (
     "context"
-    "fmt"
 
     "github.com/minio/minio-go/v7"
     "github.com/minio/minio-go/v7/pkg/credentials"
@@ -17,26 +16,10 @@
 
 // setupS3Client connect the s3Client
 func setupS3Client(config types.AppConfig) (*minio.Client, error) {
-    minioClient, err := minio.New(config.S3Endoint, &minio.Options{
+    return minio.New(config.S3Endoint, &minio.Options{
         Creds:  credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
         Secure: config.S3SSL,
     })
-
-    if err != nil {
-        return nil, err
-    }
-
-    exists, err := minioClient.BucketExists(context.Background(), config.S3Bucket)
-
-    if err != nil {
-        return nil, err
-    }
-
-    if !exists {
-        return nil, fmt.Errorf("Bucket '%s' does not exist", config.S3Bucket)
-    }
-
-    return minioClient, nil
 }
 
 // Start starts the app
```
```diff
@@ -12,7 +12,6 @@ type AppConfig struct {
     S3AccessKey  string
     S3SecretKey  string
     S3SSL        bool
-    S3Bucket     string
     DSN          string
     CacheTTL     time.Duration
     CacheCleanup time.Duration
```