Compare commits

...

30 Commits

Author SHA1 Message Date
125ce9c955 fix get value db 2022-02-07 15:59:52 +01:00
131f19deed db interface 2022-02-07 15:59:43 +01:00
2ac552e840 added gql tests 2022-02-07 15:56:09 +01:00
48c50a5b7e typesInit flag 2022-02-07 15:53:14 +01:00
48f770f703 added loader tests 2021-11-27 04:07:51 +01:00
2ae14cdfd4 fixed mock s3 list dirs 2021-11-27 04:07:41 +01:00
a10593a318 loader cache config 2021-11-27 04:07:27 +01:00
0971301562 added tests 2021-11-25 01:57:38 +01:00
47befe6db1 fixed id parent on root on root 2021-11-25 01:57:24 +01:00
979ebee677 fixed mock s3 list recursive 2021-11-25 01:56:57 +01:00
8d85d645d6 use s3Service interface 2021-11-23 20:12:24 +01:00
60817c2249 added mock s3 implementation 2021-11-23 01:54:02 +01:00
bead881af2 added s3 interface with minio implementation 2021-11-23 01:53:51 +01:00
80302b62f4 added Name func to ID 2021-11-23 01:53:04 +01:00
2353a0bf53 fixed embeded static files 2021-11-22 02:26:08 +01:00
c5ab0156fd more cache issues 2021-11-22 01:34:22 +01:00
686630b2df removed s3 bucket from config 2021-11-04 20:40:00 +01:00
8725def3a1 added connection string to config 2021-11-04 20:28:37 +01:00
9be7b6c18f cleaned up debug log messages 2021-11-04 19:41:50 +01:00
13f3217a38 implemented moveDir 2021-11-02 00:33:55 +01:00
aa82cd938c id improved parent method 2021-11-02 00:33:33 +01:00
aac1ca8891 ok that it. No cache. Cache is no more. 2021-10-20 23:38:09 +02:00
2268b518b1 added database 2021-10-20 16:11:18 +02:00
8c6f59a6b4 fixed cache invalidation 2021-10-14 23:46:48 +02:00
439e5473b6 even more refactor 2021-10-14 19:00:11 +02:00
d2b0364445 added list buckets query 2021-09-29 21:41:50 +02:00
4344bf841c id parse on object get and put 2021-09-28 16:04:18 +02:00
f2a8e0197d fixed typo in gql 2021-09-27 19:48:57 +02:00
74037dfab5 the big refactor 2: return of the ID struct 2021-09-27 01:59:32 +02:00
91e217e472 added ID type 2021-09-26 23:11:02 +02:00
29 changed files with 1665 additions and 543 deletions

2
.env
View File

@@ -2,7 +2,7 @@
S3_ENDPOINT=localhost:9000
S3_ACCESS_KEY=testo
S3_SECRET_KEY=testotesto
S3_BUCKET=dev
S3_DISABLE_SSL=true
ADDRESS=:8080
VERBOSE=true
DB_CONNECTION=s3Browser:hunter2@/s3Browser

View File

@@ -12,12 +12,12 @@ type args struct {
S3Endpoint string `arg:"--s3-endpoint,required,env:S3_ENDPOINT" help:"host[:port]" placeholder:"ENDPOINT"`
S3AccessKey string `arg:"--s3-access-key,required,env:S3_ACCESS_KEY" placeholder:"ACCESS_KEY"`
S3SecretKey string `arg:"--s3-secret-key,required,env:S3_SECRET_KEY" placeholder:"SECRET_KEY"`
S3Bucket string `arg:"--s3-bucket,required,env:S3_BUCKET" placeholder:"BUCKET"`
S3DisableSSL bool `arg:"--s3-disable-ssl,env:S3_DISABLE_SSL" default:"false"`
Address string `arg:"--address,env:ADDRESS" default:":3000" help:"what address to listen on" placeholder:"ADDRESS"`
CacheTTL int64 `arg:"--cache-ttl,env:CACHE_TTL" help:"Time in seconds" default:"30" placeholder:"TTL"`
CacheCleanup int64 `arg:"--cache-cleanup,env:CACHE_CLEANUP" help:"Time in seconds" default:"60" placeholder:"CLEANUP"`
Verbose bool `arg:"-v,--verbose,env:VERBOSE" help:"verbosity level" default:"false"`
DBConnection string `arg:"--db,required,env:DB_CONNECTION" help:"DSN in format: https://github.com/go-sql-driver/mysql#dsn-data-source-name"`
}
func (args) Version() string {
@@ -34,7 +34,7 @@ func main() {
S3SSL: !args.S3DisableSSL,
S3AccessKey: args.S3AccessKey,
S3SecretKey: args.S3SecretKey,
S3Bucket: args.S3Bucket,
DSN: args.DBConnection,
CacheTTL: time.Duration(args.CacheTTL) * time.Second,
CacheCleanup: time.Duration(args.CacheCleanup) * time.Second,
Address: args.Address,

View File

@@ -13,6 +13,20 @@ services:
command: server /data --console-address ":9001"
volumes:
- minio_dev:/data
db:
container_name: db
image: mariadb
environment:
- MARIADB_ROOT_PASSWORD=hunter2
- MARIADB_DATABASE=s3Browser
- MARIADB_USER=s3Browser
- MARIADB_PASSWORD=hunter2
ports:
- 3306:3306
volumes:
- mariadb_dev:/var/lib/mysql
volumes:
minio_dev:
name: minio_dev
mariadb_dev:
name: mariadb_dev

3
go.mod
View File

@@ -4,6 +4,7 @@ go 1.16
require (
github.com/alexflint/go-arg v1.4.2
github.com/go-sql-driver/mysql v1.6.0
github.com/golang-jwt/jwt v3.2.2+incompatible
github.com/gorilla/mux v1.8.0
github.com/graph-gophers/dataloader v5.0.0+incompatible
@@ -13,4 +14,6 @@ require (
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f
)

6
go.sum
View File

@@ -7,6 +7,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -65,6 +67,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU=
@@ -92,3 +96,5 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

9
internal/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,9 @@
package cache
import (
"github.com/graph-gophers/dataloader"
)
// S3Cache is the caching contract used by the loaders. It currently just
// embeds the dataloader cache interface so concrete caches (TTL, no-op)
// can be swapped without changing loader code.
type S3Cache interface {
	dataloader.Cache
}

View File

@@ -1,4 +1,4 @@
package s3browser
package cache
import (
"context"
@@ -8,17 +8,18 @@ import (
gocache "github.com/patrickmn/go-cache"
)
type cache struct {
type TTLCache struct {
c *gocache.Cache
}
func newCache(ttl, cleanupInterval time.Duration) *cache {
return &cache{
// Create new ttl cache
func NewTTLCache(ttl, cleanupInterval time.Duration) *TTLCache {
return &TTLCache{
c: gocache.New(ttl, cleanupInterval),
}
}
func (c *cache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
func (c *TTLCache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
v, ok := c.c.Get(key.String())
if ok {
return v.(dataloader.Thunk), ok
@@ -26,11 +27,11 @@ func (c *cache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bo
return nil, ok
}
func (c *cache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
func (c *TTLCache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
c.c.Set(key.String(), value, 0)
}
func (c *cache) Delete(_ context.Context, key dataloader.Key) bool {
func (c *TTLCache) Delete(_ context.Context, key dataloader.Key) bool {
if _, found := c.c.Get(key.String()); found {
c.c.Delete(key.String())
return true
@@ -38,6 +39,6 @@ func (c *cache) Delete(_ context.Context, key dataloader.Key) bool {
return false
}
func (c *cache) Clear() {
func (c *TTLCache) Clear() {
c.c.Flush()
}

View File

@@ -1,246 +0,0 @@
package s3browser
import (
"context"
"fmt"
"path/filepath"
"strings"
types "git.kapelle.org/niklas/s3browser/internal/types"
"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
log "github.com/sirupsen/logrus"
)
// listObjectsBatch batch func for calling s3.ListObjects()
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("listObjectsBatch: ", k.Keys())
var results []*dataloader.Result
s3Client, ok := c.Value("s3Client").(*minio.Client)
if !ok {
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
}
for _, v := range k {
results = append(results, &dataloader.Result{
Data: listObjects(s3Client, bucketName, v.String(), false),
Error: nil,
})
}
return results
}
// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true
func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("listObjectsRecursiveBatch: ", k.Keys())
var results []*dataloader.Result
s3Client, ok := c.Value("s3Client").(*minio.Client)
if !ok {
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
}
for _, v := range k {
results = append(results, &dataloader.Result{
Data: listObjects(s3Client, bucketName, v.String(), true),
Error: nil,
})
}
return results
}
// listObjects helper func for listObjectsBatch
func listObjects(s3Client *minio.Client, bukitName, path string, recursive bool) []minio.ObjectInfo {
log.Debug("S3 call 'ListObjects': ", path)
objectCh := s3Client.ListObjects(context.Background(), bukitName, minio.ListObjectsOptions{
Prefix: path,
Recursive: recursive,
})
result := make([]minio.ObjectInfo, 0)
for obj := range objectCh {
result = append(result, obj)
}
return result
}
// getFilesBatch batch func for getting all files in path. Uses "listObjects" dataloader
func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("getFilesBatch: ", k.Keys())
var results []*dataloader.Result
loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
if !ok {
return handleLoaderError(k, fmt.Errorf("Failed to get loader from context"))
}
for _, v := range k {
path := v.String()
files := make([]types.File, 0)
if !strings.HasSuffix(path, "/") {
path += "/"
}
thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))
objects, _ := thunk()
// TODO: handle thunk error
for _, obj := range objects.([]minio.ObjectInfo) {
if obj.Err != nil {
// TODO: how to handle?
} else if !strings.HasSuffix(obj.Key, "/") {
files = append(files, types.File{
ID: obj.Key,
Name: filepath.Base(obj.Key),
Size: obj.Size,
ContentType: obj.ContentType,
ETag: obj.ETag,
LastModified: obj.LastModified,
})
}
}
results = append(results, &dataloader.Result{
Data: files,
Error: nil,
})
}
return results
}
// getFileBatch batch func for getting object info
func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("getFileBatch: ", k.Keys())
var results []*dataloader.Result
s3Client, ok := c.Value("s3Client").(*minio.Client)
if !ok {
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
}
for _, v := range k {
log.Debug("S3 call 'StatObject': ", v.String())
obj, err := s3Client.StatObject(context.Background(), bucketName, v.String(), minio.StatObjectOptions{})
if err != nil {
results = append(results, &dataloader.Result{
Data: nil,
Error: err,
})
} else {
results = append(results, &dataloader.Result{
Data: &types.File{
ID: obj.Key,
Size: obj.Size,
ContentType: obj.ContentType,
ETag: obj.ETag,
LastModified: obj.LastModified,
},
Error: nil,
})
}
}
return results
}
// getDirsBatch batch func for getting dirs in a path
func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("getDirsBatch: ", k.Keys())
var results []*dataloader.Result
loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
if !ok {
return handleLoaderError(k, fmt.Errorf("Failed to get loader from context"))
}
for _, v := range k {
path := v.String()
dirs := make([]types.Directory, 0)
if !strings.HasSuffix(path, "/") {
path += "/"
}
thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))
objects, _ := thunk()
// TODO: handle thunk error
for _, obj := range objects.([]minio.ObjectInfo) {
if obj.Err != nil {
// TODO: how to handle?
} else if strings.HasSuffix(obj.Key, "/") {
dirs = append(dirs, types.Directory{
ID: obj.Key,
Name: filepath.Base(obj.Key),
})
}
}
results = append(results, &dataloader.Result{
Data: dirs,
Error: nil,
})
}
return results
}
// handleLoaderError helper func when the whole batch failed
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
log.Error(err.Error())
var results []*dataloader.Result
for range k {
results = append(results, &dataloader.Result{
Data: nil,
Error: err,
})
}
return results
}
// createDataloader create all dataloaders and return a map of them plus a cache for objects
func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
loaderMap := make(map[string]*dataloader.Loader, 0)
loaderMap["getFiles"] = dataloader.NewBatchedLoader(
getFilesBatch,
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
)
loaderMap["getFile"] = dataloader.NewBatchedLoader(
getFileBatch,
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
)
loaderMap["listObjects"] = dataloader.NewBatchedLoader(
listObjectsBatch,
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
)
loaderMap["listObjectsRecursive"] = dataloader.NewBatchedLoader(
listObjectsRecursiveBatch,
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
)
loaderMap["getDirs"] = dataloader.NewBatchedLoader(
getDirsBatch,
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
)
return loaderMap
}

9
internal/db/db.go Normal file
View File

@@ -0,0 +1,9 @@
package db
import "context"
// DB abstracts the user store so concrete backends (e.g. MySQL) can be
// swapped or mocked in tests.
type DB interface {
	// Setup creates the required database schema.
	Setup() error
	// CheckLogin reports whether the username/password pair is valid.
	CheckLogin(ctx context.Context, username, password string) (bool, error)
	// AddUser stores a new user; the implementation hashes the password.
	AddUser(ctx context.Context, username, password string) error
}

94
internal/db/mysql.go Normal file
View File

@@ -0,0 +1,94 @@
package db
import (
"context"
"database/sql"
_ "embed"
"time"
_ "github.com/go-sql-driver/mysql"
"golang.org/x/crypto/bcrypt"
)
//go:embed setup.sql
var setupSql string // schema bootstrap statements, embedded at build time

// DB_NAME is the expected schema name (matches setup.sql).
// NOTE(review): not referenced in this file — confirm usage before removing.
const DB_NAME = "s3Browser"

// mysqlDB implements DB on top of a MySQL/MariaDB connection pool.
type mysqlDB struct {
	dbConn *sql.DB
}
// NewDB opens a MySQL connection pool for the given DSN and returns it
// wrapped in the DB interface. Note that sql.Open does not dial the
// database; connections are established lazily on first use.
func NewDB(dataSourceName string) (DB, error) {
	conn, err := sql.Open("mysql", dataSourceName)
	if err != nil {
		return nil, err
	}

	// Pool tuning: recycle connections after three minutes and cap both
	// open and idle connections at ten.
	conn.SetConnMaxLifetime(3 * time.Minute)
	conn.SetMaxOpenConns(10)
	conn.SetMaxIdleConns(10)

	return &mysqlDB{dbConn: conn}, nil
}
// Setup executes the embedded schema statements inside a single
// transaction; any execution error rolls the transaction back.
func (d *mysqlDB) Setup() error {
	tx, err := d.dbConn.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(setupSql); err != nil {
		tx.Rollback()
		return err
	}

	return tx.Commit()
}
// CheckLogin reports whether the given username/password pair matches a
// stored user. A missing user and a wrong password both return
// (false, nil); only infrastructure problems return a non-nil error.
func (d *mysqlDB) CheckLogin(ctx context.Context, username, password string) (bool, error) {
	rows, err := d.dbConn.QueryContext(ctx, "SELECT password FROM user WHERE username = ?", username)
	if err != nil {
		return false, err
	}
	// Fix: the result set was never closed, leaking a pooled connection on
	// every login attempt.
	defer rows.Close()

	if !rows.Next() {
		// Distinguish "no such user" (nil) from an iteration error.
		return false, rows.Err()
	}

	var passwordHash []byte
	if err := rows.Scan(&passwordHash); err != nil {
		return false, err
	}

	// bcrypt returns a non-nil error on mismatch; that is a failed login,
	// not an infrastructure error.
	return bcrypt.CompareHashAndPassword(passwordHash, []byte(password)) == nil, nil
}
// AddUser inserts a new user row, storing a bcrypt hash of the password
// rather than the plaintext.
func (d *mysqlDB) AddUser(ctx context.Context, username, password string) error {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return err
	}

	if _, err := d.dbConn.ExecContext(ctx,
		"INSERT INTO user (username,password) VALUES (?,?)",
		username, hash,
	); err != nil {
		return err
	}

	return nil
}

11
internal/db/setup.sql Normal file
View File

@@ -0,0 +1,11 @@
-- User table holding login credentials.
-- NOTE(review): the schema name is hardcoded here; it must match the
-- database configured in the DSN (DB_CONNECTION) — confirm.
CREATE TABLE s3Browser.`user` (
	id INT auto_increment NOT NULL,
	username varchar(100) NOT NULL,
	-- bcrypt hashes are 60 bytes long, hence varchar(60)
	password varchar(60) NOT NULL,
	CONSTRAINT user_PK PRIMARY KEY (id),
	CONSTRAINT user_UN UNIQUE KEY (username)
)
ENGINE=InnoDB
DEFAULT CHARSET=utf8mb4
COLLATE=utf8mb4_general_ci;

75
internal/gql/gql_test.go Normal file
View File

@@ -0,0 +1,75 @@
package gql_test
import (
"context"
"testing"
"git.kapelle.org/niklas/s3browser/internal/gql"
"git.kapelle.org/niklas/s3browser/internal/loader"
"git.kapelle.org/niklas/s3browser/internal/s3"
"github.com/graph-gophers/dataloader"
"github.com/graphql-go/graphql"
"github.com/stretchr/testify/assert"
)
// setup builds the shared test fixtures: an assertions helper, a context
// pre-populated with a mock S3 client and a cache-less loader, and the
// GraphQL schema under test.
func setup(t *testing.T) (*assert.Assertions, context.Context, graphql.Schema) {
	assert := assert.New(t)
	ctx := context.Background()
	// Schema creation errors are covered separately by TestCreateSchema.
	schema, _ := gql.GraphqlSchema()

	s3, err := s3.NewMockS3([]string{"bucket1"})
	assert.NoError(err)

	ctx = context.WithValue(ctx, "s3Client", s3)

	// Disable all dataloader caches so every test observes fresh mock data.
	loader := loader.NewLoader(loader.CacheConfig{
		ListObjectsLoaderCache:          &dataloader.NoCache{},
		ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
		StatObjectLoaderCache:           &dataloader.NoCache{},
		ListBucketsLoaderCache:          &dataloader.NoCache{},
	})

	assert.NotNil(loader)

	ctx = context.WithValue(ctx, "loader", loader)

	return assert, ctx, schema
}
// do runs a GraphQL query against the given schema with ctx attached and
// returns the raw execution result.
func do(ctx context.Context, schema graphql.Schema, query string) *graphql.Result {
	return graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
		Context:       ctx,
	})
}
// TestCreateSchema verifies that type and schema construction neither
// panics nor returns an error, and yields a usable schema value.
func TestCreateSchema(t *testing.T) {
	assert := assert.New(t)

	assert.NotPanics(func() {
		gql.GraphqlTypes()
	})

	var schema graphql.Schema
	var err error

	assert.NotPanics(func() {
		schema, err = gql.GraphqlSchema()
	})

	assert.NoError(err)
	assert.NotNil(schema)
}
// TestAuth runs the "authorized" query against the test schema and expects
// it to resolve without errors.
// NOTE(review): only the absence of errors is asserted, not the returned
// authorization value — consider tightening.
func TestAuth(t *testing.T) {
	assert, ctx, schema := setup(t)

	r := do(ctx, schema, `
	{
		authorized
	}
	`)

	t.Logf("Data: %v", r.Data)

	assert.Len(r.Errors, 0)
}

View File

@@ -5,17 +5,19 @@ import (
"path/filepath"
"time"
"github.com/graph-gophers/dataloader"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/language/ast"
helper "git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/loader"
types "git.kapelle.org/niklas/s3browser/internal/types"
)
var typesInit bool = false
var graphqlDirType *graphql.Object
var graphqlFileType *graphql.Object
var graphqlLoginResultType *graphql.Object
var objIDType *graphql.Scalar
//GraphqlTypes create all graphql types and stores the in the global variables
func GraphqlTypes() {
@@ -45,7 +47,39 @@ func GraphqlTypes() {
ParseLiteral: func(valueAST ast.Value) interface{} {
switch valueAST := valueAST.(type) {
case *ast.StringValue:
return valueAST.Value
if tval, err := time.Parse(time.RFC3339, valueAST.Value); err != nil {
return nil
} else {
return tval
}
}
return nil
},
})
objIDType = graphql.NewScalar(graphql.ScalarConfig{
Name: "objID",
Description: `String representing a bucket, key and version combination.
Looks like this: "bucketName:/name/of/key" or "bucketName@version:/name/of/key"`,
Serialize: func(value interface{}) interface{} {
switch value := value.(type) {
case types.ID:
return value.String()
}
return "INVALID"
},
ParseValue: func(value interface{}) interface{} {
switch tvalue := value.(type) {
case string:
return types.ParseID(tvalue)
}
return nil
},
ParseLiteral: func(valueAST ast.Value) interface{} {
switch valueAST := valueAST.(type) {
case *ast.StringValue:
return types.ParseID(valueAST.Value)
}
return nil
},
@@ -56,7 +90,7 @@ func GraphqlTypes() {
Description: "Represents a directory",
Fields: graphql.Fields{
"id": &graphql.Field{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
"name": &graphql.Field{
Type: graphql.String,
@@ -66,7 +100,7 @@ func GraphqlTypes() {
return nil, fmt.Errorf("Failed to parse source for resolve")
}
return filepath.Base(source.ID), nil
return filepath.Base(source.ID.Key), nil
},
},
},
@@ -77,7 +111,7 @@ func GraphqlTypes() {
Description: "Represents a file, not a directory",
Fields: graphql.Fields{
"id": &graphql.Field{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
Description: "The uniqe ID of the file. Represents the path and the s3 key.",
},
"name": &graphql.Field{
@@ -88,7 +122,7 @@ func GraphqlTypes() {
return nil, fmt.Errorf("Failed to parse source for resolve")
}
return filepath.Base(source.ID), nil
return filepath.Base(source.ID.Key), nil
},
},
"size": &graphql.Field{
@@ -143,10 +177,14 @@ func GraphqlTypes() {
return nil, fmt.Errorf("Failed to parse Source for parent resolve")
}
basename := helper.GetPathFromId(source.ID)
parent := source.ID.Parent()
if parent == nil {
return nil, nil
}
return types.Directory{
ID: basename,
ID: *source.ID.Parent(),
}, nil
},
},
@@ -161,10 +199,9 @@ func GraphqlTypes() {
return nil, fmt.Errorf("Failed to parse Source for files resolve")
}
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
loader := p.Context.Value("loader").(*loader.Loader)
thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
return thunk()
return loader.GetFiles(p.Context, source.ID)
},
})
@@ -176,10 +213,8 @@ func GraphqlTypes() {
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
}
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
return thunk()
loader := p.Context.Value("loader").(*loader.Loader)
return loader.GetDirs(p.Context, source.ID)
},
})
@@ -212,6 +247,8 @@ func GraphqlTypes() {
},
})
typesInit = true
}
//loadFile helper func for using the dataloader to get a file
@@ -221,17 +258,14 @@ func loadFile(p graphql.ResolveParams) (*types.File, error) {
return nil, fmt.Errorf("Failed to parse source for resolve")
}
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
loader := p.Context.Value("loader").(*loader.Loader)
thunk := loader["getFile"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
result, err := thunk()
file, err := loader.GetFile(p.Context, source.ID)
if err != nil {
return nil, err
}
file, ok := result.(*types.File)
if !ok {
return nil, fmt.Errorf("Failed to load file")
}

View File

@@ -5,90 +5,132 @@ import (
"fmt"
"strings"
"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
"git.kapelle.org/niklas/s3browser/internal/db"
helper "git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/loader"
"git.kapelle.org/niklas/s3browser/internal/s3"
types "git.kapelle.org/niklas/s3browser/internal/types"
log "github.com/sirupsen/logrus"
)
func deleteMutation(ctx context.Context, id string) error {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)
func deleteMutation(ctx context.Context, id types.ID) error {
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
if !ok {
return fmt.Errorf("Failed to get s3Client from context")
}
log.Debug("S3 'RemoveObject': ", id)
// TODO: it is posible to remove multiple objects with a single call.
// Is it better to batch this?
err := s3Client.RemoveObject(ctx, "dev", id, minio.RemoveObjectOptions{})
err := s3Client.RemoveObject(ctx, id)
if err != nil {
return err
}
// Invalidate cache
return helper.InvalidateCache(ctx, helper.NomalizeID(id))
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, id)
return nil
}
func copyMutation(ctx context.Context, src, dest string) (*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)
func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
if !ok {
return nil, fmt.Errorf("Failed to get s3Client from context")
}
// Check if dest is a file or a dir
if strings.HasSuffix(dest, "/") {
if dest.IsDirectory() {
// create new dest id
// TODO: What if a file with this id already exists?
dest += helper.GetFilenameFromID(src)
dest.Key += helper.GetFilenameFromKey(src.Key)
}
info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
Bucket: "dev",
Object: dest,
}, minio.CopySrcOptions{
Bucket: "dev",
Object: src,
})
log.Debug("S3 'CopyObject': ", src, "-->", dest)
err := s3Client.CopyObject(ctx, src, dest)
if err != nil {
return nil, err
}
// Invalidate cache
// TODO: check error
helper.InvalidateCache(ctx, helper.NomalizeID(info.Key))
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, dest)
return &types.File{
ID: info.Key,
ID: dest,
}, nil
}
func moveMutation(ctx context.Context, src, dest string) (*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)
func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
if !ok {
return nil, fmt.Errorf("Failed to get s3Client from context")
}
if !dest.IsDirectory() {
return nil, fmt.Errorf("Dest must be a directory")
}
loader, ok := ctx.Value("loader").(*loader.Loader)
// "move" all file inside dir
files, err := loader.GetFilesRecursive(ctx, src)
if err != nil {
return nil, err
}
var result []*types.File
parent := src.Parent()
for _, file := range files {
newID := types.ID{
Bucket: dest.Bucket,
Key: strings.Replace(file.ID.Key, parent.Key, dest.Key, 1),
}
newID.Normalize()
log.Debug("S3 'CopyObject': ", src, "-->", dest)
err := s3Client.CopyObject(ctx, file.ID, dest)
if err != nil {
// TODO: handle error
}
deleteMutation(ctx, file.ID)
loader.InvalidedCacheForId(ctx, newID)
loader.InvalidedCacheForId(ctx, file.ID)
result = append(result, &types.File{
ID: newID,
})
}
loader.InvalidedCacheForId(ctx, src)
return result, nil
}
func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
if !ok {
return nil, fmt.Errorf("Failed to get s3Client from context")
}
// Check if dest is a file or a dir
if strings.HasSuffix(dest, "/") {
if dest.IsDirectory() {
// create new dest id
// TODO: What if a file with this id already exists?
dest += helper.GetFilenameFromID(src)
dest.Key += helper.GetFilenameFromKey(src.Key)
}
log.Debug("S3 'CopyObject': ", src, "-->", dest)
// There is no (spoon) move. Only copy and delete
info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
Bucket: "dev",
Object: dest,
}, minio.CopySrcOptions{
Bucket: "dev",
Object: src,
})
err := s3Client.CopyObject(ctx, src, dest)
if err != nil {
return nil, err
@@ -100,76 +142,60 @@ func moveMutation(ctx context.Context, src, dest string) (*types.File, error) {
return nil, err
}
helper.InvalidateCache(ctx, helper.NomalizeID(info.Key))
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, dest)
return &types.File{
ID: info.Key,
ID: dest,
}, nil
}
func createDirectory(ctx context.Context, path string) (*types.Directory, error) {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)
func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) {
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
if !ok {
return nil, fmt.Errorf("Failed to get s3Client from context")
}
if !strings.HasSuffix(path, "/") {
path += "/"
}
info, err := s3Client.PutObject(ctx, "dev", path, strings.NewReader(""), 0, minio.PutObjectOptions{
ContentType: "application/x-directory",
})
log.Debug("S3 'PutObject': ", id)
err := s3Client.PutObject(ctx, id, strings.NewReader(""), 0) // TODO: s3client interface needs content type parameter
if err != nil {
return nil, err
}
// Invalidate cache
// TODO: check error
helper.InvalidateCacheForDir(ctx, helper.NomalizeID(info.Key))
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, id)
return &types.Directory{
ID: info.Key,
ID: id,
}, nil
}
func deleteDirectory(ctx context.Context, path string) error {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)
func deleteDirectory(ctx context.Context, id types.ID) error {
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
if !ok {
return fmt.Errorf("Failed to get s3Client from context")
}
loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
loader, ok := ctx.Value("loader").(*loader.Loader)
if !ok {
return fmt.Errorf("Failed to get dataloader from context")
}
if !strings.HasSuffix(path, "/") {
path += "/"
}
// Get all files inside the directory
thunk := loader["listObjectsRecursive"].Load(ctx, dataloader.StringKey(helper.NomalizeID(path)))
result, err := thunk()
files, err := loader.GetFilesRecursive(ctx, id)
if err != nil {
return err
}
files, ok := result.([]minio.ObjectInfo)
if !ok {
return fmt.Errorf("Failed to get parse result from listObjects")
}
// Delete all child files
err = helper.DeleteMultiple(ctx, *s3Client, files)
for _, file := range files {
s3Client.RemoveObject(ctx, file.ID)
}
if err != nil {
return err
@@ -181,14 +207,14 @@ func deleteDirectory(ctx context.Context, path string) error {
// This is at least the behavior when working with minio as s3 backend
// TODO: check if this is normal behavior when working with s3
if len(files) == 0 {
err := s3Client.RemoveObject(ctx, "dev", path, minio.RemoveObjectOptions{})
log.Debug("S3 'RemoveObject': ", id)
err := s3Client.RemoveObject(ctx, id)
if err != nil {
return err
}
}
//Invalidate cache
helper.InvalidateCacheForDir(ctx, helper.NomalizeID(path))
loader.InvalidedCacheForId(ctx, id)
return nil
}
@@ -196,8 +222,11 @@ func deleteDirectory(ctx context.Context, path string) error {
//login Checks for valid username password combination. Returns singed jwt string
func login(ctx context.Context, username, password string) (types.LoginResult, error) {
// TODO: replace with propper user management
if username != "admin" && password != "hunter2" {
dbStore := ctx.Value("dbStore").(db.DB)
succes, err := dbStore.CheckLogin(ctx, username, password)
if !succes {
return types.LoginResult{
Successful: false,
}, nil

View File

@@ -3,11 +3,11 @@ package gql
import (
"fmt"
"github.com/graph-gophers/dataloader"
"github.com/graphql-go/graphql"
s3errors "git.kapelle.org/niklas/s3browser/internal/errors"
helper "git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/loader"
types "git.kapelle.org/niklas/s3browser/internal/types"
log "github.com/sirupsen/logrus"
)
@@ -15,12 +15,16 @@ import (
//GraphqlSchema generate the schema with its root query and mutation
func GraphqlSchema() (graphql.Schema, error) {
if !typesInit {
GraphqlTypes()
}
queryFields := graphql.Fields{
"files": &graphql.Field{
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlFileType))),
Args: graphql.FieldConfigArgument{
"path": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -28,24 +32,23 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
path, ok := p.Args["path"].(string)
path, ok := p.Args["path"].(*types.ID)
if !ok {
return nil, nil
return nil, fmt.Errorf("Failed to parse args")
}
log.Debug("querry 'files': ", path)
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(path))
return thunk()
loader := p.Context.Value("loader").(*loader.Loader)
return loader.GetFiles(p.Context, *path)
},
},
"directorys": &graphql.Field{
"directories": &graphql.Field{
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlDirType))),
Args: graphql.FieldConfigArgument{
"path": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -53,7 +56,7 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
path, ok := p.Args["path"].(string)
path, ok := p.Args["path"].(*types.ID)
if !ok {
return nil, nil
@@ -61,16 +64,15 @@ func GraphqlSchema() (graphql.Schema, error) {
log.Debug("querry 'directorys': ", path)
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(path))
return thunk()
loader := p.Context.Value("loader").(*loader.Loader)
return loader.GetDirs(p.Context, *path)
},
},
"file": &graphql.Field{
Type: graphqlFileType,
Args: graphql.FieldConfigArgument{
"id": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -78,7 +80,7 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
id, ok := p.Args["id"].(string)
id, ok := p.Args["id"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
@@ -86,7 +88,7 @@ func GraphqlSchema() (graphql.Schema, error) {
log.Debug("querry 'file': ", id)
return types.File{
ID: id,
ID: *id,
}, nil
},
},
@@ -100,6 +102,19 @@ func GraphqlSchema() (graphql.Schema, error) {
return auth, nil
},
},
"buckets": &graphql.Field{
Name: "buckets",
Type: graphql.NewNonNull(graphql.NewList(graphql.String)),
Description: "List available buckets",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if !helper.IsAuthenticated(p.Context) {
return nil, s3errors.ErrNotAuthenticated
}
loader := p.Context.Value("loader").(*loader.Loader)
return loader.GetBuckets(p.Context)
},
},
}
mutationFields := graphql.Fields{
@@ -107,7 +122,7 @@ func GraphqlSchema() (graphql.Schema, error) {
Type: graphql.String,
Args: graphql.FieldConfigArgument{
"id": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -115,24 +130,24 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
id, ok := p.Args["id"].(string)
id, ok := p.Args["id"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
log.Debug("mutation 'delete': ", id)
return id, deleteMutation(p.Context, id)
return id, deleteMutation(p.Context, *id)
},
},
"copy": &graphql.Field{
Type: graphqlFileType,
Args: graphql.FieldConfigArgument{
"src": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
"dest": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -140,28 +155,28 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
src, ok := p.Args["src"].(string)
src, ok := p.Args["src"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
dest, ok := p.Args["dest"].(string)
dest, ok := p.Args["dest"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
log.Debug("mutation 'copy': ", src, "-->", dest)
return copyMutation(p.Context, src, dest)
return copyMutation(p.Context, *src, *dest)
},
},
"move": &graphql.Field{
Type: graphqlFileType,
Type: graphql.NewNonNull(graphqlFileType),
Args: graphql.FieldConfigArgument{
"src": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
"dest": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -169,25 +184,54 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
src, ok := p.Args["src"].(string)
src, ok := p.Args["src"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
dest, ok := p.Args["dest"].(string)
dest, ok := p.Args["dest"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
log.Debug("mutation 'move': ", src, "-->", dest)
return moveMutation(p.Context, src, dest)
return moveFileMutation(p.Context, *src, *dest)
},
},
"moveDir": &graphql.Field{
Type: graphql.NewNonNull(graphql.NewList(graphqlFileType)),
Args: graphql.FieldConfigArgument{
"src": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(objIDType),
},
"dest": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if !helper.IsAuthenticated(p.Context) {
return nil, s3errors.ErrNotAuthenticated
}
src, ok := p.Args["src"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
dest, ok := p.Args["dest"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
log.Debug("mutation 'moveDir': ", src, "-->", dest)
return moveDirMutation(p.Context, *src, *dest)
},
},
"createDir": &graphql.Field{
Type: graphql.NewNonNull(graphqlDirType),
Args: graphql.FieldConfigArgument{
"path": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -195,21 +239,21 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
path, ok := p.Args["path"].(string)
path, ok := p.Args["path"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
}
log.Debug("mutation 'createDir': ", path)
return createDirectory(p.Context, path)
return createDirectory(p.Context, *path)
},
},
"deleteDir": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Args: graphql.FieldConfigArgument{
"path": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.ID),
Type: graphql.NewNonNull(objIDType),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -217,7 +261,7 @@ func GraphqlSchema() (graphql.Schema, error) {
return nil, s3errors.ErrNotAuthenticated
}
path, ok := p.Args["path"].(string)
path, ok := p.Args["path"].(*types.ID)
if !ok {
return nil, fmt.Errorf("Failed to parse args")
@@ -225,7 +269,7 @@ func GraphqlSchema() (graphql.Schema, error) {
log.Debug("mutation 'deleteDir': ", path)
return path, deleteDirectory(p.Context, path)
return path, deleteDirectory(p.Context, *path)
},
},
"login": &graphql.Field{

View File

@@ -2,115 +2,48 @@ package helper
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/golang-jwt/jwt"
"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
log "github.com/sirupsen/logrus"
"git.kapelle.org/niklas/s3browser/internal/s3"
types "git.kapelle.org/niklas/s3browser/internal/types"
"github.com/golang-jwt/jwt"
)
func InvalidateCache(ctx context.Context, id string) error {
loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
if !ok {
return fmt.Errorf("Failed to get loader from context")
}
log.Debug("Invalidate cache for id: ", id)
path := GetPathFromId(id)
loader["getFile"].Clear(ctx, dataloader.StringKey(id))
loader["getFiles"].Clear(ctx, dataloader.StringKey(path))
loader["listObjects"].Clear(ctx, dataloader.StringKey(path))
loader["listObjectsRecursive"].Clear(ctx, dataloader.StringKey(path))
return nil
}
func GetPathFromId(id string) string {
dir := filepath.Dir(id)
if dir == "." {
return "/"
}
return NomalizeID(dir + "/")
}
func GetFilenameFromID(id string) string {
func GetFilenameFromKey(id string) string {
return filepath.Base(id)
}
func InvalidateCacheForDir(ctx context.Context, path string) error {
loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
if !ok {
return fmt.Errorf("Failed to get loader from context")
}
log.Debug("Invalidate cache for dir: ", path)
parent := GetParentDir(path)
log.Debug("Cache clear dir: ", path, " parent: ", parent)
loader["getFile"].Clear(ctx, dataloader.StringKey(path))
loader["listObjects"].Clear(ctx, dataloader.StringKey(path))
loader["listObjectsRecursive"].Clear(ctx, dataloader.StringKey(path))
loader["getFiles"].Clear(ctx, dataloader.StringKey(path))
loader["getDirs"].Clear(ctx, dataloader.StringKey(parent))
loader["listObjects"].Clear(ctx, dataloader.StringKey(parent))
loader["listObjectsRecursive"].Clear(ctx, dataloader.StringKey(parent))
return nil
}
func DeleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.ObjectInfo) error {
log.Debug("Delte multiple")
objectsCh := make(chan minio.ObjectInfo, 1)
go func() {
defer close(objectsCh)
for _, id := range ids {
objectsCh <- id
}
}()
for err := range s3Client.RemoveObjects(ctx, "dev", objectsCh, minio.RemoveObjectsOptions{}) {
log.Error("Failed to delete object ", err.ObjectName, " because: ", err.Err.Error())
// TODO: error handel
}
return nil
}
// NomalizeID makes sure there is a leading "/" in the id
func NomalizeID(id string) string {
if !strings.HasPrefix(id, "/") {
if id == "." {
return "/"
}
id = "/" + id
}
return id
}
func GetParentDir(id string) string {
dirs := strings.Split(id, "/")
func GetParentDir(id types.ID) types.ID {
dirs := strings.Split(id.Key, "/")
cut := 1
if strings.HasSuffix(id, "/") {
if strings.HasSuffix(id.Key, "/") {
cut = 2
}
parent := strings.Join(dirs[:len(dirs)-cut], "/") + "/"
parentKey := strings.Join(dirs[:len(dirs)-cut], "/") + "/"
return NomalizeID(parent)
parent := types.ID{
Bucket: id.Bucket,
Key: parentKey,
}
parent.Normalize()
return parent
}
func ObjInfoToFile(objInfo s3.Object, bucket string) *types.File {
return &types.File{
ID: objInfo.ID,
Name: objInfo.ID.Name(),
Size: objInfo.Size,
ContentType: objInfo.ContentType,
ETag: objInfo.ETag,
LastModified: objInfo.LastModified,
}
}
func IsAuthenticated(ctx context.Context) bool {

View File

@@ -4,9 +4,7 @@ import (
"context"
"fmt"
"io"
"mime"
"net/http"
"path/filepath"
"time"
"github.com/golang-jwt/jwt"
@@ -15,11 +13,12 @@ import (
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/gqlerrors"
"github.com/graphql-go/handler"
"github.com/minio/minio-go/v7"
log "github.com/sirupsen/logrus"
helper "git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/loader"
"git.kapelle.org/niklas/s3browser/internal/s3"
types "git.kapelle.org/niklas/s3browser/internal/types"
)
@@ -111,13 +110,22 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
return
}
s3Client := ctx.Value("s3Client").(*minio.Client)
id := r.URL.Query().Get("id")
s3Client := ctx.Value("s3Client").(s3.S3Service)
idString := r.URL.Query().Get("id")
log.Debug("S3 call 'StatObject': ", id)
objInfo, err := s3Client.StatObject(context.Background(), "dev", id, minio.GetObjectOptions{})
id := types.ParseID(idString)
if id == nil {
// Failed to parse ID
rw.WriteHeader(http.StatusBadRequest)
return
}
log.Debug("S3 'StatObject': ", id)
objInfo, err := s3Client.StatObject(context.Background(), *id)
if err != nil {
log.Error("Failed to get object info: ", err)
rw.WriteHeader(http.StatusInternalServerError)
return
}
@@ -128,16 +136,17 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
return
}
log.Debug("S3 call 'GetObject': ", id)
obj, err := s3Client.GetObject(context.Background(), "dev", id, minio.GetObjectOptions{})
log.Debug("S3 'GetObject': ", id)
obj, err := s3Client.GetObject(context.Background(), *id)
if err != nil {
log.Error("Failed to get object: ", err)
rw.WriteHeader(http.StatusInternalServerError)
return
}
rw.Header().Set("Cache-Control", "must-revalidate")
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filepath.Base((objInfo.Key))))
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", id.Name()))
rw.Header().Set("Content-Type", objInfo.ContentType)
rw.Header().Set("ETag", objInfo.ETag)
@@ -155,27 +164,33 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
return
}
s3Client := ctx.Value("s3Client").(*minio.Client)
s3Client := ctx.Value("s3Client").(s3.S3Service)
id := r.URL.Query().Get("id")
idString := r.URL.Query().Get("id")
log.Debug("Upload file: ", id)
id := types.ParseID(idString)
contentType := r.Header.Get("Content-Type")
mimeType, _, _ := mime.ParseMediaType(contentType)
if id == nil {
// Failed to parse ID
rw.WriteHeader(http.StatusBadRequest)
return
}
log.Debug("S3 call 'PutObject': ", id)
info, err := s3Client.PutObject(context.Background(), "dev", id, r.Body, r.ContentLength, minio.PutObjectOptions{
ContentType: mimeType,
})
id.Normalize()
// contentType := r.Header.Get("Content-Type")
// mimeType, _, _ := mime.ParseMediaType(contentType)
log.Debug("S3 'PutObject': ", id)
err := s3Client.PutObject(context.Background(), *id, r.Body, r.ContentLength) // TODO: put content type
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
return
}
// Invalidate cache
helper.InvalidateCache(ctx, info.Key)
loader := ctx.Value("loader").(*loader.Loader)
loader.InvalidedCacheForId(ctx, *id)
rw.WriteHeader(http.StatusCreated)
}

View File

@@ -5,6 +5,7 @@ package httpserver
import (
"embed"
"github.com/gorilla/mux"
"io/fs"
"net/http"
"os"
@@ -26,7 +27,7 @@ func (spa *spaFileSystem) Open(name string) (http.File, error) {
return f, err
}
func initStatic(e *mux.Router) {
func initStatic(r *mux.Router) {
staticFS, _ := fs.Sub(staticFiles, "static")
r.Handle("/", http.FileServer(&spaFileSystem{http.FS(staticFS)}))
r.PathPrefix("/").Handler(http.FileServer(&spaFileSystem{http.FS(staticFS)}))
}

134
internal/loader/batch.go Normal file
View File

@@ -0,0 +1,134 @@
package loader
import (
"context"
"fmt"
"git.kapelle.org/niklas/s3browser/internal/s3"
"git.kapelle.org/niklas/s3browser/internal/types"
"github.com/graph-gophers/dataloader"
log "github.com/sirupsen/logrus"
)
// listObjectsBatch batch func for calling s3.ListObjects()
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	s3Client, ok := c.Value("s3Client").(s3.S3Service)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}
	return listBatch(c, k, s3Client.ListObjects)
}

// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true
func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	s3Client, ok := c.Value("s3Client").(s3.S3Service)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}
	return listBatch(c, k, s3Client.ListObjectsRecursive)
}

// listBatch resolves every key with the given list function, producing one
// result per key in key order. Shared body of listObjectsBatch and
// listObjectsRecursiveBatch, which differed only in the S3 call used.
func listBatch(c context.Context, k dataloader.Keys, list func(context.Context, types.ID) ([]s3.Object, error)) []*dataloader.Result {
	results := make([]*dataloader.Result, 0, len(k))
	for _, v := range k {
		// Keys are raw types.ID values (see Loader.GetFiles/GetDirs).
		id := v.Raw().(types.ID)
		objects, err := list(c, id)
		if err != nil {
			results = append(results, &dataloader.Result{
				Data:  nil,
				Error: err,
			})
		} else {
			results = append(results, &dataloader.Result{
				Data:  objects,
				Error: nil,
			})
		}
	}
	return results
}
// listBucketsBatch resolves every key with one shared ListBuckets call,
// since the bucket list does not depend on the key.
func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
	s3Client, ok := c.Value("s3Client").(s3.S3Service)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}
	log.Debug("S3 'ListBuckets'")
	buckets, err := s3Client.ListBuckets(c)
	if err != nil {
		return handleLoaderError(k, err)
	}
	// Every key receives the same result value.
	shared := &dataloader.Result{
		Data:  buckets,
		Error: nil,
	}
	results := make([]*dataloader.Result, len(k))
	for i := range results {
		results[i] = shared
	}
	return results
}
// statObjectBatch batch func resolving each key via s3.StatObject; every key
// gets its own (stat, err) pair, in key order.
func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Result {
	log.Debug("statObjectBatch")
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
	}
	results := make([]*dataloader.Result, 0, len(k))
	for _, key := range k {
		id := key.Raw().(types.ID)
		log.Debug("S3 'StatObject': ", id)
		stat, err := s3Client.StatObject(ctx, id)
		results = append(results, &dataloader.Result{
			Data:  stat,
			Error: err,
		})
	}
	return results
}
// handleLoaderError helper func when the whole batch failed: logs the error
// once and gives every key an identical error result.
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
	log.Error(err.Error())
	results := make([]*dataloader.Result, len(k))
	for i := range results {
		results[i] = &dataloader.Result{
			Data:  nil,
			Error: err,
		}
	}
	return results
}

164
internal/loader/loader.go Normal file
View File

@@ -0,0 +1,164 @@
package loader
import (
"context"
"fmt"
"git.kapelle.org/niklas/s3browser/internal/cache"
"git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/s3"
types "git.kapelle.org/niklas/s3browser/internal/types"
"github.com/graph-gophers/dataloader"
)
// Loader bundles the dataloaders used to query the S3 backend together with
// the caches backing them.
type Loader struct {
	listObjectsLoader          *dataloader.Loader
	listObjectsRecursiveLoader *dataloader.Loader
	statObjectLoader           *dataloader.Loader
	listBucketsLoader          *dataloader.Loader

	// NOTE(review): the cache fields below are assigned in NewLoader but not
	// read anywhere in this file — presumably kept for future invalidation
	// logic; confirm before removing.
	listObjectsLoaderCache          cache.S3Cache
	listObjectsRecursiveLoaderCache cache.S3Cache
	statObjectLoaderCache           cache.S3Cache
	listBucketsLoaderCache          cache.S3Cache
}

// CacheConfig supplies one cache implementation per dataloader when
// constructing a Loader (tests pass dataloader.NoCache for all four).
type CacheConfig struct {
	ListObjectsLoaderCache          cache.S3Cache
	ListObjectsRecursiveLoaderCache cache.S3Cache
	StatObjectLoaderCache           cache.S3Cache
	ListBucketsLoaderCache          cache.S3Cache
}
// NewLoader builds a Loader whose four batched dataloaders are backed by the
// caches supplied in cacheConfig.
func NewLoader(cacheConfig CacheConfig) *Loader {
	return &Loader{
		listObjectsLoader: dataloader.NewBatchedLoader(
			listObjectsBatch,
			dataloader.WithCache(cacheConfig.ListObjectsLoaderCache),
		),
		listObjectsLoaderCache: cacheConfig.ListObjectsLoaderCache,

		listObjectsRecursiveLoader: dataloader.NewBatchedLoader(
			listObjectsRecursiveBatch,
			dataloader.WithCache(cacheConfig.ListObjectsRecursiveLoaderCache),
		),
		listObjectsRecursiveLoaderCache: cacheConfig.ListObjectsRecursiveLoaderCache,

		statObjectLoader: dataloader.NewBatchedLoader(
			statObjectBatch,
			dataloader.WithCache(cacheConfig.StatObjectLoaderCache),
		),
		statObjectLoaderCache: cacheConfig.StatObjectLoaderCache,

		listBucketsLoader: dataloader.NewBatchedLoader(
			listBucketsBatch,
			dataloader.WithCache(cacheConfig.ListBucketsLoaderCache),
		),
		listBucketsLoaderCache: cacheConfig.ListBucketsLoaderCache,
	}
}
// GetFiles lists the files (non-directory objects) directly under path.
func (l *Loader) GetFiles(ctx context.Context, path types.ID) ([]types.File, error) {
	result, err := l.listObjectsLoader.Load(ctx, path)()
	if err != nil {
		return nil, err
	}
	var files []types.File
	for _, obj := range result.([]s3.Object) {
		// Directory placeholders are skipped; GetDirs handles those.
		if obj.ID.IsDirectory() {
			continue
		}
		files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
	}
	return files, nil
}
// GetFile stats a single object and converts it into a types.File.
// Returns an error when the stat fails or when the loader yields an
// unexpected result type.
func (l *Loader) GetFile(ctx context.Context, id types.ID) (*types.File, error) {
	thunk := l.statObjectLoader.Load(ctx, id)
	result, err := thunk()
	if err != nil {
		return nil, err
	}
	objInfo, ok := result.(*s3.Object)
	if !ok {
		// Fixed error text: was the ungrammatical "Failed to stats object";
		// lowercase per Go error-string convention.
		return nil, fmt.Errorf("failed to stat object")
	}
	return helper.ObjInfoToFile(*objInfo, id.Bucket), nil
}
// GetDirs lists the directories directly under path.
func (l *Loader) GetDirs(ctx context.Context, path types.ID) ([]types.Directory, error) {
	result, err := l.listObjectsLoader.Load(ctx, path)()
	if err != nil {
		return nil, err
	}
	var dirs []types.Directory
	for _, obj := range result.([]s3.Object) {
		// Plain files are skipped; GetFiles handles those.
		if !obj.ID.IsDirectory() {
			continue
		}
		dirs = append(dirs, types.Directory{
			ID: obj.ID,
		})
	}
	return dirs, nil
}
// GetBuckets lists all bucket names. The listBuckets batch ignores its key,
// so a fixed empty string key is used.
func (l *Loader) GetBuckets(ctx context.Context) ([]string, error) {
	result, err := l.listBucketsLoader.Load(ctx, dataloader.StringKey(""))()
	if err != nil {
		return nil, err
	}
	return result.([]string), nil
}
// GetFilesRecursive lists every file under path, descending into
// subdirectories.
func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.File, error) {
	result, err := l.listObjectsRecursiveLoader.Load(ctx, path)()
	if err != nil {
		return nil, err
	}
	var files []types.File
	for _, obj := range result.([]s3.Object) {
		files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
	}
	return files, nil
}
// InvalidedCacheForId clears the cached loader entries affected by a change
// to the object identified by id: its own stat entry, its parent's listing,
// and every ancestor's recursive listing.
// NOTE(review): the name carries a typo (Invalided vs Invalidated) but is
// public API; renaming would break external callers.
func (l *Loader) InvalidedCacheForId(ctx context.Context, id types.ID) {
	parent := id.Parent()
	// Drop the stat entry for the object itself.
	l.statObjectLoader.Clear(ctx, id)
	// Code below is useless for now until we use a propper cache for "listObjectsLoader" and "listObjectsRecursiveLoader"
	// TODO: implement cache invalidation for "listObjectsLoader" and "listObjectsRecursiveLoader"
	l.listObjectsLoader.Clear(ctx, id).Clear(ctx, parent)
	// Remove up from recursive list
	// Assumes Parent() returns nil at the root so the walk terminates —
	// TODO(review): confirm against types.ID.Parent.
	for rParent := parent; rParent != nil; rParent = rParent.Parent() {
		l.listObjectsRecursiveLoader.Clear(ctx, rParent)
	}
}

View File

@@ -0,0 +1,105 @@
package loader_test
import (
"context"
"strings"
"testing"
"git.kapelle.org/niklas/s3browser/internal/loader"
"git.kapelle.org/niklas/s3browser/internal/s3"
"git.kapelle.org/niklas/s3browser/internal/types"
"github.com/graph-gophers/dataloader"
"github.com/stretchr/testify/assert"
)
// setup builds an uncached Loader over a seeded in-memory mock S3 and a
// context carrying the mock under the "s3Client" key, as the batch
// functions expect.
func setup(t *testing.T) (context.Context, *loader.Loader, *assert.Assertions) {
	assert := assert.New(t)
	s3, _ := s3.NewMockS3([]string{"bucket1", "bucket2"})
	loader := loader.NewLoader(loader.CacheConfig{
		ListObjectsLoaderCache:          &dataloader.NoCache{},
		ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
		StatObjectLoaderCache:           &dataloader.NoCache{},
		ListBucketsLoaderCache:          &dataloader.NoCache{},
	})
	fillS3(s3)
	ctx := context.WithValue(context.Background(), "s3Client", s3)
	return ctx, loader, assert
}

// fillS3 seeds the mock store with a small fixed object tree in bucket1:
// two root files, dir1 (two files + sub1 with two files) and dir2 (one file).
func fillS3(s3 s3.S3Service) {
	ctx := context.Background()
	length := int64(len("content"))
	for _, v := range []string{
		"bucket1:/file1", "bucket1:/file2", "bucket1:/dir1/file1",
		"bucket1:/dir1/file2", "bucket1:/dir2/file1", "bucket1:/dir1/sub1/file1",
		"bucket1:/dir1/sub1/file2",
	} {
		s3.PutObject(ctx, *types.ParseID(v), strings.NewReader("content"), length)
	}
}
// TestCreateLoader checks that a Loader can be constructed with no-op caches.
func TestCreateLoader(t *testing.T) {
	assert := assert.New(t)
	loader := loader.NewLoader(loader.CacheConfig{
		ListObjectsLoaderCache:          &dataloader.NoCache{},
		ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
		StatObjectLoaderCache:           &dataloader.NoCache{},
		ListBucketsLoaderCache:          &dataloader.NoCache{},
	})
	assert.NotNil(loader)
}

// TestGetBuckets expects exactly the two buckets the mock was seeded with.
func TestGetBuckets(t *testing.T) {
	ctx, loader, assert := setup(t)
	buckets, err := loader.GetBuckets(ctx)
	assert.NoError(err)
	assert.Len(buckets, 2)
	assert.Contains(buckets, "bucket1")
	assert.Contains(buckets, "bucket2")
}

// TestGetFile stats one seeded object and checks its derived fields.
func TestGetFile(t *testing.T) {
	ctx, loader, assert := setup(t)
	file, err := loader.GetFile(ctx, *types.ParseID("bucket1:/dir1/file1"))
	assert.NoError(err)
	assert.Equal("bucket1:/dir1/file1", file.ID.String())
	assert.Equal("file1", file.Name)
	assert.Equal(int64(len("content")), file.Size)
}

// TestGetFiles expects the two files directly under the bucket root.
func TestGetFiles(t *testing.T) {
	ctx, loader, assert := setup(t)
	id := types.ParseID("bucket1:/")
	files, err := loader.GetFiles(ctx, *id)
	assert.NoError(err)
	assert.Len(files, 2)
}

// TestGetDir expects the two directories (dir1, dir2) under the bucket root.
func TestGetDir(t *testing.T) {
	ctx, loader, assert := setup(t)
	id := types.ParseID("bucket1:/")
	dirs, err := loader.GetDirs(ctx, *id)
	assert.NoError(err)
	assert.Len(dirs, 2)
}
// TestGetFilesRecursive expects all four seeded files under dir1 (its two
// direct files plus the two under sub1). Renamed from the uninformative
// "Test" so failures identify what was being exercised.
func TestGetFilesRecursive(t *testing.T) {
	ctx, loader, assert := setup(t)
	id := types.ParseID("bucket1:/dir1/")
	files, err := loader.GetFilesRecursive(ctx, *id)
	assert.NoError(err)
	assert.Len(files, 4)
}

131
internal/s3/minio.go Normal file
View File

@@ -0,0 +1,131 @@
package s3
import (
"context"
"io"
"git.kapelle.org/niklas/s3browser/internal/types"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// minioS3 implements S3Service on top of a minio-go client.
type minioS3 struct {
	client *minio.Client
}
// NewMinio creates an S3Service connected to the endpoint and credentials
// given in config.
func NewMinio(config types.AppConfig) (S3Service, error) {
	opts := &minio.Options{
		Creds:  credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
		Secure: config.S3SSL,
	}
	client, err := minio.New(config.S3Endoint, opts)
	if err != nil {
		return nil, err
	}
	return &minioS3{client: client}, nil
}
// ListBuckets returns the names of all buckets visible to the client.
func (m *minioS3) ListBuckets(ctx context.Context) ([]string, error) {
	buckets, err := m.client.ListBuckets(ctx)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, bucket := range buckets {
		names = append(names, bucket.Name)
	}
	return names, nil
}
// ListObjects lists the objects directly under id.Key (non-recursive).
func (m *minioS3) ListObjects(ctx context.Context, id types.ID) ([]Object, error) {
	return m.listObjects(ctx, id, false)
}

// ListObjectsRecursive lists every object under id.Key, descending into
// sub-prefixes.
func (m *minioS3) ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error) {
	return m.listObjects(ctx, id, true)
}

// listObjects streams object infos for the given prefix and converts them.
// Shared body of ListObjects and ListObjectsRecursive, which differed only
// in the Recursive flag.
func (m *minioS3) listObjects(ctx context.Context, id types.ID, recursive bool) ([]Object, error) {
	var result []Object
	for objInfo := range m.client.ListObjects(ctx, id.Bucket, minio.ListObjectsOptions{
		Prefix:    id.Key,
		Recursive: recursive,
	}) {
		// minio-go reports listing failures as channel entries with a
		// non-nil Err; the original loop silently ignored them and returned
		// a truncated listing.
		if objInfo.Err != nil {
			return nil, objInfo.Err
		}
		result = append(result, Object{
			ID: types.ID{
				Bucket: id.Bucket,
				Key:    objInfo.Key,
			},
			Size: objInfo.Size,
		})
	}
	return result, nil
}
// GetObject opens a streaming reader for the object identified by id; the
// caller must close it.
func (m *minioS3) GetObject(ctx context.Context, id types.ID) (ObjectReader, error) {
	reader, err := m.client.GetObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	return reader, nil
}

// PutObject uploads objectSize bytes from reader under id.
func (m *minioS3) PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error {
	if _, err := m.client.PutObject(ctx, id.Bucket, id.Key, reader, objectSize, minio.PutObjectOptions{}); err != nil {
		return err
	}
	return nil
}

// CopyObject performs a server-side copy from src to dest.
func (m *minioS3) CopyObject(ctx context.Context, src types.ID, dest types.ID) error {
	destOpts := minio.CopyDestOptions{
		Bucket: dest.Bucket,
		Object: dest.Key,
	}
	srcOpts := minio.CopySrcOptions{
		Bucket: src.Bucket,
		Object: src.Key,
	}
	_, err := m.client.CopyObject(ctx, destOpts, srcOpts)
	return err
}

// StatObject fetches the metadata of a single object.
func (m *minioS3) StatObject(ctx context.Context, id types.ID) (*Object, error) {
	info, err := m.client.StatObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	obj := Object{
		ID:           id,
		Size:         info.Size,
		LastModified: info.LastModified,
		ContentType:  info.ContentType,
		ETag:         info.ETag,
	}
	return &obj, nil
}

// RemoveObject deletes the object identified by id.
func (m *minioS3) RemoveObject(ctx context.Context, id types.ID) error {
	return m.client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
}

154
internal/s3/mock.go Normal file
View File

@@ -0,0 +1,154 @@
package s3
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"git.kapelle.org/niklas/s3browser/internal/types"
)
// mockS3 is an in-memory S3Service implementation for tests.
type mockS3 struct {
	buckets []string
	// objects maps full IDs (bucket + key) to their stored content/metadata.
	objects map[types.ID]mockObject
}

// mockObject is the stored representation of one uploaded object.
type mockObject struct {
	content     []byte
	contentType string
	lastMod     time.Time
}

// mockObjectReader adapts a bytes.Reader to the ObjectReader interface by
// adding a no-op Close.
type mockObjectReader struct {
	*bytes.Reader
}

func (r mockObjectReader) Close() error {
	// NOOP
	return nil
}

// NewMockS3 creates an empty mock store exposing the given bucket names.
func NewMockS3(buckets []string) (S3Service, error) {
	return &mockS3{
		buckets: buckets,
		objects: map[types.ID]mockObject{},
	}, nil
}

// ListBuckets returns the bucket names the mock was created with.
func (m *mockS3) ListBuckets(ctx context.Context) ([]string, error) {
	return m.buckets, nil
}
// ListObjects emulates a non-recursive S3 listing: direct children of id.Key
// are returned as objects, while deeper descendants are collapsed into
// synthetic directory entries (key ending in "/", zero-value metadata),
// each reported once.
func (m *mockS3) ListObjects(ctx context.Context, id types.ID) ([]Object, error) {
	var results []Object
	dirs := make(map[string]bool)
	// Number of "/"-separated segments in the prefix; used to truncate a
	// descendant's key to its first directory level below the prefix.
	depth := len(strings.Split(id.Key, "/"))
	for k, v := range m.objects {
		if k.Bucket == id.Bucket {
			if k.Parent().Key == id.Key {
				// Direct child: return the object itself.
				results = append(results, *mockObjToObject(v, k))
			} else if strings.HasPrefix(k.Key, id.Key) {
				// Deeper descendant: record its top-level directory once.
				s := strings.Join(strings.Split(k.Key, "/")[:depth], "/") + "/"
				dirs[s] = true
			}
		}
	}
	for k := range dirs {
		results = append(results, Object{
			ID: types.ID{
				Bucket: id.Bucket,
				Key:    k,
			},
		})
	}
	return results, nil
}
// ListObjectsRecursive returns every stored object in id's bucket whose key
// starts with id.Key; no synthetic directory entries are produced.
func (m *mockS3) ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error) {
	var results []Object
	for objID, obj := range m.objects {
		if objID.Bucket != id.Bucket {
			continue
		}
		if !strings.HasPrefix(objID.Key, id.Key) {
			continue
		}
		results = append(results, *mockObjToObject(obj, objID))
	}
	return results, nil
}
// GetObject returns a reader over the stored content, or an error when no
// object exists under id.
func (m *mockS3) GetObject(ctx context.Context, id types.ID) (ObjectReader, error) {
	obj, ok := m.objects[id]
	if !ok {
		return nil, fmt.Errorf("Object not found")
	}
	return mockObjectReader{bytes.NewReader(obj.content)}, nil
}
// PutObject stores the reader's full content under id, stamping the current
// time as last-modified. objectSize is ignored; the mock reads to EOF.
func (m *mockS3) PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error {
	data, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	m.objects[id] = mockObject{
		content:     data,
		lastMod:     time.Now(),
		contentType: "application/octet-stream", // TODO: detect MIME type or dont its just a mock after all
	}
	return nil
}
// CopyObject duplicates the stored entry from src to dest; errors when src
// does not exist.
func (m *mockS3) CopyObject(ctx context.Context, src types.ID, dest types.ID) error {
	obj, ok := m.objects[src]
	if !ok {
		return fmt.Errorf("Object not found")
	}
	m.objects[dest] = obj
	return nil
}
// StatObject returns the metadata of the stored object, or an error when no
// object exists under id.
func (m *mockS3) StatObject(ctx context.Context, id types.ID) (*Object, error) {
	obj, ok := m.objects[id]
	if !ok {
		return nil, fmt.Errorf("Object not found")
	}
	return mockObjToObject(obj, id), nil
}
// RemoveObject drops the object from the store; deleting a missing key is a
// no-op, matching Go's delete semantics.
func (m *mockS3) RemoveObject(ctx context.Context, id types.ID) error {
	delete(m.objects, id)
	return nil
}

// mockObjToObject converts a stored mock entry into the public Object shape,
// deriving the ETag from an MD5 checksum of the content.
func mockObjToObject(mockObj mockObject, id types.ID) *Object {
	obj := Object{
		ID:           id,
		Size:         int64(len(mockObj.content)),
		ContentType:  mockObj.contentType,
		LastModified: mockObj.lastMod,
		ETag:         fmt.Sprintf("%x", md5.Sum(mockObj.content)),
	}
	return &obj
}

162
internal/s3/mock_test.go Normal file
View File

@@ -0,0 +1,162 @@
package s3_test
import (
"context"
"io/ioutil"
"strings"
"testing"
"time"
"git.kapelle.org/niklas/s3browser/internal/s3"
"git.kapelle.org/niklas/s3browser/internal/types"
"github.com/stretchr/testify/assert"
)
// setup creates a fresh, empty two-bucket mock service plus the helpers each
// test needs.
func setup(t *testing.T) (s3.S3Service, context.Context, *assert.Assertions) {
	service, _ := s3.NewMockS3([]string{"bucket1", "bucket2"})
	ctx := context.Background()
	assert := assert.New(t)
	return service, ctx, assert
}
// TestBuckets expects exactly the two configured bucket names.
func TestBuckets(t *testing.T) {
	s3, ctx, assert := setup(t)
	buckets, err := s3.ListBuckets(ctx)
	assert.NoError(err)
	assert.Len(buckets, 2)
	assert.Contains(buckets, "bucket1")
	assert.Contains(buckets, "bucket2")
}

// TestPut checks that an upload succeeds.
func TestPut(t *testing.T) {
	s3, ctx, assert := setup(t)
	content := "FileContent"
	err := s3.PutObject(ctx, *types.ParseID("bucket1:/file1"), strings.NewReader(content), int64(len(content)))
	assert.NoError(err)
}

// TestPutAndGet round-trips content through PutObject and GetObject.
func TestPutAndGet(t *testing.T) {
	s3, ctx, assert := setup(t)
	content := "FileContent"
	id := *types.ParseID("bucket1:/file1")
	err := s3.PutObject(ctx, *types.ParseID("bucket1:/file1"), strings.NewReader(content), int64(len(content)))
	assert.NoError(err)
	reader, err := s3.GetObject(ctx, id)
	assert.NoError(err)
	readerContent, err := ioutil.ReadAll(reader)
	assert.NoError(err)
	assert.Equal(content, string(readerContent))
}

// TestStat checks the metadata reported for a fresh upload (size, ETag,
// last-modified close to upload time).
func TestStat(t *testing.T) {
	s3, ctx, assert := setup(t)
	content := "FileContent"
	id := *types.ParseID("bucket1:/file1")
	now := time.Now()
	err := s3.PutObject(ctx, id, strings.NewReader(content), int64(len(content)))
	assert.NoError(err)
	obj, err := s3.StatObject(ctx, id)
	assert.NoError(err)
	assert.Equal(id.String(), obj.ID.String())
	assert.Equal(int64(len(content)), obj.Size)
	assert.NotEmpty(obj.ETag)
	assert.WithinDuration(now, obj.LastModified, time.Second*1)
}

// TestRemove checks that a removed object can no longer be stat'ed.
func TestRemove(t *testing.T) {
	s3, ctx, assert := setup(t)
	content := "FileContent"
	id := *types.ParseID("bucket1:/file1")
	err := s3.PutObject(ctx, id, strings.NewReader(content), int64(len(content)))
	assert.NoError(err)
	err = s3.RemoveObject(ctx, id)
	assert.NoError(err)
	_, err = s3.StatObject(ctx, id)
	assert.Error(err)
}

// TestList expects both root-level objects from a non-recursive listing.
func TestList(t *testing.T) {
	s3, ctx, assert := setup(t)
	content1 := "FileContent1"
	id1 := *types.ParseID("bucket1:/file1")
	err := s3.PutObject(ctx, id1, strings.NewReader(content1), int64(len(content1)))
	assert.NoError(err)
	content2 := "FileContent2"
	id2 := *types.ParseID("bucket1:/file2")
	err = s3.PutObject(ctx, id2, strings.NewReader(content2), int64(len(content2)))
	assert.NoError(err)
	listID := types.ParseID("bucket1:/")
	objects, err := s3.ListObjects(ctx, *listID)
	assert.NoError(err)
	assert.Len(objects, 2)
}

// TestListRecursive expects the three objects under /path1/ (including the
// nested /path1/path2/ one) and none from the other prefixes.
func TestListRecursive(t *testing.T) {
	s3, ctx, assert := setup(t)
	s3.PutObject(ctx, *types.ParseID("bucket1:/file1"), strings.NewReader("content"), int64(len("content")))
	s3.PutObject(ctx, *types.ParseID("bucket1:/path1/file1"), strings.NewReader("content"), int64(len("content")))
	s3.PutObject(ctx, *types.ParseID("bucket1:/path1/file2"), strings.NewReader("content"), int64(len("content")))
	s3.PutObject(ctx, *types.ParseID("bucket1:/path1/path2/file1"), strings.NewReader("content"), int64(len("content")))
	s3.PutObject(ctx, *types.ParseID("bucket1:/path3/path4/file1"), strings.NewReader("content"), int64(len("content")))
	objects, err := s3.ListObjectsRecursive(ctx, *types.ParseID("bucket1:/path1/"))
	assert.NoError(err)
	assert.Len(objects, 3)
}
// TestCopy verifies that copying an object produces a destination object with
// identical metadata (ETag, size) and identical content.
func TestCopy(t *testing.T) {
	s3, ctx, assert := setup(t)

	src := *types.ParseID("bucket1:/file1")
	dst := *types.ParseID("bucket1:/file2")

	err := s3.PutObject(ctx, src, strings.NewReader("content"), int64(len("content")))
	assert.NoError(err)

	err = s3.CopyObject(ctx, src, dst)
	assert.NoError(err)

	srcObj, err := s3.StatObject(ctx, src)
	assert.NoError(err)
	assert.NotNil(srcObj)

	// BUG FIX: the original stat'ed src twice, so the ETag/Size comparison
	// below compared the source object with itself and could never detect a
	// broken copy. Stat the destination instead.
	dstObj, err := s3.StatObject(ctx, dst)
	assert.NoError(err)
	assert.NotNil(dstObj)

	assert.Equal(srcObj.ETag, dstObj.ETag)
	assert.Equal(srcObj.Size, dstObj.Size)

	// The copied content must match byte-for-byte.
	dstReader, err := s3.GetObject(ctx, dst)
	assert.NoError(err)
	dstContent, err := ioutil.ReadAll(dstReader)
	assert.NoError(err)
	assert.Equal([]byte("content"), dstContent)
}

37
internal/s3/s3.go Normal file
View File

@@ -0,0 +1,37 @@
package s3
import (
"context"
"io"
"time"
"git.kapelle.org/niklas/s3browser/internal/types"
)
// ObjectReader is the read handle returned for an object's content. It
// supports sequential reads, seeking, and random access; the caller is
// responsible for closing it when done.
type ObjectReader interface {
	io.Reader
	io.Seeker
	io.ReaderAt
	io.Closer
}
// Object holds the metadata of a single S3 object.
type Object struct {
	ID           types.ID  // fully qualified object ID (bucket + key)
	Size         int64     // object size in bytes
	LastModified time.Time // time of the last modification
	ContentType  string    // MIME content type of the object
	ETag         string    // entity tag of the object
}
// S3Service abstracts the object-storage backend (e.g. a minio client or a
// mock) behind a common set of bucket and object operations.
type S3Service interface {
	// ListBuckets returns the names of all buckets visible to the client.
	ListBuckets(ctx context.Context) ([]string, error)
	// GetObject returns a read handle for the object's content.
	GetObject(ctx context.Context, id types.ID) (ObjectReader, error)
	// PutObject uploads objectSize bytes read from reader under the given id.
	PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error
	// ListObjects lists the objects directly under the given (directory) id.
	ListObjects(ctx context.Context, id types.ID) ([]Object, error)
	// ListObjectsRecursive lists all objects below the given id, descending
	// into subdirectories.
	ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error)
	// CopyObject copies the object at src to dest.
	CopyObject(ctx context.Context, src types.ID, dest types.ID) error
	// StatObject returns the metadata of a single object without its content.
	StatObject(ctx context.Context, id types.ID) (*Object, error)
	// RemoveObject deletes the object with the given id.
	RemoveObject(ctx context.Context, id types.ID) error
}

View File

@@ -2,45 +2,19 @@ package s3browser
import (
"context"
"fmt"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/graph-gophers/dataloader"
log "github.com/sirupsen/logrus"
"git.kapelle.org/niklas/s3browser/internal/cache"
"git.kapelle.org/niklas/s3browser/internal/db"
gql "git.kapelle.org/niklas/s3browser/internal/gql"
httpserver "git.kapelle.org/niklas/s3browser/internal/httpserver"
"git.kapelle.org/niklas/s3browser/internal/loader"
"git.kapelle.org/niklas/s3browser/internal/s3"
types "git.kapelle.org/niklas/s3browser/internal/types"
)
// bucketName is the bucket all operations are scoped to. It is set once by
// setupS3Client and read elsewhere in the package.
var bucketName string

// setupS3Client connects to the configured S3 endpoint, verifies that the
// configured bucket exists, and returns the ready-to-use client.
func setupS3Client(config types.AppConfig) (*minio.Client, error) {
	minioClient, err := minio.New(config.S3Endoint, &minio.Options{
		Creds:  credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
		Secure: config.S3SSL,
	})
	if err != nil {
		return nil, err
	}

	exists, err := minioClient.BucketExists(context.Background(), config.S3Bucket)
	if err != nil {
		return nil, err
	}
	if !exists {
		// Error strings are lowercase and unpunctuated by Go convention
		// (staticcheck ST1005); %q quotes the bucket name safely.
		return nil, fmt.Errorf("bucket %q does not exist", config.S3Bucket)
	}

	bucketName = config.S3Bucket

	return minioClient, nil
}
// Start starts the app
func Start(config types.AppConfig) {
@@ -49,19 +23,25 @@ func Start(config types.AppConfig) {
}
log.Info("Starting")
log.Debug("Setting up s3 client")
s3Client, err := setupS3Client(config)
s3Client, err := s3.NewMinio(config)
if err != nil {
log.Error("Failed to setup s3 client: ", err.Error())
return
}
log.Info("s3 client connected")
log.Debug("Creating dataloader")
loaderMap := createDataloader(config)
dbStore, err := db.NewDB(config.DSN)
if err != nil {
log.Error("Failed to connect DB: ", err.Error())
}
loader := loader.NewLoader(loader.CacheConfig{
ListObjectsLoaderCache: &dataloader.NoCache{},
ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
StatObjectLoaderCache: cache.NewTTLCache(config.CacheTTL, config.CacheCleanup),
ListBucketsLoaderCache: cache.NewTTLCache(config.CacheTTL, config.CacheCleanup),
})
log.Debug("Generating graphq schema")
gql.GraphqlTypes()
schema, err := gql.GraphqlSchema()
@@ -71,9 +51,9 @@ func Start(config types.AppConfig) {
}
resolveContext := context.WithValue(context.Background(), "s3Client", s3Client)
resolveContext = context.WithValue(resolveContext, "loader", loaderMap)
resolveContext = context.WithValue(resolveContext, "loader", loader)
resolveContext = context.WithValue(resolveContext, "dbStore", dbStore)
log.Debug("Starting HTTP server")
err = httpserver.InitHttp(resolveContext, schema, config.Address)
if err != nil {

114
internal/types/id.go Normal file
View File

@@ -0,0 +1,114 @@
package types
import (
"fmt"
"path/filepath"
"regexp"
"strings"
)
var (
	// idRegex matches the string form "bucket[@version]:key", e.g.
	// "bucket1:/dir/file" or "bucket1@v2:/dir/file".
	// Capture groups: 1 = bucket, 3 = version (optional), 4 = key.
	idRegex = regexp.MustCompile(`(.*?)(@(.*))?:(.*)`)
)
// ID an id of a file consists of at least a Bucket and a Key. Version is optional.
// Can also be used as an ID for a directory. When the key ends with "/" it is treated as dir.
type ID struct {
	Bucket  string `json:"bucket"`  // Name of the bucket
	Key     string `json:"key"`     // Key of the object; Normalize gives it a leading "/"
	Version string `json:"version"` // Version of the object. For now we ignore it
}
// String renders the ID as "bucket:key", or "bucket@version:key" when a
// version is set. It is the inverse of ParseID.
func (i ID) String() string {
	if i.Version != "" {
		return fmt.Sprintf("%s@%s:%s", i.Bucket, i.Version, i.Key)
	}
	return fmt.Sprintf("%s:%s", i.Bucket, i.Key)
}
// Normalize rewrites the key into canonical form: "." becomes the root "/",
// and any other key is given a leading "/" if it lacks one.
func (i *ID) Normalize() {
	switch {
	case i.Key == ".":
		i.Key = "/"
	case !strings.HasPrefix(i.Key, "/"):
		i.Key = "/" + i.Key
	}
}
// Valid reports whether the ID carries both a bucket and a key.
func (i *ID) Valid() bool {
	if i.Bucket == "" {
		return false
	}
	return i.Key != ""
}
// IsDirectory reports whether the ID refers to a directory, i.e. whether the
// key ends with a "/".
func (i *ID) IsDirectory() bool {
	return strings.HasSuffix(i.Key, "/")
}
// Raw implements the Key interface for the dataloaders so ID can be used as a
// dataloader cache key; it simply exposes the ID value itself.
func (i ID) Raw() interface{} {
	return i
}
// Parent returns the ID of the containing directory: for a file, the
// directory that holds it; for a directory, the directory one level up.
// It returns nil when the ID is already the bucket root "/".
func (i ID) Parent() *ID {
	if i.Key == "/" {
		return nil // the root has no parent
	}

	var parentKey string
	if i.IsDirectory() {
		// Drop the last path segment together with its trailing slash; the
		// final "" element of the split comes from the trailing "/" itself.
		parts := strings.Split(i.Key, "/")
		parentKey = strings.Join(parts[:len(parts)-2], "/") + "/"
	} else {
		parentKey = filepath.Dir(i.Key)
		if parentKey != "/" {
			// Directory keys always carry a trailing slash.
			parentKey += "/"
		}
	}

	parent := &ID{Bucket: i.Bucket, Key: parentKey}
	parent.Normalize()
	return parent
}
// Name returns the last path element of the key: the filename for a file, the
// directory name for a directory, and "/" for the bucket root.
func (i ID) Name() string {
	return filepath.Base(i.Key)
}
// ParseID parses the string form "bucket[@version]:key" into an ID and
// normalizes its key. It returns nil when the input does not match the
// expected shape or is missing the bucket or key.
func ParseID(id string) *ID {
	match := idRegex.FindStringSubmatch(id)
	if match == nil {
		return nil
	}

	parsed := ID{
		Bucket:  match[1],
		Version: match[3],
		Key:     match[4],
	}
	if !parsed.Valid() {
		return nil
	}

	parsed.Normalize()
	return &parsed
}

109
internal/types/id_test.go Normal file
View File

@@ -0,0 +1,109 @@
package types_test
import (
"testing"
"git.kapelle.org/niklas/s3browser/internal/types"
"github.com/stretchr/testify/assert"
)
// TODO: test version component (not yet used in code)
// TestIDParse checks that a well-formed file ID string parses with all of its
// components intact and round-trips through String.
func TestIDParse(t *testing.T) {
	assert := assert.New(t)

	parsed := types.ParseID("test:/path/key")
	assert.NotNil(parsed)

	assert.True(parsed.Valid())
	assert.False(parsed.IsDirectory())
	assert.Equal("test", parsed.Bucket)
	assert.Equal("/path/key", parsed.Key)
	assert.Equal("key", parsed.Name())
	assert.Equal("test:/path/key", parsed.String())
}
// TestIDParseInvalid checks that malformed ID strings are rejected.
func TestIDParseInvalid(t *testing.T) {
	assert := assert.New(t)

	for _, input := range []string{"/asd/ad", "test", "test:", "", "/"} {
		assert.Nil(types.ParseID(input))
	}
}
// TestIDIsDir checks that only keys with a trailing slash are treated as
// directories.
func TestIDIsDir(t *testing.T) {
	assert := assert.New(t)

	file := types.ParseID("test:/path/key")
	assert.NotNil(file)
	assert.False(file.IsDirectory())

	dir := types.ParseID("test:/path/key/")
	assert.NotNil(dir)
	assert.True(dir.IsDirectory())
}
// TestIDRoot checks the properties of the bucket root "test:/": a valid
// directory named "/" that has no parent.
func TestIDRoot(t *testing.T) {
	assert := assert.New(t)

	root := types.ParseID("test:/")
	assert.NotNil(root)

	assert.True(root.Valid())
	assert.True(root.IsDirectory())
	assert.Equal("test", root.Bucket)
	assert.Equal("/", root.Key)
	assert.Equal("/", root.Name())
	assert.Equal("test:/", root.String())
	assert.Nil(root.Parent())
}
// TestIDParentFromFile checks that a file's parent is the directory that
// contains it.
func TestIDParentFromFile(t *testing.T) {
	assert := assert.New(t)

	file := types.ParseID("test:/path1/path2/key")
	assert.NotNil(file)

	parent := file.Parent()
	assert.NotNil(parent)
	assert.True(parent.Valid())
	assert.True(parent.IsDirectory())
	assert.Equal("test", parent.Bucket)
	assert.Equal("/path1/path2/", parent.Key)
	assert.Equal("path2", parent.Name())
	assert.Equal("test:/path1/path2/", parent.String())
}
// TestIDParentFromDir checks that a directory's parent is the directory one
// level up.
func TestIDParentFromDir(t *testing.T) {
	assert := assert.New(t)

	dir := types.ParseID("test:/path1/path2/")
	assert.NotNil(dir)

	parent := dir.Parent()
	assert.NotNil(parent)
	assert.True(parent.Valid())
	assert.True(parent.IsDirectory())
	assert.Equal("test", parent.Bucket)
	assert.Equal("/path1/", parent.Key)
	assert.Equal("path1", parent.Name())
	assert.Equal("test:/path1/", parent.String())
}
// TestIDParentRoot checks that the parent of a top-level file is the bucket
// root "/".
func TestIDParentRoot(t *testing.T) {
	assert := assert.New(t)

	id := types.ParseID("test:/key1")
	// Guard added for consistency with the sibling tests: flag a failed parse
	// before dereferencing id below.
	assert.NotNil(id)

	parent := id.Parent()
	assert.NotNil(parent)
	assert.Equal("/", parent.Key)
}

View File

@@ -12,7 +12,7 @@ type AppConfig struct {
S3AccessKey string
S3SecretKey string
S3SSL bool
S3Bucket string
DSN string
CacheTTL time.Duration
CacheCleanup time.Duration
Address string
@@ -21,7 +21,7 @@ type AppConfig struct {
// File represents a file with its metadata
type File struct {
ID string `json:"id"`
ID ID `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
ContentType string `json:"contentType"`
@@ -31,7 +31,7 @@ type File struct {
// Directory represents a directory with its metadata
type Directory struct {
ID string `json:"id"`
ID ID `json:"id"`
Name string `json:"name"`
Files []File `json:"files"`
Directorys []Directory `json:"directorys"`