the big refactor

parent a3e66cd351
commit ed932e3c92
@@ -4,6 +4,7 @@ import (
"time"

s3browser "git.kapelle.org/niklas/s3browser/internal"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
"github.com/alexflint/go-arg"
)

@@ -28,7 +29,7 @@ func main() {
var args args
arg.MustParse(&args)

- s3browser.Start(s3browser.AppConfig{
+ s3browser.Start(types.AppConfig{
S3Endoint: args.S3Endpoint,
S3SSL: !args.S3DisableSSL,
S3AccessKey: args.S3AccessKey,
@@ -6,6 +6,7 @@ import (
"path/filepath"
"strings"

+ types "git.kapelle.org/niklas/s3browser/internal/types"
"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
log "github.com/sirupsen/logrus"
@@ -81,7 +82,7 @@ func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {

for _, v := range k {
path := v.String()
- files := make([]File, 0)
+ files := make([]types.File, 0)

if !strings.HasSuffix(path, "/") {
path += "/"
@@ -97,7 +98,7 @@ func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
if obj.Err != nil {
// TODO: how to handle?
} else if !strings.HasSuffix(obj.Key, "/") {
- files = append(files, File{
+ files = append(files, types.File{
ID: obj.Key,
Name: filepath.Base(obj.Key),
Size: obj.Size,
@@ -139,7 +140,7 @@ func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
})
} else {
results = append(results, &dataloader.Result{
- Data: &File{
+ Data: &types.File{
ID: obj.Key,
Size: obj.Size,
ContentType: obj.ContentType,
@@ -166,7 +167,7 @@ func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {

for _, v := range k {
path := v.String()
- dirs := make([]Directory, 0)
+ dirs := make([]types.Directory, 0)

if !strings.HasSuffix(path, "/") {
path += "/"
@@ -182,7 +183,7 @@ func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
if obj.Err != nil {
// TODO: how to handle?
} else if strings.HasSuffix(obj.Key, "/") {
- dirs = append(dirs, Directory{
+ dirs = append(dirs, types.Directory{
ID: obj.Key,
Name: filepath.Base(obj.Key),
})
@@ -213,7 +214,7 @@ func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
}

// createDataloader create all dataloaders and return a map of them plus a cache for objects
- func createDataloader(config AppConfig) map[string]*dataloader.Loader {
+ func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
loaderMap := make(map[string]*dataloader.Loader, 0)

loaderMap["getFiles"] = dataloader.NewBatchedLoader(
@@ -1,25 +0,0 @@
- package s3browser
-
- import "fmt"
-
- type extendedError struct {
- Message string
- Code string
- }
-
- func (err *extendedError) Error() string {
- return err.Message
- }
-
- func (err *extendedError) Extensions() map[string]interface{} {
- return map[string]interface{}{
- "code": err.Code,
- }
- }
-
- func extendError(code, format string, a ...interface{}) *extendedError {
- return &extendedError{
- Message: fmt.Sprintf(format, a...),
- Code: code,
- }
- }
internal/errors/errors.go (new file, 25 lines)
@@ -0,0 +1,25 @@
+ package s3browser
+
+ import "fmt"
+
+ type ExtendedError struct {
+ Message string
+ Code string
+ }
+
+ func (err *ExtendedError) Error() string {
+ return err.Message
+ }
+
+ func (err *ExtendedError) Extensions() map[string]interface{} {
+ return map[string]interface{}{
+ "code": err.Code,
+ }
+ }
+
+ func ExtendError(code, format string, a ...interface{}) *ExtendedError {
+ return &ExtendedError{
+ Message: fmt.Sprintf(format, a...),
+ Code: code,
+ }
+ }
@@ -1,4 +1,4 @@
- package s3browser
+ package gql

import (
"fmt"
@@ -8,6 +8,9 @@ import (
"github.com/graph-gophers/dataloader"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/language/ast"
+
+ helper "git.kapelle.org/niklas/s3browser/internal/helper"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
)

type LoginResult struct {
@@ -19,8 +22,8 @@ var graphqlDirType *graphql.Object
var graphqlFileType *graphql.Object
var graphqlLoginResultType *graphql.Object

- // graphqlTypes create all graphql types and stores the in the global variables
- func graphqlTypes() {
+ //GraphqlTypes create all graphql types and stores the in the global variables
+ func GraphqlTypes() {

var dateTimeType = graphql.NewScalar(graphql.ScalarConfig{
Name: "DateTime",
@@ -63,7 +66,7 @@ func graphqlTypes() {
"name": &graphql.Field{
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- source, ok := p.Source.(Directory)
+ source, ok := p.Source.(types.Directory)
if !ok {
return nil, fmt.Errorf("Failed to parse source for resolve")
}
@@ -85,7 +88,7 @@ func graphqlTypes() {
"name": &graphql.Field{
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- source, ok := p.Source.(File)
+ source, ok := p.Source.(types.File)
if !ok {
return nil, fmt.Errorf("Failed to parse source for resolve")
}
@@ -140,14 +143,14 @@ func graphqlTypes() {
"parent": &graphql.Field{
Type: graphqlDirType,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- source, ok := p.Source.(File)
+ source, ok := p.Source.(types.File)
if !ok {
return nil, fmt.Errorf("Failed to parse Source for parent resolve")
}

- basename := getPathFromId(source.ID)
+ basename := helper.GetPathFromId(source.ID)

- return Directory{
+ return types.Directory{
ID: basename,
}, nil
},
@@ -158,14 +161,14 @@ func graphqlTypes() {
graphqlDirType.AddFieldConfig("files", &graphql.Field{
Type: graphql.NewList(graphqlFileType),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- source, ok := p.Source.(Directory)
+ source, ok := p.Source.(types.Directory)
if !ok {
return nil, fmt.Errorf("Failed to parse Source for files resolve")
}

loader := p.Context.Value("loader").(map[string]*dataloader.Loader)

- thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(nomalizeID(source.ID)))
+ thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
return thunk()
},
})
@@ -173,13 +176,13 @@ func graphqlTypes() {
graphqlDirType.AddFieldConfig("directorys", &graphql.Field{
Type: graphql.NewList(graphqlDirType),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- source, ok := p.Source.(Directory)
+ source, ok := p.Source.(types.Directory)
if !ok {
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
}

loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
- thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(nomalizeID(source.ID)))
+ thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))

return thunk()
},
@@ -188,13 +191,13 @@ func graphqlTypes() {
graphqlDirType.AddFieldConfig("parent", &graphql.Field{
Type: graphqlDirType,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- source, ok := p.Source.(Directory)
+ source, ok := p.Source.(types.Directory)
if !ok {
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
}

- return Directory{
- ID: getParentDir(source.ID),
+ return types.Directory{
+ ID: helper.GetParentDir(source.ID),
}, nil
},
})
@@ -216,23 +219,23 @@ func graphqlTypes() {

}

- // graphqlTypes helper func for using the dataloader to get a file
- func loadFile(p graphql.ResolveParams) (*File, error) {
- source, ok := p.Source.(File)
+ //loadFile helper func for using the dataloader to get a file
+ func loadFile(p graphql.ResolveParams) (*types.File, error) {
+ source, ok := p.Source.(types.File)
if !ok {
return nil, fmt.Errorf("Failed to parse source for resolve")
}

loader := p.Context.Value("loader").(map[string]*dataloader.Loader)

- thunk := loader["getFile"].Load(p.Context, dataloader.StringKey(nomalizeID(source.ID)))
+ thunk := loader["getFile"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
result, err := thunk()

if err != nil {
return nil, err
}

- file, ok := result.(*File)
+ file, ok := result.(*types.File)

if !ok {
return nil, fmt.Errorf("Failed to load file")
@@ -1,4 +1,4 @@
- package s3browser
+ package gql

import (
"context"
@@ -7,6 +7,9 @@ import (

"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
+
+ helper "git.kapelle.org/niklas/s3browser/internal/helper"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
)

func deleteMutation(ctx context.Context, id string) error {
@@ -18,17 +21,17 @@ func deleteMutation(ctx context.Context, id string) error {

// TODO: it is posible to remove multiple objects with a single call.
// Is it better to batch this?
- err := s3Client.RemoveObject(ctx, bucketName, id, minio.RemoveObjectOptions{})
+ err := s3Client.RemoveObject(ctx, "dev", id, minio.RemoveObjectOptions{})

if err != nil {
return err
}

// Invalidate cache
- return invalidateCache(ctx, nomalizeID(id))
+ return helper.InvalidateCache(ctx, helper.NomalizeID(id))
}

- func copyMutation(ctx context.Context, src, dest string) (*File, error) {
+ func copyMutation(ctx context.Context, src, dest string) (*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)

if !ok {
@@ -39,14 +42,14 @@ func copyMutation(ctx context.Context, src, dest string) (*File, error) {
if strings.HasSuffix(dest, "/") {
// create new dest id
// TODO: What if a file with this id already exists?
- dest += getFilenameFromID(src)
+ dest += helper.GetFilenameFromID(src)
}

info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
- Bucket: bucketName,
+ Bucket: "dev",
Object: dest,
}, minio.CopySrcOptions{
- Bucket: bucketName,
+ Bucket: "dev",
Object: src,
})

@@ -56,15 +59,15 @@ func copyMutation(ctx context.Context, src, dest string) (*File, error) {

// Invalidate cache
// TODO: check error
- invalidateCache(ctx, nomalizeID(info.Key))
+ helper.InvalidateCache(ctx, helper.NomalizeID(info.Key))

- return &File{
+ return &types.File{
ID: info.Key,
}, nil

}

- func moveMutation(ctx context.Context, src, dest string) (*File, error) {
+ func moveMutation(ctx context.Context, src, dest string) (*types.File, error) {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)

if !ok {
@@ -75,15 +78,15 @@ func moveMutation(ctx context.Context, src, dest string) (*File, error) {
if strings.HasSuffix(dest, "/") {
// create new dest id
// TODO: What if a file with this id already exists?
- dest += getFilenameFromID(src)
+ dest += helper.GetFilenameFromID(src)
}

// There is no (spoon) move. Only copy and delete
info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
- Bucket: bucketName,
+ Bucket: "dev",
Object: dest,
}, minio.CopySrcOptions{
- Bucket: bucketName,
+ Bucket: "dev",
Object: src,
})

@@ -97,15 +100,15 @@ func moveMutation(ctx context.Context, src, dest string) (*File, error) {
return nil, err
}

- invalidateCache(ctx, nomalizeID(info.Key))
+ helper.InvalidateCache(ctx, helper.NomalizeID(info.Key))

- return &File{
+ return &types.File{
ID: info.Key,
}, nil

}

- func createDirectory(ctx context.Context, path string) (*Directory, error) {
+ func createDirectory(ctx context.Context, path string) (*types.Directory, error) {
s3Client, ok := ctx.Value("s3Client").(*minio.Client)

if !ok {
@@ -116,7 +119,7 @@ func createDirectory(ctx context.Context, path string) (*Directory, error) {
path += "/"
}

- info, err := s3Client.PutObject(ctx, bucketName, path, strings.NewReader(""), 0, minio.PutObjectOptions{
+ info, err := s3Client.PutObject(ctx, "dev", path, strings.NewReader(""), 0, minio.PutObjectOptions{
ContentType: "application/x-directory",
})

@@ -126,9 +129,9 @@ func createDirectory(ctx context.Context, path string) (*Directory, error) {

// Invalidate cache
// TODO: check error
- invalidateCacheForDir(ctx, nomalizeID(info.Key))
+ helper.InvalidateCacheForDir(ctx, helper.NomalizeID(info.Key))

- return &Directory{
+ return &types.Directory{
ID: info.Key,
}, nil

@@ -152,7 +155,7 @@ func deleteDirectory(ctx context.Context, path string) error {
}

// Get all files inside the directory
- thunk := loader["listObjectsRecursive"].Load(ctx, dataloader.StringKey(nomalizeID(path)))
+ thunk := loader["listObjectsRecursive"].Load(ctx, dataloader.StringKey(helper.NomalizeID(path)))

result, err := thunk()

@@ -166,7 +169,7 @@ func deleteDirectory(ctx context.Context, path string) error {
}

// Delete all child files
- err = deleteMultiple(ctx, *s3Client, files)
+ err = helper.DeleteMultiple(ctx, *s3Client, files)

if err != nil {
return err
@@ -178,14 +181,14 @@ func deleteDirectory(ctx context.Context, path string) error {
// This is at least the behavior when working with minio as s3 backend
// TODO: check if this is normal behavior when working with s3
if len(files) == 0 {
- err := s3Client.RemoveObject(ctx, bucketName, path, minio.RemoveObjectOptions{})
+ err := s3Client.RemoveObject(ctx, "dev", path, minio.RemoveObjectOptions{})
if err != nil {
return err
}
}

//Invalidate cache
- invalidateCacheForDir(ctx, nomalizeID(path))
+ helper.InvalidateCacheForDir(ctx, helper.NomalizeID(path))

return nil
}
@@ -200,7 +203,7 @@ func login(ctx context.Context, username, password string) (LoginResult, error)
}, nil
}

- token := createJWT(createClaims(username))
+ token := helper.CreateJWT(helper.CreateClaims(username))

tokenString, err := token.SignedString([]byte("TODO"))
@@ -1,4 +1,4 @@
- package s3browser
+ package gql

import (
"fmt"
@@ -6,11 +6,13 @@ import (
"github.com/graph-gophers/dataloader"
"github.com/graphql-go/graphql"

+ helper "git.kapelle.org/niklas/s3browser/internal/helper"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
log "github.com/sirupsen/logrus"
)

- // graphqlSchema generate the schema with its root query and mutation
- func graphqlSchema() (graphql.Schema, error) {
+ //GraphqlSchema generate the schema with its root query and mutation
+ func GraphqlSchema() (graphql.Schema, error) {

queryFields := graphql.Fields{
"files": &graphql.Field{
@@ -21,7 +23,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -46,7 +48,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -71,7 +73,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -82,7 +84,7 @@ func graphqlSchema() (graphql.Schema, error) {

log.Debug("querry 'file': ", id)

- return File{
+ return types.File{
ID: id,
}, nil
},
@@ -92,7 +94,7 @@ func graphqlSchema() (graphql.Schema, error) {
Type: graphql.NewNonNull(graphql.Boolean),
Description: "True if the user is authorized",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- auth, _ := isAuth(p.Context)
+ auth, _ := helper.IsAuth(p.Context)

return auth, nil
},
@@ -108,7 +110,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -133,7 +135,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -162,7 +164,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -188,7 +190,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}

@@ -210,7 +212,7 @@ func graphqlSchema() (graphql.Schema, error) {
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
- if is, err := isAuth(p.Context); !is {
+ if is, err := helper.IsAuth(p.Context); !is {
return nil, err
}
@@ -1,4 +1,4 @@
- package s3browser
+ package helper

import (
"context"
@@ -11,9 +11,12 @@ import (
"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
log "github.com/sirupsen/logrus"
+
+ errors "git.kapelle.org/niklas/s3browser/internal/errors"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
)

- func invalidateCache(ctx context.Context, id string) error {
+ func InvalidateCache(ctx context.Context, id string) error {
loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
if !ok {
return fmt.Errorf("Failed to get loader from context")
@@ -21,7 +24,7 @@ func invalidateCache(ctx context.Context, id string) error {

log.Debug("Invalidate cache for id: ", id)

- path := getPathFromId(id)
+ path := GetPathFromId(id)

loader["getFile"].Clear(ctx, dataloader.StringKey(id))
loader["getFiles"].Clear(ctx, dataloader.StringKey(path))
@@ -30,21 +33,21 @@ func invalidateCache(ctx context.Context, id string) error {
return nil
}

- func getPathFromId(id string) string {
+ func GetPathFromId(id string) string {
dir := filepath.Dir(id)

if dir == "." {
return "/"
}

- return nomalizeID(dir + "/")
+ return NomalizeID(dir + "/")
}

- func getFilenameFromID(id string) string {
+ func GetFilenameFromID(id string) string {
return filepath.Base(id)
}

- func invalidateCacheForDir(ctx context.Context, path string) error {
+ func InvalidateCacheForDir(ctx context.Context, path string) error {
loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
if !ok {
return fmt.Errorf("Failed to get loader from context")
@@ -52,7 +55,7 @@ func invalidateCacheForDir(ctx context.Context, path string) error {

log.Debug("Invalidate cache for dir: ", path)

- parent := getParentDir(path)
+ parent := GetParentDir(path)

log.Debug("Cache clear dir: ", path, " parent: ", parent)

@@ -67,7 +70,7 @@ func invalidateCacheForDir(ctx context.Context, path string) error {
return nil
}

- func deleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.ObjectInfo) error {
+ func DeleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.ObjectInfo) error {
log.Debug("Delte multiple")
objectsCh := make(chan minio.ObjectInfo, 1)

@@ -78,7 +81,7 @@ func deleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.Obje
}
}()

- for err := range s3Client.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
+ for err := range s3Client.RemoveObjects(ctx, "dev", objectsCh, minio.RemoveObjectsOptions{}) {
log.Error("Failed to delete object ", err.ObjectName, " because: ", err.Err.Error())
// TODO: error handel
}
@@ -86,8 +89,8 @@ func deleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.Obje
return nil
}

- // nomalizeID makes sure there is a leading "/" in the id
- func nomalizeID(id string) string {
+ // NomalizeID makes sure there is a leading "/" in the id
+ func NomalizeID(id string) string {
if !strings.HasPrefix(id, "/") {
if id == "." {
return "/"
@@ -98,7 +101,7 @@ func nomalizeID(id string) string {
return id
}

- func getParentDir(id string) string {
+ func GetParentDir(id string) string {
dirs := strings.Split(id, "/")

cut := 1
@@ -108,32 +111,32 @@ func getParentDir(id string) string {

parent := strings.Join(dirs[:len(dirs)-cut], "/") + "/"

- return nomalizeID(parent)
+ return NomalizeID(parent)
}

- func isAuth(ctx context.Context) (bool, error) {
+ func IsAuth(ctx context.Context) (bool, error) {
token, ok := ctx.Value("jwt").(*jwt.Token)

if !ok {
- return false, extendError("UNAUTHORIZED", "Unauthorized")
+ return false, errors.ExtendError("UNAUTHORIZED", "Unauthorized")
}

if token.Valid {
return true, nil
} else {
- return false, extendError("UNAUTHORIZED", "Unauthorized")
+ return false, errors.ExtendError("UNAUTHORIZED", "Unauthorized")
}
}

- func createJWT(claims *JWTClaims) *jwt.Token {
+ func CreateJWT(claims *types.JWTClaims) *jwt.Token {

claims.ExpiresAt = time.Now().Add(time.Hour * 24).Unix()

return jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
}

- func createClaims(username string) *JWTClaims {
- return &JWTClaims{
+ func CreateClaims(username string) *types.JWTClaims {
+ return &types.JWTClaims{
StandardClaims: jwt.StandardClaims{
Subject: username,
},
@@ -1,7 +1,7 @@
//go:build !prod
// +build !prod

- package s3browser
+ package httpserver

import "github.com/gorilla/mux"
@@ -1,4 +1,4 @@
- package s3browser
+ package httpserver

import (
"context"
@@ -18,17 +18,16 @@ import (
"github.com/minio/minio-go/v7"

log "github.com/sirupsen/logrus"

+ helper "git.kapelle.org/niklas/s3browser/internal/helper"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
)

- type JWTClaims struct {
- jwt.StandardClaims
- }

- type CookieExtractor struct {
+ type cookieExtractor struct {
Name string
}

- func (c *CookieExtractor) ExtractToken(req *http.Request) (string, error) {
+ func (c *cookieExtractor) ExtractToken(req *http.Request) (string, error) {
cookie, err := req.Cookie(c.Name)

if err == nil && len(cookie.Value) != 0 {
@@ -36,11 +35,10 @@ func (c *CookieExtractor) ExtractToken(req *http.Request) (string, error) {
}

return "", jwtRequest.ErrNoTokenInRequest

}

- // initHttp setup and start the http server. Blocking
- func initHttp(resolveContext context.Context, schema graphql.Schema, address string) error {
+ // InitHttp setup and start the http server. Blocking
+ func InitHttp(resolveContext context.Context, schema graphql.Schema, address string) error {
r := mux.NewRouter()

gqlHandler := handler.New(&handler.Config{
@@ -64,8 +62,8 @@ func initHttp(resolveContext context.Context, schema graphql.Schema, address str

parsedToken, err := jwtRequest.ParseFromRequestWithClaims(r, jwtRequest.MultiExtractor{
jwtRequest.AuthorizationHeaderExtractor,
- &CookieExtractor{Name: "jwt"},
- }, &JWTClaims{}, jwtKeyFunc)
+ &cookieExtractor{Name: "jwt"},
+ }, &types.JWTClaims{}, jwtKeyFunc)

if err == nil && parsedToken.Valid {
newRequest := r.WithContext(context.WithValue(r.Context(), "jwt", parsedToken))
@@ -103,7 +101,7 @@ func initHttp(resolveContext context.Context, schema graphql.Schema, address str
}

func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
- if is, _ := isAuth(r.Context()); !is {
+ if is, _ := helper.IsAuth(r.Context()); !is {
rw.WriteHeader(http.StatusUnauthorized)
return
}
@@ -112,7 +110,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
id := r.URL.Query().Get("id")

log.Debug("S3 call 'StatObject': ", id)
- objInfo, err := s3Client.StatObject(context.Background(), bucketName, id, minio.GetObjectOptions{})
+ objInfo, err := s3Client.StatObject(context.Background(), "dev", id, minio.GetObjectOptions{})

if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
@@ -126,7 +124,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
}

log.Debug("S3 call 'GetObject': ", id)
- obj, err := s3Client.GetObject(context.Background(), bucketName, id, minio.GetObjectOptions{})
+ obj, err := s3Client.GetObject(context.Background(), "dev", id, minio.GetObjectOptions{})

if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
@@ -147,7 +145,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
}

func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
- if is, _ := isAuth(r.Context()); !is {
+ if is, _ := helper.IsAuth(r.Context()); !is {
rw.WriteHeader(http.StatusUnauthorized)
return
}
@@ -162,7 +160,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
mimeType, _, _ := mime.ParseMediaType(contentType)

log.Debug("S3 call 'PutObject': ", id)
- info, err := s3Client.PutObject(context.Background(), bucketName, id, r.Body, r.ContentLength, minio.PutObjectOptions{
+ info, err := s3Client.PutObject(context.Background(), "dev", id, r.Body, r.ContentLength, minio.PutObjectOptions{
ContentType: mimeType,
})

@@ -172,7 +170,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
}

// Invalidate cache
- invalidateCache(ctx, info.Key)
+ helper.InvalidateCache(ctx, info.Key)

rw.WriteHeader(http.StatusCreated)
}
@@ -196,7 +194,7 @@ func setLoginCookie(rw http.ResponseWriter, r *http.Request) {

tokenString := string(body)

- token, err := jwt.ParseWithClaims(tokenString, &JWTClaims{}, jwtKeyFunc)
+ token, err := jwt.ParseWithClaims(tokenString, &types.JWTClaims{}, jwtKeyFunc)

if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
@@ -208,7 +206,7 @@ func setLoginCookie(rw http.ResponseWriter, r *http.Request) {
return
}

- claims, ok := token.Claims.(*JWTClaims)
+ claims, ok := token.Claims.(*types.JWTClaims)

if !ok {
rw.WriteHeader(http.StatusInternalServerError)
@@ -246,7 +244,7 @@ func logout(rw http.ResponseWriter, r *http.Request) {
}

func refreshToken(rw http.ResponseWriter, r *http.Request) {
- if is, _ := isAuth(r.Context()); !is {
+ if is, _ := helper.IsAuth(r.Context()); !is {
rw.WriteHeader(http.StatusUnauthorized)
return
}
@@ -258,14 +256,14 @@ func refreshToken(rw http.ResponseWriter, r *http.Request) {
return
}

- claims, ok := oldToken.Claims.(*JWTClaims)
+ claims, ok := oldToken.Claims.(*types.JWTClaims)

if !ok {
rw.WriteHeader(http.StatusInternalServerError)
return
}

- token := createJWT(claims)
+ token := helper.CreateJWT(claims)

tokenString, err := token.SignedString([]byte("TODO"))
@@ -1,7 +1,7 @@
//go:build prod
// +build prod

- package s3browser
+ package httpserver

import (
"embed"
@@ -3,48 +3,20 @@ package s3browser
import (
"context"
"fmt"
"time"

"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
log "github.com/sirupsen/logrus"

+ gql "git.kapelle.org/niklas/s3browser/internal/gql"
+ httpserver "git.kapelle.org/niklas/s3browser/internal/httpserver"
+ types "git.kapelle.org/niklas/s3browser/internal/types"
)

- // AppConfig general config
- type AppConfig struct {
- S3Endoint string
- S3AccessKey string
- S3SecretKey string
- S3SSL bool
- S3Bucket string
- CacheTTL time.Duration
- CacheCleanup time.Duration
- Address string
- LogDebug bool
- }

- // File represents a file with its metadata
- type File struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Size int64 `json:"size"`
- ContentType string `json:"contentType"`
- ETag string `json:"etag"`
- LastModified time.Time `json:"lastModified"`
- }

- // Directory represents a directory with its metadata
- type Directory struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Files []File `json:"files"`
- Directorys []Directory `json:"directorys"`
- }

var bucketName string

// setupS3Client connect the s3Client
- func setupS3Client(config AppConfig) (*minio.Client, error) {
+ func setupS3Client(config types.AppConfig) (*minio.Client, error) {
minioClient, err := minio.New(config.S3Endoint, &minio.Options{
Creds: credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
Secure: config.S3SSL,
@@ -70,7 +42,7 @@ func setupS3Client(config AppConfig) (*minio.Client, error) {
}

// Start starts the app
- func Start(config AppConfig) {
+ func Start(config types.AppConfig) {

if config.LogDebug {
log.SetLevel(log.DebugLevel)
@@ -90,8 +62,8 @@ func Start(config AppConfig) {
loaderMap := createDataloader(config)

log.Debug("Generating graphq schema")
- graphqlTypes()
- schema, err := graphqlSchema()
+ gql.GraphqlTypes()
+ schema, err := gql.GraphqlSchema()

if err != nil {
log.Error("Failed to generate graphq schemas: ", err.Error())
@@ -102,7 +74,7 @@ func Start(config AppConfig) {
resolveContext = context.WithValue(resolveContext, "loader", loaderMap)

log.Debug("Starting HTTP server")
- err = initHttp(resolveContext, schema, config.Address)
+ err = httpserver.InitHttp(resolveContext, schema, config.Address)

if err != nil {
log.Error("Failed to start webserver: ", err.Error())
internal/types/types.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+ package types
+
+ import (
+ "time"
+
+ "github.com/golang-jwt/jwt"
+ )
+
+ // AppConfig general config
+ type AppConfig struct {
+ S3Endoint string
+ S3AccessKey string
+ S3SecretKey string
+ S3SSL bool
+ S3Bucket string
+ CacheTTL time.Duration
+ CacheCleanup time.Duration
+ Address string
+ LogDebug bool
+ }
+
+ // File represents a file with its metadata
+ type File struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ ContentType string `json:"contentType"`
+ ETag string `json:"etag"`
+ LastModified time.Time `json:"lastModified"`
+ }
+
+ // Directory represents a directory with its metadata
+ type Directory struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Files []File `json:"files"`
+ Directorys []Directory `json:"directorys"`
+ }
+
+ type JWTClaims struct {
+ jwt.StandardClaims
+ }
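
After this refactor the entry point wires the new packages together: main passes a types.AppConfig into s3browser.Start, which in turn calls gql.GraphqlTypes, gql.GraphqlSchema and httpserver.InitHttp. A minimal sketch of such a caller is shown below; the endpoint, credential, bucket and address values are placeholders for illustration only and do not come from this commit (the real cmd binary fills them from CLI flags via go-arg).

package main

import (
	s3browser "git.kapelle.org/niklas/s3browser/internal"
	types "git.kapelle.org/niklas/s3browser/internal/types"
)

func main() {
	// Placeholder configuration; note the field really is spelled "S3Endoint" in types.AppConfig.
	s3browser.Start(types.AppConfig{
		S3Endoint:   "localhost:9000",
		S3AccessKey: "access-key",
		S3SecretKey: "secret-key",
		S3SSL:       false,
		S3Bucket:    "dev",
		Address:     ":8080",
		LogDebug:    true,
	})
}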