the big refactor 2: return of the ID struct
commit 74037dfab5
parent 91e217e472
@@ -24,8 +24,9 @@ func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
 	}
 
 	for _, v := range k {
+		id := v.Raw().(types.ID)
 		results = append(results, &dataloader.Result{
-			Data:  listObjects(s3Client, bucketName, v.String(), false),
+			Data:  listObjects(s3Client, id, false),
 			Error: nil,
 		})
 	}
@@ -45,8 +46,9 @@ func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
 	}
 
 	for _, v := range k {
+		id := v.Raw().(types.ID)
 		results = append(results, &dataloader.Result{
-			Data:  listObjects(s3Client, bucketName, v.String(), true),
+			Data:  listObjects(s3Client, id, true),
 			Error: nil,
 		})
 	}
@@ -55,10 +57,10 @@ func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
 }
 
 // listObjects helper func for listObjectsBatch
-func listObjects(s3Client *minio.Client, bukitName, path string, recursive bool) []minio.ObjectInfo {
-	log.Debug("S3 call 'ListObjects': ", path)
-	objectCh := s3Client.ListObjects(context.Background(), bukitName, minio.ListObjectsOptions{
-		Prefix:    path,
+func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.ObjectInfo {
+	log.Debug("S3 call 'ListObjects': ", id)
+	objectCh := s3Client.ListObjects(context.Background(), id.Bucket, minio.ListObjectsOptions{
+		Prefix:    id.Key,
 		Recursive: recursive,
 	})
 
@@ -81,14 +83,10 @@ func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 	}
 
 	for _, v := range k {
-		path := v.String()
+		id := v.Raw().(types.ID)
 		files := make([]types.File, 0)
 
-		if !strings.HasSuffix(path, "/") {
-			path += "/"
-		}
-
-		thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))
+		thunk := loader["listObjects"].Load(c, id)
 
 		objects, _ := thunk()
 
@@ -98,8 +96,15 @@ func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 			if obj.Err != nil {
 				// TODO: how to handle?
 			} else if !strings.HasSuffix(obj.Key, "/") {
+				resultID := types.ID{
+					Bucket: id.Bucket,
+					Key:    obj.Key,
+				}
+
+				resultID.Normalize()
+
 				files = append(files, types.File{
-					ID:          obj.Key,
+					ID:          resultID,
 					Name:        filepath.Base(obj.Key),
 					Size:        obj.Size,
 					ContentType: obj.ContentType,
@@ -130,8 +135,9 @@ func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 	}
 
 	for _, v := range k {
+		id := v.Raw().(types.ID)
 		log.Debug("S3 call 'StatObject': ", v.String())
-		obj, err := s3Client.StatObject(context.Background(), bucketName, v.String(), minio.StatObjectOptions{})
+		obj, err := s3Client.StatObject(context.Background(), id.Bucket, id.Key, minio.StatObjectOptions{})
 
 		if err != nil {
 			results = append(results, &dataloader.Result{
@@ -139,9 +145,16 @@ func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 				Error: err,
 			})
 		} else {
+			resultID := types.ID{
+				Bucket: id.Bucket,
+				Key:    obj.Key,
+			}
+
+			resultID.Normalize()
+
 			results = append(results, &dataloader.Result{
 				Data: &types.File{
-					ID:          obj.Key,
+					ID:          resultID,
 					Size:        obj.Size,
 					ContentType: obj.ContentType,
 					ETag:        obj.ETag,
@@ -166,14 +179,10 @@ func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 	}
 
 	for _, v := range k {
-		path := v.String()
+		id := v.Raw().(types.ID)
 		dirs := make([]types.Directory, 0)
 
-		if !strings.HasSuffix(path, "/") {
-			path += "/"
-		}
-
-		thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))
+		thunk := loader["listObjects"].Load(c, id)
 
 		objects, _ := thunk()
 
@@ -183,8 +192,13 @@ func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 			if obj.Err != nil {
 				// TODO: how to handle?
 			} else if strings.HasSuffix(obj.Key, "/") {
+				resultID := types.ID{
+					Bucket: id.Bucket,
+					Key:    obj.Key,
+				}
+				resultID.Normalize()
 				dirs = append(dirs, types.Directory{
-					ID:   obj.Key,
+					ID:   resultID,
 					Name: filepath.Base(obj.Key),
 				})
 			}
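Note: the batch functions can cast v.Raw().(types.ID) because the loaders are now fed types.ID keys instead of dataloader.StringKey. A minimal, self-contained sketch of that contract, assuming the graph-gophers/dataloader Key shape (String for cache identity, Raw for the underlying value); bucket and key values are made up:

package main

import "fmt"

// Key mirrors the dataloader key contract: String() identifies the key
// in the loader cache, Raw() carries the original value for the batch fn.
type Key interface {
	String() string
	Raw() interface{}
}

// ID is a stand-in for types.ID, just enough to satisfy Key.
type ID struct {
	Bucket, Key string
}

func (i ID) String() string   { return fmt.Sprintf("%s:%s", i.Bucket, i.Key) }
func (i ID) Raw() interface{} { return i }

func main() {
	keys := []Key{ID{Bucket: "dev", Key: "/docs/"}} // invented values
	for _, v := range keys {
		id := v.Raw().(ID) // the same cast the batch functions perform
		fmt.Println(id.Bucket, id.Key)
	}
}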
@@ -16,6 +16,7 @@ import (
 var graphqlDirType *graphql.Object
 var graphqlFileType *graphql.Object
 var graphqlLoginResultType *graphql.Object
+var objIDType *graphql.Scalar
 
 //GraphqlTypes create all graphql types and stores the in the global variables
 func GraphqlTypes() {
@@ -45,7 +46,39 @@ func GraphqlTypes() {
 		ParseLiteral: func(valueAST ast.Value) interface{} {
 			switch valueAST := valueAST.(type) {
 			case *ast.StringValue:
-				return valueAST.Value
+				if tval, err := time.Parse(time.RFC3339, valueAST.Value); err != nil {
+					return nil
+				} else {
+					return tval
+				}
 			}
 			return nil
 		},
 	})
 
+	objIDType = graphql.NewScalar(graphql.ScalarConfig{
+		Name: "objID",
+		Description: `String representing a bucket, key and version combination.
+Looks like this: "bucketName:/name/of/key" or "bucketName@version:/name/of/key"`,
+		Serialize: func(value interface{}) interface{} {
+			switch value := value.(type) {
+			case types.ID:
+				return value.String()
+			}
+
+			return "INVALID"
+		},
+		ParseValue: func(value interface{}) interface{} {
+			switch tvalue := value.(type) {
+			case string:
+				return types.ParseID(tvalue)
+			}
+			return nil
+		},
+		ParseLiteral: func(valueAST ast.Value) interface{} {
+			switch valueAST := valueAST.(type) {
+			case *ast.StringValue:
+				return types.ParseID(valueAST.Value)
+			}
+			return nil
+		},
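Note: Serialize and ParseValue make the scalar round-trip through the string form documented in its Description, so clients pass bucket-qualified ids literally — e.g. a hypothetical query `file(id: "dev:/docs/readme.md") { name size }` (bucket and key invented for illustration).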
@@ -56,7 +89,7 @@ func GraphqlTypes() {
 		Description: "Represents a directory",
 		Fields: graphql.Fields{
 			"id": &graphql.Field{
-				Type: graphql.NewNonNull(graphql.ID),
+				Type: graphql.NewNonNull(objIDType),
 			},
 			"name": &graphql.Field{
 				Type: graphql.String,
@@ -66,7 +99,7 @@ func GraphqlTypes() {
 					return nil, fmt.Errorf("Failed to parse source for resolve")
 				}
 
-				return filepath.Base(source.ID), nil
+				return filepath.Base(source.ID.Key), nil
 			},
 		},
 	},
@@ -77,7 +110,7 @@ func GraphqlTypes() {
 		Description: "Represents a file, not a directory",
 		Fields: graphql.Fields{
 			"id": &graphql.Field{
-				Type: graphql.NewNonNull(graphql.ID),
+				Type: graphql.NewNonNull(objIDType),
 				Description: "The uniqe ID of the file. Represents the path and the s3 key.",
 			},
 			"name": &graphql.Field{
@@ -88,7 +121,7 @@ func GraphqlTypes() {
 					return nil, fmt.Errorf("Failed to parse source for resolve")
 				}
 
-				return filepath.Base(source.ID), nil
+				return filepath.Base(source.ID.Key), nil
 			},
 		},
 		"size": &graphql.Field{
@@ -143,10 +176,14 @@ func GraphqlTypes() {
 					return nil, fmt.Errorf("Failed to parse Source for parent resolve")
 				}
 
-				basename := helper.GetPathFromId(source.ID)
+				parent := source.ID.Parent()
 
+				if parent == nil {
+					return nil, nil
+				}
+
 				return types.Directory{
-					ID: basename,
+					ID: *source.ID.Parent(),
 				}, nil
 			},
 		},
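Note: Parent() returns nil once the key is already "/", so this resolver now yields null at the bucket root instead of fabricating a parent. Since parent is already in scope, `ID: *parent` would be equivalent to the second Parent() call.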
@@ -163,7 +200,7 @@ func GraphqlTypes() {
 
 				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
 
-				thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
+				thunk := loader["getFiles"].Load(p.Context, source.ID)
 				return thunk()
 			},
 		})
@@ -177,7 +214,7 @@ func GraphqlTypes() {
 			}
 
 			loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
-			thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
+			thunk := loader["getDirs"].Load(p.Context, source.ID)
 
 			return thunk()
 		},
@@ -223,7 +260,7 @@ func loadFile(p graphql.ResolveParams) (*types.File, error) {
 
 	loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
 
-	thunk := loader["getFile"].Load(p.Context, dataloader.StringKey(helper.NomalizeID(source.ID)))
+	thunk := loader["getFile"].Load(p.Context, source.ID)
 	result, err := thunk()
 
 	if err != nil {
@@ -12,7 +12,7 @@ import (
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
-func deleteMutation(ctx context.Context, id string) error {
+func deleteMutation(ctx context.Context, id types.ID) error {
 	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
 
 	if !ok {
@@ -21,17 +21,17 @@ func deleteMutation(ctx context.Context, id string) error {
 
 	// TODO: it is posible to remove multiple objects with a single call.
 	// Is it better to batch this?
-	err := s3Client.RemoveObject(ctx, "dev", id, minio.RemoveObjectOptions{})
+	err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
 
 	if err != nil {
 		return err
 	}
 
 	// Invalidate cache
-	return helper.InvalidateCache(ctx, helper.NomalizeID(id))
+	return helper.InvalidateCache(ctx, id)
 }
 
-func copyMutation(ctx context.Context, src, dest string) (*types.File, error) {
+func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
 	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
 
 	if !ok {
@@ -39,35 +39,42 @@ func copyMutation(ctx context.Context, src, dest string) (*types.File, error) {
 	}
 
 	// Check if dest is a file or a dir
-	if strings.HasSuffix(dest, "/") {
+	if dest.IsDirectory() {
 		// create new dest id
 		// TODO: What if a file with this id already exists?
-		dest += helper.GetFilenameFromID(src)
+		dest.Key += helper.GetFilenameFromKey(src.Key)
 	}
 
 	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
-		Bucket: "dev",
-		Object: dest,
+		Bucket: dest.Bucket,
+		Object: dest.Key,
 	}, minio.CopySrcOptions{
-		Bucket: "dev",
-		Object: src,
+		Bucket: src.Bucket,
+		Object: src.Key,
 	})
 
 	if err != nil {
 		return nil, err
 	}
 
+	newid := types.ID{
+		Bucket: info.Bucket,
+		Key:    info.Key,
+	}
+
+	newid.Normalize()
+
 	// Invalidate cache
 	// TODO: check error
-	helper.InvalidateCache(ctx, helper.NomalizeID(info.Key))
+	helper.InvalidateCache(ctx, newid)
 
 	return &types.File{
-		ID: info.Key,
+		ID: newid,
 	}, nil
 
 }
 
-func moveMutation(ctx context.Context, src, dest string) (*types.File, error) {
+func moveMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
 	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
 
 	if !ok {
@@ -75,19 +82,19 @@ func moveMutation(ctx context.Context, src, dest string) (*types.File, error) {
 	}
 
 	// Check if dest is a file or a dir
-	if strings.HasSuffix(dest, "/") {
+	if dest.IsDirectory() {
 		// create new dest id
 		// TODO: What if a file with this id already exists?
-		dest += helper.GetFilenameFromID(src)
+		dest.Key += helper.GetFilenameFromKey(src.Key)
 	}
 
 	// There is no (spoon) move. Only copy and delete
 	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
-		Bucket: "dev",
-		Object: dest,
+		Bucket: dest.Bucket,
+		Object: dest.Key,
 	}, minio.CopySrcOptions{
-		Bucket: "dev",
-		Object: src,
+		Bucket: src.Bucket,
+		Object: src.Key,
 	})
 
 	if err != nil {
@@ -100,26 +107,29 @@ func moveMutation(ctx context.Context, src, dest string) (*types.File, error) {
 		return nil, err
 	}
 
-	helper.InvalidateCache(ctx, helper.NomalizeID(info.Key))
+	newId := types.ID{
+		Bucket: info.Bucket,
+		Key:    info.Key,
+	}
+
+	newId.Normalize()
+
+	helper.InvalidateCache(ctx, newId)
 
 	return &types.File{
-		ID: info.Key,
+		ID: newId,
 	}, nil
 
 }
 
-func createDirectory(ctx context.Context, path string) (*types.Directory, error) {
+func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) {
 	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
 
 	if !ok {
 		return nil, fmt.Errorf("Failed to get s3Client from context")
 	}
 
-	if !strings.HasSuffix(path, "/") {
-		path += "/"
-	}
-
-	info, err := s3Client.PutObject(ctx, "dev", path, strings.NewReader(""), 0, minio.PutObjectOptions{
+	info, err := s3Client.PutObject(ctx, id.Bucket, id.Key, strings.NewReader(""), 0, minio.PutObjectOptions{
 		ContentType: "application/x-directory",
 	})
 
@@ -127,17 +137,24 @@ func createDirectory(ctx context.Context, path string) (*types.Directory, error)
 		return nil, err
 	}
 
+	newId := types.ID{
+		Bucket: info.Bucket,
+		Key:    info.Key,
+	}
+
+	newId.Normalize()
+
 	// Invalidate cache
 	// TODO: check error
-	helper.InvalidateCacheForDir(ctx, helper.NomalizeID(info.Key))
+	helper.InvalidateCacheForDir(ctx, newId)
 
 	return &types.Directory{
-		ID: info.Key,
+		ID: newId,
 	}, nil
 
 }
 
-func deleteDirectory(ctx context.Context, path string) error {
+func deleteDirectory(ctx context.Context, id types.ID) error {
 	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
 
 	if !ok {
@@ -150,12 +167,8 @@ func deleteDirectory(ctx context.Context, path string) error {
 		return fmt.Errorf("Failed to get dataloader from context")
 	}
 
-	if !strings.HasSuffix(path, "/") {
-		path += "/"
-	}
-
 	// Get all files inside the directory
-	thunk := loader["listObjectsRecursive"].Load(ctx, dataloader.StringKey(helper.NomalizeID(path)))
+	thunk := loader["listObjectsRecursive"].Load(ctx, id)
 
 	result, err := thunk()
 
@@ -181,14 +194,14 @@ func deleteDirectory(ctx context.Context, path string) error {
 	// This is at least the behavior when working with minio as s3 backend
 	// TODO: check if this is normal behavior when working with s3
 	if len(files) == 0 {
-		err := s3Client.RemoveObject(ctx, "dev", path, minio.RemoveObjectOptions{})
+		err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
 		if err != nil {
 			return err
 		}
 	}
 
 	//Invalidate cache
-	helper.InvalidateCacheForDir(ctx, helper.NomalizeID(path))
+	helper.InvalidateCacheForDir(ctx, id)
 
 	return nil
 }
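Note: copy and move now resolve a directory destination through the ID itself rather than string suffix checks, and the hard-coded "dev" bucket is gone. A self-contained sketch of that branch with a stand-in for types.ID (filepath.Base plays the role of helper.GetFilenameFromKey; bucket and keys are invented):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// ID is a stand-in for types.ID.
type ID struct {
	Bucket, Key string
}

// IsDirectory mirrors the new method: a trailing "/" marks a directory.
func (i ID) IsDirectory() bool { return strings.HasSuffix(i.Key, "/") }

func main() {
	src := ID{Bucket: "dev", Key: "/docs/report.pdf"} // invented values
	dest := ID{Bucket: "dev", Key: "/archive/"}

	// Copying onto a directory id appends the source filename, as
	// copyMutation/moveMutation do via helper.GetFilenameFromKey.
	if dest.IsDirectory() {
		dest.Key += filepath.Base(src.Key)
	}

	fmt.Println(dest.Key) // /archive/report.pdf
}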
@@ -20,7 +20,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 			Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlFileType))),
 			Args: graphql.FieldConfigArgument{
 				"path": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.String),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -28,16 +28,16 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				path, ok := p.Args["path"].(string)
+				path, ok := p.Args["path"].(*types.ID)
 
 				if !ok {
-					return nil, nil
+					return nil, fmt.Errorf("Failed to parse args")
 				}
 
 				log.Debug("querry 'files': ", path)
 
 				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
-				thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(path))
+				thunk := loader["getFiles"].Load(p.Context, path)
 				return thunk()
 			},
 		},
@@ -45,7 +45,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 			Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlDirType))),
 			Args: graphql.FieldConfigArgument{
 				"path": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.String),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -53,7 +53,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				path, ok := p.Args["path"].(string)
+				path, ok := p.Args["path"].(*types.ID)
 
 				if !ok {
 					return nil, nil
@@ -62,7 +62,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 				log.Debug("querry 'directorys': ", path)
 
 				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
-				thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(path))
+				thunk := loader["getDirs"].Load(p.Context, path)
 				return thunk()
 			},
 		},
@@ -70,7 +70,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 			Type: graphqlFileType,
 			Args: graphql.FieldConfigArgument{
 				"id": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -78,7 +78,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				id, ok := p.Args["id"].(string)
+				id, ok := p.Args["id"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
@@ -86,7 +86,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 				log.Debug("querry 'file': ", id)
 
 				return types.File{
-					ID: id,
+					ID: *id,
 				}, nil
 			},
 		},
@@ -107,7 +107,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 			Type: graphql.String,
 			Args: graphql.FieldConfigArgument{
 				"id": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -115,24 +115,24 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				id, ok := p.Args["id"].(string)
+				id, ok := p.Args["id"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
 
 				log.Debug("mutation 'delete': ", id)
 
-				return id, deleteMutation(p.Context, id)
+				return id, deleteMutation(p.Context, *id)
 			},
 		},
 		"copy": &graphql.Field{
 			Type: graphqlFileType,
 			Args: graphql.FieldConfigArgument{
 				"src": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 				"dest": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -140,28 +140,28 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				src, ok := p.Args["src"].(string)
+				src, ok := p.Args["src"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
-				dest, ok := p.Args["dest"].(string)
+				dest, ok := p.Args["dest"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
 
 				log.Debug("mutation 'copy': ", src, "-->", dest)
 
-				return copyMutation(p.Context, src, dest)
+				return copyMutation(p.Context, *src, *dest)
 			},
 		},
 		"move": &graphql.Field{
 			Type: graphqlFileType,
 			Args: graphql.FieldConfigArgument{
 				"src": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 				"dest": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -169,25 +169,25 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				src, ok := p.Args["src"].(string)
+				src, ok := p.Args["src"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
-				dest, ok := p.Args["dest"].(string)
+				dest, ok := p.Args["dest"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
 
 				log.Debug("mutation 'move': ", src, "-->", dest)
 
-				return moveMutation(p.Context, src, dest)
+				return moveMutation(p.Context, *src, *dest)
 			},
 		},
 		"createDir": &graphql.Field{
 			Type: graphql.NewNonNull(graphqlDirType),
 			Args: graphql.FieldConfigArgument{
 				"path": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -195,21 +195,21 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				path, ok := p.Args["path"].(string)
+				path, ok := p.Args["path"].(*types.ID)
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
 				}
 
 				log.Debug("mutation 'createDir': ", path)
 
-				return createDirectory(p.Context, path)
+				return createDirectory(p.Context, *path)
 			},
 		},
 		"deleteDir": &graphql.Field{
 			Type: graphql.NewNonNull(graphql.String),
 			Args: graphql.FieldConfigArgument{
 				"path": &graphql.ArgumentConfig{
-					Type: graphql.NewNonNull(graphql.ID),
+					Type: graphql.NewNonNull(objIDType),
 				},
 			},
 			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -217,7 +217,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 					return nil, s3errors.ErrNotAuthenticated
 				}
 
-				path, ok := p.Args["path"].(string)
+				path, ok := p.Args["path"].(*types.ID)
 
 				if !ok {
 					return nil, fmt.Errorf("Failed to parse args")
@@ -225,7 +225,7 @@ func GraphqlSchema() (graphql.Schema, error) {
 
 				log.Debug("mutation 'deleteDir': ", path)
 
-				return path, deleteDirectory(p.Context, path)
+				return path, deleteDirectory(p.Context, *path)
 			},
 		},
 		"login": &graphql.Field{
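Note: every argument that used to be graphql.String or graphql.ID is now an objID, so mutations name the bucket explicitly — e.g. a hypothetical `copy(src: "dev:/docs/report.pdf", dest: "dev:/archive/") { id name }` (bucket and keys invented; the trailing "/" marks the destination as a directory per ID.IsDirectory).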
@@ -15,7 +15,7 @@ import (
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
-func InvalidateCache(ctx context.Context, id string) error {
+func InvalidateCache(ctx context.Context, id types.ID) error {
 	loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
 	if !ok {
 		return fmt.Errorf("Failed to get loader from context")
@@ -23,30 +23,20 @@ func InvalidateCache(ctx context.Context, id string) error {
 
 	log.Debug("Invalidate cache for id: ", id)
 
-	path := GetPathFromId(id)
+	parent := id.Parent()
 
-	loader["getFile"].Clear(ctx, dataloader.StringKey(id))
-	loader["getFiles"].Clear(ctx, dataloader.StringKey(path))
-	loader["listObjects"].Clear(ctx, dataloader.StringKey(path))
-	loader["listObjectsRecursive"].Clear(ctx, dataloader.StringKey(path))
+	loader["getFile"].Clear(ctx, id)
+	loader["getFiles"].Clear(ctx, parent)
+	loader["listObjects"].Clear(ctx, parent)
+	loader["listObjectsRecursive"].Clear(ctx, parent)
 	return nil
 }
 
-func GetPathFromId(id string) string {
-	dir := filepath.Dir(id)
-
-	if dir == "." {
-		return "/"
-	}
-
-	return NomalizeID(dir + "/")
-}
-
-func GetFilenameFromID(id string) string {
+func GetFilenameFromKey(id string) string {
 	return filepath.Base(id)
 }
 
-func InvalidateCacheForDir(ctx context.Context, path string) error {
+func InvalidateCacheForDir(ctx context.Context, path types.ID) error {
 	loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
 	if !ok {
 		return fmt.Errorf("Failed to get loader from context")
@@ -58,13 +48,13 @@ func InvalidateCacheForDir(ctx context.Context, path string) error {
 
 	log.Debug("Cache clear dir: ", path, " parent: ", parent)
 
-	loader["getFile"].Clear(ctx, dataloader.StringKey(path))
-	loader["listObjects"].Clear(ctx, dataloader.StringKey(path))
-	loader["listObjectsRecursive"].Clear(ctx, dataloader.StringKey(path))
-	loader["getFiles"].Clear(ctx, dataloader.StringKey(path))
-	loader["getDirs"].Clear(ctx, dataloader.StringKey(parent))
-	loader["listObjects"].Clear(ctx, dataloader.StringKey(parent))
-	loader["listObjectsRecursive"].Clear(ctx, dataloader.StringKey(parent))
+	loader["getFile"].Clear(ctx, path)
+	loader["listObjects"].Clear(ctx, path)
+	loader["listObjectsRecursive"].Clear(ctx, path)
+	loader["getFiles"].Clear(ctx, path)
+	loader["getDirs"].Clear(ctx, parent)
+	loader["listObjects"].Clear(ctx, parent)
+	loader["listObjectsRecursive"].Clear(ctx, parent)
 
 	return nil
 }
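Worked example (bucket and key invented): invalidating {Bucket: "dev", Key: "/a/b.txt"} clears the getFile entry keyed by that id, plus the getFiles, listObjects and listObjectsRecursive entries keyed by its parent id — {Bucket: "dev", Key: "/a"}, since Parent() derives the key via filepath.Dir — so the next query re-lists the directory from S3 instead of serving a stale cached listing.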
@@ -88,29 +78,24 @@ func DeleteMultiple(ctx context.Context, s3Client minio.Client, ids []minio.ObjectInfo) error {
 	return nil
 }
 
-// NomalizeID makes sure there is a leading "/" in the id
-func NomalizeID(id string) string {
-	if !strings.HasPrefix(id, "/") {
-		if id == "." {
-			return "/"
-		}
-		id = "/" + id
-	}
-
-	return id
-}
-
-func GetParentDir(id string) string {
-	dirs := strings.Split(id, "/")
+func GetParentDir(id types.ID) types.ID {
+	dirs := strings.Split(id.Key, "/")
 
 	cut := 1
-	if strings.HasSuffix(id, "/") {
+	if strings.HasSuffix(id.Key, "/") {
 		cut = 2
 	}
 
-	parent := strings.Join(dirs[:len(dirs)-cut], "/") + "/"
+	parentKey := strings.Join(dirs[:len(dirs)-cut], "/") + "/"
 
-	return NomalizeID(parent)
+	parent := types.ID{
+		Bucket: id.Bucket,
+		Key:    parentKey,
+	}
+
+	parent.Normalize()
+
+	return parent
 }
 
 func IsAuthenticated(ctx context.Context) bool {
@@ -157,7 +157,17 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 
 	s3Client := ctx.Value("s3Client").(*minio.Client)
 
-	id := r.URL.Query().Get("id")
+	idString := r.URL.Query().Get("id")
+
+	id := types.ParseID(idString)
+
+	if id == nil {
+		// Failed to parse ID
+		rw.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	id.Normalize()
 
 	log.Debug("Upload file: ", id)
 
@@ -165,7 +175,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 	mimeType, _, _ := mime.ParseMediaType(contentType)
 
 	log.Debug("S3 call 'PutObject': ", id)
-	info, err := s3Client.PutObject(context.Background(), "dev", id, r.Body, r.ContentLength, minio.PutObjectOptions{
+	_, err := s3Client.PutObject(context.Background(), id.Bucket, id.Key, r.Body, r.ContentLength, minio.PutObjectOptions{
 		ContentType: mimeType,
 	})
 
@@ -175,7 +185,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 	}
 
 	// Invalidate cache
-	helper.InvalidateCache(ctx, info.Key)
+	helper.InvalidateCache(ctx, *id)
 
 	rw.WriteHeader(http.StatusCreated)
 }
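Note: the upload handler now expects the full object id in the query string instead of a bare key under the hard-coded "dev" bucket — e.g. POST ...?id=dev:/docs/report.pdf (bucket and key invented; the route itself is not part of this diff). Ids that types.ParseID rejects are answered with 400 Bad Request before any S3 call is made.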
@@ -2,6 +2,7 @@ package types
 
 import (
 	"fmt"
+	"path/filepath"
 	"regexp"
 	"strings"
 )
@@ -13,15 +14,14 @@ var (
 // ID an id of a file consists of at least a Bucket and a Key. Version is optional.
 // Can also be used as an ID for a directory. When the key ends with "/" it is treated as dir.
 type ID struct {
-	Bucket  string // Name of the bucket
-	Key     string // Key of the object
-	Version string // Version of the object. For now we ignore it
+	Bucket  string `json:"bucket"`  // Name of the bucket
+	Key     string `json:"key"`     // Key of the object
+	Version string `json:"version"` // Version of the object. For now we ignore it
 }
 
 // String Return String representation of an ID
 // Looks like this: "bucketName@version:/id/of/obj" or "bucketName:/id/of/obj"
-func (i *ID) String() string {
-	i.Normalize()
+func (i ID) String() string {
 	if i.Version == "" {
 		return fmt.Sprintf("%s:%s", i.Bucket, i.Key)
 	} else {
@@ -31,7 +31,9 @@ func (i *ID) String() string {
 
 // Normalize normalzes the key to have a "/" prefix
 func (i *ID) Normalize() {
-	if !strings.HasPrefix(i.Key, "/") {
+	if i.Key == "." {
+		i.Key = "/"
+	} else if !strings.HasPrefix(i.Key, "/") {
 		i.Key = "/" + i.Key
 	}
 }
@@ -41,6 +43,32 @@ func (i *ID) Valid() bool {
 	return i.Bucket != "" && i.Key != ""
 }
 
+func (i *ID) IsDirectory() bool {
+	return strings.HasSuffix(i.Key, "/")
+}
+
+// Raw for the Key interface for the dataloaders so ID can be used as a dataloader key
+func (i ID) Raw() interface{} {
+	return i
+}
+
+// Parent returns the parent dir ID.
+func (i ID) Parent() *ID {
+	if i.Key == "/" {
+		// Already at root. We dont have a parent
+		return nil
+	}
+
+	parent := &ID{
+		Bucket: i.Bucket,
+		Key:    filepath.Dir(i.Key),
+	}
+
+	parent.Normalize()
+
+	return parent
+}
+
 // ParseID parses a string to an ID. Null if invalid
 func ParseID(id string) *ID {
 	match := idRegex.FindStringSubmatch(id)
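Note: taken together, Normalize, Parent and String define the id algebra the rest of the commit relies on. A self-contained sketch of those three methods with a small demo; it mirrors the code above but stands alone (the real type also carries Version and a regex-based ParseID that are not reproduced here, and the bucket/key values are invented):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

type ID struct {
	Bucket, Key string
}

// Normalize forces a leading "/" on the key; "." becomes the root.
func (i *ID) Normalize() {
	if i.Key == "." {
		i.Key = "/"
	} else if !strings.HasPrefix(i.Key, "/") {
		i.Key = "/" + i.Key
	}
}

// Parent returns the containing directory, or nil at the root.
func (i ID) Parent() *ID {
	if i.Key == "/" {
		return nil
	}
	p := &ID{Bucket: i.Bucket, Key: filepath.Dir(i.Key)}
	p.Normalize()
	return p
}

func (i ID) String() string { return fmt.Sprintf("%s:%s", i.Bucket, i.Key) }

func main() {
	id := ID{Bucket: "dev", Key: "docs/readme.md"}
	id.Normalize()
	fmt.Println(id)          // dev:/docs/readme.md
	fmt.Println(id.Parent()) // dev:/docs
}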
@@ -21,7 +21,7 @@ type AppConfig struct {
 
 // File represents a file with its metadata
 type File struct {
-	ID          string `json:"id"`
+	ID          ID     `json:"id"`
 	Name        string `json:"name"`
 	Size        int64  `json:"size"`
 	ContentType string `json:"contentType"`
@@ -31,7 +31,7 @@ type File struct {
 
 // Directory represents a directory with its metadata
 type Directory struct {
-	ID         string      `json:"id"`
+	ID         ID          `json:"id"`
 	Name       string      `json:"name"`
 	Files      []File      `json:"files"`
 	Directorys []Directory `json:"directorys"`