s3browser-backend/internal/gql/mutations.go
2022-02-07 15:59:52 +01:00

250 lines
5.5 KiB
Go

package gql
import (
"context"
"fmt"
"strings"
"git.kapelle.org/niklas/s3browser/internal/db"
helper "git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/loader"
"git.kapelle.org/niklas/s3browser/internal/s3"
types "git.kapelle.org/niklas/s3browser/internal/types"
log "github.com/sirupsen/logrus"
)
// deleteMutation removes the object identified by id from the S3 backend
// and invalidates any cached data for that id.
func deleteMutation(ctx context.Context, id types.ID) error {
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return fmt.Errorf("Failed to get s3Client from context")
	}

	log.Debug("S3 'RemoveObject': ", id)

	// TODO: it is possible to remove multiple objects with a single call.
	// Is it better to batch this?
	err := s3Client.RemoveObject(ctx, id)
	if err != nil {
		return err
	}

	// Guard the assertion: an unchecked .(T) here would panic if the
	// loader is missing from the context instead of returning an error.
	l, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return fmt.Errorf("Failed to get dataloader from context")
	}
	l.InvalidedCacheForId(ctx, id)

	return nil
}
// copyMutation copies the object at src to dest. If dest is a directory id,
// the source's filename is appended to it so the copy keeps its name.
// Returns the File describing the newly created object.
func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return nil, fmt.Errorf("Failed to get s3Client from context")
	}

	// Check if dest is a file or a dir
	if dest.IsDirectory() {
		// create new dest id
		// TODO: What if a file with this id already exists?
		dest.Key += helper.GetFilenameFromKey(src.Key)
	}

	log.Debug("S3 'CopyObject': ", src, "-->", dest)

	err := s3Client.CopyObject(ctx, src, dest)
	if err != nil {
		return nil, err
	}

	// Guard the assertion: an unchecked .(T) here would panic if the
	// loader is missing from the context instead of returning an error.
	l, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return nil, fmt.Errorf("Failed to get dataloader from context")
	}
	l.InvalidedCacheForId(ctx, dest)

	return &types.File{
		ID: dest,
	}, nil
}
// moveDirMutation moves every object under the src directory into dest by
// copying each object to its rebased key and deleting the original
// (S3 has no native move). dest must be a directory id.
// Returns the list of Files at their new locations.
func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, error) {
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return nil, fmt.Errorf("Failed to get s3Client from context")
	}

	if !dest.IsDirectory() {
		return nil, fmt.Errorf("Dest must be a directory")
	}

	loader, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		// Previously `ok` was never checked; a missing loader would have
		// caused a nil-pointer panic on the first use below.
		return nil, fmt.Errorf("Failed to get dataloader from context")
	}

	// "move" all files inside the dir
	files, err := loader.GetFilesRecursive(ctx, src)
	if err != nil {
		return nil, err
	}

	var result []*types.File

	parent := src.Parent()

	for _, file := range files {
		// Rebase the file's key from the source's parent onto dest.
		newID := types.ID{
			Bucket: dest.Bucket,
			Key:    strings.Replace(file.ID.Key, parent.Key, dest.Key, 1),
		}
		newID.Normalize()

		log.Debug("S3 'CopyObject': ", file.ID, "-->", newID)

		// BUG fix: the copy destination must be the rebased newID.
		// The old code copied every child onto `dest` (the directory id
		// itself), clobbering the same key on each iteration.
		if err := s3Client.CopyObject(ctx, file.ID, newID); err != nil {
			// Stop on the first failure instead of silently continuing;
			// already-moved files stay moved.
			return nil, err
		}

		if err := deleteMutation(ctx, file.ID); err != nil {
			return nil, err
		}

		loader.InvalidedCacheForId(ctx, newID)
		loader.InvalidedCacheForId(ctx, file.ID)

		result = append(result, &types.File{
			ID: newID,
		})
	}

	loader.InvalidedCacheForId(ctx, src)

	return result, nil
}
// moveFileMutation moves a single object from src to dest by copying and
// then deleting the original (S3 has no native move). If dest is a
// directory id, the source's filename is appended to it.
// Returns the File at its new location.
func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return nil, fmt.Errorf("Failed to get s3Client from context")
	}

	// Check if dest is a file or a dir
	if dest.IsDirectory() {
		// create new dest id
		// TODO: What if a file with this id already exists?
		dest.Key += helper.GetFilenameFromKey(src.Key)
	}

	log.Debug("S3 'CopyObject': ", src, "-->", dest)

	// There is no (spoon) move. Only copy and delete
	err := s3Client.CopyObject(ctx, src, dest)
	if err != nil {
		return nil, err
	}

	err = deleteMutation(ctx, src)
	if err != nil {
		return nil, err
	}

	// Guard the assertion: an unchecked .(T) here would panic if the
	// loader is missing from the context instead of returning an error.
	l, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return nil, fmt.Errorf("Failed to get dataloader from context")
	}
	l.InvalidedCacheForId(ctx, dest)

	return &types.File{
		ID: dest,
	}, nil
}
// createDirectory creates a directory marker object (zero-length object
// whose key is the directory id) in the S3 backend and invalidates any
// cached data for that id.
func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) {
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return nil, fmt.Errorf("Failed to get s3Client from context")
	}

	log.Debug("S3 'PutObject': ", id)

	err := s3Client.PutObject(ctx, id, strings.NewReader(""), 0) // TODO: s3client interface needs content type parameter
	if err != nil {
		return nil, err
	}

	// Guard the assertion: an unchecked .(T) here would panic if the
	// loader is missing from the context instead of returning an error.
	l, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return nil, fmt.Errorf("Failed to get dataloader from context")
	}
	l.InvalidedCacheForId(ctx, id)

	return &types.Directory{
		ID: id,
	}, nil
}
// deleteDirectory removes a directory: it deletes every object found under
// id, and — when the directory was empty — removes the directory marker
// object itself. Cache entries for the removed objects are invalidated.
func deleteDirectory(ctx context.Context, id types.ID) error {
	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
	if !ok {
		return fmt.Errorf("Failed to get s3Client from context")
	}

	loader, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return fmt.Errorf("Failed to get dataloader from context")
	}

	// Get all files inside the directory
	files, err := loader.GetFilesRecursive(ctx, id)
	if err != nil {
		return err
	}

	// Delete all child files.
	// BUG fix: RemoveObject errors were previously discarded (the old
	// post-loop `if err != nil` re-checked the already-handled error from
	// GetFilesRecursive). Now the first failure aborts the deletion, and
	// each removed child's cache entry is invalidated.
	for _, file := range files {
		log.Debug("S3 'RemoveObject': ", file.ID)
		if err := s3Client.RemoveObject(ctx, file.ID); err != nil {
			return err
		}
		loader.InvalidedCacheForId(ctx, file.ID)
	}

	// If the dir had no children it exists as an object (object with "/" at the end).
	// If it exists as an object and had children it will get delete once the last child has been deleted
	// If it had no children we have to delete it manualy
	// This is at least the behavior when working with minio as s3 backend
	// TODO: check if this is normal behavior when working with s3
	if len(files) == 0 {
		log.Debug("S3 'RemoveObject': ", id)
		if err := s3Client.RemoveObject(ctx, id); err != nil {
			return err
		}
	}

	loader.InvalidedCacheForId(ctx, id)

	return nil
}
// login checks the username/password combination against the database and,
// on success, returns a LoginResult carrying a signed JWT. On a wrong
// combination it returns Successful=false with a nil error.
func login(ctx context.Context, username, password string) (types.LoginResult, error) {
	dbStore, ok := ctx.Value("dbStore").(db.DB)
	if !ok {
		// Guard the assertion: an unchecked .(T) here would panic if the
		// store is missing from the context instead of returning an error.
		return types.LoginResult{Successful: false}, fmt.Errorf("Failed to get dbStore from context")
	}

	success, err := dbStore.CheckLogin(ctx, username, password)
	if err != nil {
		// BUG fix: previously a database error was silently swallowed and
		// reported to the caller as a plain failed login.
		return types.LoginResult{Successful: false}, err
	}
	if !success {
		return types.LoginResult{
			Successful: false,
		}, nil
	}

	token := helper.CreateJWT(helper.CreateClaims(username))

	// SECURITY: the signing key is a hard-coded placeholder ("TODO").
	// It must be loaded from configuration/secret storage before production use.
	tokenString, err := token.SignedString([]byte("TODO"))
	if err != nil {
		return types.LoginResult{
			Successful: false,
		}, err
	}

	return types.LoginResult{
		Token:      tokenString,
		Successful: true,
	}, nil
}