package gql

import (
	"context"
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7"
	log "github.com/sirupsen/logrus"

	"git.kapelle.org/niklas/s3browser/internal/db"
	helper "git.kapelle.org/niklas/s3browser/internal/helper"
	"git.kapelle.org/niklas/s3browser/internal/loader"
	types "git.kapelle.org/niklas/s3browser/internal/types"
)

func deleteMutation(ctx context.Context, id types.ID) error {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
	if !ok {
		return fmt.Errorf("failed to get s3Client from context")
	}

	log.Debug("S3 'RemoveObject': ", id)

	// TODO: it is possible to remove multiple objects with a single call.
	// Is it better to batch this? (see the removeBatch sketch below)
	err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
	if err != nil {
		return err
	}

	ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, id)

	return nil
}
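// A minimal sketch of the batched variant the TODO above asks about, using
// minio-go's RemoveObjects. The helper name removeBatch is an assumption and
// not part of this package's API (helper.DeleteMultiple, used in
// deleteDirectory below, presumably covers similar ground); cache
// invalidation is left to the caller.
func removeBatch(ctx context.Context, s3Client *minio.Client, bucket string, keys []string) error {
	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for _, key := range keys {
			objectsCh <- minio.ObjectInfo{Key: key}
		}
	}()

	// RemoveObjects streams per-object failures back on a channel;
	// return the first one, if any.
	for rErr := range s3Client.RemoveObjects(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}

	return nil
}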
func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
	if !ok {
		return nil, fmt.Errorf("failed to get s3Client from context")
	}

	// Check if dest is a file or a dir
	if dest.IsDirectory() {
		// Create the new dest id by appending the source filename
		// TODO: What if a file with this id already exists?
		// (see the destExists sketch at the end of this file)
		dest.Key += helper.GetFilenameFromKey(src.Key)
	}

	log.Debug("S3 'CopyObject': ", src, "-->", dest)

	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
		Bucket: dest.Bucket,
		Object: dest.Key,
	}, minio.CopySrcOptions{
		Bucket: src.Bucket,
		Object: src.Key,
	})
	if err != nil {
		return nil, err
	}

	newID := types.ID{
		Bucket: info.Bucket,
		Key:    info.Key,
	}
	newID.Normalize()

	ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, newID)

	return &types.File{
		ID: newID,
	}, nil
}

func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, error) {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
	if !ok {
		return nil, fmt.Errorf("failed to get s3Client from context")
	}

	if !dest.IsDirectory() {
		return nil, fmt.Errorf("dest must be a directory")
	}

	loader, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return nil, fmt.Errorf("failed to get dataloader from context")
	}

	// "Move" all files inside the dir: S3 has no move, so copy each object and delete the original
	files, err := loader.GetFilesRecursive(ctx, src)
	if err != nil {
		return nil, err
	}

	var result []*types.File

	parent := src.Parent()

	for _, file := range files {
		newID := types.ID{
			Bucket: dest.Bucket,
			Key:    strings.Replace(file.ID.Key, parent.Key, dest.Key, 1),
		}
		newID.Normalize()

		log.Debug("S3 'CopyObject': ", file.ID, "-->", newID)

		_, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
			Bucket: dest.Bucket,
			Object: newID.Key,
		}, minio.CopySrcOptions{
			Bucket: file.ID.Bucket,
			Object: file.ID.Key,
		})
		if err != nil {
			return nil, err
		}

		if err := deleteMutation(ctx, file.ID); err != nil {
			return nil, err
		}

		result = append(result, &types.File{
			ID: newID,
		})
	}

	return result, nil
}

func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
	if !ok {
		return nil, fmt.Errorf("failed to get s3Client from context")
	}

	// Check if dest is a file or a dir
	if dest.IsDirectory() {
		// Create the new dest id by appending the source filename
		// TODO: What if a file with this id already exists?
		// (see the destExists sketch at the end of this file)
		dest.Key += helper.GetFilenameFromKey(src.Key)
	}

	log.Debug("S3 'CopyObject': ", src, "-->", dest)

	// There is no (spoon) move. Only copy and delete.
	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
		Bucket: dest.Bucket,
		Object: dest.Key,
	}, minio.CopySrcOptions{
		Bucket: src.Bucket,
		Object: src.Key,
	})
	if err != nil {
		return nil, err
	}

	err = deleteMutation(ctx, src)
	if err != nil {
		return nil, err
	}

	newID := types.ID{
		Bucket: info.Bucket,
		Key:    info.Key,
	}
	newID.Normalize()

	ctx.Value("loader").(*loader.Loader).InvalidateCacheForFile(ctx, newID)

	return &types.File{
		ID: newID,
	}, nil
}

func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
	if !ok {
		return nil, fmt.Errorf("failed to get s3Client from context")
	}

	log.Debug("S3 'PutObject': ", id)

	// A directory is represented as a zero-byte object whose key ends with "/"
	info, err := s3Client.PutObject(ctx, id.Bucket, id.Key, strings.NewReader(""), 0, minio.PutObjectOptions{
		ContentType: "application/x-directory",
	})
	if err != nil {
		return nil, err
	}

	newID := types.ID{
		Bucket: info.Bucket,
		Key:    info.Key,
	}
	newID.Normalize()

	ctx.Value("loader").(*loader.Loader).InvalidateCacheForDir(ctx, newID)

	return &types.Directory{
		ID: newID,
	}, nil
}

func deleteDirectory(ctx context.Context, id types.ID) error {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
	if !ok {
		return fmt.Errorf("failed to get s3Client from context")
	}

	loader, ok := ctx.Value("loader").(*loader.Loader)
	if !ok {
		return fmt.Errorf("failed to get dataloader from context")
	}

	// Get all files inside the directory
	files, err := loader.GetFilesRecursive(ctx, id)
	if err != nil {
		return err
	}

	// Delete all child files
	var keysToDel []string
	for _, file := range files {
		keysToDel = append(keysToDel, file.ID.Key)
	}

	err = helper.DeleteMultiple(ctx, *s3Client, id.Bucket, keysToDel)
	if err != nil {
		return err
	}

	// If the dir had no children it exists as an object (an object whose key ends with "/").
	// If it exists as an object and had children, it gets deleted once the last child has been deleted.
	// If it had no children we have to delete it manually.
	// This is at least the behavior when working with minio as the s3 backend.
	// TODO: check if this is normal behavior when working with s3
	if len(files) == 0 {
		log.Debug("S3 'RemoveObject': ", id)
		err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
		if err != nil {
			return err
		}
	}

	loader.InvalidateCacheForDir(ctx, id)

	return nil
}

// login checks for a valid username/password combination. Returns a signed JWT string.
func login(ctx context.Context, username, password string) (types.LoginResult, error) {
	dbStore, ok := ctx.Value("dbStore").(*db.DB)
	if !ok {
		return types.LoginResult{Successful: false}, fmt.Errorf("failed to get dbStore from context")
	}

	success, err := dbStore.CheckLogin(ctx, username, password)
	if err != nil {
		return types.LoginResult{Successful: false}, err
	}
	if !success {
		return types.LoginResult{Successful: false}, nil
	}

	token := helper.CreateJWT(helper.CreateClaims(username))

	// TODO: load the signing secret from configuration instead of hardcoding it
	tokenString, err := token.SignedString([]byte("TODO"))
	if err != nil {
		return types.LoginResult{Successful: false}, err
	}

	return types.LoginResult{
		Token:      tokenString,
		Successful: true,
	}, nil
}
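// The TODOs in copyMutation and moveFileMutation ask what should happen when a
// file with the destination id already exists. A minimal sketch of an
// existence probe using minio-go's StatObject; the helper name destExists is
// hypothetical and not part of this package. Note the check is inherently
// racy: another client may create the key between the probe and the copy.
func destExists(ctx context.Context, s3Client *minio.Client, id types.ID) (bool, error) {
	_, err := s3Client.StatObject(ctx, id.Bucket, id.Key, minio.StatObjectOptions{})
	if err != nil {
		// "NoSuchKey" signals a clean miss; anything else is a real error.
		if minio.ToErrorResponse(err).Code == "NoSuchKey" {
			return false, nil
		}
		return false, err
	}
	return true, nil
}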