diff --git a/internal/gql/mutations.go b/internal/gql/mutations.go
index 5c323b8..a68186c 100644
--- a/internal/gql/mutations.go
+++ b/internal/gql/mutations.go
@@ -5,17 +5,16 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/minio/minio-go/v7"
-
 	"git.kapelle.org/niklas/s3browser/internal/db"
 	helper "git.kapelle.org/niklas/s3browser/internal/helper"
 	"git.kapelle.org/niklas/s3browser/internal/loader"
+	"git.kapelle.org/niklas/s3browser/internal/s3"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 	log "github.com/sirupsen/logrus"
 )
 
 func deleteMutation(ctx context.Context, id types.ID) error {
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return fmt.Errorf("Failed to get s3Client from context")
@@ -24,7 +23,7 @@ func deleteMutation(ctx context.Context, id types.ID) error {
 	log.Debug("S3 'RemoveObject': ", id)
 	// TODO: it is posible to remove multiple objects with a single call.
 	// Is it better to batch this?
-	err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
+	err := s3Client.RemoveObject(ctx, id)
 
 	if err != nil {
 		return err
@@ -36,7 +35,7 @@ func deleteMutation(ctx context.Context, id types.ID) error {
 }
 
 func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return nil, fmt.Errorf("Failed to get s3Client from context")
@@ -50,35 +49,22 @@ func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error)
 	}
 
 	log.Debug("S3 'CopyObject': ", src, "-->", dest)
-	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
-		Bucket: dest.Bucket,
-		Object: dest.Key,
-	}, minio.CopySrcOptions{
-		Bucket: src.Bucket,
-		Object: src.Key,
-	})
+	err := s3Client.CopyObject(ctx, src, dest)
 
 	if err != nil {
 		return nil, err
 	}
 
-	newID := types.ID{
-		Bucket: info.Bucket,
-		Key:    info.Key,
-	}
-
-	newID.Normalize()
-
-	ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, newID)
+	ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, dest)
 
 	return &types.File{
-		ID: newID,
+		ID: dest,
 	}, nil
 }
 
 func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, error) {
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return nil, fmt.Errorf("Failed to get s3Client from context")
@@ -107,13 +93,7 @@ func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, er
 		newID.Normalize()
 
 		log.Debug("S3 'CopyObject': ", src, "-->", dest)
-		_, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
-			Bucket: dest.Bucket,
-			Object: newID.Key,
-		}, minio.CopySrcOptions{
-			Bucket: file.ID.Bucket,
-			Object: file.ID.Key,
-		})
+		err := s3Client.CopyObject(ctx, file.ID, newID)
 
 		if err != nil {
 			// TODO: handle error
@@ -135,7 +115,7 @@ func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, er
 }
 
 func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return nil, fmt.Errorf("Failed to get s3Client from context")
@@ -150,13 +130,7 @@ func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, err
 
 	log.Debug("S3 'CopyObject': ", src, "-->", dest)
 	// There is no (spoon) move. Only copy and delete
-	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
-		Bucket: dest.Bucket,
-		Object: dest.Key,
-	}, minio.CopySrcOptions{
-		Bucket: src.Bucket,
-		Object: src.Key,
-	})
+	err := s3Client.CopyObject(ctx, src, dest)
 
 	if err != nil {
 		return nil, err
@@ -168,54 +142,37 @@ func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, err
 		return nil, err
 	}
 
-	newId := types.ID{
-		Bucket: info.Bucket,
-		Key:    info.Key,
-	}
-
-	newId.Normalize()
-
-	ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, newId)
+	ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, dest)
 
 	return &types.File{
-		ID: newId,
+		ID: dest,
 	}, nil
 }
 
 func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) {
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return nil, fmt.Errorf("Failed to get s3Client from context")
 	}
 
 	log.Debug("S3 'PutObject': ", id)
-	info, err := s3Client.PutObject(ctx, id.Bucket, id.Key, strings.NewReader(""), 0, minio.PutObjectOptions{
-		ContentType: "application/x-directory",
-	})
-
+	err := s3Client.PutObject(ctx, id, strings.NewReader(""), 0) // TODO: s3client interface needs content type parameter
 	if err != nil {
 		return nil, err
 	}
 
-	newID := types.ID{
-		Bucket: info.Bucket,
-		Key:    info.Key,
-	}
-
-	newID.Normalize()
-
-	ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, newID)
+	ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, id)
 
 	return &types.Directory{
-		ID: newID,
+		ID: id,
 	}, nil
 }
 
 func deleteDirectory(ctx context.Context, id types.ID) error {
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return fmt.Errorf("Failed to get s3Client from context")
@@ -236,11 +193,11 @@ func deleteDirectory(ctx context.Context, id types.ID) error {
 	}
 
 	// Delete all child files
-	var keysToDel []string
 	for _, file := range files {
-		keysToDel = append(keysToDel, file.ID.Key)
+		if err := s3Client.RemoveObject(ctx, file.ID); err != nil {
+			return err
+		}
 	}
-	err = helper.DeleteMultiple(ctx, *s3Client, id.Bucket, keysToDel)
 
 	if err != nil {
 		return err
@@ -253,7 +210,7 @@ func deleteDirectory(ctx context.Context, id types.ID) error {
 	// TODO: check if this is normal behavior when working with s3
 	if len(files) == 0 {
 		log.Debug("S3 'RemoveObject': ", id)
-		err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
+		err := s3Client.RemoveObject(ctx, id)
 		if err != nil {
 			return err
 		}
diff --git a/internal/helper/helper.go b/internal/helper/helper.go
index 4e47a07..2ffadaf 100644
--- a/internal/helper/helper.go
+++ b/internal/helper/helper.go
@@ -6,37 +6,15 @@ import (
 	"strings"
 	"time"
 
+	"git.kapelle.org/niklas/s3browser/internal/s3"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 	"github.com/golang-jwt/jwt"
-	"github.com/minio/minio-go/v7"
-	log "github.com/sirupsen/logrus"
-
 )
 
 func GetFilenameFromKey(id string) string {
 	return filepath.Base(id)
 }
 
-func DeleteMultiple(ctx context.Context, s3Client minio.Client, bucket string, keys []string) error {
-	objectsCh := make(chan minio.ObjectInfo, 1)
-
-	go func() {
-		defer close(objectsCh)
-		for _, id := range keys {
-			objectsCh <- minio.ObjectInfo{
-				Key: id,
-			}
-		}
-	}()
-
-	log.Debug("S3 'RemoveObject': ", keys)
-	for err := range s3Client.RemoveObjects(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) {
-		log.Error("Failed to delete object ", err.ObjectName, " because: ", err.Err.Error())
-		// TODO: error handel
-	}
-
-	return nil
-}
-
 func GetParentDir(id types.ID) types.ID {
 	dirs := strings.Split(id.Key, "/")
@@ -57,17 +35,10 @@ func GetParentDir(id types.ID) types.ID {
 	return parent
 }
 
-func ObjInfoToFile(objInfo minio.ObjectInfo, bucket string) *types.File {
-	objID := types.ID{
-		Bucket: bucket,
-		Key:    objInfo.Key,
-	}
-
-	objID.Normalize()
-
+func ObjInfoToFile(objInfo s3.Object, bucket string) *types.File {
 	return &types.File{
-		ID:          objID,
-		Name:        GetFilenameFromKey(objID.Key),
+		ID:          objInfo.ID,
+		Name:        objInfo.ID.Name(),
 		Size:        objInfo.Size,
 		ContentType: objInfo.ContentType,
 		ETag:        objInfo.ETag,
diff --git a/internal/httpserver/httpServer.go b/internal/httpserver/httpServer.go
index 1f8e46e..a2c2370 100644
--- a/internal/httpserver/httpServer.go
+++ b/internal/httpserver/httpServer.go
@@ -4,9 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"mime"
 	"net/http"
-	"path/filepath"
 	"time"
 
 	"github.com/golang-jwt/jwt"
@@ -15,12 +13,12 @@ import (
 	"github.com/graphql-go/graphql"
 	"github.com/graphql-go/graphql/gqlerrors"
 	"github.com/graphql-go/handler"
-	"github.com/minio/minio-go/v7"
 	log "github.com/sirupsen/logrus"
 
 	helper "git.kapelle.org/niklas/s3browser/internal/helper"
 	"git.kapelle.org/niklas/s3browser/internal/loader"
+	"git.kapelle.org/niklas/s3browser/internal/s3"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
@@ -112,7 +110,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	s3Client := ctx.Value("s3Client").(*minio.Client)
+	s3Client := ctx.Value("s3Client").(s3.S3Service)
 
 	idString := r.URL.Query().Get("id")
 	id := types.ParseID(idString)
@@ -124,7 +122,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 	}
 
 	log.Debug("S3 'StatObject': ", id)
-	objInfo, err := s3Client.StatObject(context.Background(), id.Bucket, id.Key, minio.GetObjectOptions{})
+	objInfo, err := s3Client.StatObject(context.Background(), *id)
 
 	if err != nil {
 		log.Error("Failed to get object info: ", err)
@@ -139,7 +137,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 	}
 
 	log.Debug("S3 'GetObject': ", id)
-	obj, err := s3Client.GetObject(context.Background(), id.Bucket, id.Key, minio.GetObjectOptions{})
+	obj, err := s3Client.GetObject(context.Background(), *id)
 
 	if err != nil {
 		log.Error("Failed to get object: ", err)
@@ -148,7 +146,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
 	}
 
 	rw.Header().Set("Cache-Control", "must-revalidate")
-	rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filepath.Base((objInfo.Key))))
+	rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", id.Name()))
 	rw.Header().Set("Content-Type", objInfo.ContentType)
 	rw.Header().Set("ETag", objInfo.ETag)
 
@@ -166,7 +164,7 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	s3Client := ctx.Value("s3Client").(*minio.Client)
+	s3Client := ctx.Value("s3Client").(s3.S3Service)
 
 	idString := r.URL.Query().Get("id")
 
@@ -180,13 +178,11 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
 
 	id.Normalize()
 
-	contentType := r.Header.Get("Content-Type")
-	mimeType, _, _ := mime.ParseMediaType(contentType)
+	// contentType := r.Header.Get("Content-Type")
+	// mimeType, _, _ := mime.ParseMediaType(contentType)
 
 	log.Debug("S3 'PutObject': ", id)
-	_, err := s3Client.PutObject(context.Background(), id.Bucket, id.Key, r.Body, r.ContentLength, minio.PutObjectOptions{
-		ContentType: mimeType,
-	})
+	err := s3Client.PutObject(context.Background(), *id, r.Body, r.ContentLength) // TODO: put content type
 
 	if err != nil {
 		rw.WriteHeader(http.StatusInternalServerError)
diff --git a/internal/loader/batch.go b/internal/loader/batch.go
index d47cccf..2bca6cb 100644
--- a/internal/loader/batch.go
+++ b/internal/loader/batch.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"fmt"
 
-	types "git.kapelle.org/niklas/s3browser/internal/types"
+	"git.kapelle.org/niklas/s3browser/internal/s3"
+	"git.kapelle.org/niklas/s3browser/internal/types"
 	"github.com/graph-gophers/dataloader"
-	"github.com/minio/minio-go/v7"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -14,7 +14,7 @@ import (
 func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 	var results []*dataloader.Result
 
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
+	s3Client, ok := c.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
@@ -22,10 +22,18 @@ func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
 
 	for _, v := range k {
 		id := v.Raw().(types.ID)
-		results = append(results, &dataloader.Result{
-			Data:  listObjects(s3Client, id, false),
-			Error: nil,
-		})
+		objects, err := s3Client.ListObjects(c, id)
+		if err != nil {
+			results = append(results, &dataloader.Result{
+				Data:  nil,
+				Error: err,
+			})
+		} else {
+			results = append(results, &dataloader.Result{
+				Data:  objects,
+				Error: nil,
+			})
+		}
 	}
 
 	return results
@@ -35,7 +43,7 @@ func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
 func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 	var results []*dataloader.Result
 
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
+	s3Client, ok := c.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
@@ -43,35 +51,27 @@ func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataload
 
 	for _, v := range k {
 		id := v.Raw().(types.ID)
-		results = append(results, &dataloader.Result{
-			Data:  listObjects(s3Client, id, true),
-			Error: nil,
-		})
+		objects, err := s3Client.ListObjectsRecursive(c, id)
+		if err != nil {
+			results = append(results, &dataloader.Result{
+				Data:  nil,
+				Error: err,
+			})
+		} else {
+			results = append(results, &dataloader.Result{
+				Data:  objects,
+				Error: nil,
+			})
+		}
 	}
 
 	return results
 }
 
-// listObjects helper func for listObjectsBatch
-func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.ObjectInfo {
-	log.Debug("S3 'ListObjects': ", id)
-	objectCh := s3Client.ListObjects(context.Background(), id.Bucket, minio.ListObjectsOptions{
-		Prefix:    id.Key,
-		Recursive: recursive,
-	})
-
-	result := make([]minio.ObjectInfo, 0)
-	for obj := range objectCh {
-		result = append(result, obj)
-	}
-
-	return result
-}
-
 func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
 	var results []*dataloader.Result
 
-	s3Client, ok := c.Value("s3Client").(*minio.Client)
+	s3Client, ok := c.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
@@ -100,7 +100,7 @@ func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Resul
 	log.Debug("statObjectBatch")
 	var results []*dataloader.Result
 
-	s3Client, ok := ctx.Value("s3Client").(*minio.Client)
+	s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
 
 	if !ok {
 		return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
@@ -109,7 +109,7 @@ func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Resul
 	for _, v := range k {
 		id := v.Raw().(types.ID)
 		log.Debug("S3 'StatObject': ", id)
-		stat, err := s3Client.StatObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
+		stat, err := s3Client.StatObject(ctx, id)
 		results = append(results, &dataloader.Result{
 			Data:  stat,
 			Error: err,
diff --git a/internal/loader/loader.go b/internal/loader/loader.go
index 38aafc8..281af04 100644
--- a/internal/loader/loader.go
+++ b/internal/loader/loader.go
@@ -3,14 +3,12 @@ package loader
 import (
 	"context"
 	"fmt"
-	"path/filepath"
-	"strings"
 
 	"git.kapelle.org/niklas/s3browser/internal/cache"
 	"git.kapelle.org/niklas/s3browser/internal/helper"
+	"git.kapelle.org/niklas/s3browser/internal/s3"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 	"github.com/graph-gophers/dataloader"
-	"github.com/minio/minio-go/v7"
 )
 
 type Loader struct {
@@ -68,10 +66,8 @@ func (l *Loader) GetFiles(ctx context.Context, path types.ID) ([]types.File, err
 
 	var files []types.File
 
-	for _, obj := range objects.([]minio.ObjectInfo) {
-		if obj.Err != nil {
-			return nil, obj.Err
-		} else if !strings.HasSuffix(obj.Key, "/") {
+	for _, obj := range objects.([]s3.Object) {
+		if !obj.ID.IsDirectory() {
 			files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
 		}
 	}
@@ -86,13 +82,13 @@ func (l *Loader) GetFile(ctx context.Context, id types.ID) (*types.File, error)
 	if err != nil {
 		return nil, err
 	}
 
-	objInfo, ok := result.(minio.ObjectInfo)
+	objInfo, ok := result.(*s3.Object)
 	if !ok {
 		return nil, fmt.Errorf("Failed to stats object")
 	}
 
-	return helper.ObjInfoToFile(objInfo, id.Bucket), nil
+	return helper.ObjInfoToFile(*objInfo, id.Bucket), nil
 }
 
 func (l *Loader) GetDirs(ctx context.Context, path types.ID) ([]types.Directory, error) {
@@ -104,20 +100,10 @@ func (l *Loader) GetDirs(ctx context.Context, path types.ID) ([]types.Directory,
 	}
 
 	var dirs []types.Directory
-	for _, obj := range result.([]minio.ObjectInfo) {
-		if obj.Err != nil {
-			return nil, obj.Err
-		} else if strings.HasSuffix(obj.Key, "/") {
-			resultID := types.ID{
-				Bucket: path.Bucket,
-				Key:    obj.Key,
-			}
-
-			resultID.Normalize()
-
+	for _, obj := range result.([]s3.Object) {
+		if obj.ID.IsDirectory() {
 			dirs = append(dirs, types.Directory{
-				ID:   resultID,
-				Name: filepath.Base(obj.Key),
+				ID: obj.ID,
 			})
 		}
 	}
@@ -134,13 +120,7 @@ func (l *Loader) GetBuckets(ctx context.Context) ([]string, error) {
 		return nil, err
 	}
 
-	bucketsInfo := result.([]minio.BucketInfo)
-	var buckets []string
-	for _, i := range bucketsInfo {
-		buckets = append(buckets, i.Name)
-	}
-
-	return buckets, nil
+	return result.([]string), nil
 }
 
 func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.File, error) {
@@ -151,7 +131,7 @@ func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.
 		return nil, err
 	}
 
-	objects := result.([]minio.ObjectInfo)
+	objects := result.([]s3.Object)
 
 	var files []types.File
 	for _, obj := range objects {
diff --git a/internal/s3/minio.go b/internal/s3/minio.go
index 0b6fbba..96bf19d 100644
--- a/internal/s3/minio.go
+++ b/internal/s3/minio.go
@@ -28,44 +28,62 @@ func NewMinio(config types.AppConfig) (S3Service, error) {
 	}, nil
 }
 
-func (m *minioS3) ListBuckets(ctx context.Context) ([]Bucket, error) {
+func (m *minioS3) ListBuckets(ctx context.Context) ([]string, error) {
 	buckets, err := m.client.ListBuckets(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	var rtn []Bucket
+	var rtn []string
 
 	for _, v := range buckets {
-		rtn = append(rtn, Bucket(v.Name))
+		rtn = append(rtn, v.Name)
 	}
 
 	return rtn, nil
 }
 
-func (m *minioS3) ListObjects(ctx context.Context, id types.ID) ([]types.File, []types.Directory, error) {
-	var files []types.File
-	var dirs []types.Directory
+func (m *minioS3) ListObjects(ctx context.Context, id types.ID) ([]Object, error) {
+	var result []Object
 
-	for objInfo := range m.client.ListObjects(ctx, id.Bucket, minio.ListObjectsOptions{}) {
+	for objInfo := range m.client.ListObjects(ctx, id.Bucket, minio.ListObjectsOptions{
+		Prefix:    id.Key,
+		Recursive: false,
+	}) {
 		objId := types.ID{
 			Bucket: id.Bucket,
 			Key:    objInfo.Key,
 		}
-		objId.Normalize()
-
-		if objId.IsDirectory() {
-			dirs = append(dirs, *obkInfoToDir(objInfo, objId))
-		} else {
-			files = append(files, *objInfoToFile(objInfo, objId))
-		}
+		result = append(result, Object{
+			ID:   objId,
+			Size: objInfo.Size,
+		})
 	}
 
-	return files, dirs, nil
-
+	return result, nil
 }
 
-func (m *minioS3) GetObject(ctx context.Context, id types.ID) (Object, error) {
+func (m *minioS3) ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error) {
+	var result []Object
+
+	for objInfo := range m.client.ListObjects(ctx, id.Bucket, minio.ListObjectsOptions{
+		Prefix:    id.Key,
+		Recursive: true,
+	}) {
+		objId := types.ID{
+			Bucket: id.Bucket,
+			Key:    objInfo.Key,
+		}
+		result = append(result, Object{
+			ID:   objId,
+			Size: objInfo.Size,
+		})
+	}
+
+	return result, nil
+}
+
+func (m *minioS3) GetObject(ctx context.Context, id types.ID) (ObjectReader, error) {
 	object, err := m.client.GetObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
 
 	if err != nil {
@@ -92,35 +110,22 @@ func (m *minioS3) CopyObject(ctx context.Context, src types.ID, dest types.ID) e
 	return err
 }
 
-func (m *minioS3) StatObject(ctx context.Context, id types.ID) (*types.File, error) {
+func (m *minioS3) StatObject(ctx context.Context, id types.ID) (*Object, error) {
 	info, err := m.client.StatObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
 
 	if err != nil {
 		return nil, err
 	}
 
-	return objInfoToFile(info, id), nil
-
+	return &Object{
+		ID:           id,
+		Size:         info.Size,
+		LastModified: info.LastModified,
+		ContentType:  info.ContentType,
+		ETag:         info.ETag,
+	}, nil
 }
 
 func (m *minioS3) RemoveObject(ctx context.Context, id types.ID) error {
 	return m.client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
 }
-
-func objInfoToFile(objInfo minio.ObjectInfo, id types.ID) *types.File {
-	return &types.File{
-		ID:           id,
-		Name:         id.Name(),
-		Size:         objInfo.Size,
-		ContentType:  objInfo.ContentType,
-		ETag:         objInfo.ETag,
-		LastModified: objInfo.LastModified,
-	}
-}
-
-func obkInfoToDir(objInfo minio.ObjectInfo, id types.ID) *types.Directory {
-	return &types.Directory{
-		ID:   id,
-		Name: id.Name(),
-	}
-}
diff --git a/internal/s3/mock.go b/internal/s3/mock.go
index f9d79dd..4dc911b 100644
--- a/internal/s3/mock.go
+++ b/internal/s3/mock.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"strings"
 	"time"
 
 	"git.kapelle.org/niklas/s3browser/internal/types"
@@ -39,35 +40,41 @@ func NewMockS3(buckets []string) (S3Service, error) {
 	}, nil
 }
 
-func (m *mockS3) ListBuckets(ctx context.Context) ([]Bucket, error) {
-	var rtn []Bucket
-	for _, v := range m.buckets {
-		rtn = append(rtn, Bucket(v))
-	}
-
-	return rtn, nil
+func (m *mockS3) ListBuckets(ctx context.Context) ([]string, error) {
+	return m.buckets, nil
 }
 
-func (m *mockS3) ListObjects(ctx context.Context, id types.ID) ([]types.File, []types.Directory, error) {
-	var files []types.File
-	var dirs []types.Directory
+func (m *mockS3) ListObjects(ctx context.Context, id types.ID) ([]Object, error) {
+	var results []Object
 
 	for k, v := range m.objects {
 		if k.Bucket == id.Bucket {
 			if k.Parent().Key == id.Key {
-				if k.IsDirectory() {
-					dirs = append(dirs, *mockObjToDir(v, k))
-				} else {
-					files = append(files, *mockObjToFile(v, k))
+				results = append(results, *mockObjToObject(v, k))
+			}
+		}
+	}
+
+	return results, nil
+}
+
+func (m *mockS3) ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error) {
+	var results []Object
+
+	for k, v := range m.objects {
+		if k.Bucket == id.Bucket {
+			if strings.HasPrefix(k.Key, id.Key) {
+				if k.Parent().Key == id.Key {
+					results = append(results, *mockObjToObject(v, k))
 				}
 			}
 		}
 	}
 
-	return files, dirs, nil
+	return results, nil
 }
 
-func (m *mockS3) GetObject(ctx context.Context, id types.ID) (Object, error) {
+func (m *mockS3) GetObject(ctx context.Context, id types.ID) (ObjectReader, error) {
 	mockObj, exist := m.objects[id]
 
 	if !exist {
@@ -107,14 +114,14 @@ func (m *mockS3) CopyObject(ctx context.Context, src types.ID, dest types.ID) er
 	return nil
 }
 
-func (m *mockS3) StatObject(ctx context.Context, id types.ID) (*types.File, error) {
+func (m *mockS3) StatObject(ctx context.Context, id types.ID) (*Object, error) {
 	mockObj, exist := m.objects[id]
 
 	if !exist {
 		return nil, fmt.Errorf("Object not found")
 	}
 
-	return mockObjToFile(mockObj, id), nil
+	return mockObjToObject(mockObj, id), nil
 }
 
 func (m *mockS3) RemoveObject(ctx context.Context, id types.ID) error {
@@ -122,20 +129,12 @@ func (m *mockS3) RemoveObject(ctx context.Context, id types.ID) error {
 	return nil
 }
 
-func mockObjToFile(mockObj mockObject, id types.ID) *types.File {
-	return &types.File{
+func mockObjToObject(mockObj mockObject, id types.ID) *Object {
+	return &Object{
 		ID:           id,
-		Name:         id.Name(),
 		Size:         int64(len(mockObj.content)),
 		ContentType:  mockObj.contentType,
 		LastModified: mockObj.lastMod,
 		ETag:         fmt.Sprintf("%x", md5.Sum(mockObj.content)),
 	}
 }
-
-func mockObjToDir(mockObj mockObject, id types.ID) *types.Directory {
-	return &types.Directory{
-		ID:   id,
-		Name: id.Name(),
-	}
-}
diff --git a/internal/s3/s3.go b/internal/s3/s3.go
index 7d60287..8dd463d 100644
--- a/internal/s3/s3.go
+++ b/internal/s3/s3.go
@@ -3,27 +3,35 @@ package s3
 import (
 	"context"
 	"io"
+	"time"
 
 	"git.kapelle.org/niklas/s3browser/internal/types"
 )
 
-type Bucket string
-
-type Object interface {
+type ObjectReader interface {
 	io.Reader
 	io.Seeker
 	io.ReaderAt
 	io.Closer
 }
 
-type S3Service interface {
-	ListBuckets(ctx context.Context) ([]Bucket, error)
+type Object struct {
+	ID           types.ID
+	Size         int64
+	LastModified time.Time
+	ContentType  string
+	ETag         string
+}
 
-	GetObject(ctx context.Context, id types.ID) (Object, error)
+type S3Service interface {
+	ListBuckets(ctx context.Context) ([]string, error)
+
+	GetObject(ctx context.Context, id types.ID) (ObjectReader, error)
 	PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error
-	ListObjects(ctx context.Context, id types.ID) ([]types.File, []types.Directory, error)
+	ListObjects(ctx context.Context, id types.ID) ([]Object, error)
+	ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error)
 	CopyObject(ctx context.Context, src types.ID, dest types.ID) error
-	StatObject(ctx context.Context, id types.ID) (*types.File, error)
+	StatObject(ctx context.Context, id types.ID) (*Object, error)
 	RemoveObject(ctx context.Context, id types.ID) error
 }
diff --git a/internal/s3Broswer.go b/internal/s3Broswer.go
index adab13c..b9d9c06 100644
--- a/internal/s3Broswer.go
+++ b/internal/s3Broswer.go
@@ -3,25 +3,16 @@ package s3browser
 import (
 	"context"
 
-	"github.com/minio/minio-go/v7"
-	"github.com/minio/minio-go/v7/pkg/credentials"
 	log "github.com/sirupsen/logrus"
 
 	"git.kapelle.org/niklas/s3browser/internal/db"
 	gql "git.kapelle.org/niklas/s3browser/internal/gql"
 	httpserver "git.kapelle.org/niklas/s3browser/internal/httpserver"
 	"git.kapelle.org/niklas/s3browser/internal/loader"
+	"git.kapelle.org/niklas/s3browser/internal/s3"
 	types "git.kapelle.org/niklas/s3browser/internal/types"
 )
 
-// setupS3Client connect the s3Client
-func setupS3Client(config types.AppConfig) (*minio.Client, error) {
-	return minio.New(config.S3Endoint, &minio.Options{
-		Creds:  credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
-		Secure: config.S3SSL,
-	})
-}
-
 // Start starts the app
 func Start(config types.AppConfig) {
@@ -30,7 +21,7 @@ func Start(config types.AppConfig) {
 	}
 
 	log.Info("Starting")
-	s3Client, err := setupS3Client(config)
+	s3Client, err := s3.NewMinio(config)
 
 	if err != nil {
 		log.Error("Failed to setup s3 client: ", err.Error())