cleaned up debug log messages

This commit is contained in:
Djeeberjr 2021-11-04 19:41:50 +01:00
parent 13f3217a38
commit 9be7b6c18f
6 changed files with 14 additions and 18 deletions

View File

@ -11,6 +11,7 @@ import (
helper "git.kapelle.org/niklas/s3browser/internal/helper"
"git.kapelle.org/niklas/s3browser/internal/loader"
types "git.kapelle.org/niklas/s3browser/internal/types"
log "github.com/sirupsen/logrus"
)
func deleteMutation(ctx context.Context, id types.ID) error {
@ -20,6 +21,7 @@ func deleteMutation(ctx context.Context, id types.ID) error {
return fmt.Errorf("Failed to get s3Client from context")
}
log.Debug("S3 'RemoveObject': ", id)
// TODO: it is possible to remove multiple objects with a single call.
// Is it better to batch this?
err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
@ -47,6 +49,7 @@ func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error)
dest.Key += helper.GetFilenameFromKey(src.Key)
}
log.Debug("S3 'CopyObject': ", src, "-->", dest)
info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
Bucket: dest.Bucket,
Object: dest.Key,
@ -103,6 +106,7 @@ func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, er
}
newID.Normalize()
log.Debug("S3 'CopyObject': ", src, "-->", dest)
_, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
Bucket: dest.Bucket,
Object: newID.Key,
@ -139,6 +143,7 @@ func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, err
dest.Key += helper.GetFilenameFromKey(src.Key)
}
log.Debug("S3 'CopyObject': ", src, "-->", dest)
// There is no (spoon) move. Only copy and delete
info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
Bucket: dest.Bucket,
@ -180,6 +185,7 @@ func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error)
return nil, fmt.Errorf("Failed to get s3Client from context")
}
log.Debug("S3 'PutObject': ", id)
info, err := s3Client.PutObject(ctx, id.Bucket, id.Key, strings.NewReader(""), 0, minio.PutObjectOptions{
ContentType: "application/x-directory",
})
@ -241,6 +247,7 @@ func deleteDirectory(ctx context.Context, id types.ID) error {
// This is at least the behavior when working with minio as s3 backend
// TODO: check if this is normal behavior when working with s3
if len(files) == 0 {
log.Debug("S3 'RemoveObject': ", id)
err := s3Client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
if err != nil {
return err

View File

@ -17,7 +17,6 @@ func GetFilenameFromKey(id string) string {
}
func DeleteMultiple(ctx context.Context, s3Client minio.Client, bucket string, keys []string) error {
log.Debug("Remove multiple objects")
objectsCh := make(chan minio.ObjectInfo, 1)
go func() {
@ -29,6 +28,7 @@ func DeleteMultiple(ctx context.Context, s3Client minio.Client, bucket string, k
}
}()
log.Debug("S3 'RemoveObject': ", keys)
for err := range s3Client.RemoveObjects(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) {
log.Error("Failed to delete object ", err.ObjectName, " because: ", err.Err.Error())
// TODO: handle error

View File

@ -123,7 +123,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
return
}
log.Debug("S3 call 'StatObject': ", id)
log.Debug("S3 'StatObject': ", id)
objInfo, err := s3Client.StatObject(context.Background(), id.Bucket, id.Key, minio.GetObjectOptions{})
if err != nil {
@ -138,7 +138,7 @@ func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
return
}
log.Debug("S3 call 'GetObject': ", id)
log.Debug("S3 'GetObject': ", id)
obj, err := s3Client.GetObject(context.Background(), id.Bucket, id.Key, minio.GetObjectOptions{})
if err != nil {
@ -180,12 +180,10 @@ func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request)
id.Normalize()
log.Debug("Upload file: ", id)
contentType := r.Header.Get("Content-Type")
mimeType, _, _ := mime.ParseMediaType(contentType)
log.Debug("S3 call 'PutObject': ", id)
log.Debug("S3 'PutObject': ", id)
_, err := s3Client.PutObject(context.Background(), id.Bucket, id.Key, r.Body, r.ContentLength, minio.PutObjectOptions{
ContentType: mimeType,
})

View File

@ -12,7 +12,6 @@ import (
// listObjectsBatch batch func for calling s3.ListObjects()
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("listObjectsBatch: ", k.Keys())
var results []*dataloader.Result
s3Client, ok := c.Value("s3Client").(*minio.Client)
@ -34,7 +33,6 @@ func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true
func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("listObjectsRecursiveBatch: ", k.Keys())
var results []*dataloader.Result
s3Client, ok := c.Value("s3Client").(*minio.Client)
@ -56,7 +54,7 @@ func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataload
// listObjects helper func for listObjectsBatch
func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.ObjectInfo {
log.Debug("S3 call 'ListObjects': ", id)
log.Debug("S3 'ListObjects': ", id)
objectCh := s3Client.ListObjects(context.Background(), id.Bucket, minio.ListObjectsOptions{
Prefix: id.Key,
Recursive: recursive,
@ -71,7 +69,6 @@ func listObjects(s3Client *minio.Client, id types.ID, recursive bool) []minio.Ob
}
func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
log.Debug("listBucketsBatch")
var results []*dataloader.Result
s3Client, ok := c.Value("s3Client").(*minio.Client)
@ -80,6 +77,7 @@ func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
}
log.Debug("S3 'ListBuckets'")
buckets, err := s3Client.ListBuckets(c)
if err != nil {
@ -110,6 +108,7 @@ func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Resul
for _, v := range k {
id := v.Raw().(types.ID)
log.Debug("S3 'StatObject': ", id)
stat, err := s3Client.StatObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
results = append(results, &dataloader.Result{
Data: stat,

View File

@ -10,7 +10,6 @@ import (
types "git.kapelle.org/niklas/s3browser/internal/types"
"github.com/graph-gophers/dataloader"
"github.com/minio/minio-go/v7"
log "github.com/sirupsen/logrus"
)
type Loader struct {
@ -145,7 +144,6 @@ func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.
}
func (l *Loader) InvalidateCacheForFile(ctx context.Context, id types.ID) {
log.Debug("Clear cache for file: ", id.String())
parent := id.Parent()
l.statObjectLoader.Clear(ctx, id)
@ -153,7 +151,6 @@ func (l *Loader) InvalidateCacheForFile(ctx context.Context, id types.ID) {
}
func (l *Loader) InvalidateCacheForDir(ctx context.Context, path types.ID) {
log.Debug("Clear cache for dir: ", path.String())
parent := helper.GetParentDir(path)
l.listObjectsLoader.Clear(ctx, path).Clear(ctx, parent)

View File

@ -47,24 +47,20 @@ func Start(config types.AppConfig) {
}
log.Info("Starting")
log.Debug("Setting up s3 client")
s3Client, err := setupS3Client(config)
if err != nil {
log.Error("Failed to setup s3 client: ", err.Error())
return
}
log.Info("s3 client connected")
dbStore, err := db.NewDB("mysql", "s3Browser:hunter2@/s3Browser")
if err != nil {
log.Error("Failed to connect DB: ", err.Error())
}
log.Debug("Creating dataloader")
loader := loader.NewLoader(config)
log.Debug("Generating graphq schema")
gql.GraphqlTypes()
schema, err := gql.GraphqlSchema()
@ -77,7 +73,6 @@ func Start(config types.AppConfig) {
resolveContext = context.WithValue(resolveContext, "loader", loader)
resolveContext = context.WithValue(resolveContext, "dbStore", dbStore)
log.Debug("Starting HTTP server")
err = httpserver.InitHttp(resolveContext, schema, config.Address)
if err != nil {