Add list-buckets query
This commit is contained in:
parent
4344bf841c
commit
d2b0364445
@ -227,6 +227,40 @@ func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
|
|||||||
return results
|
return results
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
||||||
|
log.Debug("listBucketsBatch")
|
||||||
|
var results []*dataloader.Result
|
||||||
|
|
||||||
|
s3Client, ok := c.Value("s3Client").(*minio.Client)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
||||||
|
}
|
||||||
|
|
||||||
|
buckets, err := s3Client.ListBuckets(c)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return handleLoaderError(k, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var bucketStrings []string
|
||||||
|
|
||||||
|
for _, v := range buckets {
|
||||||
|
bucketStrings = append(bucketStrings, v.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := &dataloader.Result{
|
||||||
|
Data: bucketStrings,
|
||||||
|
Error: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
for range k {
|
||||||
|
results = append(results, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
// createDataloader create all dataloaders and return a map of them plus a cache for objects
|
// createDataloader create all dataloaders and return a map of them plus a cache for objects
|
||||||
func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
|
func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
|
||||||
loaderMap := make(map[string]*dataloader.Loader, 0)
|
loaderMap := make(map[string]*dataloader.Loader, 0)
|
||||||
@ -256,5 +290,10 @@ func createDataloader(config types.AppConfig) map[string]*dataloader.Loader {
|
|||||||
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
loaderMap["listBuckets"] = dataloader.NewBatchedLoader(
|
||||||
|
listBucketsBatch,
|
||||||
|
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
||||||
|
)
|
||||||
|
|
||||||
return loaderMap
|
return loaderMap
|
||||||
}
|
}
|
||||||
|
@ -100,6 +100,21 @@ func GraphqlSchema() (graphql.Schema, error) {
|
|||||||
return auth, nil
|
return auth, nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"buckets": &graphql.Field{
|
||||||
|
Name: "buckets",
|
||||||
|
Type: graphql.NewNonNull(graphql.NewList(graphql.String)),
|
||||||
|
Description: "List available buckets",
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
|
||||||
|
// The only reason we use a dataloader with a empty key is that we want to cache the result
|
||||||
|
thunk := loader["listBuckets"].Load(p.Context, dataloader.StringKey(""))
|
||||||
|
return thunk()
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
mutationFields := graphql.Fields{
|
mutationFields := graphql.Fields{
|
||||||
|
Loading…
Reference in New Issue
Block a user