Compare commits
64 Commits
926701125d
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 125ce9c955 | |||
| 131f19deed | |||
| 2ac552e840 | |||
| 48c50a5b7e | |||
| 48f770f703 | |||
| 2ae14cdfd4 | |||
| a10593a318 | |||
| 0971301562 | |||
| 47befe6db1 | |||
| 979ebee677 | |||
| 8d85d645d6 | |||
| 60817c2249 | |||
| bead881af2 | |||
| 80302b62f4 | |||
| 2353a0bf53 | |||
| c5ab0156fd | |||
| 686630b2df | |||
| 8725def3a1 | |||
| 9be7b6c18f | |||
| 13f3217a38 | |||
| aa82cd938c | |||
| aac1ca8891 | |||
| 2268b518b1 | |||
| 8c6f59a6b4 | |||
| 439e5473b6 | |||
| d2b0364445 | |||
| 4344bf841c | |||
| f2a8e0197d | |||
| 74037dfab5 | |||
| 91e217e472 | |||
| f42d12ffd9 | |||
| 87dc249371 | |||
| a060ed0fe4 | |||
| a35ce7c24b | |||
| 9cfc4da5f3 | |||
| f59f3183f2 | |||
| ed932e3c92 | |||
| a3e66cd351 | |||
| 1337f6ba63 | |||
| 024948b8ff | |||
| 20f95b8ea4 | |||
| 9f280caafc | |||
| af488ab5fe | |||
| 28ff9006df | |||
| 426531e634 | |||
| 2b25003344 | |||
| 4c4ef95ee7 | |||
| 0daa71dc72 | |||
| 5cca5cdd98 | |||
| dff54e2650 | |||
| 9616450cff | |||
| 63b93f5895 | |||
| 8296b21883 | |||
| 1d5dbfaa1c | |||
| 43efc9017c | |||
| c84f94372e | |||
| 65739c9307 | |||
| c20748aef1 | |||
| 6e323a0e4f | |||
| 3c8ece72cc | |||
| 72901e4685 | |||
| 33237b6564 | |||
| 447a0647b2 | |||
| 7253a9195d |
8
.env
Normal file
8
.env
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# Env file used for developing
|
||||||
|
S3_ENDPOINT=localhost:9000
|
||||||
|
S3_ACCESS_KEY=testo
|
||||||
|
S3_SECRET_KEY=testotesto
|
||||||
|
S3_DISABLE_SSL=true
|
||||||
|
ADDRESS=:8080
|
||||||
|
VERBOSE=true
|
||||||
|
DB_CONNECTION=s3Browser:hunter2@/s3Browser
|
||||||
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
/internal/static
|
||||||
674
LICENSE
Normal file
674
LICENSE
Normal file
@@ -0,0 +1,674 @@
|
|||||||
|
GNU GENERAL PUBLIC LICENSE
|
||||||
|
Version 3, 29 June 2007
|
||||||
|
|
||||||
|
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
Preamble
|
||||||
|
|
||||||
|
The GNU General Public License is a free, copyleft license for
|
||||||
|
software and other kinds of works.
|
||||||
|
|
||||||
|
The licenses for most software and other practical works are designed
|
||||||
|
to take away your freedom to share and change the works. By contrast,
|
||||||
|
the GNU General Public License is intended to guarantee your freedom to
|
||||||
|
share and change all versions of a program--to make sure it remains free
|
||||||
|
software for all its users. We, the Free Software Foundation, use the
|
||||||
|
GNU General Public License for most of our software; it applies also to
|
||||||
|
any other work released this way by its authors. You can apply it to
|
||||||
|
your programs, too.
|
||||||
|
|
||||||
|
When we speak of free software, we are referring to freedom, not
|
||||||
|
price. Our General Public Licenses are designed to make sure that you
|
||||||
|
have the freedom to distribute copies of free software (and charge for
|
||||||
|
them if you wish), that you receive source code or can get it if you
|
||||||
|
want it, that you can change the software or use pieces of it in new
|
||||||
|
free programs, and that you know you can do these things.
|
||||||
|
|
||||||
|
To protect your rights, we need to prevent others from denying you
|
||||||
|
these rights or asking you to surrender the rights. Therefore, you have
|
||||||
|
certain responsibilities if you distribute copies of the software, or if
|
||||||
|
you modify it: responsibilities to respect the freedom of others.
|
||||||
|
|
||||||
|
For example, if you distribute copies of such a program, whether
|
||||||
|
gratis or for a fee, you must pass on to the recipients the same
|
||||||
|
freedoms that you received. You must make sure that they, too, receive
|
||||||
|
or can get the source code. And you must show them these terms so they
|
||||||
|
know their rights.
|
||||||
|
|
||||||
|
Developers that use the GNU GPL protect your rights with two steps:
|
||||||
|
(1) assert copyright on the software, and (2) offer you this License
|
||||||
|
giving you legal permission to copy, distribute and/or modify it.
|
||||||
|
|
||||||
|
For the developers' and authors' protection, the GPL clearly explains
|
||||||
|
that there is no warranty for this free software. For both users' and
|
||||||
|
authors' sake, the GPL requires that modified versions be marked as
|
||||||
|
changed, so that their problems will not be attributed erroneously to
|
||||||
|
authors of previous versions.
|
||||||
|
|
||||||
|
Some devices are designed to deny users access to install or run
|
||||||
|
modified versions of the software inside them, although the manufacturer
|
||||||
|
can do so. This is fundamentally incompatible with the aim of
|
||||||
|
protecting users' freedom to change the software. The systematic
|
||||||
|
pattern of such abuse occurs in the area of products for individuals to
|
||||||
|
use, which is precisely where it is most unacceptable. Therefore, we
|
||||||
|
have designed this version of the GPL to prohibit the practice for those
|
||||||
|
products. If such problems arise substantially in other domains, we
|
||||||
|
stand ready to extend this provision to those domains in future versions
|
||||||
|
of the GPL, as needed to protect the freedom of users.
|
||||||
|
|
||||||
|
Finally, every program is threatened constantly by software patents.
|
||||||
|
States should not allow patents to restrict development and use of
|
||||||
|
software on general-purpose computers, but in those that do, we wish to
|
||||||
|
avoid the special danger that patents applied to a free program could
|
||||||
|
make it effectively proprietary. To prevent this, the GPL assures that
|
||||||
|
patents cannot be used to render the program non-free.
|
||||||
|
|
||||||
|
The precise terms and conditions for copying, distribution and
|
||||||
|
modification follow.
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
0. Definitions.
|
||||||
|
|
||||||
|
"This License" refers to version 3 of the GNU General Public License.
|
||||||
|
|
||||||
|
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||||
|
works, such as semiconductor masks.
|
||||||
|
|
||||||
|
"The Program" refers to any copyrightable work licensed under this
|
||||||
|
License. Each licensee is addressed as "you". "Licensees" and
|
||||||
|
"recipients" may be individuals or organizations.
|
||||||
|
|
||||||
|
To "modify" a work means to copy from or adapt all or part of the work
|
||||||
|
in a fashion requiring copyright permission, other than the making of an
|
||||||
|
exact copy. The resulting work is called a "modified version" of the
|
||||||
|
earlier work or a work "based on" the earlier work.
|
||||||
|
|
||||||
|
A "covered work" means either the unmodified Program or a work based
|
||||||
|
on the Program.
|
||||||
|
|
||||||
|
To "propagate" a work means to do anything with it that, without
|
||||||
|
permission, would make you directly or secondarily liable for
|
||||||
|
infringement under applicable copyright law, except executing it on a
|
||||||
|
computer or modifying a private copy. Propagation includes copying,
|
||||||
|
distribution (with or without modification), making available to the
|
||||||
|
public, and in some countries other activities as well.
|
||||||
|
|
||||||
|
To "convey" a work means any kind of propagation that enables other
|
||||||
|
parties to make or receive copies. Mere interaction with a user through
|
||||||
|
a computer network, with no transfer of a copy, is not conveying.
|
||||||
|
|
||||||
|
An interactive user interface displays "Appropriate Legal Notices"
|
||||||
|
to the extent that it includes a convenient and prominently visible
|
||||||
|
feature that (1) displays an appropriate copyright notice, and (2)
|
||||||
|
tells the user that there is no warranty for the work (except to the
|
||||||
|
extent that warranties are provided), that licensees may convey the
|
||||||
|
work under this License, and how to view a copy of this License. If
|
||||||
|
the interface presents a list of user commands or options, such as a
|
||||||
|
menu, a prominent item in the list meets this criterion.
|
||||||
|
|
||||||
|
1. Source Code.
|
||||||
|
|
||||||
|
The "source code" for a work means the preferred form of the work
|
||||||
|
for making modifications to it. "Object code" means any non-source
|
||||||
|
form of a work.
|
||||||
|
|
||||||
|
A "Standard Interface" means an interface that either is an official
|
||||||
|
standard defined by a recognized standards body, or, in the case of
|
||||||
|
interfaces specified for a particular programming language, one that
|
||||||
|
is widely used among developers working in that language.
|
||||||
|
|
||||||
|
The "System Libraries" of an executable work include anything, other
|
||||||
|
than the work as a whole, that (a) is included in the normal form of
|
||||||
|
packaging a Major Component, but which is not part of that Major
|
||||||
|
Component, and (b) serves only to enable use of the work with that
|
||||||
|
Major Component, or to implement a Standard Interface for which an
|
||||||
|
implementation is available to the public in source code form. A
|
||||||
|
"Major Component", in this context, means a major essential component
|
||||||
|
(kernel, window system, and so on) of the specific operating system
|
||||||
|
(if any) on which the executable work runs, or a compiler used to
|
||||||
|
produce the work, or an object code interpreter used to run it.
|
||||||
|
|
||||||
|
The "Corresponding Source" for a work in object code form means all
|
||||||
|
the source code needed to generate, install, and (for an executable
|
||||||
|
work) run the object code and to modify the work, including scripts to
|
||||||
|
control those activities. However, it does not include the work's
|
||||||
|
System Libraries, or general-purpose tools or generally available free
|
||||||
|
programs which are used unmodified in performing those activities but
|
||||||
|
which are not part of the work. For example, Corresponding Source
|
||||||
|
includes interface definition files associated with source files for
|
||||||
|
the work, and the source code for shared libraries and dynamically
|
||||||
|
linked subprograms that the work is specifically designed to require,
|
||||||
|
such as by intimate data communication or control flow between those
|
||||||
|
subprograms and other parts of the work.
|
||||||
|
|
||||||
|
The Corresponding Source need not include anything that users
|
||||||
|
can regenerate automatically from other parts of the Corresponding
|
||||||
|
Source.
|
||||||
|
|
||||||
|
The Corresponding Source for a work in source code form is that
|
||||||
|
same work.
|
||||||
|
|
||||||
|
2. Basic Permissions.
|
||||||
|
|
||||||
|
All rights granted under this License are granted for the term of
|
||||||
|
copyright on the Program, and are irrevocable provided the stated
|
||||||
|
conditions are met. This License explicitly affirms your unlimited
|
||||||
|
permission to run the unmodified Program. The output from running a
|
||||||
|
covered work is covered by this License only if the output, given its
|
||||||
|
content, constitutes a covered work. This License acknowledges your
|
||||||
|
rights of fair use or other equivalent, as provided by copyright law.
|
||||||
|
|
||||||
|
You may make, run and propagate covered works that you do not
|
||||||
|
convey, without conditions so long as your license otherwise remains
|
||||||
|
in force. You may convey covered works to others for the sole purpose
|
||||||
|
of having them make modifications exclusively for you, or provide you
|
||||||
|
with facilities for running those works, provided that you comply with
|
||||||
|
the terms of this License in conveying all material for which you do
|
||||||
|
not control copyright. Those thus making or running the covered works
|
||||||
|
for you must do so exclusively on your behalf, under your direction
|
||||||
|
and control, on terms that prohibit them from making any copies of
|
||||||
|
your copyrighted material outside their relationship with you.
|
||||||
|
|
||||||
|
Conveying under any other circumstances is permitted solely under
|
||||||
|
the conditions stated below. Sublicensing is not allowed; section 10
|
||||||
|
makes it unnecessary.
|
||||||
|
|
||||||
|
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||||
|
|
||||||
|
No covered work shall be deemed part of an effective technological
|
||||||
|
measure under any applicable law fulfilling obligations under article
|
||||||
|
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||||
|
similar laws prohibiting or restricting circumvention of such
|
||||||
|
measures.
|
||||||
|
|
||||||
|
When you convey a covered work, you waive any legal power to forbid
|
||||||
|
circumvention of technological measures to the extent such circumvention
|
||||||
|
is effected by exercising rights under this License with respect to
|
||||||
|
the covered work, and you disclaim any intention to limit operation or
|
||||||
|
modification of the work as a means of enforcing, against the work's
|
||||||
|
users, your or third parties' legal rights to forbid circumvention of
|
||||||
|
technological measures.
|
||||||
|
|
||||||
|
4. Conveying Verbatim Copies.
|
||||||
|
|
||||||
|
You may convey verbatim copies of the Program's source code as you
|
||||||
|
receive it, in any medium, provided that you conspicuously and
|
||||||
|
appropriately publish on each copy an appropriate copyright notice;
|
||||||
|
keep intact all notices stating that this License and any
|
||||||
|
non-permissive terms added in accord with section 7 apply to the code;
|
||||||
|
keep intact all notices of the absence of any warranty; and give all
|
||||||
|
recipients a copy of this License along with the Program.
|
||||||
|
|
||||||
|
You may charge any price or no price for each copy that you convey,
|
||||||
|
and you may offer support or warranty protection for a fee.
|
||||||
|
|
||||||
|
5. Conveying Modified Source Versions.
|
||||||
|
|
||||||
|
You may convey a work based on the Program, or the modifications to
|
||||||
|
produce it from the Program, in the form of source code under the
|
||||||
|
terms of section 4, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The work must carry prominent notices stating that you modified
|
||||||
|
it, and giving a relevant date.
|
||||||
|
|
||||||
|
b) The work must carry prominent notices stating that it is
|
||||||
|
released under this License and any conditions added under section
|
||||||
|
7. This requirement modifies the requirement in section 4 to
|
||||||
|
"keep intact all notices".
|
||||||
|
|
||||||
|
c) You must license the entire work, as a whole, under this
|
||||||
|
License to anyone who comes into possession of a copy. This
|
||||||
|
License will therefore apply, along with any applicable section 7
|
||||||
|
additional terms, to the whole of the work, and all its parts,
|
||||||
|
regardless of how they are packaged. This License gives no
|
||||||
|
permission to license the work in any other way, but it does not
|
||||||
|
invalidate such permission if you have separately received it.
|
||||||
|
|
||||||
|
d) If the work has interactive user interfaces, each must display
|
||||||
|
Appropriate Legal Notices; however, if the Program has interactive
|
||||||
|
interfaces that do not display Appropriate Legal Notices, your
|
||||||
|
work need not make them do so.
|
||||||
|
|
||||||
|
A compilation of a covered work with other separate and independent
|
||||||
|
works, which are not by their nature extensions of the covered work,
|
||||||
|
and which are not combined with it such as to form a larger program,
|
||||||
|
in or on a volume of a storage or distribution medium, is called an
|
||||||
|
"aggregate" if the compilation and its resulting copyright are not
|
||||||
|
used to limit the access or legal rights of the compilation's users
|
||||||
|
beyond what the individual works permit. Inclusion of a covered work
|
||||||
|
in an aggregate does not cause this License to apply to the other
|
||||||
|
parts of the aggregate.
|
||||||
|
|
||||||
|
6. Conveying Non-Source Forms.
|
||||||
|
|
||||||
|
You may convey a covered work in object code form under the terms
|
||||||
|
of sections 4 and 5, provided that you also convey the
|
||||||
|
machine-readable Corresponding Source under the terms of this License,
|
||||||
|
in one of these ways:
|
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by the
|
||||||
|
Corresponding Source fixed on a durable physical medium
|
||||||
|
customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by a
|
||||||
|
written offer, valid for at least three years and valid for as
|
||||||
|
long as you offer spare parts or customer support for that product
|
||||||
|
model, to give anyone who possesses the object code either (1) a
|
||||||
|
copy of the Corresponding Source for all the software in the
|
||||||
|
product that is covered by this License, on a durable physical
|
||||||
|
medium customarily used for software interchange, for a price no
|
||||||
|
more than your reasonable cost of physically performing this
|
||||||
|
conveying of source, or (2) access to copy the
|
||||||
|
Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the
|
||||||
|
written offer to provide the Corresponding Source. This
|
||||||
|
alternative is allowed only occasionally and noncommercially, and
|
||||||
|
only if you received the object code with such an offer, in accord
|
||||||
|
with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated
|
||||||
|
place (gratis or for a charge), and offer equivalent access to the
|
||||||
|
Corresponding Source in the same way through the same place at no
|
||||||
|
further charge. You need not require recipients to copy the
|
||||||
|
Corresponding Source along with the object code. If the place to
|
||||||
|
copy the object code is a network server, the Corresponding Source
|
||||||
|
may be on a different server (operated by you or a third party)
|
||||||
|
that supports equivalent copying facilities, provided you maintain
|
||||||
|
clear directions next to the object code saying where to find the
|
||||||
|
Corresponding Source. Regardless of what server hosts the
|
||||||
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
|
||||||
|
"Additional permissions" are terms that supplement the terms of this
|
||||||
|
License by making exceptions from one or more of its conditions.
|
||||||
|
Additional permissions that are applicable to the entire Program shall
|
||||||
|
be treated as though they were included in this License, to the extent
|
||||||
|
that they are valid under applicable law. If additional permissions
|
||||||
|
apply only to part of the Program, that part may be used separately
|
||||||
|
under those permissions, but the entire Program remains governed by
|
||||||
|
this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option
|
||||||
|
remove any additional permissions from that copy, or from any part of
|
||||||
|
it. (Additional permissions may be written to require their own
|
||||||
|
removal in certain cases when you modify the work.) You may place
|
||||||
|
additional permissions on material, added by you to a covered work,
|
||||||
|
for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you
|
||||||
|
add to a covered work, you may (if authorized by the copyright holders of
|
||||||
|
that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the
|
||||||
|
terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or
|
||||||
|
author attributions in that material or in the Appropriate Legal
|
||||||
|
Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or
|
||||||
|
requiring that modified versions of such material be marked in
|
||||||
|
reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or
|
||||||
|
authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some
|
||||||
|
trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that
|
||||||
|
material by anyone who conveys the material (or modified versions of
|
||||||
|
it) with contractual assumptions of liability to the recipient, for
|
||||||
|
any liability that these contractual assumptions directly impose on
|
||||||
|
those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further
|
||||||
|
restrictions" within the meaning of section 10. If the Program as you
|
||||||
|
received it, or any part of it, contains a notice stating that it is
|
||||||
|
governed by this License along with a term that is a further
|
||||||
|
restriction, you may remove that term. If a license document contains
|
||||||
|
a further restriction but permits relicensing or conveying under this
|
||||||
|
License, you may add to a covered work material governed by the terms
|
||||||
|
of that license document, provided that the further restriction does
|
||||||
|
not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you
|
||||||
|
must place, in the relevant source files, a statement of the
|
||||||
|
additional terms that apply to those files, or a notice indicating
|
||||||
|
where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the
|
||||||
|
form of a separately written license, or stated as exceptions;
|
||||||
|
the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly
|
||||||
|
provided under this License. Any attempt otherwise to propagate or
|
||||||
|
modify it is void, and will automatically terminate your rights under
|
||||||
|
this License (including any patent licenses granted under the third
|
||||||
|
paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your
|
||||||
|
license from a particular copyright holder is reinstated (a)
|
||||||
|
provisionally, unless and until the copyright holder explicitly and
|
||||||
|
finally terminates your license, and (b) permanently, if the copyright
|
||||||
|
holder fails to notify you of the violation by some reasonable means
|
||||||
|
prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is
|
||||||
|
reinstated permanently if the copyright holder notifies you of the
|
||||||
|
violation by some reasonable means, this is the first time you have
|
||||||
|
received notice of violation of this License (for any work) from that
|
||||||
|
copyright holder, and you cure the violation prior to 30 days after
|
||||||
|
your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the
|
||||||
|
licenses of parties who have received copies or rights from you under
|
||||||
|
this License. If your rights have been terminated and not permanently
|
||||||
|
reinstated, you do not qualify to receive new licenses for the same
|
||||||
|
material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or
|
||||||
|
run a copy of the Program. Ancillary propagation of a covered work
|
||||||
|
occurring solely as a consequence of using peer-to-peer transmission
|
||||||
|
to receive a copy likewise does not require acceptance. However,
|
||||||
|
nothing other than this License grants you permission to propagate or
|
||||||
|
modify any covered work. These actions infringe copyright if you do
|
||||||
|
not accept this License. Therefore, by modifying or propagating a
|
||||||
|
covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically
|
||||||
|
receives a license from the original licensors, to run, modify and
|
||||||
|
propagate that work, subject to this License. You are not responsible
|
||||||
|
for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an
|
||||||
|
organization, or substantially all assets of one, or subdividing an
|
||||||
|
organization, or merging organizations. If propagation of a covered
|
||||||
|
work results from an entity transaction, each party to that
|
||||||
|
transaction who receives a copy of the work also receives whatever
|
||||||
|
licenses to the work the party's predecessor in interest had or could
|
||||||
|
give under the previous paragraph, plus a right to possession of the
|
||||||
|
Corresponding Source of the work from the predecessor in interest, if
|
||||||
|
the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the
|
||||||
|
rights granted or affirmed under this License. For example, you may
|
||||||
|
not impose a license fee, royalty, or other charge for exercise of
|
||||||
|
rights granted under this License, and you may not initiate litigation
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
any patent claim is infringed by making, using, selling, offering for
|
||||||
|
sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this
|
||||||
|
License of the Program or a work on which the Program is based. The
|
||||||
|
work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims
|
||||||
|
owned or controlled by the contributor, whether already acquired or
|
||||||
|
hereafter acquired, that would be infringed by some manner, permitted
|
||||||
|
by this License, of making, using, or selling its contributor version,
|
||||||
|
but do not include claims that would be infringed only as a
|
||||||
|
consequence of further modification of the contributor version. For
|
||||||
|
purposes of this definition, "control" includes the right to grant
|
||||||
|
patent sublicenses in a manner consistent with the requirements of
|
||||||
|
this License.
|
||||||
|
|
||||||
|
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||||
|
patent license under the contributor's essential patent claims, to
|
||||||
|
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||||
|
propagate the contents of its contributor version.
|
||||||
|
|
||||||
|
In the following three paragraphs, a "patent license" is any express
|
||||||
|
agreement or commitment, however denominated, not to enforce a patent
|
||||||
|
(such as an express permission to practice a patent or covenant not to
|
||||||
|
sue for patent infringement). To "grant" such a patent license to a
|
||||||
|
party means to make such an agreement or commitment not to enforce a
|
||||||
|
patent against the party.
|
||||||
|
|
||||||
|
If you convey a covered work, knowingly relying on a patent license,
|
||||||
|
and the Corresponding Source of the work is not available for anyone
|
||||||
|
to copy, free of charge and under the terms of this License, through a
|
||||||
|
publicly available network server or other readily accessible means,
|
||||||
|
then you must either (1) cause the Corresponding Source to be so
|
||||||
|
available, or (2) arrange to deprive yourself of the benefit of the
|
||||||
|
patent license for this particular work, or (3) arrange, in a manner
|
||||||
|
consistent with the requirements of this License, to extend the patent
|
||||||
|
license to downstream recipients. "Knowingly relying" means you have
|
||||||
|
actual knowledge that, but for the patent license, your conveying the
|
||||||
|
covered work in a country, or your recipient's use of the covered work
|
||||||
|
in a country, would infringe one or more identifiable patents in that
|
||||||
|
country that you have reason to believe are valid.
|
||||||
|
|
||||||
|
If, pursuant to or in connection with a single transaction or
|
||||||
|
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||||
|
covered work, and grant a patent license to some of the parties
|
||||||
|
receiving the covered work authorizing them to use, propagate, modify
|
||||||
|
or convey a specific copy of the covered work, then the patent license
|
||||||
|
you grant is automatically extended to all recipients of the covered
|
||||||
|
work and works based on it.
|
||||||
|
|
||||||
|
A patent license is "discriminatory" if it does not include within
|
||||||
|
the scope of its coverage, prohibits the exercise of, or is
|
||||||
|
conditioned on the non-exercise of one or more of the rights that are
|
||||||
|
specifically granted under this License. You may not convey a covered
|
||||||
|
work if you are a party to an arrangement with a third party that is
|
||||||
|
in the business of distributing software, under which you make payment
|
||||||
|
to the third party based on the extent of your activity of conveying
|
||||||
|
the work, and under which the third party grants, to any of the
|
||||||
|
parties who would receive the covered work from you, a discriminatory
|
||||||
|
patent license (a) in connection with copies of the covered work
|
||||||
|
conveyed by you (or copies made from those copies), or (b) primarily
|
||||||
|
for and in connection with specific products or compilations that
|
||||||
|
contain the covered work, unless you entered into that arrangement,
|
||||||
|
or that patent license was granted, prior to 28 March 2007.
|
||||||
|
|
||||||
|
Nothing in this License shall be construed as excluding or limiting
|
||||||
|
any implied license or other defenses to infringement that may
|
||||||
|
otherwise be available to you under applicable patent law.
|
||||||
|
|
||||||
|
12. No Surrender of Others' Freedom.
|
||||||
|
|
||||||
|
If conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot convey a
|
||||||
|
covered work so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you may
|
||||||
|
not convey it at all. For example, if you agree to terms that obligate you
|
||||||
|
to collect a royalty for further conveying from those to whom you convey
|
||||||
|
the Program, the only way you could satisfy both those terms and this
|
||||||
|
License would be to refrain entirely from conveying the Program.
|
||||||
|
|
||||||
|
13. Use with the GNU Affero General Public License.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, you have
|
||||||
|
permission to link or combine any covered work with a work licensed
|
||||||
|
under version 3 of the GNU Affero General Public License into a single
|
||||||
|
combined work, and to convey the resulting work. The terms of this
|
||||||
|
License will continue to apply to the part which is the covered work,
|
||||||
|
but the special requirements of the GNU Affero General Public License,
|
||||||
|
section 13, concerning interaction through a network will apply to the
|
||||||
|
combination as such.
|
||||||
|
|
||||||
|
14. Revised Versions of this License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions of
|
||||||
|
the GNU General Public License from time to time. Such new versions will
|
||||||
|
be similar in spirit to the present version, but may differ in detail to
|
||||||
|
address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Program specifies that a certain numbered version of the GNU General
|
||||||
|
Public License "or any later version" applies to it, you have the
|
||||||
|
option of following the terms and conditions either of that numbered
|
||||||
|
version or of any later version published by the Free Software
|
||||||
|
Foundation. If the Program does not specify a version number of the
|
||||||
|
GNU General Public License, you may choose any version ever published
|
||||||
|
by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Program specifies that a proxy can decide which future
|
||||||
|
versions of the GNU General Public License can be used, that proxy's
|
||||||
|
public statement of acceptance of a version permanently authorizes you
|
||||||
|
to choose that version for the Program.
|
||||||
|
|
||||||
|
Later license versions may give you additional or different
|
||||||
|
permissions. However, no additional obligations are imposed on any
|
||||||
|
author or copyright holder as a result of your choosing to follow a
|
||||||
|
later version.
|
||||||
|
|
||||||
|
15. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||||
|
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||||
|
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||||
|
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||||
|
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||||
|
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. Limitation of Liability.
|
||||||
|
|
||||||
|
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||||
|
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||||
|
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||||
|
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||||
|
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||||
|
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||||
|
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||||
|
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGES.
|
||||||
|
|
||||||
|
17. Interpretation of Sections 15 and 16.
|
||||||
|
|
||||||
|
If the disclaimer of warranty and limitation of liability provided
|
||||||
|
above cannot be given local legal effect according to their terms,
|
||||||
|
reviewing courts shall apply local law that most closely approximates
|
||||||
|
an absolute waiver of all civil liability in connection with the
|
||||||
|
Program, unless a warranty or assumption of liability accompanies a
|
||||||
|
copy of the Program in return for a fee.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest
|
||||||
|
possible use to the public, the best way to achieve this is to make it
|
||||||
|
free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest
|
||||||
|
to attach them to the start of each source file to most effectively
|
||||||
|
state the exclusion of warranty; and each file should have at least
|
||||||
|
the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the program's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If the program does terminal interaction, make it output a short
|
||||||
|
notice like this when it starts in an interactive mode:
|
||||||
|
|
||||||
|
<program> Copyright (C) <year> <name of author>
|
||||||
|
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||||
|
This is free software, and you are welcome to redistribute it
|
||||||
|
under certain conditions; type `show c' for details.
|
||||||
|
|
||||||
|
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||||
|
parts of the General Public License. Of course, your program's commands
|
||||||
|
might be different; for a GUI interface, you would use an "about box".
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or school,
|
||||||
|
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||||
|
For more information on this, and how to apply and follow the GNU GPL, see
|
||||||
|
<http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
The GNU General Public License does not permit incorporating your program
|
||||||
|
into proprietary programs. If your program is a subroutine library, you
|
||||||
|
may consider it more useful to permit linking proprietary applications with
|
||||||
|
the library. If this is what you want to do, use the GNU Lesser General
|
||||||
|
Public License instead of this License. But first, please read
|
||||||
|
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||||
@@ -4,16 +4,40 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
s3browser "git.kapelle.org/niklas/s3browser/internal"
|
s3browser "git.kapelle.org/niklas/s3browser/internal"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/alexflint/go-arg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type args struct {
|
||||||
|
S3Endpoint string `arg:"--s3-endpoint,required,env:S3_ENDPOINT" help:"host[:port]" placeholder:"ENDPOINT"`
|
||||||
|
S3AccessKey string `arg:"--s3-access-key,required,env:S3_ACCESS_KEY" placeholder:"ACCESS_KEY"`
|
||||||
|
S3SecretKey string `arg:"--s3-secret-key,required,env:S3_SECRET_KEY" placeholder:"SECRET_KEY"`
|
||||||
|
S3DisableSSL bool `arg:"--s3-disable-ssl,env:S3_DISABLE_SSL" default:"false"`
|
||||||
|
Address string `arg:"--address,env:ADDRESS" default:":3000" help:"what address to listen on" placeholder:"ADDRESS"`
|
||||||
|
CacheTTL int64 `arg:"--cache-ttl,env:CACHE_TTL" help:"Time in seconds" default:"30" placeholder:"TTL"`
|
||||||
|
CacheCleanup int64 `arg:"--cache-cleanup,env:CACHE_CLEANUP" help:"Time in seconds" default:"60" placeholder:"CLEANUP"`
|
||||||
|
Verbose bool `arg:"-v,--verbose,env:VERBOSE" help:"verbosity level" default:"false"`
|
||||||
|
DBConnection string `arg:"--db,required,env:DB_CONNECTION" help:"DSN in format: https://github.com/go-sql-driver/mysql#dsn-data-source-name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args) Version() string {
|
||||||
|
// TODO
|
||||||
|
return "s3Browser 0.1"
|
||||||
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
s3browser.Start(s3browser.AppConfig{
|
var args args
|
||||||
S3Endoint: "localhost:9000",
|
arg.MustParse(&args)
|
||||||
S3SSL: false,
|
|
||||||
S3AccessKey: "testo",
|
s3browser.Start(types.AppConfig{
|
||||||
S3SecretKey: "testotesto",
|
S3Endoint: args.S3Endpoint,
|
||||||
S3Buket: "dev",
|
S3SSL: !args.S3DisableSSL,
|
||||||
CacheTTL: 20 * time.Second,
|
S3AccessKey: args.S3AccessKey,
|
||||||
CacheCleanup: 1 * time.Minute,
|
S3SecretKey: args.S3SecretKey,
|
||||||
|
DSN: args.DBConnection,
|
||||||
|
CacheTTL: time.Duration(args.CacheTTL) * time.Second,
|
||||||
|
CacheCleanup: time.Duration(args.CacheCleanup) * time.Second,
|
||||||
|
Address: args.Address,
|
||||||
|
LogDebug: args.Verbose,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,6 +13,20 @@ services:
|
|||||||
command: server /data --console-address ":9001"
|
command: server /data --console-address ":9001"
|
||||||
volumes:
|
volumes:
|
||||||
- minio_dev:/data
|
- minio_dev:/data
|
||||||
|
db:
|
||||||
|
container_name: db
|
||||||
|
image: mariadb
|
||||||
|
environment:
|
||||||
|
- MARIADB_ROOT_PASSWORD=hunter2
|
||||||
|
- MARIADB_DATABASE=s3Browser
|
||||||
|
- MARIADB_USER=s3Browser
|
||||||
|
- MARIADB_PASSWORD=hunter2
|
||||||
|
ports:
|
||||||
|
- 3306:3306
|
||||||
|
volumes:
|
||||||
|
- mariadb_dev:/var/lib/mysql
|
||||||
volumes:
|
volumes:
|
||||||
minio_dev:
|
minio_dev:
|
||||||
name: minio_dev
|
name: minio_dev
|
||||||
|
mariadb_dev:
|
||||||
|
name: mariadb_dev
|
||||||
7
go.mod
7
go.mod
@@ -3,10 +3,17 @@ module git.kapelle.org/niklas/s3browser
|
|||||||
go 1.16
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/alexflint/go-arg v1.4.2
|
||||||
|
github.com/go-sql-driver/mysql v1.6.0
|
||||||
|
github.com/golang-jwt/jwt v3.2.2+incompatible
|
||||||
|
github.com/gorilla/mux v1.8.0
|
||||||
github.com/graph-gophers/dataloader v5.0.0+incompatible
|
github.com/graph-gophers/dataloader v5.0.0+incompatible
|
||||||
github.com/graphql-go/graphql v0.7.9
|
github.com/graphql-go/graphql v0.7.9
|
||||||
github.com/graphql-go/handler v0.2.3
|
github.com/graphql-go/handler v0.2.3
|
||||||
github.com/minio/minio-go/v7 v7.0.12
|
github.com/minio/minio-go/v7 v7.0.12
|
||||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
|
github.com/sirupsen/logrus v1.8.1
|
||||||
|
github.com/stretchr/testify v1.7.0
|
||||||
|
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f
|
||||||
)
|
)
|
||||||
|
|||||||
14
go.sum
14
go.sum
@@ -1,13 +1,23 @@
|
|||||||
|
github.com/alexflint/go-arg v1.4.2 h1:lDWZAXxpAnZUq4qwb86p/3rIJJ2Li81EoMbTMujhVa0=
|
||||||
|
github.com/alexflint/go-arg v1.4.2/go.mod h1:9iRbDxne7LcR/GSvEr7ma++GLpdIU1zrghf2y2768kM=
|
||||||
|
github.com/alexflint/go-scalar v1.0.0 h1:NGupf1XV/Xb04wXskDFzS0KWOLH632W/EO4fAFi+A70=
|
||||||
|
github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxjy3MxYW0gjYw=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||||
|
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||||
|
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||||
|
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||||
|
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||||
github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug=
|
github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug=
|
||||||
github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4=
|
github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4=
|
||||||
github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34=
|
github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34=
|
||||||
@@ -57,6 +67,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
|||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU=
|
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU=
|
||||||
@@ -84,3 +96,5 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
|||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
9
internal/cache/cache.go
vendored
Normal file
9
internal/cache/cache.go
vendored
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
package cache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/graph-gophers/dataloader"
|
||||||
|
)
|
||||||
|
|
||||||
|
type S3Cache interface {
|
||||||
|
dataloader.Cache
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package s3browser
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
@@ -8,17 +8,18 @@ import (
|
|||||||
gocache "github.com/patrickmn/go-cache"
|
gocache "github.com/patrickmn/go-cache"
|
||||||
)
|
)
|
||||||
|
|
||||||
type cache struct {
|
type TTLCache struct {
|
||||||
c *gocache.Cache
|
c *gocache.Cache
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCache(ttl, cleanupInterval time.Duration) *cache {
|
// Create new ttl cache
|
||||||
return &cache{
|
func NewTTLCache(ttl, cleanupInterval time.Duration) *TTLCache {
|
||||||
|
return &TTLCache{
|
||||||
c: gocache.New(ttl, cleanupInterval),
|
c: gocache.New(ttl, cleanupInterval),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
|
func (c *TTLCache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bool) {
|
||||||
v, ok := c.c.Get(key.String())
|
v, ok := c.c.Get(key.String())
|
||||||
if ok {
|
if ok {
|
||||||
return v.(dataloader.Thunk), ok
|
return v.(dataloader.Thunk), ok
|
||||||
@@ -26,11 +27,11 @@ func (c *cache) Get(_ context.Context, key dataloader.Key) (dataloader.Thunk, bo
|
|||||||
return nil, ok
|
return nil, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
|
func (c *TTLCache) Set(_ context.Context, key dataloader.Key, value dataloader.Thunk) {
|
||||||
c.c.Set(key.String(), value, 0)
|
c.c.Set(key.String(), value, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cache) Delete(_ context.Context, key dataloader.Key) bool {
|
func (c *TTLCache) Delete(_ context.Context, key dataloader.Key) bool {
|
||||||
if _, found := c.c.Get(key.String()); found {
|
if _, found := c.c.Get(key.String()); found {
|
||||||
c.c.Delete(key.String())
|
c.c.Delete(key.String())
|
||||||
return true
|
return true
|
||||||
@@ -38,6 +39,6 @@ func (c *cache) Delete(_ context.Context, key dataloader.Key) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cache) Clear() {
|
func (c *TTLCache) Clear() {
|
||||||
c.c.Flush()
|
c.c.Flush()
|
||||||
}
|
}
|
||||||
@@ -1,211 +0,0 @@
|
|||||||
package s3browser
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/graph-gophers/dataloader"
|
|
||||||
"github.com/minio/minio-go/v7"
|
|
||||||
)
|
|
||||||
|
|
||||||
// listObjectsBatch batch func for calling s3.ListObjects()
|
|
||||||
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
|
||||||
var results []*dataloader.Result
|
|
||||||
|
|
||||||
s3Client, ok := c.Value("s3Client").(*minio.Client)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range k {
|
|
||||||
results = append(results, &dataloader.Result{
|
|
||||||
Data: listObjects(s3Client, bucketName, v.String(), false),
|
|
||||||
Error: nil,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// listObjects helper func for listObjectsBatch
|
|
||||||
func listObjects(s3Client *minio.Client, bukitName, path string, recursive bool) []minio.ObjectInfo {
|
|
||||||
objectCh := s3Client.ListObjects(context.Background(), bukitName, minio.ListObjectsOptions{
|
|
||||||
Prefix: path,
|
|
||||||
Recursive: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
result := make([]minio.ObjectInfo, 0)
|
|
||||||
for obj := range objectCh {
|
|
||||||
result = append(result, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFilesBatch batch func for getting all files in path. Uses "listObjects" dataloader
|
|
||||||
func getFilesBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
|
||||||
var results []*dataloader.Result
|
|
||||||
|
|
||||||
loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
|
|
||||||
if !ok {
|
|
||||||
return handleLoaderError(k, fmt.Errorf("Failed to get loader from context"))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range k {
|
|
||||||
path := v.String()
|
|
||||||
files := make([]File, 0)
|
|
||||||
|
|
||||||
if !strings.HasSuffix(path, "/") {
|
|
||||||
path += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))
|
|
||||||
|
|
||||||
objects, _ := thunk()
|
|
||||||
|
|
||||||
// TODO: handle thunk error
|
|
||||||
|
|
||||||
for _, obj := range objects.([]minio.ObjectInfo) {
|
|
||||||
if obj.Err != nil {
|
|
||||||
// TODO: how to handle?
|
|
||||||
} else if !strings.HasSuffix(obj.Key, "/") {
|
|
||||||
files = append(files, File{
|
|
||||||
ID: obj.Key,
|
|
||||||
Name: filepath.Base(obj.Key),
|
|
||||||
Size: obj.Size,
|
|
||||||
ContentType: obj.ContentType,
|
|
||||||
ETag: obj.ETag,
|
|
||||||
LastModified: obj.LastModified,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
results = append(results, &dataloader.Result{
|
|
||||||
Data: files,
|
|
||||||
Error: nil,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFileBatch batch func for getting object info
|
|
||||||
func getFileBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
|
||||||
var results []*dataloader.Result
|
|
||||||
|
|
||||||
s3Client, ok := c.Value("s3Client").(*minio.Client)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range k {
|
|
||||||
obj, err := s3Client.StatObject(context.Background(), bucketName, v.String(), minio.StatObjectOptions{})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
results = append(results, &dataloader.Result{
|
|
||||||
Data: nil,
|
|
||||||
Error: err,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
results = append(results, &dataloader.Result{
|
|
||||||
Data: &File{
|
|
||||||
ID: obj.Key,
|
|
||||||
Size: obj.Size,
|
|
||||||
ContentType: obj.ContentType,
|
|
||||||
ETag: obj.ETag,
|
|
||||||
LastModified: obj.LastModified,
|
|
||||||
},
|
|
||||||
Error: nil,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDirsBatch batch func for getting dirs in a path
|
|
||||||
func getDirsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
|
||||||
var results []*dataloader.Result
|
|
||||||
|
|
||||||
loader, ok := c.Value("loader").(map[string]*dataloader.Loader)
|
|
||||||
if !ok {
|
|
||||||
return handleLoaderError(k, fmt.Errorf("Failed to get loader from context"))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range k {
|
|
||||||
path := v.String()
|
|
||||||
dirs := make([]Directory, 0)
|
|
||||||
|
|
||||||
if !strings.HasSuffix(path, "/") {
|
|
||||||
path += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
thunk := loader["listObjects"].Load(c, dataloader.StringKey(path))
|
|
||||||
|
|
||||||
objects, _ := thunk()
|
|
||||||
|
|
||||||
// TODO: handle thunk error
|
|
||||||
|
|
||||||
for _, obj := range objects.([]minio.ObjectInfo) {
|
|
||||||
if obj.Err != nil {
|
|
||||||
// TODO: how to handle?
|
|
||||||
} else if strings.HasSuffix(obj.Key, "/") {
|
|
||||||
dirs = append(dirs, Directory{
|
|
||||||
ID: obj.Key,
|
|
||||||
Name: filepath.Base(obj.Key),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
results = append(results, &dataloader.Result{
|
|
||||||
Data: dirs,
|
|
||||||
Error: nil,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleLoaderError helper func when the whole batch failed
|
|
||||||
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
|
|
||||||
var results []*dataloader.Result
|
|
||||||
for range k {
|
|
||||||
results = append(results, &dataloader.Result{
|
|
||||||
Data: nil,
|
|
||||||
Error: err,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// createDataloader create all dataloaders and return a map of them plus a cache for objects
|
|
||||||
func createDataloader(config AppConfig) map[string]*dataloader.Loader {
|
|
||||||
loaderMap := make(map[string]*dataloader.Loader, 0)
|
|
||||||
|
|
||||||
loaderMap["getFiles"] = dataloader.NewBatchedLoader(
|
|
||||||
getFilesBatch,
|
|
||||||
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
|
||||||
)
|
|
||||||
|
|
||||||
loaderMap["getFile"] = dataloader.NewBatchedLoader(
|
|
||||||
getFileBatch,
|
|
||||||
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
|
||||||
)
|
|
||||||
|
|
||||||
loaderMap["listObjects"] = dataloader.NewBatchedLoader(
|
|
||||||
listObjectsBatch,
|
|
||||||
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
|
||||||
)
|
|
||||||
|
|
||||||
loaderMap["getDirs"] = dataloader.NewBatchedLoader(
|
|
||||||
getDirsBatch,
|
|
||||||
dataloader.WithCache(newCache(config.CacheTTL, config.CacheCleanup)),
|
|
||||||
)
|
|
||||||
|
|
||||||
return loaderMap
|
|
||||||
}
|
|
||||||
9
internal/db/db.go
Normal file
9
internal/db/db.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
type DB interface {
|
||||||
|
Setup() error
|
||||||
|
CheckLogin(ctx context.Context, username, password string) (bool, error)
|
||||||
|
AddUser(ctx context.Context, username, password string) error
|
||||||
|
}
|
||||||
94
internal/db/mysql.go
Normal file
94
internal/db/mysql.go
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
_ "embed"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/go-sql-driver/mysql"
|
||||||
|
"golang.org/x/crypto/bcrypt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setupSql holds the schema bootstrap script, embedded at build time.
//go:embed setup.sql
var setupSql string

// DB_NAME names the target schema.
// NOTE(review): appears unused in this file — confirm callers before removing.
// Go convention would be DBName; renaming would change the exported API, so
// it is only flagged here.
const DB_NAME = "s3Browser"

// mysqlDB implements the DB interface on top of a MySQL connection pool.
type mysqlDB struct {
	dbConn *sql.DB
}
|
||||||
|
|
||||||
|
func NewDB(dataSourceName string) (DB, error) {
|
||||||
|
db, err := sql.Open("mysql", dataSourceName)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
db.SetConnMaxLifetime(time.Minute * 3)
|
||||||
|
db.SetMaxOpenConns(10)
|
||||||
|
db.SetMaxIdleConns(10)
|
||||||
|
|
||||||
|
return &mysqlDB{
|
||||||
|
dbConn: db,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *mysqlDB) Setup() error {
|
||||||
|
tx, err := d.dbConn.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = tx.Exec(setupSql)
|
||||||
|
if err != nil {
|
||||||
|
tx.Rollback()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *mysqlDB) CheckLogin(ctx context.Context, username, password string) (bool, error) {
|
||||||
|
rows, err := d.dbConn.QueryContext(ctx, "SELECT password FROM user WHERE username = ?", username)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rows.Next() {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var passwordHash []byte
|
||||||
|
err = rows.Scan(&passwordHash)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if bcrypt.CompareHashAndPassword(passwordHash, []byte(password)) != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *mysqlDB) AddUser(ctx context.Context, username, password string) error {
|
||||||
|
hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = d.dbConn.ExecContext(ctx, "INSERT INTO user (username,password) VALUES (?,?)", username, hash)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
11
internal/db/setup.sql
Normal file
11
internal/db/setup.sql
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
-- Schema for the s3Browser user table.
-- IF NOT EXISTS makes Setup() idempotent, so re-running it on an already
-- initialised database no longer fails.
CREATE TABLE IF NOT EXISTS s3Browser.`user` (
	id INT auto_increment NOT NULL,
	username varchar(100) NOT NULL,
	-- bcrypt hashes are exactly 60 bytes long
	password varchar(60) NOT NULL,
	CONSTRAINT user_PK PRIMARY KEY (id),
	CONSTRAINT user_UN UNIQUE KEY (username)
)
ENGINE=InnoDB
DEFAULT CHARSET=utf8mb4
COLLATE=utf8mb4_general_ci;
|
||||||
29
internal/errors/errors.go
Normal file
29
internal/errors/errors.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package errors
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Sentinel errors shared by the resolvers.
var (
	// ErrNotAuthenticated is returned when a request lacks valid credentials.
	ErrNotAuthenticated = ExtendError("UNAUTHENTICATED", "No valid authentication provided")
)

// ExtendedError is an error that carries a machine-readable code, exposed
// to clients through the GraphQL "extensions" map.
type ExtendedError struct {
	Message string
	Code    string
}

// Error implements the error interface.
func (e *ExtendedError) Error() string {
	return e.Message
}

// Extensions exposes the error code via graphql-go's extensions mechanism.
func (e *ExtendedError) Extensions() map[string]interface{} {
	return map[string]interface{}{"code": e.Code}
}

// ExtendError builds an ExtendedError from a code and a printf-style message.
func ExtendError(code, format string, a ...interface{}) *ExtendedError {
	msg := fmt.Sprintf(format, a...)
	return &ExtendedError{
		Message: msg,
		Code:    code,
	}
}
|
||||||
75
internal/gql/gql_test.go
Normal file
75
internal/gql/gql_test.go
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
package gql_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/gql"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/loader"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
"github.com/graph-gophers/dataloader"
|
||||||
|
"github.com/graphql-go/graphql"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setup builds the shared test fixture: an assert helper, a context
// pre-populated with a mock S3 client and a cache-less loader, and the
// GraphQL schema under test.
func setup(t *testing.T) (*assert.Assertions, context.Context, graphql.Schema) {
	assert := assert.New(t)
	ctx := context.Background()

	// Error intentionally discarded; TestCreateSchema covers schema errors.
	schema, _ := gql.GraphqlSchema()

	s3, err := s3.NewMockS3([]string{"bucket1"})
	assert.NoError(err)
	// NOTE(review): plain string context keys must match the exact keys the
	// resolvers look up ("s3Client", "loader"); vet flags this pattern.
	ctx = context.WithValue(ctx, "s3Client", s3)

	// NoCache everywhere so tests never see stale results between queries.
	loader := loader.NewLoader(loader.CacheConfig{
		ListObjectsLoaderCache:          &dataloader.NoCache{},
		ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
		StatObjectLoaderCache:           &dataloader.NoCache{},
		ListBucketsLoaderCache:          &dataloader.NoCache{},
	})
	assert.NotNil(loader)
	ctx = context.WithValue(ctx, "loader", loader)

	return assert, ctx, schema
}
|
||||||
|
|
||||||
|
func do(ctx context.Context, schema graphql.Schema, query string) *graphql.Result {
|
||||||
|
params := graphql.Params{
|
||||||
|
Schema: schema,
|
||||||
|
RequestString: query,
|
||||||
|
Context: ctx,
|
||||||
|
}
|
||||||
|
r := graphql.Do(params)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCreateSchema verifies that building the GraphQL types and schema
// neither panics nor returns an error.
func TestCreateSchema(t *testing.T) {
	assert := assert.New(t)

	// Type construction mutates package-level state; it must be safe to call.
	assert.NotPanics(func() {
		gql.GraphqlTypes()
	})

	var schema graphql.Schema
	var err error

	assert.NotPanics(func() {
		schema, err = gql.GraphqlSchema()
	})

	assert.NoError(err)
	assert.NotNil(schema)
}
|
||||||
|
|
||||||
|
// TestAuth runs the "authorized" query and expects it to resolve without
// errors. The field itself never fails; it only reports the auth state.
func TestAuth(t *testing.T) {
	assert, ctx, schema := setup(t)

	r := do(ctx, schema, `
	{
		authorized
	}
	`)
	t.Logf("Data: %v", r.Data)
	assert.Len(r.Errors, 0)
}
|
||||||
@@ -1,21 +1,26 @@
|
|||||||
package s3browser
|
package gql
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/graph-gophers/dataloader"
|
|
||||||
"github.com/graphql-go/graphql"
|
"github.com/graphql-go/graphql"
|
||||||
"github.com/graphql-go/graphql/language/ast"
|
"github.com/graphql-go/graphql/language/ast"
|
||||||
|
|
||||||
|
helper "git.kapelle.org/niklas/s3browser/internal/helper"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/loader"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var typesInit bool = false
|
||||||
var graphqlDirType *graphql.Object
|
var graphqlDirType *graphql.Object
|
||||||
var graphqlFileType *graphql.Object
|
var graphqlFileType *graphql.Object
|
||||||
|
var graphqlLoginResultType *graphql.Object
|
||||||
|
var objIDType *graphql.Scalar
|
||||||
|
|
||||||
// graphqlTypes create all graphql types and stores the in the global variables
|
//GraphqlTypes create all graphql types and stores the in the global variables
|
||||||
func graphqlTypes() {
|
func GraphqlTypes() {
|
||||||
|
|
||||||
var dateTimeType = graphql.NewScalar(graphql.ScalarConfig{
|
var dateTimeType = graphql.NewScalar(graphql.ScalarConfig{
|
||||||
Name: "DateTime",
|
Name: "DateTime",
|
||||||
@@ -42,7 +47,39 @@ func graphqlTypes() {
|
|||||||
ParseLiteral: func(valueAST ast.Value) interface{} {
|
ParseLiteral: func(valueAST ast.Value) interface{} {
|
||||||
switch valueAST := valueAST.(type) {
|
switch valueAST := valueAST.(type) {
|
||||||
case *ast.StringValue:
|
case *ast.StringValue:
|
||||||
return valueAST.Value
|
if tval, err := time.Parse(time.RFC3339, valueAST.Value); err != nil {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return tval
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
objIDType = graphql.NewScalar(graphql.ScalarConfig{
|
||||||
|
Name: "objID",
|
||||||
|
Description: `String representing a bucket, key and version combination.
|
||||||
|
Looks like this: "bucketName:/name/of/key" or "bucketName@version:/name/of/key"`,
|
||||||
|
Serialize: func(value interface{}) interface{} {
|
||||||
|
switch value := value.(type) {
|
||||||
|
case types.ID:
|
||||||
|
return value.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return "INVALID"
|
||||||
|
},
|
||||||
|
ParseValue: func(value interface{}) interface{} {
|
||||||
|
switch tvalue := value.(type) {
|
||||||
|
case string:
|
||||||
|
return types.ParseID(tvalue)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
ParseLiteral: func(valueAST ast.Value) interface{} {
|
||||||
|
switch valueAST := valueAST.(type) {
|
||||||
|
case *ast.StringValue:
|
||||||
|
return types.ParseID(valueAST.Value)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
@@ -53,17 +90,17 @@ func graphqlTypes() {
|
|||||||
Description: "Represents a directory",
|
Description: "Represents a directory",
|
||||||
Fields: graphql.Fields{
|
Fields: graphql.Fields{
|
||||||
"id": &graphql.Field{
|
"id": &graphql.Field{
|
||||||
Type: graphql.NewNonNull(graphql.ID),
|
Type: graphql.NewNonNull(objIDType),
|
||||||
},
|
},
|
||||||
"name": &graphql.Field{
|
"name": &graphql.Field{
|
||||||
Type: graphql.String,
|
Type: graphql.String,
|
||||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
source, ok := p.Source.(Directory)
|
source, ok := p.Source.(types.Directory)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse source for resolve")
|
return nil, fmt.Errorf("Failed to parse source for resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
return filepath.Base(source.ID), nil
|
return filepath.Base(source.ID.Key), nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -74,18 +111,18 @@ func graphqlTypes() {
|
|||||||
Description: "Represents a file, not a directory",
|
Description: "Represents a file, not a directory",
|
||||||
Fields: graphql.Fields{
|
Fields: graphql.Fields{
|
||||||
"id": &graphql.Field{
|
"id": &graphql.Field{
|
||||||
Type: graphql.NewNonNull(graphql.ID),
|
Type: graphql.NewNonNull(objIDType),
|
||||||
Description: "The uniqe ID of the file. Represents the path and the s3 key.",
|
Description: "The uniqe ID of the file. Represents the path and the s3 key.",
|
||||||
},
|
},
|
||||||
"name": &graphql.Field{
|
"name": &graphql.Field{
|
||||||
Type: graphql.String,
|
Type: graphql.String,
|
||||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
source, ok := p.Source.(File)
|
source, ok := p.Source.(types.File)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse source for resolve")
|
return nil, fmt.Errorf("Failed to parse source for resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
return filepath.Base(source.ID), nil
|
return filepath.Base(source.ID.Key), nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"size": &graphql.Field{
|
"size": &graphql.Field{
|
||||||
@@ -135,19 +172,19 @@ func graphqlTypes() {
|
|||||||
"parent": &graphql.Field{
|
"parent": &graphql.Field{
|
||||||
Type: graphqlDirType,
|
Type: graphqlDirType,
|
||||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
source, ok := p.Source.(File)
|
source, ok := p.Source.(types.File)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse Source for parent resolve")
|
return nil, fmt.Errorf("Failed to parse Source for parent resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
basename := filepath.Dir(source.ID)
|
parent := source.ID.Parent()
|
||||||
|
|
||||||
if basename == "." {
|
if parent == nil {
|
||||||
basename = "/"
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return Directory{
|
return types.Directory{
|
||||||
ID: basename,
|
ID: *source.ID.Parent(),
|
||||||
}, nil
|
}, nil
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -157,63 +194,77 @@ func graphqlTypes() {
|
|||||||
graphqlDirType.AddFieldConfig("files", &graphql.Field{
|
graphqlDirType.AddFieldConfig("files", &graphql.Field{
|
||||||
Type: graphql.NewList(graphqlFileType),
|
Type: graphql.NewList(graphqlFileType),
|
||||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
source, ok := p.Source.(Directory)
|
source, ok := p.Source.(types.Directory)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse Source for files resolve")
|
return nil, fmt.Errorf("Failed to parse Source for files resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
|
loader := p.Context.Value("loader").(*loader.Loader)
|
||||||
|
|
||||||
thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(source.ID))
|
return loader.GetFiles(p.Context, source.ID)
|
||||||
return thunk()
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
graphqlDirType.AddFieldConfig("directorys", &graphql.Field{
|
graphqlDirType.AddFieldConfig("directorys", &graphql.Field{
|
||||||
Type: graphql.NewList(graphqlDirType),
|
Type: graphql.NewList(graphqlDirType),
|
||||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
source, ok := p.Source.(Directory)
|
source, ok := p.Source.(types.Directory)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
|
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
|
loader := p.Context.Value("loader").(*loader.Loader)
|
||||||
thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(source.ID))
|
return loader.GetDirs(p.Context, source.ID)
|
||||||
|
|
||||||
return thunk()
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
graphqlDirType.AddFieldConfig("parent", &graphql.Field{
|
graphqlDirType.AddFieldConfig("parent", &graphql.Field{
|
||||||
Type: graphqlDirType,
|
Type: graphqlDirType,
|
||||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
source, ok := p.Source.(Directory)
|
source, ok := p.Source.(types.Directory)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
|
return nil, fmt.Errorf("Failed to parse Source for directories resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
dirs := strings.Split(source.ID, "/")
|
return types.Directory{
|
||||||
|
ID: helper.GetParentDir(source.ID),
|
||||||
return Directory{
|
|
||||||
ID: strings.Join(dirs[:len(dirs)-2], "/") + "/",
|
|
||||||
}, nil
|
}, nil
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
graphqlLoginResultType = graphql.NewObject(graphql.ObjectConfig{
|
||||||
|
Name: "LoginResut",
|
||||||
|
Description: "Result of a login",
|
||||||
|
Fields: graphql.Fields{
|
||||||
|
"token": &graphql.Field{
|
||||||
|
Type: graphql.String,
|
||||||
|
Description: "JWT token if login was successful",
|
||||||
|
},
|
||||||
|
"successful": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphql.Boolean),
|
||||||
|
Description: "If the login was successful",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
typesInit = true
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// graphqlTypes helper func for using the dataloader to get a file
|
//loadFile helper func for using the dataloader to get a file
|
||||||
func loadFile(p graphql.ResolveParams) (*File, error) {
|
func loadFile(p graphql.ResolveParams) (*types.File, error) {
|
||||||
source, ok := p.Source.(File)
|
source, ok := p.Source.(types.File)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to parse source for resolve")
|
return nil, fmt.Errorf("Failed to parse source for resolve")
|
||||||
}
|
}
|
||||||
|
|
||||||
loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
|
loader := p.Context.Value("loader").(*loader.Loader)
|
||||||
|
|
||||||
thunk := loader["getFile"].Load(p.Context, dataloader.StringKey(source.ID))
|
file, err := loader.GetFile(p.Context, source.ID)
|
||||||
result, err := thunk()
|
|
||||||
|
|
||||||
file, ok := result.(*File)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("Failed to load file")
|
return nil, fmt.Errorf("Failed to load file")
|
||||||
249
internal/gql/mutations.go
Normal file
249
internal/gql/mutations.go
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
package gql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/db"
|
||||||
|
helper "git.kapelle.org/niklas/s3browser/internal/helper"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/loader"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func deleteMutation(ctx context.Context, id types.ID) error {
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Failed to get s3Client from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'RemoveObject': ", id)
|
||||||
|
// TODO: it is posible to remove multiple objects with a single call.
|
||||||
|
// Is it better to batch this?
|
||||||
|
err := s3Client.RemoveObject(ctx, id)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, id)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to get s3Client from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if dest is a file or a dir
|
||||||
|
if dest.IsDirectory() {
|
||||||
|
// create new dest id
|
||||||
|
// TODO: What if a file with this id already exists?
|
||||||
|
dest.Key += helper.GetFilenameFromKey(src.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'CopyObject': ", src, "-->", dest)
|
||||||
|
err := s3Client.CopyObject(ctx, src, dest)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, dest)
|
||||||
|
|
||||||
|
return &types.File{
|
||||||
|
ID: dest,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func moveDirMutation(ctx context.Context, src, dest types.ID) ([]*types.File, error) {
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to get s3Client from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !dest.IsDirectory() {
|
||||||
|
return nil, fmt.Errorf("Dest must be a directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
loader, ok := ctx.Value("loader").(*loader.Loader)
|
||||||
|
|
||||||
|
// "move" all file inside dir
|
||||||
|
files, err := loader.GetFilesRecursive(ctx, src)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var result []*types.File
|
||||||
|
parent := src.Parent()
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
newID := types.ID{
|
||||||
|
Bucket: dest.Bucket,
|
||||||
|
Key: strings.Replace(file.ID.Key, parent.Key, dest.Key, 1),
|
||||||
|
}
|
||||||
|
newID.Normalize()
|
||||||
|
|
||||||
|
log.Debug("S3 'CopyObject': ", src, "-->", dest)
|
||||||
|
err := s3Client.CopyObject(ctx, file.ID, dest)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
// TODO: handle error
|
||||||
|
}
|
||||||
|
|
||||||
|
deleteMutation(ctx, file.ID)
|
||||||
|
|
||||||
|
loader.InvalidedCacheForId(ctx, newID)
|
||||||
|
loader.InvalidedCacheForId(ctx, file.ID)
|
||||||
|
|
||||||
|
result = append(result, &types.File{
|
||||||
|
ID: newID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
loader.InvalidedCacheForId(ctx, src)
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func moveFileMutation(ctx context.Context, src, dest types.ID) (*types.File, error) {
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to get s3Client from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if dest is a file or a dir
|
||||||
|
if dest.IsDirectory() {
|
||||||
|
// create new dest id
|
||||||
|
// TODO: What if a file with this id already exists?
|
||||||
|
dest.Key += helper.GetFilenameFromKey(src.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'CopyObject': ", src, "-->", dest)
|
||||||
|
// There is no (spoon) move. Only copy and delete
|
||||||
|
err := s3Client.CopyObject(ctx, src, dest)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = deleteMutation(ctx, src)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, dest)
|
||||||
|
|
||||||
|
return &types.File{
|
||||||
|
ID: dest,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func createDirectory(ctx context.Context, id types.ID) (*types.Directory, error) {
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to get s3Client from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'PutObject': ", id)
|
||||||
|
err := s3Client.PutObject(ctx, id, strings.NewReader(""), 0) // TODO: s3client interface needs content type parameter
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.Value("loader").(*loader.Loader).InvalidedCacheForId(ctx, id)
|
||||||
|
|
||||||
|
return &types.Directory{
|
||||||
|
ID: id,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func deleteDirectory(ctx context.Context, id types.ID) error {
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Failed to get s3Client from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
loader, ok := ctx.Value("loader").(*loader.Loader)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Failed to get dataloader from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all files inside the directory
|
||||||
|
|
||||||
|
files, err := loader.GetFilesRecursive(ctx, id)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete all child files
|
||||||
|
for _, file := range files {
|
||||||
|
s3Client.RemoveObject(ctx, file.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the dir had no children it exists as an object (object with "/" at the end).
|
||||||
|
// If it exists as an object and had children it will get delete once the last child has been deleted
|
||||||
|
// If it had no children we have to delete it manualy
|
||||||
|
// This is at least the behavior when working with minio as s3 backend
|
||||||
|
// TODO: check if this is normal behavior when working with s3
|
||||||
|
if len(files) == 0 {
|
||||||
|
log.Debug("S3 'RemoveObject': ", id)
|
||||||
|
err := s3Client.RemoveObject(ctx, id)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
loader.InvalidedCacheForId(ctx, id)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//login Checks for valid username password combination. Returns singed jwt string
|
||||||
|
func login(ctx context.Context, username, password string) (types.LoginResult, error) {
|
||||||
|
|
||||||
|
dbStore := ctx.Value("dbStore").(db.DB)
|
||||||
|
|
||||||
|
succes, err := dbStore.CheckLogin(ctx, username, password)
|
||||||
|
|
||||||
|
if !succes {
|
||||||
|
return types.LoginResult{
|
||||||
|
Successful: false,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
token := helper.CreateJWT(helper.CreateClaims(username))
|
||||||
|
|
||||||
|
tokenString, err := token.SignedString([]byte("TODO"))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return types.LoginResult{
|
||||||
|
Successful: false,
|
||||||
|
}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return types.LoginResult{
|
||||||
|
Token: tokenString,
|
||||||
|
Successful: true,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
319
internal/gql/schema.go
Normal file
319
internal/gql/schema.go
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
package gql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/graphql-go/graphql"
|
||||||
|
|
||||||
|
s3errors "git.kapelle.org/niklas/s3browser/internal/errors"
|
||||||
|
helper "git.kapelle.org/niklas/s3browser/internal/helper"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/loader"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
//GraphqlSchema generate the schema with its root query and mutation
|
||||||
|
func GraphqlSchema() (graphql.Schema, error) {
|
||||||
|
|
||||||
|
if !typesInit {
|
||||||
|
GraphqlTypes()
|
||||||
|
}
|
||||||
|
|
||||||
|
queryFields := graphql.Fields{
|
||||||
|
"files": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlFileType))),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"path": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
path, ok := p.Args["path"].(*types.ID)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("querry 'files': ", path)
|
||||||
|
|
||||||
|
loader := p.Context.Value("loader").(*loader.Loader)
|
||||||
|
return loader.GetFiles(p.Context, *path)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"directories": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlDirType))),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"path": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
path, ok := p.Args["path"].(*types.ID)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("querry 'directorys': ", path)
|
||||||
|
|
||||||
|
loader := p.Context.Value("loader").(*loader.Loader)
|
||||||
|
return loader.GetDirs(p.Context, *path)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"file": &graphql.Field{
|
||||||
|
Type: graphqlFileType,
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"id": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
id, ok := p.Args["id"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("querry 'file': ", id)
|
||||||
|
|
||||||
|
return types.File{
|
||||||
|
ID: *id,
|
||||||
|
}, nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"authorized": &graphql.Field{
|
||||||
|
Name: "authorized",
|
||||||
|
Type: graphql.NewNonNull(graphql.Boolean),
|
||||||
|
Description: "True if the user is authorized",
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
auth := helper.IsAuthenticated(p.Context)
|
||||||
|
|
||||||
|
return auth, nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"buckets": &graphql.Field{
|
||||||
|
Name: "buckets",
|
||||||
|
Type: graphql.NewNonNull(graphql.NewList(graphql.String)),
|
||||||
|
Description: "List available buckets",
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
loader := p.Context.Value("loader").(*loader.Loader)
|
||||||
|
return loader.GetBuckets(p.Context)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
mutationFields := graphql.Fields{
|
||||||
|
"delete": &graphql.Field{
|
||||||
|
Type: graphql.String,
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"id": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
id, ok := p.Args["id"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("mutation 'delete': ", id)
|
||||||
|
|
||||||
|
return id, deleteMutation(p.Context, *id)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"copy": &graphql.Field{
|
||||||
|
Type: graphqlFileType,
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"src": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
"dest": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
src, ok := p.Args["src"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
dest, ok := p.Args["dest"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("mutation 'copy': ", src, "-->", dest)
|
||||||
|
|
||||||
|
return copyMutation(p.Context, *src, *dest)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"move": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphqlFileType),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"src": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
"dest": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
src, ok := p.Args["src"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
dest, ok := p.Args["dest"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("mutation 'move': ", src, "-->", dest)
|
||||||
|
|
||||||
|
return moveFileMutation(p.Context, *src, *dest)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"moveDir": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphql.NewList(graphqlFileType)),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"src": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
"dest": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
src, ok := p.Args["src"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
dest, ok := p.Args["dest"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("mutation 'moveDir': ", src, "-->", dest)
|
||||||
|
|
||||||
|
return moveDirMutation(p.Context, *src, *dest)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"createDir": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphqlDirType),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"path": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
path, ok := p.Args["path"].(*types.ID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("mutation 'createDir': ", path)
|
||||||
|
|
||||||
|
return createDirectory(p.Context, *path)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"deleteDir": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphql.String),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"path": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(objIDType),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
if !helper.IsAuthenticated(p.Context) {
|
||||||
|
return nil, s3errors.ErrNotAuthenticated
|
||||||
|
}
|
||||||
|
|
||||||
|
path, ok := p.Args["path"].(*types.ID)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("mutation 'deleteDir': ", path)
|
||||||
|
|
||||||
|
return path, deleteDirectory(p.Context, *path)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"login": &graphql.Field{
|
||||||
|
Type: graphql.NewNonNull(graphqlLoginResultType),
|
||||||
|
Args: graphql.FieldConfigArgument{
|
||||||
|
"username": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(graphql.String),
|
||||||
|
},
|
||||||
|
"password": &graphql.ArgumentConfig{
|
||||||
|
Type: graphql.NewNonNull(graphql.String),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||||
|
username, ok := p.Args["username"].(string)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
password, ok := p.Args["password"].(string)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to parse args")
|
||||||
|
}
|
||||||
|
|
||||||
|
return login(p.Context, username, password)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rootQuery := graphql.ObjectConfig{
|
||||||
|
Name: "RootQuery",
|
||||||
|
Fields: queryFields,
|
||||||
|
}
|
||||||
|
|
||||||
|
rootMutation := graphql.ObjectConfig{
|
||||||
|
Name: "RootMutation",
|
||||||
|
Fields: mutationFields,
|
||||||
|
}
|
||||||
|
|
||||||
|
schemaConfig := graphql.SchemaConfig{
|
||||||
|
Query: graphql.NewObject(rootQuery),
|
||||||
|
Mutation: graphql.NewObject(rootMutation),
|
||||||
|
}
|
||||||
|
|
||||||
|
return graphql.NewSchema(schemaConfig)
|
||||||
|
}
|
||||||
67
internal/helper/helper.go
Normal file
67
internal/helper/helper.go
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
package helper
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/golang-jwt/jwt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetFilenameFromKey returns the last path component of an object key.
func GetFilenameFromKey(id string) string {
	return filepath.Base(id)
}
|
||||||
|
|
||||||
|
func GetParentDir(id types.ID) types.ID {
|
||||||
|
dirs := strings.Split(id.Key, "/")
|
||||||
|
|
||||||
|
cut := 1
|
||||||
|
if strings.HasSuffix(id.Key, "/") {
|
||||||
|
cut = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
parentKey := strings.Join(dirs[:len(dirs)-cut], "/") + "/"
|
||||||
|
|
||||||
|
parent := types.ID{
|
||||||
|
Bucket: id.Bucket,
|
||||||
|
Key: parentKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
parent.Normalize()
|
||||||
|
|
||||||
|
return parent
|
||||||
|
}
|
||||||
|
|
||||||
|
func ObjInfoToFile(objInfo s3.Object, bucket string) *types.File {
|
||||||
|
return &types.File{
|
||||||
|
ID: objInfo.ID,
|
||||||
|
Name: objInfo.ID.Name(),
|
||||||
|
Size: objInfo.Size,
|
||||||
|
ContentType: objInfo.ContentType,
|
||||||
|
ETag: objInfo.ETag,
|
||||||
|
LastModified: objInfo.LastModified,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsAuthenticated(ctx context.Context) bool {
|
||||||
|
token, ok := ctx.Value("jwt").(*jwt.Token)
|
||||||
|
return (ok && token.Valid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateJWT(claims *types.JWTClaims) *jwt.Token {
|
||||||
|
|
||||||
|
claims.ExpiresAt = time.Now().Add(time.Hour * 24).Unix()
|
||||||
|
|
||||||
|
return jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateClaims(username string) *types.JWTClaims {
|
||||||
|
return &types.JWTClaims{
|
||||||
|
StandardClaims: jwt.StandardClaims{
|
||||||
|
Subject: username,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,105 +0,0 @@
|
|||||||
package s3browser

import (
	"context"
	"fmt"
	"io"
	"mime"
	"net/http"
	"path/filepath"

	"github.com/graph-gophers/dataloader"
	"github.com/graphql-go/graphql"
	"github.com/graphql-go/handler"
	"github.com/minio/minio-go/v7"
)

// initHttp sets up the GraphQL and file routes and starts the HTTP server
// on :8080. Blocking.
func initHttp(resolveContext context.Context, schema graphql.Schema) error {
	gql := handler.New(&handler.Config{
		Schema:     &schema,
		Pretty:     true,
		GraphiQL:   false,
		Playground: true,
	})

	http.HandleFunc("/graphql", func(rw http.ResponseWriter, r *http.Request) {
		gql.ContextHandler(resolveContext, rw, r)
	})

	http.HandleFunc("/api/file", func(rw http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet:
			httpGetFile(resolveContext, rw, r)
		case http.MethodPost:
			httpPostFile(resolveContext, rw, r)
		}
	})

	return http.ListenAndServe(":8080", nil)
}

// httpGetFile streams a single object to the client, honoring If-None-Match
// so unchanged objects are answered with 304.
func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
	s3Client := ctx.Value("s3Client").(*minio.Client)
	id := r.URL.Query().Get("id")

	objInfo, err := s3Client.StatObject(context.Background(), bucketName, id, minio.GetObjectOptions{})
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}

	if r.Header.Get("If-None-Match") == objInfo.ETag {
		rw.WriteHeader(http.StatusNotModified)
		return
	}

	obj, err := s3Client.GetObject(context.Background(), bucketName, id, minio.GetObjectOptions{})
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}

	rw.Header().Set("Cache-Control", "must-revalidate")
	rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filepath.Base((objInfo.Key))))
	rw.Header().Set("Content-Type", objInfo.ContentType)
	rw.Header().Set("ETag", objInfo.ETag)

	if _, err = io.Copy(rw, obj); err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}
}

// httpPostFile uploads the request body as the object named by the "id"
// query parameter and invalidates the dataloader caches touching it.
func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
	s3Client := ctx.Value("s3Client").(*minio.Client)
	loader := ctx.Value("loader").(map[string]*dataloader.Loader)

	id := r.URL.Query().Get("id")

	mimeType, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))

	info, err := s3Client.PutObject(context.Background(), bucketName, id, r.Body, r.ContentLength, minio.PutObjectOptions{
		ContentType: mimeType,
	})
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Invalidate cache
	loader["getFile"].Clear(ctx, dataloader.StringKey(info.Key))
	loader["listObjects"].Clear(ctx, dataloader.StringKey(info.Key))
	loader["getFiles"].Clear(ctx, dataloader.StringKey(filepath.Dir(info.Key)))

	rw.WriteHeader(http.StatusCreated)
}
|
|
||||||
11
internal/httpserver/debug.go
Normal file
11
internal/httpserver/debug.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//go:build !prod
|
||||||
|
// +build !prod
|
||||||
|
|
||||||
|
package httpserver
|
||||||
|
|
||||||
|
import "github.com/gorilla/mux"
|
||||||
|
|
||||||
|
// Since we dont have the static directory when developing we replace the function with an empty one
|
||||||
|
func initStatic(r *mux.Router) {
|
||||||
|
// NOOP
|
||||||
|
}
|
||||||
306
internal/httpserver/httpServer.go
Normal file
306
internal/httpserver/httpServer.go
Normal file
@@ -0,0 +1,306 @@
|
|||||||
|
package httpserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang-jwt/jwt"
|
||||||
|
jwtRequest "github.com/golang-jwt/jwt/request"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
"github.com/graphql-go/graphql"
|
||||||
|
"github.com/graphql-go/graphql/gqlerrors"
|
||||||
|
"github.com/graphql-go/handler"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
helper "git.kapelle.org/niklas/s3browser/internal/helper"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/loader"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
tokenExp = int64((time.Hour * 23).Seconds())
|
||||||
|
)
|
||||||
|
|
||||||
|
type cookieExtractor struct {
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cookieExtractor) ExtractToken(req *http.Request) (string, error) {
|
||||||
|
cookie, err := req.Cookie(c.Name)
|
||||||
|
|
||||||
|
if err == nil && len(cookie.Value) != 0 {
|
||||||
|
return cookie.Value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", jwtRequest.ErrNoTokenInRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
// InitHttp setup and start the http server. Blocking
|
||||||
|
func InitHttp(resolveContext context.Context, schema graphql.Schema, address string) error {
|
||||||
|
r := mux.NewRouter()
|
||||||
|
|
||||||
|
gqlHandler := handler.New(&handler.Config{
|
||||||
|
Schema: &schema,
|
||||||
|
Pretty: true,
|
||||||
|
GraphiQL: false,
|
||||||
|
Playground: true,
|
||||||
|
FormatErrorFn: func(err error) gqlerrors.FormattedError {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case gqlerrors.FormattedError:
|
||||||
|
log.Error("GQL: ", err.Message)
|
||||||
|
case *gqlerrors.Error:
|
||||||
|
log.Errorf("GQL: '%s' at '%v'", err.Message, err.Path)
|
||||||
|
}
|
||||||
|
return gqlerrors.FormatError(err)
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
r.Use(func(h http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
parsedToken, err := jwtRequest.ParseFromRequestWithClaims(r, jwtRequest.MultiExtractor{
|
||||||
|
jwtRequest.AuthorizationHeaderExtractor,
|
||||||
|
&cookieExtractor{Name: "jwt"},
|
||||||
|
}, &types.JWTClaims{}, jwtKeyFunc)
|
||||||
|
|
||||||
|
if err == nil && parsedToken.Valid {
|
||||||
|
newRequest := r.WithContext(context.WithValue(r.Context(), "jwt", parsedToken))
|
||||||
|
h.ServeHTTP(rw, newRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
h.ServeHTTP(rw, r)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
r.HandleFunc("/api/graphql", func(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
token := r.Context().Value("jwt")
|
||||||
|
refreshTokenIfNeeded(rw, r)
|
||||||
|
gqlHandler.ContextHandler(context.WithValue(resolveContext, "jwt", token), rw, r)
|
||||||
|
})
|
||||||
|
|
||||||
|
r.HandleFunc("/api/file", func(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
refreshTokenIfNeeded(rw, r)
|
||||||
|
httpGetFile(resolveContext, rw, r)
|
||||||
|
}).Methods("GET")
|
||||||
|
|
||||||
|
r.HandleFunc("/api/file", func(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
refreshTokenIfNeeded(rw, r)
|
||||||
|
httpPostFile(resolveContext, rw, r)
|
||||||
|
}).Methods("POST")
|
||||||
|
|
||||||
|
r.HandleFunc("/api/cookie", setLoginCookie).Methods("POST")
|
||||||
|
|
||||||
|
r.HandleFunc("/api/logout", logout).Methods("POST")
|
||||||
|
|
||||||
|
// Init the embedded static files
|
||||||
|
initStatic(r)
|
||||||
|
|
||||||
|
return http.ListenAndServe(address, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func httpGetFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if !helper.IsAuthenticated(r.Context()) {
|
||||||
|
rw.WriteHeader(http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s3Client := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
idString := r.URL.Query().Get("id")
|
||||||
|
|
||||||
|
id := types.ParseID(idString)
|
||||||
|
|
||||||
|
if id == nil {
|
||||||
|
// Failed to parse ID
|
||||||
|
rw.WriteHeader(http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'StatObject': ", id)
|
||||||
|
objInfo, err := s3Client.StatObject(context.Background(), *id)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to get object info: ", err)
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
reqEtag := r.Header.Get("If-None-Match")
|
||||||
|
if reqEtag == objInfo.ETag {
|
||||||
|
rw.WriteHeader(http.StatusNotModified)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'GetObject': ", id)
|
||||||
|
obj, err := s3Client.GetObject(context.Background(), *id)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to get object: ", err)
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.Header().Set("Cache-Control", "must-revalidate")
|
||||||
|
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", id.Name()))
|
||||||
|
rw.Header().Set("Content-Type", objInfo.ContentType)
|
||||||
|
rw.Header().Set("ETag", objInfo.ETag)
|
||||||
|
|
||||||
|
_, err = io.Copy(rw, obj)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func httpPostFile(ctx context.Context, rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if !helper.IsAuthenticated(r.Context()) {
|
||||||
|
rw.WriteHeader(http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s3Client := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
idString := r.URL.Query().Get("id")
|
||||||
|
|
||||||
|
id := types.ParseID(idString)
|
||||||
|
|
||||||
|
if id == nil {
|
||||||
|
// Failed to parse ID
|
||||||
|
rw.WriteHeader(http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
id.Normalize()
|
||||||
|
|
||||||
|
// contentType := r.Header.Get("Content-Type")
|
||||||
|
// mimeType, _, _ := mime.ParseMediaType(contentType)
|
||||||
|
|
||||||
|
log.Debug("S3 'PutObject': ", id)
|
||||||
|
err := s3Client.PutObject(context.Background(), *id, r.Body, r.ContentLength) // TODO: put content type
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
loader := ctx.Value("loader").(*loader.Loader)
|
||||||
|
loader.InvalidedCacheForId(ctx, *id)
|
||||||
|
|
||||||
|
rw.WriteHeader(http.StatusCreated)
|
||||||
|
}
|
||||||
|
|
||||||
|
func jwtKeyFunc(t *jwt.Token) (interface{}, error) {
|
||||||
|
if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
|
||||||
|
return nil, fmt.Errorf("Unexpected signing method: %v", t.Header["alg"])
|
||||||
|
}
|
||||||
|
return []byte("TODO"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//setLoginCookie if provieded a valid JWT in the body then set a httpOnly cookie with the token
|
||||||
|
func setLoginCookie(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
body, err := io.ReadAll(r.Body)
|
||||||
|
defer r.Body.Close()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenString := string(body)
|
||||||
|
|
||||||
|
token, err := jwt.ParseWithClaims(tokenString, &types.JWTClaims{}, jwtKeyFunc)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !token.Valid {
|
||||||
|
rw.WriteHeader(http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, ok := token.Claims.(*types.JWTClaims)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cookie := &http.Cookie{
|
||||||
|
Name: "jwt",
|
||||||
|
Value: tokenString,
|
||||||
|
HttpOnly: true,
|
||||||
|
SameSite: http.SameSiteStrictMode,
|
||||||
|
Path: "/api",
|
||||||
|
Expires: time.Unix(claims.ExpiresAt, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
http.SetCookie(rw, cookie)
|
||||||
|
|
||||||
|
rw.WriteHeader(http.StatusNoContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// logout clears the "jwt" cookie by overwriting it with an empty,
// already-expired value.
func logout(rw http.ResponseWriter, r *http.Request) {
	expired := &http.Cookie{
		Name:     "jwt",
		Value:    "",
		Path:     "/api",
		Expires:  time.Unix(0, 0),
		HttpOnly: true,
		SameSite: http.SameSiteStrictMode,
	}

	http.SetCookie(rw, expired)
	rw.WriteHeader(http.StatusNoContent)
}
|
||||||
|
|
||||||
|
func refreshTokenIfNeeded(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
currentToken, ok := r.Context().Value("jwt").(*jwt.Token)
|
||||||
|
|
||||||
|
if !ok && currentToken == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, ok := currentToken.Claims.(*types.JWTClaims)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
log.Error("Failed to refresh JWT")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh only if token older than 1 hour
|
||||||
|
if (claims.ExpiresAt - time.Now().Unix()) > tokenExp {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
newToken := helper.CreateJWT(claims)
|
||||||
|
|
||||||
|
tokenString, err := newToken.SignedString([]byte("TODO"))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to refresh JWT")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cookie := &http.Cookie{
|
||||||
|
Name: "jwt",
|
||||||
|
Value: tokenString,
|
||||||
|
HttpOnly: true,
|
||||||
|
SameSite: http.SameSiteStrictMode,
|
||||||
|
Path: "/api",
|
||||||
|
Expires: time.Unix(int64(claims.ExpiresAt), 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
http.SetCookie(rw, cookie)
|
||||||
|
|
||||||
|
log.Debug("Refreshed JWT")
|
||||||
|
}
|
||||||
33
internal/httpserver/staticFiles.go
Normal file
33
internal/httpserver/staticFiles.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
//go:build prod
|
||||||
|
// +build prod
|
||||||
|
|
||||||
|
package httpserver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"embed"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// content holds our static web server content.
|
||||||
|
//go:embed static
|
||||||
|
var staticFiles embed.FS
|
||||||
|
|
||||||
|
type spaFileSystem struct {
|
||||||
|
root http.FileSystem
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spa *spaFileSystem) Open(name string) (http.File, error) {
|
||||||
|
f, err := spa.root.Open(name)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return spa.root.Open("index.html")
|
||||||
|
}
|
||||||
|
return f, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func initStatic(r *mux.Router) {
|
||||||
|
staticFS, _ := fs.Sub(staticFiles, "static")
|
||||||
|
r.PathPrefix("/").Handler(http.FileServer(&spaFileSystem{http.FS(staticFS)}))
|
||||||
|
}
|
||||||
134
internal/loader/batch.go
Normal file
134
internal/loader/batch.go
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/graph-gophers/dataloader"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// listObjectsBatch batch func for calling s3.ListObjects()
|
||||||
|
func listObjectsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
||||||
|
var results []*dataloader.Result
|
||||||
|
|
||||||
|
s3Client, ok := c.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range k {
|
||||||
|
id := v.Raw().(types.ID)
|
||||||
|
objects, err := s3Client.ListObjects(c, id)
|
||||||
|
if err != nil {
|
||||||
|
results = append(results, &dataloader.Result{
|
||||||
|
Data: nil,
|
||||||
|
Error: err,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
results = append(results, &dataloader.Result{
|
||||||
|
Data: objects,
|
||||||
|
Error: nil,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
// listObjectsRecursiveBatch just like listObjectsBatch but with recursive set to true
|
||||||
|
func listObjectsRecursiveBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
||||||
|
var results []*dataloader.Result
|
||||||
|
|
||||||
|
s3Client, ok := c.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range k {
|
||||||
|
id := v.Raw().(types.ID)
|
||||||
|
objects, err := s3Client.ListObjectsRecursive(c, id)
|
||||||
|
if err != nil {
|
||||||
|
results = append(results, &dataloader.Result{
|
||||||
|
Data: nil,
|
||||||
|
Error: err,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
results = append(results, &dataloader.Result{
|
||||||
|
Data: objects,
|
||||||
|
Error: nil,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func listBucketsBatch(c context.Context, k dataloader.Keys) []*dataloader.Result {
|
||||||
|
var results []*dataloader.Result
|
||||||
|
|
||||||
|
s3Client, ok := c.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("S3 'ListBuckets'")
|
||||||
|
buckets, err := s3Client.ListBuckets(c)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return handleLoaderError(k, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := &dataloader.Result{
|
||||||
|
Data: buckets,
|
||||||
|
Error: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
for range k {
|
||||||
|
results = append(results, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func statObjectBatch(ctx context.Context, k dataloader.Keys) []*dataloader.Result {
|
||||||
|
log.Debug("statObjectBatch")
|
||||||
|
|
||||||
|
var results []*dataloader.Result
|
||||||
|
s3Client, ok := ctx.Value("s3Client").(s3.S3Service)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return handleLoaderError(k, fmt.Errorf("Failed to get s3Client from context"))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range k {
|
||||||
|
id := v.Raw().(types.ID)
|
||||||
|
log.Debug("S3 'StatObject': ", id)
|
||||||
|
stat, err := s3Client.StatObject(ctx, id)
|
||||||
|
results = append(results, &dataloader.Result{
|
||||||
|
Data: stat,
|
||||||
|
Error: err,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleLoaderError helper func when the whole batch failed
|
||||||
|
func handleLoaderError(k dataloader.Keys, err error) []*dataloader.Result {
|
||||||
|
log.Error(err.Error())
|
||||||
|
var results []*dataloader.Result
|
||||||
|
for range k {
|
||||||
|
results = append(results, &dataloader.Result{
|
||||||
|
Data: nil,
|
||||||
|
Error: err,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
164
internal/loader/loader.go
Normal file
164
internal/loader/loader.go
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/cache"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/helper"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/graph-gophers/dataloader"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Loader struct {
|
||||||
|
listObjectsLoader *dataloader.Loader
|
||||||
|
listObjectsRecursiveLoader *dataloader.Loader
|
||||||
|
statObjectLoader *dataloader.Loader
|
||||||
|
listBucketsLoader *dataloader.Loader
|
||||||
|
|
||||||
|
listObjectsLoaderCache cache.S3Cache
|
||||||
|
listObjectsRecursiveLoaderCache cache.S3Cache
|
||||||
|
statObjectLoaderCache cache.S3Cache
|
||||||
|
listBucketsLoaderCache cache.S3Cache
|
||||||
|
}
|
||||||
|
|
||||||
|
type CacheConfig struct {
|
||||||
|
ListObjectsLoaderCache cache.S3Cache
|
||||||
|
ListObjectsRecursiveLoaderCache cache.S3Cache
|
||||||
|
StatObjectLoaderCache cache.S3Cache
|
||||||
|
ListBucketsLoaderCache cache.S3Cache
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLoader(cacheConfig CacheConfig) *Loader {
|
||||||
|
listObjectsLoaderCache := cacheConfig.ListObjectsLoaderCache
|
||||||
|
listObjectsRecursiveLoaderCache := cacheConfig.ListObjectsRecursiveLoaderCache
|
||||||
|
statObjectLoaderCache := cacheConfig.StatObjectLoaderCache
|
||||||
|
listBucketsLoaderCache := cacheConfig.ListBucketsLoaderCache
|
||||||
|
|
||||||
|
return &Loader{
|
||||||
|
listObjectsLoader: dataloader.NewBatchedLoader(
|
||||||
|
listObjectsBatch,
|
||||||
|
dataloader.WithCache(listObjectsLoaderCache),
|
||||||
|
),
|
||||||
|
listObjectsLoaderCache: listObjectsLoaderCache,
|
||||||
|
|
||||||
|
listObjectsRecursiveLoader: dataloader.NewBatchedLoader(
|
||||||
|
listObjectsRecursiveBatch,
|
||||||
|
dataloader.WithCache(listObjectsRecursiveLoaderCache),
|
||||||
|
),
|
||||||
|
listObjectsRecursiveLoaderCache: listObjectsRecursiveLoaderCache,
|
||||||
|
|
||||||
|
statObjectLoader: dataloader.NewBatchedLoader(
|
||||||
|
statObjectBatch,
|
||||||
|
dataloader.WithCache(statObjectLoaderCache),
|
||||||
|
),
|
||||||
|
statObjectLoaderCache: statObjectLoaderCache,
|
||||||
|
|
||||||
|
listBucketsLoader: dataloader.NewBatchedLoader(
|
||||||
|
listBucketsBatch,
|
||||||
|
dataloader.WithCache(listBucketsLoaderCache),
|
||||||
|
),
|
||||||
|
listBucketsLoaderCache: listBucketsLoaderCache,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) GetFiles(ctx context.Context, path types.ID) ([]types.File, error) {
|
||||||
|
thunk := l.listObjectsLoader.Load(ctx, path)
|
||||||
|
objects, err := thunk()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []types.File
|
||||||
|
|
||||||
|
for _, obj := range objects.([]s3.Object) {
|
||||||
|
if !obj.ID.IsDirectory() {
|
||||||
|
files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) GetFile(ctx context.Context, id types.ID) (*types.File, error) {
|
||||||
|
thunk := l.statObjectLoader.Load(ctx, id)
|
||||||
|
|
||||||
|
result, err := thunk()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
objInfo, ok := result.(*s3.Object)
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Failed to stats object")
|
||||||
|
}
|
||||||
|
|
||||||
|
return helper.ObjInfoToFile(*objInfo, id.Bucket), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) GetDirs(ctx context.Context, path types.ID) ([]types.Directory, error) {
|
||||||
|
thunk := l.listObjectsLoader.Load(ctx, path)
|
||||||
|
|
||||||
|
result, err := thunk()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var dirs []types.Directory
|
||||||
|
for _, obj := range result.([]s3.Object) {
|
||||||
|
if obj.ID.IsDirectory() {
|
||||||
|
dirs = append(dirs, types.Directory{
|
||||||
|
ID: obj.ID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dirs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) GetBuckets(ctx context.Context) ([]string, error) {
|
||||||
|
thunk := l.listBucketsLoader.Load(ctx, dataloader.StringKey(""))
|
||||||
|
|
||||||
|
result, err := thunk()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.([]string), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) GetFilesRecursive(ctx context.Context, path types.ID) ([]types.File, error) {
|
||||||
|
thunk := l.listObjectsRecursiveLoader.Load(ctx, path)
|
||||||
|
result, err := thunk()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
objects := result.([]s3.Object)
|
||||||
|
|
||||||
|
var files []types.File
|
||||||
|
for _, obj := range objects {
|
||||||
|
files = append(files, *helper.ObjInfoToFile(obj, path.Bucket))
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) InvalidedCacheForId(ctx context.Context, id types.ID) {
|
||||||
|
parent := id.Parent()
|
||||||
|
|
||||||
|
l.statObjectLoader.Clear(ctx, id)
|
||||||
|
|
||||||
|
// Code below is useless for now until we use a propper cache for "listObjectsLoader" and "listObjectsRecursiveLoader"
|
||||||
|
// TODO: implement cache invalidation for "listObjectsLoader" and "listObjectsRecursiveLoader"
|
||||||
|
l.listObjectsLoader.Clear(ctx, id).Clear(ctx, parent)
|
||||||
|
|
||||||
|
// Remove up from recursive list
|
||||||
|
for rParent := parent; rParent != nil; rParent = rParent.Parent() {
|
||||||
|
l.listObjectsRecursiveLoader.Clear(ctx, rParent)
|
||||||
|
}
|
||||||
|
}
|
||||||
105
internal/loader/loader_test.go
Normal file
105
internal/loader/loader_test.go
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
package loader_test

import (
	"context"
	"strings"
	"testing"

	"git.kapelle.org/niklas/s3browser/internal/loader"
	"git.kapelle.org/niklas/s3browser/internal/s3"
	"git.kapelle.org/niklas/s3browser/internal/types"
	"github.com/graph-gophers/dataloader"
	"github.com/stretchr/testify/assert"
)

// setup builds a loader backed by a pre-filled mock S3 with two buckets
// and returns the resolve context (carrying the s3 client), the loader
// and an assert helper bound to t.
func setup(t *testing.T) (context.Context, *loader.Loader, *assert.Assertions) {
	assert := assert.New(t)
	s3, _ := s3.NewMockS3([]string{"bucket1", "bucket2"})
	loader := loader.NewLoader(loader.CacheConfig{
		ListObjectsLoaderCache:          &dataloader.NoCache{},
		ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
		StatObjectLoaderCache:           &dataloader.NoCache{},
		ListBucketsLoaderCache:          &dataloader.NoCache{},
	})

	fillS3(s3)
	ctx := context.WithValue(context.Background(), "s3Client", s3)

	return ctx, loader, assert
}

// fillS3 seeds the mock with a small file tree in bucket1. Every object
// gets the same 7-byte "content" body.
func fillS3(s3 s3.S3Service) {
	ctx := context.Background()
	length := int64(len("content"))

	for _, v := range []string{
		"bucket1:/file1", "bucket1:/file2", "bucket1:/dir1/file1",
		"bucket1:/dir1/file2", "bucket1:/dir2/file1", "bucket1:/dir1/sub1/file1",
		"bucket1:/dir1/sub1/file2",
	} {
		s3.PutObject(ctx, *types.ParseID(v), strings.NewReader("content"), length)
	}
}

// TestCreateLoader checks that a loader can be constructed with no-op caches.
func TestCreateLoader(t *testing.T) {
	assert := assert.New(t)
	loader := loader.NewLoader(loader.CacheConfig{
		ListObjectsLoaderCache:          &dataloader.NoCache{},
		ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
		StatObjectLoaderCache:           &dataloader.NoCache{},
		ListBucketsLoaderCache:          &dataloader.NoCache{},
	})

	assert.NotNil(loader)
}

// TestGetBuckets checks that both configured buckets are reported.
func TestGetBuckets(t *testing.T) {
	ctx, loader, assert := setup(t)

	buckets, err := loader.GetBuckets(ctx)
	assert.NoError(err)
	assert.Len(buckets, 2)
	assert.Contains(buckets, "bucket1")
	assert.Contains(buckets, "bucket2")
}

// TestGetFile checks the metadata of a single seeded file.
func TestGetFile(t *testing.T) {
	ctx, loader, assert := setup(t)

	file, err := loader.GetFile(ctx, *types.ParseID("bucket1:/dir1/file1"))
	assert.NoError(err)

	assert.Equal("bucket1:/dir1/file1", file.ID.String())
	assert.Equal("file1", file.Name)
	assert.Equal(int64(len("content")), file.Size)
}

// TestGetFiles checks that the bucket root lists exactly its two direct files.
func TestGetFiles(t *testing.T) {
	ctx, loader, assert := setup(t)

	id := types.ParseID("bucket1:/")

	files, err := loader.GetFiles(ctx, *id)
	assert.NoError(err)
	assert.Len(files, 2)
}

// TestGetDir checks that the bucket root lists its two direct subdirectories.
func TestGetDir(t *testing.T) {
	ctx, loader, assert := setup(t)

	id := types.ParseID("bucket1:/")

	dirs, err := loader.GetDirs(ctx, *id)
	assert.NoError(err)
	assert.Len(dirs, 2)
}

// TestGetFilesRecursive checks that a recursive listing of /dir1/ finds
// all four files in it and its subdirectory.
// (Renamed from the undescriptive "Test".)
func TestGetFilesRecursive(t *testing.T) {
	ctx, loader, assert := setup(t)

	id := types.ParseID("bucket1:/dir1/")

	files, err := loader.GetFilesRecursive(ctx, *id)
	assert.NoError(err)
	assert.Len(files, 4)
}
|
||||||
@@ -1,107 +0,0 @@
|
|||||||
package s3browser
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/graph-gophers/dataloader"
|
|
||||||
"github.com/minio/minio-go/v7"
|
|
||||||
)
|
|
||||||
|
|
||||||
// deleteMutation removes the object with the given id from the configured
// bucket and clears the dataloader cache entries that may still reference it.
func deleteMutation(ctx context.Context, id string) error {
	// The s3 client is handed down through the resolve context.
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)

	if !ok {
		return fmt.Errorf("Failed to get s3Client from context")
	}

	// TODO: it is posible to remove multiple objects with a single call.
	// Is it better to batch this?
	err := s3Client.RemoveObject(ctx, bucketName, id, minio.RemoveObjectOptions{})

	if err != nil {
		return err
	}

	// Invalidate cache
	loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)
	if !ok {
		return fmt.Errorf("Failed to get loader from context")
	}

	// Drop the deleted file itself and the listings of its containing
	// directory so the next query reflects the deletion.
	loader["getFile"].Clear(ctx, dataloader.StringKey(id))
	loader["listObjects"].Clear(ctx, dataloader.StringKey(id))
	loader["getFiles"].Clear(ctx, dataloader.StringKey(filepath.Dir(id)))

	return nil
}
|
|
||||||
|
|
||||||
// copyMutation copies the object src to dest (server side, within the
// configured bucket) and returns a File stub for the new object.
func copyMutation(ctx context.Context, src, dest string) (*File, error) {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)

	if !ok {
		return nil, fmt.Errorf("Failed to get s3Client from context")
	}

	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
		Bucket: bucketName,
		Object: dest,
	}, minio.CopySrcOptions{
		Bucket: bucketName,
		Object: src,
	})

	if err != nil {
		return nil, err
	}

	// Invalidate cache

	loader, ok := ctx.Value("loader").(map[string]*dataloader.Loader)

	// TODO: Do we want to error when the operation
	// has succeeded but the cache invalidation has failed ?
	if ok {
		// Clear the destination's entries; the source is unchanged by a copy.
		loader["getFile"].Clear(ctx, dataloader.StringKey(info.Key))
		loader["listObjects"].Clear(ctx, dataloader.StringKey(info.Key))
		loader["getFiles"].Clear(ctx, dataloader.StringKey(filepath.Dir(info.Key)))
	}

	// Only the ID is filled in; other fields are resolved lazily elsewhere.
	return &File{
		ID: info.Key,
	}, nil

}
|
|
||||||
|
|
||||||
// moveMutation moves the object src to dest. S3 has no native move, so it
// is implemented as a server-side copy followed by a delete of the source
// (which also handles the source's cache invalidation).
func moveMutation(ctx context.Context, src, dest string) (*File, error) {
	s3Client, ok := ctx.Value("s3Client").(*minio.Client)

	if !ok {
		return nil, fmt.Errorf("Failed to get s3Client from context")
	}

	// There is no (spoon) move. Only copy and delete
	info, err := s3Client.CopyObject(ctx, minio.CopyDestOptions{
		Bucket: bucketName,
		Object: dest,
	}, minio.CopySrcOptions{
		Bucket: bucketName,
		Object: src,
	})

	if err != nil {
		return nil, err
	}

	// Delete the source; this also clears its dataloader cache entries.
	err = deleteMutation(ctx, src)

	if err != nil {
		return nil, err
	}

	return &File{
		ID: info.Key,
	}, nil

}
|
|
||||||
131
internal/s3/minio.go
Normal file
131
internal/s3/minio.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
package s3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/minio/minio-go/v7"
|
||||||
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// minioS3 implements S3Service on top of the MinIO Go client.
type minioS3 struct {
	client *minio.Client
}

// NewMinio builds an S3Service talking to the endpoint from config,
// authenticating with static signature-V4 credentials.
func NewMinio(config types.AppConfig) (S3Service, error) {
	client, err := minio.New(config.S3Endoint, &minio.Options{
		Creds:  credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
		Secure: config.S3SSL,
	})

	if err != nil {
		return nil, err
	}

	return &minioS3{
		client: client,
	}, nil
}
|
||||||
|
|
||||||
|
func (m *minioS3) ListBuckets(ctx context.Context) ([]string, error) {
|
||||||
|
buckets, err := m.client.ListBuckets(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var rtn []string
|
||||||
|
|
||||||
|
for _, v := range buckets {
|
||||||
|
rtn = append(rtn, v.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rtn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *minioS3) ListObjects(ctx context.Context, id types.ID) ([]Object, error) {
|
||||||
|
var result []Object
|
||||||
|
|
||||||
|
for objInfo := range m.client.ListObjects(ctx, id.Bucket, minio.ListObjectsOptions{
|
||||||
|
Prefix: id.Key,
|
||||||
|
Recursive: false,
|
||||||
|
}) {
|
||||||
|
objId := types.ID{
|
||||||
|
Bucket: id.Bucket,
|
||||||
|
Key: objInfo.Key,
|
||||||
|
}
|
||||||
|
|
||||||
|
result = append(result, Object{
|
||||||
|
ID: objId,
|
||||||
|
Size: objInfo.Size,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *minioS3) ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error) {
|
||||||
|
var result []Object
|
||||||
|
|
||||||
|
for objInfo := range m.client.ListObjects(ctx, id.Bucket, minio.ListObjectsOptions{
|
||||||
|
Prefix: id.Key,
|
||||||
|
Recursive: true,
|
||||||
|
}) {
|
||||||
|
objId := types.ID{
|
||||||
|
Bucket: id.Bucket,
|
||||||
|
Key: objInfo.Key,
|
||||||
|
}
|
||||||
|
result = append(result, Object{
|
||||||
|
ID: objId,
|
||||||
|
Size: objInfo.Size,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *minioS3) GetObject(ctx context.Context, id types.ID) (ObjectReader, error) {
|
||||||
|
object, err := m.client.GetObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return object, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *minioS3) PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error {
|
||||||
|
_, err := m.client.PutObject(ctx, id.Bucket, id.Key, reader, objectSize, minio.PutObjectOptions{})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *minioS3) CopyObject(ctx context.Context, src types.ID, dest types.ID) error {
|
||||||
|
_, err := m.client.CopyObject(ctx, minio.CopyDestOptions{
|
||||||
|
Bucket: dest.Bucket,
|
||||||
|
Object: dest.Key,
|
||||||
|
}, minio.CopySrcOptions{
|
||||||
|
Bucket: src.Bucket,
|
||||||
|
Object: src.Key,
|
||||||
|
})
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *minioS3) StatObject(ctx context.Context, id types.ID) (*Object, error) {
|
||||||
|
info, err := m.client.StatObject(ctx, id.Bucket, id.Key, minio.GetObjectOptions{})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Object{
|
||||||
|
ID: id,
|
||||||
|
Size: info.Size,
|
||||||
|
LastModified: info.LastModified,
|
||||||
|
ContentType: info.ContentType,
|
||||||
|
ETag: info.ETag,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveObject deletes the object identified by id.
func (m *minioS3) RemoveObject(ctx context.Context, id types.ID) error {
	return m.client.RemoveObject(ctx, id.Bucket, id.Key, minio.RemoveObjectOptions{})
}
|
||||||
154
internal/s3/mock.go
Normal file
154
internal/s3/mock.go
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
package s3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/md5"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mockS3 is an in-memory implementation of S3Service used in tests.
type mockS3 struct {
	buckets []string                // bucket names reported by ListBuckets
	objects map[types.ID]mockObject // stored objects keyed by their full ID
}

// mockObject holds the stored state of a single object.
type mockObject struct {
	content     []byte    // raw object bytes
	contentType string    // MIME type reported by StatObject
	lastMod     time.Time // time of the last PutObject for this ID
}

// mockObjectReader adapts a *bytes.Reader to the ObjectReader interface
// by adding a no-op Close.
type mockObjectReader struct {
	*bytes.Reader
}

// Close implements io.Closer; there is nothing to release for an
// in-memory reader.
func (r mockObjectReader) Close() error {
	// NOOP
	return nil
}

// NewMockS3 creates an empty mock S3 exposing the given bucket names.
func NewMockS3(buckets []string) (S3Service, error) {
	return &mockS3{
		buckets: buckets,
		objects: map[types.ID]mockObject{},
	}, nil
}
|
||||||
|
|
||||||
|
// ListBuckets returns the bucket names the mock was created with.
func (m *mockS3) ListBuckets(ctx context.Context) ([]string, error) {
	return m.buckets, nil
}
|
||||||
|
|
||||||
|
// ListObjects emulates a non-recursive S3 listing: it returns the objects
// that are direct children of id, plus one synthetic directory entry for
// every deeper key that shares id's prefix.
func (m *mockS3) ListObjects(ctx context.Context, id types.ID) ([]Object, error) {
	var results []Object

	// Set of first-level directory keys discovered below id.
	dirs := make(map[string]bool)

	// Number of "/"-separated segments in the listing key; used below to
	// truncate a deeper object key to its first-level directory.
	// NOTE(review): assumes id.Key is a directory key ending in "/" — confirm with callers.
	depth := len(strings.Split(id.Key, "/"))

	for k, v := range m.objects {
		if k.Bucket == id.Bucket {
			if k.Parent().Key == id.Key {
				// Direct child: emit the object itself.
				results = append(results, *mockObjToObject(v, k))
			} else if strings.HasPrefix(k.Key, id.Key) {
				// Deeper descendant: record only its first-level directory.
				s := strings.Join(strings.Split(k.Key, "/")[:depth], "/") + "/"
				dirs[s] = true
			}
		}
	}

	// Emit one placeholder Object (ID only, no size/metadata) per directory.
	for k := range dirs {
		results = append(results, Object{
			ID: types.ID{
				Bucket: id.Bucket,
				Key:    k,
			},
		})
	}

	return results, nil
}
|
||||||
|
|
||||||
|
func (m *mockS3) ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error) {
|
||||||
|
var results []Object
|
||||||
|
|
||||||
|
for k, v := range m.objects {
|
||||||
|
if k.Bucket == id.Bucket {
|
||||||
|
if strings.HasPrefix(k.Key, id.Key) {
|
||||||
|
results = append(results, *mockObjToObject(v, k))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return results, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockS3) GetObject(ctx context.Context, id types.ID) (ObjectReader, error) {
|
||||||
|
mockObj, exist := m.objects[id]
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
return nil, fmt.Errorf("Object not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := bytes.NewReader(mockObj.content)
|
||||||
|
|
||||||
|
return mockObjectReader{reader}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockS3) PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error {
|
||||||
|
content, err := ioutil.ReadAll(reader)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
m.objects[id] = mockObject{
|
||||||
|
content: content,
|
||||||
|
lastMod: time.Now(),
|
||||||
|
contentType: "application/octet-stream", // TODO: detect MIME type or dont its just a mock after all
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockS3) CopyObject(ctx context.Context, src types.ID, dest types.ID) error {
|
||||||
|
srcObj, exist := m.objects[src]
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
return fmt.Errorf("Object not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
m.objects[dest] = srcObj
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockS3) StatObject(ctx context.Context, id types.ID) (*Object, error) {
|
||||||
|
mockObj, exist := m.objects[id]
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
return nil, fmt.Errorf("Object not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return mockObjToObject(mockObj, id), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveObject deletes the object with the given id. Removing an id that
// does not exist is a no-op (delete on a map).
func (m *mockS3) RemoveObject(ctx context.Context, id types.ID) error {
	delete(m.objects, id)
	return nil
}
|
||||||
|
|
||||||
|
func mockObjToObject(mockObj mockObject, id types.ID) *Object {
|
||||||
|
return &Object{
|
||||||
|
ID: id,
|
||||||
|
Size: int64(len(mockObj.content)),
|
||||||
|
ContentType: mockObj.contentType,
|
||||||
|
LastModified: mockObj.lastMod,
|
||||||
|
ETag: fmt.Sprintf("%x", md5.Sum(mockObj.content)),
|
||||||
|
}
|
||||||
|
}
|
||||||
162
internal/s3/mock_test.go
Normal file
162
internal/s3/mock_test.go
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
package s3_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func setup(t *testing.T) (s3.S3Service, context.Context, *assert.Assertions) {
|
||||||
|
service, _ := s3.NewMockS3([]string{"bucket1", "bucket2"})
|
||||||
|
ctx := context.Background()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
return service, ctx, assert
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuckets(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
buckets, err := s3.ListBuckets(ctx)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
assert.Len(buckets, 2)
|
||||||
|
|
||||||
|
assert.Contains(buckets, "bucket1")
|
||||||
|
assert.Contains(buckets, "bucket2")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPut(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
content := "FileContent"
|
||||||
|
|
||||||
|
err := s3.PutObject(ctx, *types.ParseID("bucket1:/file1"), strings.NewReader(content), int64(len(content)))
|
||||||
|
assert.NoError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPutAndGet(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
content := "FileContent"
|
||||||
|
id := *types.ParseID("bucket1:/file1")
|
||||||
|
|
||||||
|
err := s3.PutObject(ctx, *types.ParseID("bucket1:/file1"), strings.NewReader(content), int64(len(content)))
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
reader, err := s3.GetObject(ctx, id)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
readerContent, err := ioutil.ReadAll(reader)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
assert.Equal(content, string(readerContent))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStat(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
content := "FileContent"
|
||||||
|
id := *types.ParseID("bucket1:/file1")
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
err := s3.PutObject(ctx, id, strings.NewReader(content), int64(len(content)))
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
obj, err := s3.StatObject(ctx, id)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
assert.Equal(id.String(), obj.ID.String())
|
||||||
|
assert.Equal(int64(len(content)), obj.Size)
|
||||||
|
assert.NotEmpty(obj.ETag)
|
||||||
|
assert.WithinDuration(now, obj.LastModified, time.Second*1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRemove(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
content := "FileContent"
|
||||||
|
id := *types.ParseID("bucket1:/file1")
|
||||||
|
|
||||||
|
err := s3.PutObject(ctx, id, strings.NewReader(content), int64(len(content)))
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
err = s3.RemoveObject(ctx, id)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
_, err = s3.StatObject(ctx, id)
|
||||||
|
assert.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestList(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
content1 := "FileContent1"
|
||||||
|
id1 := *types.ParseID("bucket1:/file1")
|
||||||
|
|
||||||
|
err := s3.PutObject(ctx, id1, strings.NewReader(content1), int64(len(content1)))
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
content2 := "FileContent2"
|
||||||
|
id2 := *types.ParseID("bucket1:/file2")
|
||||||
|
|
||||||
|
err = s3.PutObject(ctx, id2, strings.NewReader(content2), int64(len(content2)))
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
listID := types.ParseID("bucket1:/")
|
||||||
|
|
||||||
|
objects, err := s3.ListObjects(ctx, *listID)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
assert.Len(objects, 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListRecursive(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
s3.PutObject(ctx, *types.ParseID("bucket1:/file1"), strings.NewReader("content"), int64(len("content")))
|
||||||
|
s3.PutObject(ctx, *types.ParseID("bucket1:/path1/file1"), strings.NewReader("content"), int64(len("content")))
|
||||||
|
s3.PutObject(ctx, *types.ParseID("bucket1:/path1/file2"), strings.NewReader("content"), int64(len("content")))
|
||||||
|
s3.PutObject(ctx, *types.ParseID("bucket1:/path1/path2/file1"), strings.NewReader("content"), int64(len("content")))
|
||||||
|
s3.PutObject(ctx, *types.ParseID("bucket1:/path3/path4/file1"), strings.NewReader("content"), int64(len("content")))
|
||||||
|
|
||||||
|
objects, err := s3.ListObjectsRecursive(ctx, *types.ParseID("bucket1:/path1/"))
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.Len(objects, 3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopy(t *testing.T) {
|
||||||
|
s3, ctx, assert := setup(t)
|
||||||
|
|
||||||
|
id1 := *types.ParseID("bucket1:/file1")
|
||||||
|
id2 := *types.ParseID("bucket1:/file2")
|
||||||
|
|
||||||
|
s3.PutObject(ctx, id1, strings.NewReader("content"), int64(len("content")))
|
||||||
|
|
||||||
|
err := s3.CopyObject(ctx, id1, id2)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
obj1, err := s3.StatObject(ctx, id1)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(obj1)
|
||||||
|
|
||||||
|
obj2, err := s3.StatObject(ctx, id1)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(obj2)
|
||||||
|
|
||||||
|
assert.Equal(obj1.ETag, obj2.ETag)
|
||||||
|
assert.Equal(obj1.Size, obj2.Size)
|
||||||
|
|
||||||
|
obj2Reader, err := s3.GetObject(ctx, id2)
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
obj2Content, err := ioutil.ReadAll(obj2Reader)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.Equal([]byte("content"), obj2Content)
|
||||||
|
}
|
||||||
37
internal/s3/s3.go
Normal file
37
internal/s3/s3.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package s3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ObjectReader is the read side of an object as returned by GetObject:
// sequential, seekable, random-access and closeable.
type ObjectReader interface {
	io.Reader
	io.Seeker
	io.ReaderAt
	io.Closer
}

// Object describes a single S3 object (or a directory placeholder, whose
// metadata fields are zero) together with its metadata.
type Object struct {
	ID           types.ID  // bucket, key and optional version of the object
	Size         int64     // content length in bytes
	LastModified time.Time // last modification time as reported by the store
	ContentType  string    // MIME type of the content
	ETag         string    // entity tag of the stored content
}

// S3Service abstracts the object-store operations the application needs,
// so that the real client (minio) and the in-memory test mock can be used
// interchangeably.
type S3Service interface {
	ListBuckets(ctx context.Context) ([]string, error)

	GetObject(ctx context.Context, id types.ID) (ObjectReader, error)
	PutObject(ctx context.Context, id types.ID, reader io.Reader, objectSize int64) error

	ListObjects(ctx context.Context, id types.ID) ([]Object, error)
	ListObjectsRecursive(ctx context.Context, id types.ID) ([]Object, error)
	CopyObject(ctx context.Context, src types.ID, dest types.ID) error
	StatObject(ctx context.Context, id types.ID) (*Object, error)
	RemoveObject(ctx context.Context, id types.ID) error
}
|
||||||
@@ -2,89 +2,62 @@ package s3browser
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/minio/minio-go/v7"
|
"github.com/graph-gophers/dataloader"
|
||||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/cache"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/db"
|
||||||
|
gql "git.kapelle.org/niklas/s3browser/internal/gql"
|
||||||
|
httpserver "git.kapelle.org/niklas/s3browser/internal/httpserver"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/loader"
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/s3"
|
||||||
|
types "git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AppConfig general config
|
// Start starts the app
|
||||||
type AppConfig struct {
|
func Start(config types.AppConfig) {
|
||||||
S3Endoint string
|
|
||||||
S3AccessKey string
|
if config.LogDebug {
|
||||||
S3SecretKey string
|
log.SetLevel(log.DebugLevel)
|
||||||
S3SSL bool
|
|
||||||
S3Buket string
|
|
||||||
CacheTTL time.Duration
|
|
||||||
CacheCleanup time.Duration
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// File represents a file with its metadata
|
log.Info("Starting")
|
||||||
type File struct {
|
s3Client, err := s3.NewMinio(config)
|
||||||
ID string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
if err != nil {
|
||||||
Size int64 `json:"size"`
|
log.Error("Failed to setup s3 client: ", err.Error())
|
||||||
ContentType string `json:"contentType"`
|
return
|
||||||
ETag string `json:"etag"`
|
|
||||||
LastModified time.Time `json:"lastModified"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Directory represents a directory with its metadata
|
dbStore, err := db.NewDB(config.DSN)
|
||||||
type Directory struct {
|
if err != nil {
|
||||||
ID string `json:"id"`
|
log.Error("Failed to connect DB: ", err.Error())
|
||||||
Name string `json:"name"`
|
|
||||||
Files []File `json:"files"`
|
|
||||||
Directorys []Directory `json:"directorys"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var bucketName = "dev"
|
loader := loader.NewLoader(loader.CacheConfig{
|
||||||
|
ListObjectsLoaderCache: &dataloader.NoCache{},
|
||||||
// setupS3Client connect the s3Client
|
ListObjectsRecursiveLoaderCache: &dataloader.NoCache{},
|
||||||
func setupS3Client(config AppConfig) *minio.Client {
|
StatObjectLoaderCache: cache.NewTTLCache(config.CacheTTL, config.CacheCleanup),
|
||||||
minioClient, err := minio.New(config.S3Endoint, &minio.Options{
|
ListBucketsLoaderCache: cache.NewTTLCache(config.CacheTTL, config.CacheCleanup),
|
||||||
Creds: credentials.NewStaticV4(config.S3AccessKey, config.S3SecretKey, ""),
|
|
||||||
Secure: config.S3SSL,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
gql.GraphqlTypes()
|
||||||
log.Fatalln(err)
|
schema, err := gql.GraphqlSchema()
|
||||||
}
|
|
||||||
|
|
||||||
exists, err := minioClient.BucketExists(context.Background(), config.S3Buket)
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Error("Failed to generate graphq schemas: ", err.Error())
|
||||||
}
|
return
|
||||||
|
|
||||||
if !exists {
|
|
||||||
log.Fatalf("Bucket '%s' does not exist", config.S3Buket)
|
|
||||||
} else {
|
|
||||||
log.Print("S3 client connected")
|
|
||||||
}
|
|
||||||
|
|
||||||
return minioClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start starts the app
|
|
||||||
func Start(config AppConfig) {
|
|
||||||
s3Client := setupS3Client(config)
|
|
||||||
|
|
||||||
loaderMap := createDataloader(config)
|
|
||||||
|
|
||||||
graphqlTypes()
|
|
||||||
schema, err := graphqlSchema()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Panic(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resolveContext := context.WithValue(context.Background(), "s3Client", s3Client)
|
resolveContext := context.WithValue(context.Background(), "s3Client", s3Client)
|
||||||
resolveContext = context.WithValue(resolveContext, "loader", loaderMap)
|
resolveContext = context.WithValue(resolveContext, "loader", loader)
|
||||||
|
resolveContext = context.WithValue(resolveContext, "dbStore", dbStore)
|
||||||
|
|
||||||
err = initHttp(resolveContext, schema)
|
err = httpserver.InitHttp(resolveContext, schema, config.Address)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Failed to start webserver: %s", err.Error())
|
log.Error("Failed to start webserver: ", err.Error())
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,151 +0,0 @@
|
|||||||
package s3browser
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/graph-gophers/dataloader"
|
|
||||||
"github.com/graphql-go/graphql"
|
|
||||||
)
|
|
||||||
|
|
||||||
// graphqlSchema generate the schema with its root query and mutation
func graphqlSchema() (graphql.Schema, error) {

	// Read-only queries: list the files/directories of a path and look up
	// a single file by id.
	queryFields := graphql.Fields{
		"files": &graphql.Field{
			Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlFileType))),
			Args: graphql.FieldConfigArgument{
				"path": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				path, ok := p.Args["path"].(string)

				if !ok {
					return nil, nil
				}
				// Resolution goes through the dataloader map carried in the context.
				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
				thunk := loader["getFiles"].Load(p.Context, dataloader.StringKey(path))
				return thunk()
			},
		},
		"directorys": &graphql.Field{
			Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphqlDirType))),
			Args: graphql.FieldConfigArgument{
				"path": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				path, ok := p.Args["path"].(string)

				if !ok {
					return nil, nil
				}
				loader := p.Context.Value("loader").(map[string]*dataloader.Loader)
				thunk := loader["getDirs"].Load(p.Context, dataloader.StringKey(path))
				return thunk()
			},
		},
		"file": &graphql.Field{
			Type: graphqlFileType,
			Args: graphql.FieldConfigArgument{
				"id": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.ID),
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				id, ok := p.Args["id"].(string)
				if !ok {
					return nil, fmt.Errorf("Failed to parse args")
				}

				// Only the ID is materialized here; field resolvers load
				// the remaining data lazily.
				return File{
					ID: id,
				}, nil
			},
		},
	}

	// Mutations: delete/copy/move delegate to the *Mutation helpers.
	mutationFields := graphql.Fields{
		"delete": &graphql.Field{
			Type: graphql.String,
			Args: graphql.FieldConfigArgument{
				"id": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.ID),
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				id, ok := p.Args["id"].(string)
				if !ok {
					return nil, fmt.Errorf("Failed to parse args")
				}

				return id, deleteMutation(p.Context, id)
			},
		},
		"copy": &graphql.Field{
			Type: graphqlFileType,
			Args: graphql.FieldConfigArgument{
				"src": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.ID),
				},
				"dest": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.ID),
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				src, ok := p.Args["src"].(string)
				if !ok {
					return nil, fmt.Errorf("Failed to parse args")
				}
				dest, ok := p.Args["dest"].(string)
				if !ok {
					return nil, fmt.Errorf("Failed to parse args")
				}

				return copyMutation(p.Context, src, dest)
			},
		},
		"move": &graphql.Field{
			Type: graphqlFileType,
			Args: graphql.FieldConfigArgument{
				"src": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.ID),
				},
				"dest": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.ID),
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				src, ok := p.Args["src"].(string)
				if !ok {
					return nil, fmt.Errorf("Failed to parse args")
				}
				dest, ok := p.Args["dest"].(string)
				if !ok {
					return nil, fmt.Errorf("Failed to parse args")
				}

				return moveMutation(p.Context, src, dest)
			},
		},
	}

	rootQuery := graphql.ObjectConfig{
		Name:   "RootQuery",
		Fields: queryFields,
	}

	rootMutation := graphql.ObjectConfig{
		Name:   "RootMutation",
		Fields: mutationFields,
	}

	schemaConfig := graphql.SchemaConfig{
		Query:    graphql.NewObject(rootQuery),
		Mutation: graphql.NewObject(rootMutation),
	}

	return graphql.NewSchema(schemaConfig)
}
|
|
||||||
114
internal/types/id.go
Normal file
114
internal/types/id.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// idRegex matches the string form of an ID: "bucket@version:key",
	// where the "@version" part is optional.
	// Capture groups: 1 = bucket, 3 = version (empty if absent), 4 = key.
	idRegex = regexp.MustCompile(`(.*?)(@(.*))?:(.*)`)
)
|
||||||
|
|
||||||
|
// ID identifies an object in S3. It consists of at least a Bucket and a
// Key; Version is optional. An ID can also refer to a directory: when the
// key ends with "/" it is treated as a directory (see IsDirectory).
type ID struct {
	Bucket  string `json:"bucket"`  // Name of the bucket
	Key     string `json:"key"`     // Key of the object, normalized to start with "/"
	Version string `json:"version"` // Version of the object. For now the code ignores it
}
|
||||||
|
|
||||||
|
// String Return String representation of an ID
|
||||||
|
// Looks like this: "bucketName@version:/id/of/obj" or "bucketName:/id/of/obj"
|
||||||
|
func (i ID) String() string {
|
||||||
|
if i.Version == "" {
|
||||||
|
return fmt.Sprintf("%s:%s", i.Bucket, i.Key)
|
||||||
|
} else {
|
||||||
|
return fmt.Sprintf("%s@%s:%s", i.Bucket, i.Version, i.Key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize normalzes the key to have a "/" prefix
|
||||||
|
func (i *ID) Normalize() {
|
||||||
|
if i.Key == "." {
|
||||||
|
i.Key = "/"
|
||||||
|
} else if !strings.HasPrefix(i.Key, "/") {
|
||||||
|
i.Key = "/" + i.Key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid checks if bucket and key is not empty
|
||||||
|
func (i *ID) Valid() bool {
|
||||||
|
return i.Bucket != "" && i.Key != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDirectory reports whether the ID refers to a directory,
// i.e. whether the key ends with a "/".
func (i *ID) IsDirectory() bool {
	return strings.HasSuffix(i.Key, "/")
}
|
||||||
|
|
||||||
|
// Raw implements the Key interface of the dataloaders so an ID can be
// used directly as a dataloader cache key. It returns the ID value itself.
func (i ID) Raw() interface{} {
	return i
}
|
||||||
|
|
||||||
|
// Parent returns the parent dir ID. If its a file then return containing directory.
|
||||||
|
// If this is a directory then return the dir one up.
|
||||||
|
func (i ID) Parent() *ID {
|
||||||
|
if i.Key == "/" {
|
||||||
|
// Already at root. We dont have a parent
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var parent *ID
|
||||||
|
|
||||||
|
if i.IsDirectory() {
|
||||||
|
parts := strings.Split(i.Key, "/")
|
||||||
|
parent = &ID{
|
||||||
|
Bucket: i.Bucket,
|
||||||
|
Key: strings.Join(parts[:len(parts)-2], "/") + "/",
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
dir := filepath.Dir(i.Key)
|
||||||
|
|
||||||
|
if dir != "/" {
|
||||||
|
dir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
parent = &ID{
|
||||||
|
Bucket: i.Bucket,
|
||||||
|
Key: dir,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parent.Normalize()
|
||||||
|
|
||||||
|
return parent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the base name of the key: the file name for a file ID, or
// the directory name (without the trailing "/") for a directory ID.
// For the root key "/" it returns "/" (filepath.Base semantics).
func (i ID) Name() string {
	return filepath.Base(i.Key)
}
|
||||||
|
|
||||||
|
// ParseID parses a string to an ID. Null if invalid
|
||||||
|
func ParseID(id string) *ID {
|
||||||
|
match := idRegex.FindStringSubmatch(id)
|
||||||
|
if match == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rtn := &ID{
|
||||||
|
Bucket: match[1],
|
||||||
|
Version: match[3],
|
||||||
|
Key: match[4],
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rtn.Valid() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rtn.Normalize()
|
||||||
|
|
||||||
|
return rtn
|
||||||
|
}
|
||||||
109
internal/types/id_test.go
Normal file
109
internal/types/id_test.go
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
package types_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"git.kapelle.org/niklas/s3browser/internal/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: test version component (not yet used in code)
|
||||||
|
|
||||||
|
func TestIDParse(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
id := types.ParseID("test:/path/key")
|
||||||
|
|
||||||
|
assert.NotNil(id)
|
||||||
|
assert.True(id.Valid())
|
||||||
|
assert.Equal("test", id.Bucket)
|
||||||
|
assert.Equal("/path/key", id.Key)
|
||||||
|
assert.False(id.IsDirectory())
|
||||||
|
assert.Equal("key", id.Name())
|
||||||
|
assert.Equal("test:/path/key", id.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDParseInvalid(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
assert.Nil(types.ParseID("/asd/ad"))
|
||||||
|
assert.Nil(types.ParseID("test"))
|
||||||
|
assert.Nil(types.ParseID("test:"))
|
||||||
|
assert.Nil(types.ParseID(""))
|
||||||
|
assert.Nil(types.ParseID("/"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDIsDir(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
idFile := types.ParseID("test:/path/key")
|
||||||
|
assert.NotNil(idFile)
|
||||||
|
assert.False(idFile.IsDirectory())
|
||||||
|
|
||||||
|
idDir := types.ParseID("test:/path/key/")
|
||||||
|
assert.NotNil(idDir)
|
||||||
|
assert.True(idDir.IsDirectory())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDRoot(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
id := types.ParseID("test:/")
|
||||||
|
|
||||||
|
assert.NotNil(id)
|
||||||
|
assert.True(id.Valid())
|
||||||
|
assert.Equal("test", id.Bucket)
|
||||||
|
assert.Equal("/", id.Key)
|
||||||
|
assert.True(id.IsDirectory())
|
||||||
|
assert.Equal("/", id.Name())
|
||||||
|
assert.Equal("test:/", id.String())
|
||||||
|
|
||||||
|
assert.Nil(id.Parent())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDParentFromFile(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
id := types.ParseID("test:/path1/path2/key")
|
||||||
|
|
||||||
|
assert.NotNil(id)
|
||||||
|
|
||||||
|
parent := id.Parent()
|
||||||
|
|
||||||
|
assert.NotNil(parent)
|
||||||
|
assert.True(parent.Valid())
|
||||||
|
assert.Equal("test", parent.Bucket)
|
||||||
|
assert.Equal("/path1/path2/", parent.Key)
|
||||||
|
assert.True(parent.IsDirectory())
|
||||||
|
assert.Equal("path2", parent.Name())
|
||||||
|
assert.Equal("test:/path1/path2/", parent.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDParentFromDir(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
id := types.ParseID("test:/path1/path2/")
|
||||||
|
|
||||||
|
assert.NotNil(id)
|
||||||
|
|
||||||
|
parent := id.Parent()
|
||||||
|
|
||||||
|
assert.NotNil(parent)
|
||||||
|
assert.True(parent.Valid())
|
||||||
|
assert.Equal("test", parent.Bucket)
|
||||||
|
assert.Equal("/path1/", parent.Key)
|
||||||
|
assert.True(parent.IsDirectory())
|
||||||
|
assert.Equal("path1", parent.Name())
|
||||||
|
assert.Equal("test:/path1/", parent.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDParentRoot(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
id := types.ParseID("test:/key1")
|
||||||
|
|
||||||
|
parent := id.Parent()
|
||||||
|
|
||||||
|
assert.NotNil(parent)
|
||||||
|
assert.Equal("/", parent.Key)
|
||||||
|
}
|
||||||
47
internal/types/types.go
Normal file
47
internal/types/types.go
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang-jwt/jwt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AppConfig holds the general application configuration
// (S3 connection, database, cache, and HTTP server settings).
type AppConfig struct {
	S3Endoint    string        // S3 endpoint address. NOTE(review): name is misspelled ("Endpoint") but exported — renaming would break callers
	S3AccessKey  string        // S3 access key
	S3SecretKey  string        // S3 secret key
	S3SSL        bool          // Whether to use SSL/TLS for the S3 connection
	DSN          string        // Database connection string — presumably a SQL DSN; verify against the DB setup code
	CacheTTL     time.Duration // Time-to-live for cache entries
	CacheCleanup time.Duration // Interval between cache cleanup runs
	Address      string        // Listen address of the HTTP server (e.g. ":8080")
	LogDebug     bool          // Enable verbose/debug logging
}
|
||||||
|
|
||||||
|
// File represents a file (S3 object) with its metadata.
type File struct {
	ID           ID        `json:"id"`           // Bucket + key identifying the object
	Name         string    `json:"name"`         // Display name of the file
	Size         int64     `json:"size"`         // Size in bytes
	ContentType  string    `json:"contentType"`  // Content type of the object
	ETag         string    `json:"etag"`         // ETag of the object
	LastModified time.Time `json:"lastModified"` // Time the object was last modified
}
|
||||||
|
|
||||||
|
// Directory represents a directory with its metadata and direct children.
type Directory struct {
	ID         ID          `json:"id"`         // Bucket + key of the directory (key ends with "/")
	Name       string      `json:"name"`       // Display name of the directory
	Files      []File      `json:"files"`      // Files directly inside this directory
	Directorys []Directory `json:"directorys"` // Subdirectories. NOTE(review): misspelling of "Directories" — exported name and JSON key, so kept for compatibility
}
|
||||||
|
|
||||||
|
// JWTClaims is the claims type used for the application's JWTs.
// Currently it carries only the standard claims; add fields here for
// custom claims.
type JWTClaims struct {
	jwt.StandardClaims
}
|
||||||
|
|
||||||
|
// LoginResult is the outcome of a login attempt.
type LoginResult struct {
	Token      string `json:"token"`      // Issued token — presumably a JWT on success; verify against the login resolver
	Successful bool   `json:"successful"` // Whether the login attempt succeeded
}
|
||||||
Reference in New Issue
Block a user