Update code formatting and update dependencies (unliftio, protolude) (#152)

* Format code with ormolu

* Use latest unliftio-core

* Use latest protolude
This commit is contained in:
Aditya Manthramurthy 2020-06-14 10:06:41 -07:00 committed by GitHub
parent ce23f7322a
commit 23fecbb469
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
58 changed files with 4875 additions and 4059 deletions

View File

@ -47,7 +47,7 @@ jobs:
- name: Install dependencies
run: |
cabal v2-update
cabal v2-build --only-dependencies --enable-tests --enable-benchmarks --verbose=2
cabal v2-build --only-dependencies --enable-tests --enable-benchmarks
- name: Build
run: cabal v2-build --enable-tests --enable-benchmarks all
- name: Run tests

View File

@ -15,4 +15,5 @@
--
import Distribution.Simple
main = defaultMain

View File

@ -16,20 +16,17 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO)
import Prelude
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "missingbucket"
@ -39,5 +36,5 @@ main = do
liftIO $ putStrLn $ "Does " ++ show bucket ++ " exist? - " ++ show foundBucket
case res1 of
Left e -> putStrLn $ "bucketExists failed." ++ show e
Left e -> putStrLn $ "bucketExists failed." ++ show e
Right () -> return ()

View File

@ -16,42 +16,40 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import UnliftIO.Exception (catch, throwIO)
import Network.Minio
import UnliftIO.Exception (catch, throwIO)
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "test"
let bucket = "test"
object = "obj"
objectCopy = "obj-copy"
localFile = "/etc/lsb-release"
res1 <- runMinio minioPlayCI $ do
-- 1. Make a bucket; Catch BucketAlreadyOwnedByYou exception.
catch (makeBucket bucket Nothing) (
\e -> case e of
BucketAlreadyOwnedByYou -> return ()
_ -> throwIO e
catch
(makeBucket bucket Nothing)
( \e -> case e of
BucketAlreadyOwnedByYou -> return ()
_ -> throwIO e
)
-- 2. Upload a file to bucket/object.
fPutObject bucket object localFile defaultPutObjectOptions
-- 3. Copy bucket/object to bucket/objectCopy.
copyObject defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy}
defaultSourceInfo { srcBucket = bucket , srcObject = object }
copyObject
defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy}
defaultSourceInfo {srcBucket = bucket, srcObject = object}
case res1 of
Left e -> putStrLn $ "copyObject failed." ++ show e
Left e -> putStrLn $ "copyObject failed." ++ show e
Right () -> putStrLn "copyObject succeeded."

View File

@ -16,40 +16,40 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
import Network.Minio
import Data.Monoid ((<>))
import Data.Text (pack)
import Options.Applicative
import System.FilePath.Posix
import UnliftIO (throwIO, try)
import Prelude
import Data.Monoid ((<>))
import Data.Text (pack)
import Network.Minio
import Options.Applicative
import System.FilePath.Posix
import UnliftIO (throwIO, try)
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
-- optparse-applicative package based command-line parsing.
fileNameArgs :: Parser FilePath
fileNameArgs = strArgument
(metavar "FILENAME"
<> help "Name of file to upload to AWS S3 or a MinIO server")
fileNameArgs =
strArgument
( metavar "FILENAME"
<> help "Name of file to upload to AWS S3 or a MinIO server"
)
cmdParser :: ParserInfo FilePath
cmdParser = info
(helper <*> fileNameArgs)
(fullDesc
<> progDesc "FileUploader"
<> header
"FileUploader - a simple file-uploader program using minio-hs")
cmdParser =
info
(helper <*> fileNameArgs)
( fullDesc
<> progDesc "FileUploader"
<> header
"FileUploader - a simple file-uploader program using minio-hs"
)
main :: IO ()
main = do
@ -64,12 +64,12 @@ main = do
bErr <- try $ makeBucket bucket Nothing
case bErr of
Left BucketAlreadyOwnedByYou -> return ()
Left e -> throwIO e
Right _ -> return ()
Left e -> throwIO e
Right _ -> return ()
-- Upload filepath to bucket; object is derived from filepath.
fPutObject bucket object filepath defaultPutObjectOptions
case res of
Left e -> putStrLn $ "file upload failed due to " ++ (show e)
Left e -> putStrLn $ "file upload failed due to " ++ (show e)
Right () -> putStrLn "file upload succeeded."

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
res <-
runMinio minioPlayCI $
getConfig
print res

View File

@ -16,31 +16,26 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit as C
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import Prelude
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "my-bucket"
let bucket = "my-bucket"
object = "my-object"
res <- runMinio minioPlayCI $ do
src <- getObject bucket object defaultGetObjectOptions
C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object"
case res of
Left e -> putStrLn $ "getObject failed." ++ (show e)
Left e -> putStrLn $ "getObject failed." ++ (show e)
Right _ -> putStrLn "getObject succeeded."

View File

@ -16,28 +16,25 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.S3API
import Prelude
import Network.Minio
import Network.Minio.S3API
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "test"
let bucket = "test"
object = "passwd"
res <- runMinio minioPlayCI $
headObject bucket object []
res <-
runMinio minioPlayCI $
headObject bucket object []
case res of
Left e -> putStrLn $ "headObject failed." ++ show e
Left e -> putStrLn $ "headObject failed." ++ show e
Right objInfo -> putStrLn $ "headObject succeeded." ++ show objInfo

View File

@ -16,19 +16,23 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
do
hsr <- startHeal Nothing Nothing HealOpts { hoRecursive = True
, hoDryRun = False
}
hsr <-
startHeal
Nothing
Nothing
HealOpts
{ hoRecursive = True,
hoDryRun = False
}
getHealStatus Nothing Nothing (hsrClientToken hsr)
print res

View File

@ -16,19 +16,17 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO)
import Prelude
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
-- This example list buckets that belongs to the user and returns
-- region of the first bucket returned.

View File

@ -16,38 +16,36 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Conduit
import Prelude
import Conduit
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "test"
let bucket = "test"
-- Performs a recursive listing of incomplete uploads under bucket "test"
-- on a local minio server.
res <- runMinio minioPlayCI $
runConduit $ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
res <-
runMinio minioPlayCI
$ runConduit
$ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
print res
{-
Following is the output of the above program on a local MinIO server.
{-
Following is the output of the above program on a local MinIO server.
Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz"
, uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2"
, uiInitTime = 2017-03-01 10:16:25.698 UTC
, uiSize = 17731794
}
]
-}
Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz"
, uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2"
, uiInitTime = 2017-03-01 10:16:25.698 UTC
, uiSize = 17731794
}
]
-}

View File

@ -16,33 +16,31 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Conduit
import Prelude
import Conduit
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "test"
let bucket = "test"
-- Performs a recursive listing of all objects under bucket "test"
-- on play.min.io.
res <- runMinio minioPlayCI $
runConduit $ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
res <-
runMinio minioPlayCI
$ runConduit
$ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
print res
{-
Following is the output of the above program on a local MinIO server.
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
-}
{-
Following is the output of the above program on a local MinIO server.
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
-}

View File

@ -16,24 +16,21 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "my-bucket"
res <- runMinio minioPlayCI $
-- N B the region provided for makeBucket is optional.
makeBucket bucket (Just "us-east-1")
res <-
runMinio minioPlayCI $
-- N B the region provided for makeBucket is optional.
makeBucket bucket (Just "us-east-1")
print res

View File

@ -16,34 +16,32 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO)
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
import Control.Monad.IO.Class (liftIO)
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
import qualified Data.Conduit.Combinators as CC
import qualified Data.Text.Encoding as E
import qualified Data.Text.Encoding as E
import Network.Minio
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "my-bucket"
object = "my-object"
kb15 = 15*1024
-- Set query parameter to modify content disposition response
-- header
queryParam = [("response-content-disposition",
Just "attachment; filename=\"your-filename.txt\"")]
let bucket = "my-bucket"
object = "my-object"
kb15 = 15 * 1024
-- Set query parameter to modify content disposition response
-- header
queryParam =
[ ( "response-content-disposition",
Just "attachment; filename=\"your-filename.txt\""
)
]
res <- runMinio minioPlayCI $ do
liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..."
@ -61,23 +59,28 @@ main = do
-- Generate a URL with 7 days expiry time - note that the headers
-- used above must be added to the request with the signed URL
-- generated.
url <- presignedGetObjectUrl "my-bucket" "my-object" (7*24*3600)
queryParam headers
url <-
presignedGetObjectUrl
"my-bucket"
"my-object"
(7 * 24 * 3600)
queryParam
headers
return (headers, etag, url)
case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right (headers, _, url) -> do
-- We generate a curl command to demonstrate usage of the signed
-- URL.
let
hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd = B.intercalate " " $
["curl --fail"] ++ map hdrOpt headers ++
["-o /tmp/myfile", B.concat ["'", url, "'"]]
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd =
B.intercalate " " $
["curl --fail"] ++ map hdrOpt headers
++ ["-o /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ "The following curl command would use the presigned " ++
"URL to fetch the object and write it to \"/tmp/myfile\":"
putStrLn $
"The following curl command would use the presigned "
++ "URL to fetch the object and write it to \"/tmp/myfile\":"
B.putStrLn curlCmd

View File

@ -16,47 +16,43 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString as B
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as Char8
import qualified Data.HashMap.Strict as H
import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time
import qualified Data.HashMap.Strict as H
import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time
import Network.Minio
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
now <- Time.getCurrentTime
let
bucket = "my-bucket"
object = "photos/my-object"
-- set an expiration time of 10 days
expireTime = Time.addUTCTime (3600 * 24 * 10) now
-- create a policy with expiration time and conditions - since the
-- conditions are validated, newPostPolicy returns an Either value
policyE = newPostPolicy expireTime
[ -- set the object name condition
ppCondKey object
-- set the bucket name condition
, ppCondBucket bucket
-- set the size range of object as 1B to 10MiB
, ppCondContentLengthRange 1 (10*1024*1024)
-- set content type as jpg image
, ppCondContentType "image/jpeg"
-- on success set the server response code to 200
, ppCondSuccessActionStatus 200
]
let bucket = "my-bucket"
object = "photos/my-object"
-- set an expiration time of 10 days
expireTime = Time.addUTCTime (3600 * 24 * 10) now
-- create a policy with expiration time and conditions - since the
-- conditions are validated, newPostPolicy returns an Either value
policyE =
newPostPolicy
expireTime
[ -- set the object name condition
ppCondKey object,
-- set the bucket name condition
ppCondBucket bucket,
-- set the size range of object as 1B to 10MiB
ppCondContentLengthRange 1 (10 * 1024 * 1024),
-- set content type as jpg image
ppCondContentType "image/jpeg",
-- on success set the server response code to 200
ppCondSuccessActionStatus 200
]
case policyE of
Left err -> putStrLn $ show err
@ -66,11 +62,16 @@ main = do
-- a curl command is output to demonstrate using the generated
-- URL and form-data
let
formFn (k, v) = B.concat ["-F ", Enc.encodeUtf8 k, "=",
"'", v, "'"]
formOptions = B.intercalate " " $ map formFn $ H.toList formData
let formFn (k, v) =
B.concat
[ "-F ",
Enc.encodeUtf8 k,
"=",
"'",
v,
"'"
]
formOptions = B.intercalate " " $ map formFn $ H.toList formData
return $ B.intercalate " " $
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]

View File

@ -16,44 +16,42 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
import Data.CaseInsensitive (original)
import Network.Minio
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
-- Use headers to set user-metadata - note that this header will
-- need to be set when the URL is used to make an upload.
headers = [("x-amz-meta-url-creator",
"minio-hs-presigned-put-example")]
let -- Use headers to set user-metadata - note that this header will
-- need to be set when the URL is used to make an upload.
headers =
[ ( "x-amz-meta-url-creator",
"minio-hs-presigned-put-example"
)
]
res <- runMinio minioPlayCI $ do
-- generate a URL with 7 days expiry time
presignedPutObjectUrl "my-bucket" "my-object" (7*24*3600) headers
presignedPutObjectUrl "my-bucket" "my-object" (7 * 24 * 3600) headers
case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right url -> do
-- We generate a curl command to demonstrate usage of the signed
-- URL.
let
hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd = B.intercalate " " $
["curl "] ++ map hdrOpt headers ++
["-T /tmp/myfile", B.concat ["'", url, "'"]]
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd =
B.intercalate " " $
["curl "] ++ map hdrOpt headers
++ ["-T /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ "The following curl command would use the presigned " ++
"URL to upload the file at \"/tmp/myfile\":"
putStrLn $
"The following curl command would use the presigned "
++ "URL to upload the file at \"/tmp/myfile\":"
B.putStrLn curlCmd

View File

@ -16,39 +16,36 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit.Combinators as CC
import Prelude
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "test"
let bucket = "test"
object = "obj"
localFile = "/etc/lsb-release"
kb15 = 15 * 1024
-- Eg 1. Upload a stream of repeating "a" using putObject with default options.
res1 <- runMinio minioPlayCI $
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
res1 <-
runMinio minioPlayCI $
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
case res1 of
Left e -> putStrLn $ "putObject failed." ++ show e
Left e -> putStrLn $ "putObject failed." ++ show e
Right () -> putStrLn "putObject succeeded."
-- Eg 2. Upload a file using fPutObject with default options.
res2 <- runMinio minioPlayCI $
fPutObject bucket object localFile defaultPutObjectOptions
res2 <-
runMinio minioPlayCI $
fPutObject bucket object localFile defaultPutObjectOptions
case res2 of
Left e -> putStrLn $ "fPutObject failed." ++ show e
Left e -> putStrLn $ "fPutObject failed." ++ show e
Right () -> putStrLn "fPutObject succeeded."

View File

@ -16,23 +16,18 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "my-bucket"
let bucket = "my-bucket"
res <- runMinio minioPlayCI $ removeBucket bucket
print res

View File

@ -16,27 +16,24 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let
bucket = "mybucket"
object = "myobject"
let bucket = "mybucket"
object = "myobject"
res <- runMinio minioPlayCI $
removeIncompleteUpload bucket object
res <-
runMinio minioPlayCI $
removeIncompleteUpload bucket object
case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -16,20 +16,19 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
import Network.Minio
import Prelude
main :: IO ()
main = do
let
bucket = "mybucket"
object = "myobject"
let bucket = "mybucket"
object = "myobject"
res <- runMinio minioPlayCI $
removeObject bucket object
res <-
runMinio minioPlayCI $
removeObject bucket object
case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -16,34 +16,32 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Conduit as C
import Control.Monad (when)
import Prelude
import qualified Conduit as C
import Control.Monad (when)
import Network.Minio
import Prelude
main :: IO ()
main = do
let bucket = "selectbucket"
object = "1.csv"
content = "Name,Place,Temperature\n"
<> "James,San Jose,76\n"
<> "Alicia,San Leandro,88\n"
<> "Mark,San Carlos,90\n"
let bucket = "selectbucket"
object = "1.csv"
content =
"Name,Place,Temperature\n"
<> "James,San Jose,76\n"
<> "Alicia,San Leandro,88\n"
<> "Mark,San Carlos,90\n"
res <- runMinio minioPlayCI $ do
res <- runMinio minioPlayCI $ do
exists <- bucketExists bucket
when (not exists) $
makeBucket bucket Nothing
exists <- bucketExists bucket
when (not exists) $
makeBucket bucket Nothing
C.liftIO $ putStrLn "Uploading csv object"
putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions
C.liftIO $ putStrLn "Uploading csv object"
putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions
let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput
res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res
let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput
res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
getServerInfo
res <-
runMinio minioPlayCI $
getServerInfo
print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
serviceSendAction ServiceActionRestart
res <-
runMinio minioPlayCI $
serviceSendAction ServiceActionRestart
print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
serviceSendAction ServiceActionStop
res <-
runMinio minioPlayCI $
serviceSendAction ServiceActionStop
print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
serviceStatus
res <-
runMinio minioPlayCI $
serviceStatus
print res

View File

@ -16,12 +16,11 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do

View File

@ -56,7 +56,7 @@ common base-settings
, Network.Minio.XmlParser
, Network.Minio.JsonParser
build-depends: base >= 4.7 && < 5
, protolude >= 0.2 && < 0.3
, protolude >= 0.3 && < 0.4
, aeson >= 1.2
, base64-bytestring >= 1.0
, binary >= 0.8.5.0
@ -83,8 +83,8 @@ common base-settings
, text >= 1.2
, time >= 1.8
, transformers >= 0.5
, unliftio >= 0.2
, unliftio-core >= 0.1 && < 0.2
, unliftio >= 0.2 && < 0.3
, unliftio-core >= 0.2 && < 0.3
, unordered-containers >= 0.2
, xml-conduit >= 1.8

View File

@ -15,19 +15,45 @@
--
module Lib.Prelude
( module Exports
, both
) where
( module Exports,
both,
showBS,
toStrictBS,
fromStrictBS,
)
where
import Protolude as Exports hiding (catch, catches,
throwIO, try)
import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT)
import Data.Time as Exports (UTCTime (..),
diffUTCTime)
import UnliftIO as Exports (catch, catches, throwIO,
try)
import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT)
import qualified Data.ByteString.Lazy as LB
import Data.Time as Exports
( UTCTime (..),
diffUTCTime,
)
import Protolude as Exports hiding
( Handler,
catch,
catches,
throwIO,
try,
yield,
)
import UnliftIO as Exports
( Handler,
catch,
catches,
throwIO,
try,
)
-- | Apply a function on both elements of a pair
both :: (a -> b) -> (a, a) -> (b, b)
both f (a, b) = (f a, f b)
showBS :: Show a => a -> ByteString
showBS a = toUtf8 (show a :: Text)
toStrictBS :: LByteString -> ByteString
toStrictBS = LB.toStrict
fromStrictBS :: ByteString -> LByteString
fromStrictBS = LB.fromStrict

View File

@ -22,218 +22,217 @@
--
-- Types and functions to conveniently access S3 compatible object
-- storage servers like MinIO.
module Network.Minio
(
-- * Credentials
Credentials (..)
( -- * Credentials
Credentials (..),
-- ** Credential providers
-- | Run actions that retrieve 'Credentials' from the environment or
-- files or other custom sources.
, Provider
, fromAWSConfigFile
, fromAWSEnv
, fromMinioEnv
, findFirst
-- ** Credential providers
-- * Connecting to object storage
, ConnectInfo
, setRegion
, setCreds
, setCredsFrom
, isConnectInfoSecure
, disableTLSCertValidation
, MinioConn
, mkMinioConn
-- | Run actions that retrieve 'Credentials' from the environment or
-- files or other custom sources.
Provider,
fromAWSConfigFile,
fromAWSEnv,
fromMinioEnv,
findFirst,
-- ** Connection helpers
-- | These are helpers to construct 'ConnectInfo' values for common
-- cases.
, minioPlayCI
, awsCI
, gcsCI
-- * Connecting to object storage
ConnectInfo,
setRegion,
setCreds,
setCredsFrom,
isConnectInfoSecure,
disableTLSCertValidation,
MinioConn,
mkMinioConn,
-- * Minio Monad
----------------
-- | The Minio Monad provides connection-reuse, bucket-location
-- caching, resource management and simpler error handling
-- functionality. All actions on object storage are performed within
-- this Monad.
, Minio
, runMinioWith
, runMinio
, runMinioResWith
, runMinioRes
-- ** Connection helpers
-- * Bucket Operations
-- | These are helpers to construct 'ConnectInfo' values for common
-- cases.
minioPlayCI,
awsCI,
gcsCI,
-- ** Creation, removal and querying
, Bucket
, makeBucket
, removeBucket
, bucketExists
, Region
, getLocation
-- * Minio Monad
----------------
-- ** Listing buckets
, BucketInfo(..)
, listBuckets
-- | The Minio Monad provides connection-reuse, bucket-location
-- caching, resource management and simpler error handling
-- functionality. All actions on object storage are performed within
-- this Monad.
Minio,
runMinioWith,
runMinio,
runMinioResWith,
runMinioRes,
-- ** Listing objects
, listObjects
, listObjectsV1
, ListItem(..)
-- * Bucket Operations
, ObjectInfo
, oiObject
, oiModTime
, oiETag
, oiSize
, oiUserMetadata
, oiMetadata
-- ** Creation, removal and querying
Bucket,
makeBucket,
removeBucket,
bucketExists,
Region,
getLocation,
-- ** Listing incomplete uploads
, listIncompleteUploads
, UploadId
, UploadInfo(..)
, listIncompleteParts
, ObjectPartInfo(..)
-- ** Listing buckets
BucketInfo (..),
listBuckets,
-- ** Bucket Notifications
, getBucketNotification
, putBucketNotification
, removeAllBucketNotification
, Notification(..)
, defaultNotification
, NotificationConfig(..)
, Arn
, Event(..)
, Filter(..)
, defaultFilter
, FilterKey(..)
, defaultFilterKey
, FilterRules(..)
, defaultFilterRules
, FilterRule(..)
-- ** Listing objects
listObjects,
listObjectsV1,
ListItem (..),
ObjectInfo,
oiObject,
oiModTime,
oiETag,
oiSize,
oiUserMetadata,
oiMetadata,
-- * Object Operations
, Object
-- ** Listing incomplete uploads
listIncompleteUploads,
UploadId,
UploadInfo (..),
listIncompleteParts,
ObjectPartInfo (..),
-- ** File-based operations
, fGetObject
, fPutObject
-- ** Bucket Notifications
getBucketNotification,
putBucketNotification,
removeAllBucketNotification,
Notification (..),
defaultNotification,
NotificationConfig (..),
Arn,
Event (..),
Filter (..),
defaultFilter,
FilterKey (..),
defaultFilterKey,
FilterRules (..),
defaultFilterRules,
FilterRule (..),
-- ** Conduit-based streaming operations
, putObject
, PutObjectOptions
, defaultPutObjectOptions
, pooContentType
, pooContentEncoding
, pooContentDisposition
, pooContentLanguage
, pooCacheControl
, pooStorageClass
, pooUserMetadata
, pooNumThreads
, pooSSE
-- * Object Operations
Object,
, getObject
, GetObjectOptions
, defaultGetObjectOptions
, gooRange
, gooIfMatch
, gooIfNoneMatch
, gooIfModifiedSince
, gooIfUnmodifiedSince
, gooSSECKey
, GetObjectResponse
, gorObjectInfo
, gorObjectStream
-- ** File-based operations
fGetObject,
fPutObject,
-- ** Server-side object copying
, copyObject
, SourceInfo
, defaultSourceInfo
, srcBucket
, srcObject
, srcRange
, srcIfMatch
, srcIfNoneMatch
, srcIfModifiedSince
, srcIfUnmodifiedSince
, DestinationInfo
, defaultDestinationInfo
, dstBucket
, dstObject
-- ** Conduit-based streaming operations
putObject,
PutObjectOptions,
defaultPutObjectOptions,
pooContentType,
pooContentEncoding,
pooContentDisposition,
pooContentLanguage,
pooCacheControl,
pooStorageClass,
pooUserMetadata,
pooNumThreads,
pooSSE,
getObject,
GetObjectOptions,
defaultGetObjectOptions,
gooRange,
gooIfMatch,
gooIfNoneMatch,
gooIfModifiedSince,
gooIfUnmodifiedSince,
gooSSECKey,
GetObjectResponse,
gorObjectInfo,
gorObjectStream,
-- ** Querying object info
, statObject
-- ** Server-side object copying
copyObject,
SourceInfo,
defaultSourceInfo,
srcBucket,
srcObject,
srcRange,
srcIfMatch,
srcIfNoneMatch,
srcIfModifiedSince,
srcIfUnmodifiedSince,
DestinationInfo,
defaultDestinationInfo,
dstBucket,
dstObject,
-- ** Object removal operations
, removeObject
, removeIncompleteUpload
-- ** Querying object info
statObject,
-- ** Select Object Content with SQL
, module Network.Minio.SelectAPI
-- ** Object removal operations
removeObject,
removeIncompleteUpload,
-- * Server-Side Encryption Helpers
, mkSSECKey
, SSECKey
, SSE(..)
-- ** Select Object Content with SQL
module Network.Minio.SelectAPI,
-- * Presigned Operations
, presignedPutObjectUrl
, presignedGetObjectUrl
, presignedHeadObjectUrl
, UrlExpiry
-- * Server-Side Encryption Helpers
mkSSECKey,
SSECKey,
SSE (..),
-- ** POST (browser) upload helpers
-- | Please see
-- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-- for detailed information.
, newPostPolicy
, presignedPostPolicy
, showPostPolicy
, PostPolicy
, PostPolicyError(..)
-- * Presigned Operations
presignedPutObjectUrl,
presignedGetObjectUrl,
presignedHeadObjectUrl,
UrlExpiry,
-- *** Post Policy condition helpers
, PostPolicyCondition
, ppCondBucket
, ppCondContentLengthRange
, ppCondContentType
, ppCondKey
, ppCondKeyStartsWith
, ppCondSuccessActionStatus
-- ** POST (browser) upload helpers
-- * Error handling
-- | Data types representing various errors that may occur while
-- working with an object storage service.
, MinioErr(..)
, MErrV(..)
, ServiceErr(..)
-- | Please see
-- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-- for detailed information.
newPostPolicy,
presignedPostPolicy,
showPostPolicy,
PostPolicy,
PostPolicyError (..),
) where
-- *** Post Policy condition helpers
PostPolicyCondition,
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
-- * Error handling
-- | Data types representing various errors that may occur while
-- working with an object storage service.
MinioErr (..),
MErrV (..),
ServiceErr (..),
)
where
{-
This module exports the high-level MinIO API for object storage.
-}
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC
import Lib.Prelude
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.ListOps
import Network.Minio.PutObject
import Network.Minio.S3API
import Network.Minio.SelectAPI
import Network.Minio.Utils
import Lib.Prelude
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.ListOps
import Network.Minio.PutObject
import Network.Minio.S3API
import Network.Minio.SelectAPI
import Network.Minio.Utils
-- | Lists buckets.
listBuckets :: Minio [BucketInfo]
@ -248,8 +247,12 @@ fGetObject bucket object fp opts = do
C.connect (gorObjectStream src) $ CB.sinkFileCautious fp
-- | Upload the given file to the given object.
fPutObject ::
  Bucket ->
  Object ->
  FilePath ->
  PutObjectOptions ->
  Minio ()
fPutObject bucket object f opts =
  void $ putObjectInternal bucket object opts $ ODFile f Nothing
@ -257,8 +260,13 @@ fPutObject bucket object f opts =
-- | Upload an object from a conduit source. The size may be given if
-- known; this helps the library select optimal part sizes to perform
-- a multipart upload. If not specified, it is assumed that the object
-- can be potentially 5TiB and selects multipart sizes appropriately.
putObject ::
  Bucket ->
  Object ->
  C.ConduitM () ByteString Minio () ->
  Maybe Int64 ->
  PutObjectOptions ->
  Minio ()
putObject bucket object src sizeMay opts =
  void $ putObjectInternal bucket object opts $ ODStream src sizeMay
@ -268,18 +276,25 @@ putObject bucket object src sizeMay opts =
-- | Server-side copy: creates the destination object by copying from
-- the given source specification. This function performs a multipart
-- copy operation if the new object is to be greater than 5GiB in
-- size.
copyObject :: DestinationInfo -> SourceInfo -> Minio ()
copyObject dstInfo srcInfo =
  void $
    copyObjectInternal
      (dstBucket dstInfo)
      (dstObject dstInfo)
      srcInfo
-- | Remove an object from the object store.
removeObject :: Bucket -> Object -> Minio ()
removeObject bucket object = deleteObject bucket object
-- | Get an object from the object store.
getObject ::
  Bucket ->
  Object ->
  GetObjectOptions ->
  Minio GetObjectResponse
getObject bucket object opts =
  getObject' bucket object [] $ gooToHeaders opts
-- | Get an object's metadata from the object store. It accepts the
-- same options as GetObject.
@ -309,6 +324,8 @@ bucketExists = headBucket
-- | Removes an ongoing multipart upload of an object: lists all
-- incomplete uploads for the object and aborts each one.
removeIncompleteUpload :: Bucket -> Object -> Minio ()
removeIncompleteUpload bucket object = do
  uploads <-
    C.runConduit $
      listIncompleteUploads bucket (Just object) False
        C..| CC.sinkList
  mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads)

View File

@ -15,169 +15,187 @@
--
module Network.Minio.API
( connect
, S3ReqInfo(..)
, runMinio
, executeRequest
, mkStreamRequest
, getLocation
( connect,
S3ReqInfo (..),
runMinio,
executeRequest,
mkStreamRequest,
getLocation,
isValidBucketName,
checkBucketNameValidity,
isValidObjectName,
checkObjectNameValidity,
)
where
, isValidBucketName
, checkBucketNameValidity
, isValidObjectName
, checkObjectNameValidity
) where
import Control.Retry (fullJitterBackoff,
limitRetriesByCumulativeDelay,
retrying)
import qualified Data.ByteString as B
import qualified Data.Char as C
import qualified Data.Conduit as C
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time.Clock as Time
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
import Network.Minio.XmlParser
import Control.Retry
( fullJitterBackoff,
limitRetriesByCumulativeDelay,
retrying,
)
import qualified Data.ByteString as B
import qualified Data.Char as C
import qualified Data.Conduit as C
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time.Clock as Time
import Lib.Prelude
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
import Network.Minio.XmlParser
-- | Fetch bucket location (region) via the S3 @?location@ query.
getLocation :: Bucket -> Minio Region
getLocation bucket = do
  resp <-
    executeRequest $
      defaultS3ReqInfo
        { riBucket = Just bucket,
          riQueryParams = [("location", Nothing)],
          -- this request itself must not require a location lookup
          riNeedsLocation = False
        }
  parseLocation $ NC.responseBody resp
-- | Looks for region in RegionMap and updates it using getLocation if
-- absent. Returns Nothing when the request has no bucket.
discoverRegion :: S3ReqInfo -> Minio (Maybe Region)
discoverRegion ri = runMaybeT $ do
  bucket <- MaybeT $ return $ riBucket ri
  regionMay <- lift $ lookupRegionCache bucket
  maybe
    ( do
        -- cache miss: query the service and remember the answer
        l <- lift $ getLocation bucket
        lift $ addToRegionCache bucket l
        return l
    )
    return
    regionMay
-- | Resolve the region to sign the request for, consulting the
-- connection defaults and (optionally) auto-discovery.
getRegion :: S3ReqInfo -> Minio (Maybe Region)
getRegion ri = do
  ci <- asks mcConnInfo
  -- getService/makeBucket/getLocation -- don't need location
  if
    | not $ riNeedsLocation ri ->
        return $ Just $ connectRegion ci
    -- if autodiscovery of location is disabled by user
    | not $ connectAutoDiscoverRegion ci ->
        return $ Just $ connectRegion ci
    -- discover the region for the request
    | otherwise -> discoverRegion ri
-- | Map a region to the host to connect to. For AWS endpoints the
-- region must exist in 'awsRegionMap'; otherwise the configured host
-- is used unchanged.
getRegionHost :: Region -> Minio Text
getRegionHost r = do
  ci <- asks mcConnInfo
  if "amazonaws.com" `T.isSuffixOf` connectHost ci
    then
      maybe
        (throwIO $ MErrVRegionNotSupported r)
        return
        (H.lookup r awsRegionMap)
    else return $ connectHost ci
-- | Build a signed http-client 'NC.Request' from the request info:
-- validates names, resolves region/host, then signs according to the
-- payload kind and connection security.
buildRequest :: S3ReqInfo -> Minio NC.Request
buildRequest ri = do
  maybe (return ()) checkBucketNameValidity $ riBucket ri
  maybe (return ()) checkObjectNameValidity $ riObject ri

  ci <- asks mcConnInfo

  regionMay <- getRegion ri
  regionHost <- maybe (return $ connectHost ci) getRegionHost regionMay

  let ri' =
        ri
          { riHeaders = hostHeader : riHeaders ri,
            riRegion = regionMay
          }
      ci' = ci {connectHost = regionHost}
      hostHeader = (hHost, getHostAddr ci')
      -- Does not contain body and auth info.
      baseRequest =
        NC.defaultRequest
          { NC.method = riMethod ri',
            NC.secure = connectIsSecure ci',
            NC.host = encodeUtf8 $ connectHost ci',
            NC.port = connectPort ci',
            NC.path = getS3Path (riBucket ri') (riObject ri'),
            NC.requestHeaders = riHeaders ri',
            NC.queryString = HT.renderQuery False $ riQueryParams ri'
          }

  timeStamp <- liftIO Time.getCurrentTime

  let sp =
        SignParams
          (connectAccessKey ci')
          (connectSecretKey ci')
          timeStamp
          (riRegion ri')
          Nothing
          Nothing

  -- Cases to handle:
  --
  -- 1. Connection is secure: use unsigned payload
  --
  -- 2. Insecure connection, streaming signature is enabled via use of
  -- conduit payload: use streaming signature for request.
  --
  -- 3. Insecure connection, non-conduit payload: compute payload
  -- sha256hash, buffer request in memory and perform request.

  -- case 2 from above.
  if
    | isStreamingPayload (riPayload ri')
        && (not $ connectIsSecure ci') -> do
        (pLen, pSrc) <- case riPayload ri of
          PayloadC l src -> return (l, src)
          _ -> throwIO MErrVUnexpectedPayload
        let reqFn = signV4Stream pLen sp baseRequest
        return $ reqFn pSrc
    | otherwise -> do
        -- case 1 described above.
        sp' <-
          if
            | connectIsSecure ci' -> return sp
            -- case 3 described above.
            | otherwise -> do
                pHash <- getPayloadSHA256Hash $ riPayload ri'
                return $ sp {spPayloadHash = Just pHash}
        let signHeaders = signV4 sp' baseRequest
        return $
          baseRequest
            { NC.requestHeaders =
                NC.requestHeaders baseRequest
                  ++ mkHeaderFromPairs signHeaders,
              NC.requestBody = getRequestBody (riPayload ri')
            }
retryAPIRequest :: Minio a -> Minio a
retryAPIRequest apiCall = do
resE <- retrying retryPolicy (const shouldRetry) $
const $ try apiCall
resE <-
retrying retryPolicy (const shouldRetry)
$ const
$ try apiCall
either throwIO return resE
where
-- Retry using the full-jitter backoff method for up to 10 mins
-- total
retryPolicy = limitRetriesByCumulativeDelay tenMins
$ fullJitterBackoff oneMilliSecond
retryPolicy =
limitRetriesByCumulativeDelay tenMins $
fullJitterBackoff oneMilliSecond
oneMilliSecond = 1000 -- in microseconds
tenMins = 10 * 60 * 1000000 -- in microseconds
-- retry on connection related failure
@ -189,23 +207,23 @@ retryAPIRequest apiCall = do
-- API request failed with a retryable exception
Left httpExn@(NC.HttpExceptionRequest _ exn) ->
case (exn :: NC.HttpExceptionContent) of
NC.ResponseTimeout -> return True
NC.ConnectionTimeout -> return True
NC.ResponseTimeout -> return True
NC.ConnectionTimeout -> return True
NC.ConnectionFailure _ -> return True
-- We received an unexpected exception
_ -> throwIO httpExn
_ -> throwIO httpExn
-- We received an unexpected exception
Left someOtherExn -> throwIO someOtherExn
-- | Build the signed request for the given request info and execute
-- it, retrying transient failures via 'retryAPIRequest'.
executeRequest :: S3ReqInfo -> Minio (Response LByteString)
executeRequest reqInfo = do
  request <- buildRequest reqInfo
  manager <- asks mcConnManager
  retryAPIRequest (httpLbs request manager)
mkStreamRequest :: S3ReqInfo
-> Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest ::
S3ReqInfo ->
Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest ri = do
req <- buildRequest ri
mgr <- asks mcConnManager
@ -214,35 +232,43 @@ mkStreamRequest ri = do
-- Bucket name validity check according to AWS rules: 3-63 chars,
-- dot-separated labels of lowercase letters, digits and hyphens (not
-- at label edges), and the whole name must not look like an IP.
isValidBucketName :: Bucket -> Bool
isValidBucketName bucket =
  not
    ( or
        [ len < 3 || len > 63,
          or (map labelCheck labels),
          or (map labelCharsCheck labels),
          isIPCheck
        ]
    )
  where
    len = T.length bucket
    labels = T.splitOn "." bucket
    -- does label `l` fail basic checks of length and start/end?
    labelCheck l = T.length l == 0 || T.head l == '-' || T.last l == '-'
    -- does label `l` have non-allowed characters?
    labelCharsCheck l =
      isJust $
        T.find
          ( \x ->
              not
                ( C.isAsciiLower x
                    || x == '-'
                    || C.isDigit x
                )
          )
          l
    -- does label `l` have non-digit characters?
    labelNonDigits l = isJust $ T.find (not . C.isDigit) l
    labelAsNums = map (not . labelNonDigits) labels
    -- check if bucket name looks like an IP
    isIPCheck = and labelAsNums && length labelAsNums == 4
-- Throws exception iff bucket name is invalid according to AWS rules.
checkBucketNameValidity :: MonadIO m => Bucket -> m ()
checkBucketNameValidity bucket =
  when (not $ isValidBucketName bucket)
    $ throwIO
    $ MErrVInvalidBucketName bucket
isValidObjectName :: Object -> Bool
isValidObjectName object =
@ -250,5 +276,6 @@ isValidObjectName object =
-- Throws exception iff object name is invalid.
checkObjectNameValidity :: MonadIO m => Object -> m ()
checkObjectNameValidity object =
  when (not $ isValidObjectName object)
    $ throwIO
    $ MErrVInvalidObjectName object

View File

@ -16,37 +16,38 @@
module Network.Minio.APICommon where
import qualified Conduit as C
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LB
import Data.Conduit.Binary (sourceHandleRange)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Crypto
import Network.Minio.Errors
import qualified Conduit as C
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LB
import Data.Conduit.Binary (sourceHandleRange)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.Minio.Data
import Network.Minio.Data.Crypto
import Network.Minio.Errors
-- | Build the @x-amz-content-sha256@ header carrying the given value.
sha256Header :: ByteString -> HT.Header
sha256Header = ("x-amz-content-sha256",)
-- | This function throws an error if the payload is a conduit (as it
-- will not be possible to re-read the conduit after it is consumed).
getPayloadSHA256Hash :: Payload -> Minio ByteString
getPayloadSHA256Hash (PayloadBS bs) = return $ hashSHA256 bs
getPayloadSHA256Hash (PayloadH h off size) =
  hashSHA256FromSource $
    sourceHandleRange
      h
      (return . fromIntegral $ off)
      (return . fromIntegral $ size)
getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload
-- | Convert a 'Payload' into an http-client request body.
getRequestBody :: Payload -> NC.RequestBody
getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs
getRequestBody (PayloadH h off size) =
  NC.requestBodySource (fromIntegral size) $
    sourceHandleRange
      h
      (return . fromIntegral $ off)
      (return . fromIntegral $ size)
getRequestBody (PayloadC n src) = NC.requestBodySource n src
@ -55,14 +56,17 @@ mkStreamingPayload :: Payload -> Payload
-- | Convert a payload into an equivalent conduit ('PayloadC')
-- payload; conduit payloads are passed through unchanged.
mkStreamingPayload :: Payload -> Payload
mkStreamingPayload payload =
  case payload of
    PayloadBS bs ->
      PayloadC
        (fromIntegral $ BS.length bs)
        (C.sourceLazy $ LB.fromStrict bs)
    PayloadH h off len ->
      PayloadC len $
        sourceHandleRange
          h
          (return . fromIntegral $ off)
          (return . fromIntegral $ len)
    _ -> payload
-- | True iff the payload is a conduit ('PayloadC') payload.
isStreamingPayload :: Payload -> Bool
isStreamingPayload (PayloadC _ _) = True
isStreamingPayload _ = False

View File

@ -16,270 +16,304 @@
module Network.Minio.AdminAPI
( -- * MinIO Admin API
--------------------
--------------------
-- | Provides MinIO admin API and related types. It is in
-- experimental state.
DriveInfo(..)
, ErasureInfo(..)
, Backend(..)
, ConnStats(..)
, HttpStats(..)
, ServerProps(..)
, CountNAvgTime(..)
, StorageClass(..)
, StorageInfo(..)
, SIData(..)
, ServerInfo(..)
, getServerInfo
DriveInfo (..),
ErasureInfo (..),
Backend (..),
ConnStats (..),
HttpStats (..),
ServerProps (..),
CountNAvgTime (..),
StorageClass (..),
StorageInfo (..),
SIData (..),
ServerInfo (..),
getServerInfo,
HealOpts (..),
HealResultItem (..),
HealStatus (..),
HealStartResp (..),
startHeal,
forceStartHeal,
getHealStatus,
SetConfigResult (..),
NodeSummary (..),
setConfig,
getConfig,
ServerVersion (..),
ServiceStatus (..),
serviceStatus,
ServiceAction (..),
serviceSendAction,
)
where
, HealOpts(..)
, HealResultItem(..)
, HealStatus(..)
, HealStartResp(..)
, startHeal
, forceStartHeal
, getHealStatus
, SetConfigResult(..)
, NodeSummary(..)
, setConfig
, getConfig
, ServerVersion(..)
, ServiceStatus(..)
, serviceStatus
, ServiceAction(..)
, serviceSendAction
) where
import Data.Aeson (FromJSON, ToJSON, Value (Object),
eitherDecode, object, pairs,
parseJSON, toEncoding, toJSON,
withObject, withText, (.:), (.:?),
(.=))
import qualified Data.Aeson as A
import Data.Aeson.Types (typeMismatch)
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Text as T
import Data.Time (NominalDiffTime, getCurrentTime)
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
import Data.Aeson
( (.:),
(.:?),
(.=),
FromJSON,
ToJSON,
Value (Object),
eitherDecode,
object,
pairs,
parseJSON,
toEncoding,
toJSON,
withObject,
withText,
)
import qualified Data.Aeson as A
import Data.Aeson.Types (typeMismatch)
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Text as T
import Data.Time (NominalDiffTime, getCurrentTime)
import Lib.Prelude
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
-- | Per-drive information (uuid, endpoint, state) in admin API
-- responses.
data DriveInfo = DriveInfo
  { diUuid :: Text,
    diEndpoint :: Text,
    diState :: Text
  }
  deriving (Eq, Show)
instance FromJSON DriveInfo where
  parseJSON = withObject "DriveInfo" $ \v ->
    DriveInfo
      <$> v .: "uuid"
      <*> v .: "endpoint"
      <*> v .: "state"
-- | Erasure-coding storage class: parity and data drive counts.
data StorageClass = StorageClass
  { scParity :: Int,
    scData :: Int
  }
  deriving (Eq, Show)
-- | Erasure-backend details: disk counts, storage classes and drive
-- sets.
data ErasureInfo = ErasureInfo
  { eiOnlineDisks :: Int,
    eiOfflineDisks :: Int,
    eiStandard :: StorageClass,
    eiReducedRedundancy :: StorageClass,
    eiSets :: [[DriveInfo]]
  }
  deriving (Eq, Show)
instance FromJSON ErasureInfo where
  parseJSON = withObject "ErasureInfo" $ \v -> do
    onlineDisks <- v .: "OnlineDisks"
    offlineDisks <- v .: "OfflineDisks"
    -- NOTE(review): StorageClass's first field is scParity but the
    -- first key parsed is "StandardSCData" -- verify the intended
    -- field/key mapping against the server's JSON.
    stdClass <-
      StorageClass
        <$> v .: "StandardSCData"
        <*> v .: "StandardSCParity"
    rrClass <-
      StorageClass
        <$> v .: "RRSCData"
        <*> v .: "RRSCParity"
    sets <- v .: "Sets"
    return $ ErasureInfo onlineDisks offlineDisks stdClass rrClass sets
-- | Server storage backend: plain filesystem or erasure-coded.
data Backend
  = BackendFS
  | BackendErasure ErasureInfo
  deriving (Eq, Show)
instance FromJSON Backend where
  parseJSON = withObject "Backend" $ \v -> do
    typ <- v .: "Type"
    -- "Type" 1 selects the FS backend, 2 the erasure backend.
    case typ :: Int of
      1 -> return BackendFS
      2 -> BackendErasure <$> parseJSON (Object v)
      _ -> typeMismatch "BackendType" (Object v)
-- | Network byte counters (transferred/received).
data ConnStats = ConnStats
  { csTransferred :: Int64,
    csReceived :: Int64
  }
  deriving (Eq, Show)
instance FromJSON ConnStats where
  parseJSON = withObject "ConnStats" $ \v ->
    ConnStats
      <$> v .: "transferred"
      <*> v .: "received"
-- | Server properties: uptime, version, commit, region and SQS ARNs.
data ServerProps = ServerProps
  { spUptime :: NominalDiffTime,
    spVersion :: Text,
    spCommitId :: Text,
    spRegion :: Text,
    spSqsArns :: [Text]
  }
  deriving (Eq, Show)
instance FromJSON ServerProps where
  parseJSON = withObject "SIServer" $ \v -> do
    uptimeNs <- v .: "uptime"
    -- uptime arrives in nanoseconds; convert to seconds
    let uptime = uptimeNs / 1e9
    ver <- v .: "version"
    commitId <- v .: "commitID"
    region <- v .: "region"
    arn <- v .: "sqsARN"
    return $ ServerProps uptime ver commitId region arn
-- | Storage usage and backend details.
data StorageInfo = StorageInfo
  { siUsed :: Int64,
    siBackend :: Backend
  }
  deriving (Eq, Show)
instance FromJSON StorageInfo where
  parseJSON = withObject "StorageInfo" $ \v ->
    StorageInfo
      <$> v .: "Used"
      <*> v .: "Backend"
-- | A request count together with its average duration (as text).
data CountNAvgTime = CountNAvgTime
  { caCount :: Int64,
    caAvgDuration :: Text
  }
  deriving (Eq, Show)
instance FromJSON CountNAvgTime where
  parseJSON = withObject "CountNAvgTime" $ \v ->
    CountNAvgTime
      <$> v .: "count"
      <*> v .: "avgDuration"
-- | Per-HTTP-method request statistics (total and successful).
data HttpStats = HttpStats
  { hsTotalHeads :: CountNAvgTime,
    hsSuccessHeads :: CountNAvgTime,
    hsTotalGets :: CountNAvgTime,
    hsSuccessGets :: CountNAvgTime,
    hsTotalPuts :: CountNAvgTime,
    hsSuccessPuts :: CountNAvgTime,
    hsTotalPosts :: CountNAvgTime,
    hsSuccessPosts :: CountNAvgTime,
    hsTotalDeletes :: CountNAvgTime,
    hsSuccessDeletes :: CountNAvgTime
  }
  deriving (Eq, Show)
instance FromJSON HttpStats where
  parseJSON = withObject "HttpStats" $ \v ->
    HttpStats
      <$> v .: "totalHEADs"
      <*> v .: "successHEADs"
      <*> v .: "totalGETs"
      <*> v .: "successGETs"
      <*> v .: "totalPUTs"
      <*> v .: "successPUTs"
      <*> v .: "totalPOSTs"
      <*> v .: "successPOSTs"
      <*> v .: "totalDELETEs"
      <*> v .: "successDELETEs"
-- | Grouping of storage, network, HTTP and server statistics.
data SIData = SIData
  { sdStorage :: StorageInfo,
    sdConnStats :: ConnStats,
    sdHttpStats :: HttpStats,
    sdProps :: ServerProps
  }
  deriving (Eq, Show)
instance FromJSON SIData where
  parseJSON = withObject "SIData" $ \v ->
    SIData
      <$> v .: "storage"
      <*> v .: "network"
      <*> v .: "http"
      <*> v .: "server"
-- | Per-server info entry: error text, address and statistics.
data ServerInfo = ServerInfo
  { siError :: Text,
    siAddr :: Text,
    siData :: SIData
  }
  deriving (Eq, Show)
instance FromJSON ServerInfo where
  parseJSON = withObject "ServerInfo" $ \v ->
    ServerInfo
      <$> v .: "error"
      <*> v .: "addr"
      <*> v .: "data"
-- | Server version string together with its commit id.
data ServerVersion = ServerVersion
  { svVersion :: Text,
    svCommitId :: Text
  }
  deriving (Eq, Show)
instance FromJSON ServerVersion where
  parseJSON = withObject "ServerVersion" $ \v ->
    ServerVersion
      <$> v .: "version"
      <*> v .: "commitID"
-- | Service status: version and uptime.
data ServiceStatus = ServiceStatus
  { ssVersion :: ServerVersion,
    ssUptime :: NominalDiffTime
  }
  deriving (Eq, Show)
instance FromJSON ServiceStatus where
  parseJSON = withObject "ServiceStatus" $ \v -> do
    serverVersion <- v .: "serverVersion"
    uptimeNs <- v .: "uptime"
    -- uptime arrives in nanoseconds; convert to seconds
    let uptime = uptimeNs / 1e9
    return $ ServiceStatus serverVersion uptime
-- | Action request for the admin service endpoint.
data ServiceAction
  = ServiceActionRestart
  | ServiceActionStop
  deriving (Eq, Show)
instance ToJSON ServiceAction where
  toJSON a = object ["action" .= serviceActionToText a]
-- | Wire representation of a 'ServiceAction'.
serviceActionToText :: ServiceAction -> Text
serviceActionToText a = case a of
  ServiceActionRestart -> "restart"
  ServiceActionStop -> "stop"
-- | URL path prefix used for MinIO admin API requests.
adminPath :: ByteString
adminPath = "/minio/admin"
-- | Response to a heal-start request: token, client address and
-- start time.
data HealStartResp = HealStartResp
  { hsrClientToken :: Text,
    hsrClientAddr :: Text,
    hsrStartTime :: UTCTime
  }
  deriving (Eq, Show)
instance FromJSON HealStartResp where
  parseJSON = withObject "HealStartResp" $ \v ->
    HealStartResp
      <$> v .: "clientToken"
      <*> v .: "clientAddress"
      <*> v .: "startTime"
-- | Options for a heal operation: recursive and dry-run flags.
data HealOpts = HealOpts
  { hoRecursive :: Bool,
    hoDryRun :: Bool
  }
  deriving (Eq, Show)
instance ToJSON HealOpts where
toJSON (HealOpts r d) =
@ -288,197 +322,234 @@ instance ToJSON HealOpts where
pairs ("recursive" .= r <> "dryRun" .= d)
instance FromJSON HealOpts where
parseJSON = withObject "HealOpts" $ \v -> HealOpts
parseJSON = withObject "HealOpts" $ \v ->
HealOpts
<$> v .: "recursive"
<*> v .: "dryRun"
data HealItemType = HealItemMetadata
| HealItemBucket
| HealItemBucketMetadata
| HealItemObject
deriving (Eq, Show)
data HealItemType
= HealItemMetadata
| HealItemBucket
| HealItemBucketMetadata
| HealItemObject
deriving (Eq, Show)
instance FromJSON HealItemType where
parseJSON = withText "HealItemType" $ \v -> case v of
"metadata" -> return HealItemMetadata
"bucket" -> return HealItemBucket
"object" -> return HealItemObject
"bucket-metadata" -> return HealItemBucketMetadata
_ -> typeMismatch "HealItemType" (A.String v)
parseJSON = withText "HealItemType" $ \v -> case v of
"metadata" -> return HealItemMetadata
"bucket" -> return HealItemBucket
"object" -> return HealItemObject
"bucket-metadata" -> return HealItemBucketMetadata
_ -> typeMismatch "HealItemType" (A.String v)
data NodeSummary = NodeSummary
{ nsName :: Text
, nsErrSet :: Bool
, nsErrMessage :: Text
} deriving (Eq, Show)
{ nsName :: Text,
nsErrSet :: Bool,
nsErrMessage :: Text
}
deriving (Eq, Show)
instance FromJSON NodeSummary where
parseJSON = withObject "NodeSummary" $ \v -> NodeSummary
<$> v .: "name"
<*> v .: "errSet"
<*> v .: "errMsg"
parseJSON = withObject "NodeSummary" $ \v ->
NodeSummary
<$> v .: "name"
<*> v .: "errSet"
<*> v .: "errMsg"
data SetConfigResult = SetConfigResult
{ scrStatus :: Bool
, scrNodeSummary :: [NodeSummary]
} deriving (Eq, Show)
{ scrStatus :: Bool,
scrNodeSummary :: [NodeSummary]
}
deriving (Eq, Show)
instance FromJSON SetConfigResult where
parseJSON = withObject "SetConfigResult" $ \v -> SetConfigResult
<$> v .: "status"
<*> v .: "nodeResults"
parseJSON = withObject "SetConfigResult" $ \v ->
SetConfigResult
<$> v .: "status"
<*> v .: "nodeResults"
data HealResultItem = HealResultItem
{ hriResultIdx :: Int
, hriType :: HealItemType
, hriBucket :: Bucket
, hriObject :: Object
, hriDetail :: Text
, hriParityBlocks :: Maybe Int
, hriDataBlocks :: Maybe Int
, hriDiskCount :: Int
, hriSetCount :: Int
, hriObjectSize :: Int
, hriBefore :: [DriveInfo]
, hriAfter :: [DriveInfo]
} deriving (Eq, Show)
{ hriResultIdx :: Int,
hriType :: HealItemType,
hriBucket :: Bucket,
hriObject :: Object,
hriDetail :: Text,
hriParityBlocks :: Maybe Int,
hriDataBlocks :: Maybe Int,
hriDiskCount :: Int,
hriSetCount :: Int,
hriObjectSize :: Int,
hriBefore :: [DriveInfo],
hriAfter :: [DriveInfo]
}
deriving (Eq, Show)
instance FromJSON HealResultItem where
parseJSON = withObject "HealResultItem" $ \v -> HealResultItem
<$> v .: "resultId"
<*> v .: "type"
<*> v .: "bucket"
<*> v .: "object"
<*> v .: "detail"
<*> v .:? "parityBlocks"
<*> v .:? "dataBlocks"
<*> v .: "diskCount"
<*> v .: "setCount"
<*> v .: "objectSize"
<*> (do before <- v .: "before"
before .: "drives")
<*> (do after <- v .: "after"
after .: "drives")
parseJSON = withObject "HealResultItem" $ \v ->
HealResultItem
<$> v .: "resultId"
<*> v .: "type"
<*> v .: "bucket"
<*> v .: "object"
<*> v .: "detail"
<*> v .:? "parityBlocks"
<*> v .:? "dataBlocks"
<*> v .: "diskCount"
<*> v .: "setCount"
<*> v .: "objectSize"
<*> ( do
before <- v .: "before"
before .: "drives"
)
<*> ( do
after <- v .: "after"
after .: "drives"
)
data HealStatus = HealStatus
{ hsSummary :: Text
, hsStartTime :: UTCTime
, hsSettings :: HealOpts
, hsNumDisks :: Int
, hsFailureDetail :: Maybe Text
, hsItems :: Maybe [HealResultItem]
} deriving (Eq, Show)
{ hsSummary :: Text,
hsStartTime :: UTCTime,
hsSettings :: HealOpts,
hsNumDisks :: Int,
hsFailureDetail :: Maybe Text,
hsItems :: Maybe [HealResultItem]
}
deriving (Eq, Show)
instance FromJSON HealStatus where
parseJSON = withObject "HealStatus" $ \v -> HealStatus
<$> v .: "Summary"
<*> v .: "StartTime"
<*> v .: "Settings"
<*> v .: "NumDisks"
<*> v .:? "Detail"
<*> v .: "Items"
parseJSON = withObject "HealStatus" $ \v ->
HealStatus
<$> v .: "Summary"
<*> v .: "StartTime"
<*> v .: "Settings"
<*> v .: "NumDisks"
<*> v .:? "Detail"
<*> v .: "Items"
healPath :: Maybe Bucket -> Maybe Text -> ByteString
healPath bucket prefix = do
if (isJust bucket)
then encodeUtf8 $ "v1/heal/" <> fromMaybe "" bucket <> "/"
<> fromMaybe "" prefix
then
encodeUtf8 $
"v1/heal/" <> fromMaybe "" bucket <> "/"
<> fromMaybe "" prefix
else encodeUtf8 $ "v1/heal/"
-- | Get server version and uptime.
serviceStatus :: Minio ServiceStatus
serviceStatus = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodGet
, ariPayload = PayloadBS B.empty
, ariPayloadHash = Nothing
, ariPath = "v1/service"
, ariHeaders = []
, ariQueryParams = []
}
rsp <-
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodGet,
ariPayload = PayloadBS B.empty,
ariPayloadHash = Nothing,
ariPath = "v1/service",
ariHeaders = [],
ariQueryParams = []
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right ss -> return ss
Left err -> throwIO $ MErrVJsonParse $ T.pack err
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right ss -> return ss
Left err -> throwIO $ MErrVJsonParse $ T.pack err
-- | Send service restart or stop action to MinIO server.
serviceSendAction :: ServiceAction -> Minio ()
serviceSendAction action = do
let payload = PayloadBS $ LBS.toStrict $ A.encode action
void $ executeAdminRequest AdminReqInfo { ariMethod = HT.methodPost
, ariPayload = payload
, ariPayloadHash = Nothing
, ariPath = "v1/service"
, ariHeaders = []
, ariQueryParams = []
}
let payload = PayloadBS $ LBS.toStrict $ A.encode action
void $
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodPost,
ariPayload = payload,
ariPayloadHash = Nothing,
ariPath = "v1/service",
ariHeaders = [],
ariQueryParams = []
}
-- | Get the current config file from server.
getConfig :: Minio ByteString
getConfig = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodGet
, ariPayload = PayloadBS B.empty
, ariPayloadHash = Nothing
, ariPath = "v1/config"
, ariHeaders = []
, ariQueryParams = []
}
return $ LBS.toStrict $ NC.responseBody rsp
rsp <-
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodGet,
ariPayload = PayloadBS B.empty,
ariPayloadHash = Nothing,
ariPath = "v1/config",
ariHeaders = [],
ariQueryParams = []
}
return $ LBS.toStrict $ NC.responseBody rsp
-- | Set a new config to the server.
setConfig :: ByteString -> Minio SetConfigResult
setConfig config = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodPut
, ariPayload = PayloadBS config
, ariPayloadHash = Nothing
, ariPath = "v1/config"
, ariHeaders = []
, ariQueryParams = []
}
rsp <-
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodPut,
ariPayload = PayloadBS config,
ariPayloadHash = Nothing,
ariPath = "v1/config",
ariHeaders = [],
ariQueryParams = []
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right scr -> return scr
Left err -> throwIO $ MErrVJsonParse $ T.pack err
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right scr -> return scr
Left err -> throwIO $ MErrVJsonParse $ T.pack err
-- | Get the progress of currently running heal task, this API should be
-- invoked right after `startHeal`. `token` is obtained after `startHeal`
-- which should be used to get the heal status.
getHealStatus :: Maybe Bucket -> Maybe Text -> Text -> Minio HealStatus
getHealStatus bucket prefix token = do
when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath
let qparams = HT.queryTextToQuery [("clientToken", Just token)]
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodPost
, ariPayload = PayloadBS B.empty
, ariPayloadHash = Nothing
, ariPath = healPath bucket prefix
, ariHeaders = []
, ariQueryParams = qparams
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right hs -> return hs
Left err -> throwIO $ MErrVJsonParse $ T.pack err
when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath
let qparams = HT.queryTextToQuery [("clientToken", Just token)]
rsp <-
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodPost,
ariPayload = PayloadBS B.empty,
ariPayloadHash = Nothing,
ariPath = healPath bucket prefix,
ariHeaders = [],
ariQueryParams = qparams
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right hs -> return hs
Left err -> throwIO $ MErrVJsonParse $ T.pack err
doHeal :: Maybe Bucket -> Maybe Text -> HealOpts -> Bool -> Minio HealStartResp
doHeal bucket prefix opts forceStart = do
when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath
let payload = PayloadBS $ LBS.toStrict $ A.encode opts
let qparams = bool [] (HT.queryTextToQuery [("forceStart", Just "true")])
forceStart
when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath
let payload = PayloadBS $ LBS.toStrict $ A.encode opts
let qparams =
bool
[]
(HT.queryTextToQuery [("forceStart", Just "true")])
forceStart
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodPost
, ariPayload = payload
, ariPayloadHash = Nothing
, ariPath = healPath bucket prefix
, ariHeaders = []
, ariQueryParams = qparams
}
rsp <-
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodPost,
ariPayload = payload,
ariPayloadHash = Nothing,
ariPath = healPath bucket prefix,
ariHeaders = [],
ariQueryParams = qparams
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right hsr -> return hsr
Left err -> throwIO $ MErrVJsonParse $ T.pack err
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right hsr -> return hsr
Left err -> throwIO $ MErrVJsonParse $ T.pack err
-- | Start a heal sequence that scans data under given (possible empty)
-- `bucket` and `prefix`. The `recursive` bool turns on recursive
@ -500,60 +571,75 @@ forceStartHeal bucket prefix opts = doHeal bucket prefix opts True
-- properties, storage information, network statistics, etc.
getServerInfo :: Minio [ServerInfo]
getServerInfo = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodGet
, ariPayload = PayloadBS B.empty
, ariPayloadHash = Nothing
, ariPath = "v1/info"
, ariHeaders = []
, ariQueryParams = []
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right si -> return si
Left err -> throwIO $ MErrVJsonParse $ T.pack err
rsp <-
executeAdminRequest
AdminReqInfo
{ ariMethod = HT.methodGet,
ariPayload = PayloadBS B.empty,
ariPayloadHash = Nothing,
ariPath = "v1/info",
ariHeaders = [],
ariQueryParams = []
}
let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right si -> return si
Left err -> throwIO $ MErrVJsonParse $ T.pack err
executeAdminRequest :: AdminReqInfo -> Minio (Response LByteString)
executeAdminRequest ari = do
req <- buildAdminRequest ari
mgr <- asks mcConnManager
httpLbs req mgr
req <- buildAdminRequest ari
mgr <- asks mcConnManager
httpLbs req mgr
buildAdminRequest :: AdminReqInfo -> Minio NC.Request
buildAdminRequest areq = do
ci <- asks mcConnInfo
sha256Hash <- if | connectIsSecure ci ->
-- if secure connection
return "UNSIGNED-PAYLOAD"
ci <- asks mcConnInfo
sha256Hash <-
if
| connectIsSecure ci ->
-- if secure connection
return "UNSIGNED-PAYLOAD"
-- otherwise compute sha256
| otherwise -> getPayloadSHA256Hash (ariPayload areq)
-- otherwise compute sha256
| otherwise -> getPayloadSHA256Hash (ariPayload areq)
timeStamp <- liftIO getCurrentTime
timeStamp <- liftIO getCurrentTime
let hostHeader = (hHost, getHostAddr ci)
newAreq =
areq
{ ariPayloadHash = Just sha256Hash,
ariHeaders =
hostHeader
: sha256Header sha256Hash
: ariHeaders areq
}
signReq = toRequest ci newAreq
sp =
SignParams
(connectAccessKey ci)
(connectSecretKey ci)
timeStamp
Nothing
Nothing
(ariPayloadHash newAreq)
signHeaders = signV4 sp signReq
let hostHeader = (hHost, getHostAddr ci)
newAreq = areq { ariPayloadHash = Just sha256Hash
, ariHeaders = hostHeader
: sha256Header sha256Hash
: ariHeaders areq
}
signReq = toRequest ci newAreq
sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
timeStamp Nothing Nothing (ariPayloadHash newAreq)
signHeaders = signV4 sp signReq
-- Update signReq with Authorization header containing v4 signature
return signReq {
NC.requestHeaders = ariHeaders newAreq ++ mkHeaderFromPairs signHeaders
}
-- Update signReq with Authorization header containing v4 signature
return
signReq
{ NC.requestHeaders = ariHeaders newAreq ++ mkHeaderFromPairs signHeaders
}
where
toRequest :: ConnectInfo -> AdminReqInfo -> NC.Request
toRequest ci aReq = NC.defaultRequest
{ NC.method = ariMethod aReq
, NC.secure = connectIsSecure ci
, NC.host = encodeUtf8 $ connectHost ci
, NC.port = connectPort ci
, NC.path = B.intercalate "/" [adminPath, ariPath aReq]
, NC.requestHeaders = ariHeaders aReq
, NC.queryString = HT.renderQuery False $ ariQueryParams aReq
, NC.requestBody = getRequestBody (ariPayload aReq)
toRequest ci aReq =
NC.defaultRequest
{ NC.method = ariMethod aReq,
NC.secure = connectIsSecure ci,
NC.host = encodeUtf8 $ connectHost ci,
NC.port = connectPort ci,
NC.path = B.intercalate "/" [adminPath, ariPath aReq],
NC.requestHeaders = ariHeaders aReq,
NC.queryString = HT.renderQuery False $ ariQueryParams aReq,
NC.requestBody = getRequestBody (ariPayload aReq)
}

View File

@ -16,19 +16,19 @@
module Network.Minio.CopyObject where
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | Copy an object using single or multipart copy strategy.
copyObjectInternal :: Bucket -> Object -> SourceInfo
-> Minio ETag
copyObjectInternal ::
Bucket ->
Object ->
SourceInfo ->
Minio ETag
copyObjectInternal b' o srcInfo = do
let sBucket = srcBucket srcInfo
sObject = srcObject srcInfo
@ -43,27 +43,35 @@ copyObjectInternal b' o srcInfo = do
startOffset = fst range
endOffset = snd range
when (isJust rangeMay &&
or [startOffset < 0, endOffset < startOffset,
endOffset >= fromIntegral srcSize]) $
throwIO $ MErrVInvalidSrcObjByteRange range
when
( isJust rangeMay
&& or
[ startOffset < 0,
endOffset < startOffset,
endOffset >= fromIntegral srcSize
]
)
$ throwIO
$ MErrVInvalidSrcObjByteRange range
-- 1. If sz > 64MiB (minPartSize) use multipart copy, OR
-- 2. If startOffset /= 0 use multipart copy
let destSize = (\(a, b) -> b - a + 1 ) $
maybe (0, srcSize - 1) identity rangeMay
let destSize =
(\(a, b) -> b - a + 1) $
maybe (0, srcSize - 1) identity rangeMay
if destSize > minPartSize || (endOffset - startOffset + 1 /= srcSize)
then multiPartCopyObject b' o srcInfo srcSize
else fst <$> copyObjectSingle b' o srcInfo{srcRange = Nothing} []
else fst <$> copyObjectSingle b' o srcInfo {srcRange = Nothing} []
-- | Given the input byte range of the source object, compute the
-- splits for a multipart copy object procedure. Minimum part size
-- used is minPartSize.
selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
selectCopyRanges (st, end) = zip pns $
map (\(x, y) -> (st + x, st + x + y - 1)) $ zip startOffsets partSizes
selectCopyRanges (st, end) =
zip pns
$ map (\(x, y) -> (st + x, st + x + y - 1))
$ zip startOffsets partSizes
where
size = end - st + 1
(pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
@ -71,22 +79,30 @@ selectCopyRanges (st, end) = zip pns $
-- | Perform a multipart copy object action. Since we cannot verify
-- existing parts based on the source object, there is no resuming
-- copy action support.
multiPartCopyObject :: Bucket -> Object -> SourceInfo -> Int64
-> Minio ETag
multiPartCopyObject ::
Bucket ->
Object ->
SourceInfo ->
Int64 ->
Minio ETag
multiPartCopyObject b o cps srcSize = do
uid <- newMultipartUpload b o []
let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps
partRanges = selectCopyRanges byteRange
partSources = map (\(x, (start, end)) -> (x, cps {srcRange = Just (start, end) }))
partRanges
dstInfo = defaultDestinationInfo { dstBucket = b, dstObject = o}
partSources =
map
(\(x, (start, end)) -> (x, cps {srcRange = Just (start, end)}))
partRanges
dstInfo = defaultDestinationInfo {dstBucket = b, dstObject = o}
copiedParts <- limitedMapConcurrently 10
(\(pn, cps') -> do
(etag, _) <- copyObjectPart dstInfo cps' uid pn []
return (pn, etag)
)
partSources
copiedParts <-
limitedMapConcurrently
10
( \(pn, cps') -> do
(etag, _) <- copyObjectPart dstInfo cps' uid pn []
return (pn, etag)
)
partSources
completeMultipartUpload b o uid copiedParts

File diff suppressed because it is too large Load Diff

View File

@ -13,23 +13,22 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE FlexibleInstances #-}
module Network.Minio.Data.ByteString
(
stripBS
, UriEncodable(..)
) where
( stripBS,
UriEncodable (..),
)
where
import qualified Data.ByteString as B
import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Char8 as BC8
import qualified Data.ByteString.Lazy as LB
import Data.Char (isSpace, toUpper, isAsciiUpper, isAsciiLower, isDigit)
import Data.Char (isAsciiLower, isAsciiUpper)
import qualified Data.Text as T
import Numeric (showHex)
import Lib.Prelude
import Lib.Prelude
import Numeric (showHex)
stripBS :: ByteString -> ByteString
stripBS = BC8.dropWhile isSpace . fst . BC8.spanEnd isSpace
@ -40,7 +39,7 @@ class UriEncodable s where
instance UriEncodable [Char] where
uriEncode encodeSlash payload =
LB.toStrict $ BB.toLazyByteString $ mconcat $
map (`uriEncodeChar` encodeSlash) payload
map (`uriEncodeChar` encodeSlash) payload
instance UriEncodable ByteString where
-- assumes that uriEncode is passed ASCII encoded strings.
@ -59,16 +58,17 @@ uriEncodeChar '/' True = BB.byteString "%2F"
uriEncodeChar '/' False = BB.char7 '/'
uriEncodeChar ch _
| isAsciiUpper ch
|| isAsciiLower ch
|| isDigit ch
|| (ch == '_')
|| (ch == '-')
|| (ch == '.')
|| (ch == '~') = BB.char7 ch
|| isAsciiLower ch
|| isDigit ch
|| (ch == '_')
|| (ch == '-')
|| (ch == '.')
|| (ch == '~') =
BB.char7 ch
| otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch
where
f :: Word8 -> BB.Builder
f n = BB.char7 '%' <> BB.string7 hexStr
where
hexStr = map toUpper $ showHex q $ showHex r ""
(q, r) = divMod (fromIntegral n) (16::Word8)
(q, r) = divMod (fromIntegral n) (16 :: Word8)

View File

@ -15,31 +15,31 @@
--
module Network.Minio.Data.Crypto
(
hashSHA256
, hashSHA256FromSource
( hashSHA256,
hashSHA256FromSource,
hashMD5,
hashMD5ToBase64,
hashMD5FromSource,
hmacSHA256,
hmacSHA256RawBS,
digestToBS,
digestToBase16,
encodeToBase64,
)
where
, hashMD5
, hashMD5ToBase64
, hashMD5FromSource
, hmacSHA256
, hmacSHA256RawBS
, digestToBS
, digestToBase16
, encodeToBase64
) where
import Crypto.Hash (Digest, MD5 (..), SHA256 (..),
hashWith)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C
import Lib.Prelude
import Crypto.Hash
( Digest,
MD5 (..),
SHA256 (..),
hashWith,
)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C
import Lib.Prelude
hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256

View File

@ -15,20 +15,18 @@
--
module Network.Minio.Data.Time
(
awsTimeFormat
, awsTimeFormatBS
, awsDateFormat
, awsDateFormatBS
, awsParseTime
, iso8601TimeFormat
) where
( awsTimeFormat,
awsTimeFormatBS,
awsDateFormat,
awsDateFormatBS,
awsParseTime,
iso8601TimeFormat,
)
where
import Data.ByteString.Char8 (pack)
import Data.ByteString.Char8 (pack)
import qualified Data.Time as Time
import Lib.Prelude
import Lib.Prelude
awsTimeFormat :: UTCTime -> [Char]
awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"

View File

@ -16,74 +16,75 @@
module Network.Minio.Errors where
import Control.Exception
import Control.Exception
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import Lib.Prelude
---------------------------------
-- Errors
---------------------------------
-- | Various validation errors
data MErrV = MErrVSinglePUTSizeExceeded Int64
| MErrVPutSizeExceeded Int64
| MErrVETagHeaderNotFound
| MErrVInvalidObjectInfoResponse
| MErrVInvalidSrcObjSpec Text
| MErrVInvalidSrcObjByteRange (Int64, Int64)
| MErrVCopyObjSingleNoRangeAccepted
| MErrVRegionNotSupported Text
| MErrVXmlParse Text
| MErrVInvalidBucketName Text
| MErrVInvalidObjectName Text
| MErrVInvalidUrlExpiry Int
| MErrVJsonParse Text
| MErrVInvalidHealPath
| MErrVMissingCredentials
| MErrVInvalidEncryptionKeyLength
| MErrVStreamingBodyUnexpectedEOF
| MErrVUnexpectedPayload
data MErrV
= MErrVSinglePUTSizeExceeded Int64
| MErrVPutSizeExceeded Int64
| MErrVETagHeaderNotFound
| MErrVInvalidObjectInfoResponse
| MErrVInvalidSrcObjSpec Text
| MErrVInvalidSrcObjByteRange (Int64, Int64)
| MErrVCopyObjSingleNoRangeAccepted
| MErrVRegionNotSupported Text
| MErrVXmlParse Text
| MErrVInvalidBucketName Text
| MErrVInvalidObjectName Text
| MErrVInvalidUrlExpiry Int
| MErrVJsonParse Text
| MErrVInvalidHealPath
| MErrVMissingCredentials
| MErrVInvalidEncryptionKeyLength
| MErrVStreamingBodyUnexpectedEOF
| MErrVUnexpectedPayload
deriving (Show, Eq)
instance Exception MErrV
-- | Errors returned by S3 compatible service
data ServiceErr = BucketAlreadyExists
| BucketAlreadyOwnedByYou
| NoSuchBucket
| InvalidBucketName
| NoSuchKey
| SelectErr Text Text
| ServiceErr Text Text
data ServiceErr
= BucketAlreadyExists
| BucketAlreadyOwnedByYou
| NoSuchBucket
| InvalidBucketName
| NoSuchKey
| SelectErr Text Text
| ServiceErr Text Text
deriving (Show, Eq)
instance Exception ServiceErr
toServiceErr :: Text -> Text -> ServiceErr
toServiceErr "NoSuchKey" _ = NoSuchKey
toServiceErr "NoSuchBucket" _ = NoSuchBucket
toServiceErr "InvalidBucketName" _ = InvalidBucketName
toServiceErr "NoSuchKey" _ = NoSuchKey
toServiceErr "NoSuchBucket" _ = NoSuchBucket
toServiceErr "InvalidBucketName" _ = InvalidBucketName
toServiceErr "BucketAlreadyOwnedByYou" _ = BucketAlreadyOwnedByYou
toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists
toServiceErr code message = ServiceErr code message
toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists
toServiceErr code message = ServiceErr code message
-- | Errors thrown by the library
data MinioErr = MErrHTTP NC.HttpException
| MErrIO IOException
| MErrService ServiceErr
| MErrValidation MErrV
data MinioErr
= MErrHTTP NC.HttpException
| MErrIO IOException
| MErrService ServiceErr
| MErrValidation MErrV
deriving (Show)
instance Eq MinioErr where
MErrHTTP _ == MErrHTTP _ = True
MErrHTTP _ == _ = False
MErrIO _ == MErrIO _ = True
MErrIO _ == _ = False
MErrService a == MErrService b = a == b
MErrService _ == _ = False
MErrValidation a == MErrValidation b = a == b
MErrValidation _ == _ = False
MErrHTTP _ == MErrHTTP _ = True
MErrHTTP _ == _ = False
MErrIO _ == MErrIO _ = True
MErrIO _ == _ = False
MErrService a == MErrService b = a == b
MErrService _ == _ = False
MErrValidation a == MErrValidation b = a == b
MErrValidation _ == _ = False
instance Exception MinioErr

View File

@ -15,28 +15,35 @@
--
module Network.Minio.JsonParser
(
parseErrResponseJSON
) where
( parseErrResponseJSON,
)
where
import Data.Aeson (FromJSON, eitherDecode, parseJSON,
withObject, (.:))
import qualified Data.Text as T
import Data.Aeson
( (.:),
FromJSON,
eitherDecode,
parseJSON,
withObject,
)
import qualified Data.Text as T
import Lib.Prelude
import Network.Minio.Errors
import Lib.Prelude
data AdminErrJSON = AdminErrJSON
{ aeCode :: Text,
aeMessage :: Text
}
deriving (Eq, Show)
import Network.Minio.Errors
data AdminErrJSON = AdminErrJSON { aeCode :: Text
, aeMessage :: Text
} deriving (Eq, Show)
instance FromJSON AdminErrJSON where
parseJSON = withObject "AdminErrJSON" $ \v -> AdminErrJSON
<$> v .: "Code"
<*> v .: "Message"
parseJSON = withObject "AdminErrJSON" $ \v ->
AdminErrJSON
<$> v .: "Code"
<*> v .: "Message"
parseErrResponseJSON :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponseJSON jsondata =
case eitherDecode jsondata of
Right aErr -> return $ toServiceErr (aeCode aErr) (aeMessage aErr)
Left err -> throwIO $ MErrVJsonParse $ T.pack err
Left err -> throwIO $ MErrVJsonParse $ T.pack err

View File

@ -16,20 +16,19 @@
module Network.Minio.ListOps where
import qualified Data.Conduit as C
import qualified Data.Conduit as C
import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.S3API
import qualified Data.Conduit.List as CL
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.S3API
-- | Represents a list output item - either an object or an object
-- prefix (i.e. a directory).
data ListItem = ListItemObject ObjectInfo
| ListItemPrefix Text
deriving (Show, Eq)
data ListItem
= ListItemObject ObjectInfo
| ListItemPrefix Text
deriving (Show, Eq)
-- | @'listObjects' bucket prefix recurse@ lists objects in a bucket
-- similar to a file system tree traversal.
@ -48,73 +47,99 @@ listObjects bucket prefix recurse = loop Nothing
where
loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop nextToken = do
let
delimiter = bool (Just "/") Nothing recurse
let delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects res
unless recurse $
CL.sourceList $ map ListItemPrefix $ lorCPrefixes res
unless recurse
$ CL.sourceList
$ map ListItemPrefix
$ lorCPrefixes res
when (lorHasMore res) $
loop (lorNextToken res)
-- | Lists objects - similar to @listObjects@, however uses the older
-- V1 AWS S3 API. Prefer @listObjects@ to this.
listObjectsV1 :: Bucket -> Maybe Text -> Bool
-> C.ConduitM () ListItem Minio ()
listObjectsV1 ::
Bucket ->
Maybe Text ->
Bool ->
C.ConduitM () ListItem Minio ()
listObjectsV1 bucket prefix recurse = loop Nothing
where
loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop nextMarker = do
let
delimiter = bool (Just "/") Nothing recurse
let delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects' res
unless recurse $
CL.sourceList $ map ListItemPrefix $ lorCPrefixes' res
unless recurse
$ CL.sourceList
$ map ListItemPrefix
$ lorCPrefixes' res
when (lorHasMore' res) $
loop (lorNextMarker res)
-- | List incomplete uploads in a bucket matching the given prefix. If
-- recurse is set to True incomplete uploads for the given prefix are
-- recursively listed.
listIncompleteUploads :: Bucket -> Maybe Text -> Bool
-> C.ConduitM () UploadInfo Minio ()
listIncompleteUploads ::
Bucket ->
Maybe Text ->
Bool ->
C.ConduitM () UploadInfo Minio ()
listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
where
loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio ()
loop nextKeyMarker nextUploadIdMarker = do
let
delimiter = bool (Just "/") Nothing recurse
let delimiter = bool (Just "/") Nothing recurse
res <- lift $ listIncompleteUploads' bucket prefix delimiter
nextKeyMarker nextUploadIdMarker Nothing
res <-
lift $
listIncompleteUploads'
bucket
prefix
delimiter
nextKeyMarker
nextUploadIdMarker
Nothing
aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
partInfos <- C.runConduit $ listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
partInfos <-
C.runConduit $
listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
CL.sourceList $
map (\((uKey, uId, uInitTime), size) ->
UploadInfo uKey uId uInitTime size
) $ zip (lurUploads res) aggrSizes
CL.sourceList
$ map
( \((uKey, uId, uInitTime), size) ->
UploadInfo uKey uId uInitTime size
)
$ zip (lurUploads res) aggrSizes
when (lurHasMore res) $
loop (lurNextKey res) (lurNextUpload res)
-- | List object parts of an ongoing multipart upload for given
-- bucket, object and uploadId.
listIncompleteParts :: Bucket -> Object -> UploadId
-> C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts ::
Bucket ->
Object ->
UploadId ->
C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts bucket object uploadId = loop Nothing
where
loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio ()
loop nextPartMarker = do
res <- lift $ listIncompleteParts' bucket object uploadId Nothing
nextPartMarker
res <-
lift $
listIncompleteParts'
bucket
object
uploadId
Nothing
nextPartMarker
CL.sourceList $ lprParts res
when (lprHasMore res) $
loop (show <$> lprNextPart res)

View File

@ -15,43 +15,40 @@
--
module Network.Minio.PresignedOperations
( UrlExpiry
, makePresignedUrl
, presignedPutObjectUrl
, presignedGetObjectUrl
, presignedHeadObjectUrl
( UrlExpiry,
makePresignedUrl,
presignedPutObjectUrl,
presignedGetObjectUrl,
presignedHeadObjectUrl,
PostPolicyCondition (..),
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
PostPolicy (..),
PostPolicyError (..),
newPostPolicy,
showPostPolicy,
presignedPostPolicy,
)
where
, PostPolicyCondition(..)
, ppCondBucket
, ppCondContentLengthRange
, ppCondContentType
, ppCondKey
, ppCondKeyStartsWith
, ppCondSuccessActionStatus
, PostPolicy(..)
, PostPolicyError(..)
, newPostPolicy
, showPostPolicy
, presignedPostPolicy
) where
import Data.Aeson ((.=))
import qualified Data.Aeson as Json
import Data.ByteString.Builder (byteString, toLazyByteString)
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time as Time
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Data.Aeson ((.=))
import qualified Data.Aeson as Json
import Data.ByteString.Builder (byteString, toLazyByteString)
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time as Time
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
-- | Generate a presigned URL. This function allows for advanced usage
-- - for simple cases prefer the `presigned*Url` functions.
@ -61,42 +58,56 @@ import Network.Minio.Sign.V4
--
-- All extra query parameters or headers are signed, and therefore are
-- required to be sent when the generated URL is actually used.
makePresignedUrl :: UrlExpiry -> HT.Method -> Maybe Bucket -> Maybe Object
-> Maybe Region -> HT.Query -> HT.RequestHeaders
-> Minio ByteString
makePresignedUrl ::
UrlExpiry ->
HT.Method ->
Maybe Bucket ->
Maybe Object ->
Maybe Region ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
when (expiry > 7*24*3600 || expiry < 0) $
throwIO $ MErrVInvalidUrlExpiry expiry
when (expiry > 7 * 24 * 3600 || expiry < 0)
$ throwIO
$ MErrVInvalidUrlExpiry expiry
ci <- asks mcConnInfo
let
hostHeader = (hHost, getHostAddr ci)
req = NC.defaultRequest {
NC.method = method
, NC.secure = connectIsSecure ci
, NC.host = encodeUtf8 $ connectHost ci
, NC.port = connectPort ci
, NC.path = getS3Path bucket object
, NC.requestHeaders = hostHeader : extraHeaders
, NC.queryString = HT.renderQuery True extraQuery
}
let hostHeader = (hHost, getHostAddr ci)
req =
NC.defaultRequest
{ NC.method = method,
NC.secure = connectIsSecure ci,
NC.host = encodeUtf8 $ connectHost ci,
NC.port = connectPort ci,
NC.path = getS3Path bucket object,
NC.requestHeaders = hostHeader : extraHeaders,
NC.queryString = HT.renderQuery True extraQuery
}
ts <- liftIO Time.getCurrentTime
let sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
ts region (Just expiry) Nothing
let sp =
SignParams
(connectAccessKey ci)
(connectSecretKey ci)
ts
region
(Just expiry)
Nothing
signPairs = signV4 sp req
qpToAdd = (fmap . fmap) Just signPairs
queryStr = HT.renderQueryBuilder True
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
queryStr =
HT.renderQueryBuilder
True
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
return $ toS $ toLazyByteString $ scheme
<> byteString (getHostAddr ci)
<> byteString (getS3Path bucket object)
<> queryStr
return $ toStrictBS $ toLazyByteString $
scheme
<> byteString (getHostAddr ci)
<> byteString (getS3Path bucket object)
<> queryStr
-- | Generate a URL with authentication signature to PUT (upload) an
-- object. Any extra headers if passed, are signed, and so they are
@ -105,12 +116,22 @@ makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
--
-- For a list of possible headers to pass, please refer to the PUT
-- object REST API AWS S3 documentation.
presignedPutObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.RequestHeaders
-> Minio ByteString
presignedPutObjectUrl ::
Bucket ->
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl expirySeconds HT.methodPut
(Just bucket) (Just object) region [] extraHeaders
makePresignedUrl
expirySeconds
HT.methodPut
(Just bucket)
(Just object)
region
[]
extraHeaders
-- | Generate a URL with authentication signature to GET (download) an
-- object. All extra query parameters and headers passed here will be
@ -121,12 +142,23 @@ presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
--
-- For a list of possible request parameters and headers, please refer
-- to the GET object REST API AWS S3 documentation.
presignedGetObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.Query
-> HT.RequestHeaders -> Minio ByteString
presignedGetObjectUrl ::
Bucket ->
Object ->
UrlExpiry ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl expirySeconds HT.methodGet
(Just bucket) (Just object) region extraQuery extraHeaders
makePresignedUrl
expirySeconds
HT.methodGet
(Just bucket)
(Just object)
region
extraQuery
extraHeaders
-- | Generate a URL with authentication signature to make a HEAD
-- request on an object. This is used to fetch metadata about an
@ -135,18 +167,29 @@ presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
--
-- For a list of possible headers to pass, please refer to the HEAD
-- object REST API AWS S3 documentation.
presignedHeadObjectUrl :: Bucket -> Object -> UrlExpiry
-> HT.RequestHeaders -> Minio ByteString
presignedHeadObjectUrl ::
Bucket ->
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedHeadObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl expirySeconds HT.methodHead
(Just bucket) (Just object) region [] extraHeaders
makePresignedUrl
expirySeconds
HT.methodHead
(Just bucket)
(Just object)
region
[]
extraHeaders
-- | Represents individual conditions in a Post Policy document.
data PostPolicyCondition = PPCStartsWith Text Text
| PPCEquals Text Text
| PPCRange Text Int64 Int64
deriving (Show, Eq)
data PostPolicyCondition
= PPCStartsWith Text Text
| PPCEquals Text Text
| PPCRange Text Int64 Int64
deriving (Show, Eq)
instance Json.ToJSON PostPolicyCondition where
toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v]
@ -160,25 +203,28 @@ instance Json.ToJSON PostPolicyCondition where
Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
-- | A PostPolicy is required to perform uploads via browser forms.
data PostPolicy = PostPolicy {
expiration :: UTCTime
, conditions :: [PostPolicyCondition]
} deriving (Show, Eq)
data PostPolicy = PostPolicy
{ expiration :: UTCTime,
conditions :: [PostPolicyCondition]
}
deriving (Show, Eq)
instance Json.ToJSON PostPolicy where
toJSON (PostPolicy e c) =
Json.object $ [ "expiration" .= iso8601TimeFormat e
, "conditions" .= c
]
Json.object $
[ "expiration" .= iso8601TimeFormat e,
"conditions" .= c
]
toEncoding (PostPolicy e c) =
Json.pairs ("expiration" .= iso8601TimeFormat e <> "conditions" .= c)
-- | Possible validation errors when creating a PostPolicy.
data PostPolicyError = PPEKeyNotSpecified
| PPEBucketNotSpecified
| PPEConditionKeyEmpty
| PPERangeInvalid
deriving (Eq, Show)
data PostPolicyError
= PPEKeyNotSpecified
| PPEBucketNotSpecified
| PPEConditionKeyEmpty
| PPERangeInvalid
deriving (Eq, Show)
-- | Set the bucket name that the upload should use.
ppCondBucket :: Bucket -> PostPolicyCondition
@ -186,8 +232,10 @@ ppCondBucket = PPCEquals "bucket"
-- | Set the content length range constraint with minimum and maximum
-- byte count values.
ppCondContentLengthRange :: Int64 -> Int64
-> PostPolicyCondition
ppCondContentLengthRange ::
Int64 ->
Int64 ->
PostPolicyCondition
ppCondContentLengthRange = PPCRange "content-length-range"
-- | Set the content-type header for the upload.
@ -210,83 +258,91 @@ ppCondSuccessActionStatus n =
-- | This function creates a PostPolicy after validating its
-- arguments.
newPostPolicy :: UTCTime -> [PostPolicyCondition]
-> Either PostPolicyError PostPolicy
newPostPolicy ::
UTCTime ->
[PostPolicyCondition] ->
Either PostPolicyError PostPolicy
newPostPolicy expirationTime conds
-- object name condition must be present
| not $ any (keyEquals "key") conds =
Left PPEKeyNotSpecified
Left PPEKeyNotSpecified
-- bucket name condition must be present
| not $ any (keyEquals "bucket") conds =
Left PPEBucketNotSpecified
Left PPEBucketNotSpecified
-- a condition with an empty key is invalid
| any (keyEquals "") conds || any isEmptyRangeKey conds =
Left PPEConditionKeyEmpty
Left PPEConditionKeyEmpty
-- invalid range check
| any isInvalidRange conds =
Left PPERangeInvalid
Left PPERangeInvalid
-- all good!
| otherwise =
return $ PostPolicy expirationTime conds
return $ PostPolicy expirationTime conds
where
keyEquals k' (PPCStartsWith k _) = k == k'
keyEquals k' (PPCEquals k _) = k == k'
keyEquals _ _ = False
keyEquals k' (PPCEquals k _) = k == k'
keyEquals _ _ = False
isEmptyRangeKey (PPCRange k _ _) = k == ""
isEmptyRangeKey _ = False
isEmptyRangeKey _ = False
isInvalidRange (PPCRange _ mi ma) = mi < 0 || mi > ma
isInvalidRange _ = False
isInvalidRange _ = False
-- | Convert Post Policy to a string (e.g. for printing).
showPostPolicy :: PostPolicy -> ByteString
showPostPolicy = toS . Json.encode
showPostPolicy = toStrictBS . Json.encode
-- | Generate a presigned URL and POST policy to upload files via a
-- browser. On success, this function returns a URL and POST
-- form-data.
presignedPostPolicy :: PostPolicy
-> Minio (ByteString, H.HashMap Text ByteString)
presignedPostPolicy ::
PostPolicy ->
Minio (ByteString, H.HashMap Text ByteString)
presignedPostPolicy p = do
ci <- asks mcConnInfo
signTime <- liftIO $ Time.getCurrentTime
let
extraConditions =
[ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime)
, PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256"
, PPCEquals "x-amz-credential"
(T.intercalate "/" [connectAccessKey ci,
decodeUtf8 $ mkScope signTime region])
]
ppWithCreds = p {
conditions = conditions p ++ extraConditions
}
sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
signTime (Just $ connectRegion ci) Nothing Nothing
signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp
-- compute form-data
mkPair (PPCStartsWith k v) = Just (k, v)
mkPair (PPCEquals k v) = Just (k, v)
mkPair _ = Nothing
formFromPolicy = H.map toS $ H.fromList $ catMaybes $
mkPair <$> conditions ppWithCreds
formData = formFromPolicy `H.union` signData
-- compute POST upload URL
bucket = H.lookupDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
region = connectRegion ci
url = toS $ toLazyByteString $ scheme <> byteString (getHostAddr ci) <>
byteString "/" <> byteString (toS bucket) <> byteString "/"
let extraConditions =
[ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime),
PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256",
PPCEquals
"x-amz-credential"
( T.intercalate
"/"
[ connectAccessKey ci,
decodeUtf8 $ mkScope signTime region
]
)
]
ppWithCreds =
p
{ conditions = conditions p ++ extraConditions
}
sp =
SignParams
(connectAccessKey ci)
(connectSecretKey ci)
signTime
(Just $ connectRegion ci)
Nothing
Nothing
signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp
-- compute form-data
mkPair (PPCStartsWith k v) = Just (k, v)
mkPair (PPCEquals k v) = Just (k, v)
mkPair _ = Nothing
formFromPolicy =
H.map toUtf8 $ H.fromList $ catMaybes $
mkPair <$> conditions ppWithCreds
formData = formFromPolicy `H.union` signData
-- compute POST upload URL
bucket = H.lookupDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
region = connectRegion ci
url =
toStrictBS $ toLazyByteString $
scheme <> byteString (getHostAddr ci)
<> byteString "/"
<> byteString bucket
<> byteString "/"
return (url, formData)

View File

@ -15,29 +15,24 @@
--
module Network.Minio.PutObject
(
putObjectInternal
, ObjectData(..)
, selectPartSizes
) where
( putObjectInternal,
ObjectData (..),
selectPartSizes,
)
where
import Conduit (takeC)
import qualified Conduit as C
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Conduit.Binary as CB
import Conduit (takeC)
import qualified Conduit as C
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
import qualified Data.Conduit.List as CL
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | A data-type to represent the source data for an object. A
-- file-path or a producer-conduit may be provided.
@ -50,37 +45,45 @@ import Network.Minio.Utils
-- the input - if it is not provided, upload will continue until the
-- stream ends or the object reaches `maxObjectSize` size.
data ObjectData m
= ODFile FilePath (Maybe Int64) -- ^ Takes filepath and optional
-- size.
| ODStream (C.ConduitM () ByteString m ()) (Maybe Int64) -- ^ Pass
-- size
-- (bytes)
-- if
-- known.
= -- | Takes filepath and optional
-- size.
ODFile FilePath (Maybe Int64)
| -- | Pass
-- size
-- (bytes)
-- if
-- known.
ODStream (C.ConduitM () ByteString m ()) (Maybe Int64)
-- | Put an object from ObjectData. This high-level API handles
-- objects of all sizes, and even if the object size is unknown.
putObjectInternal :: Bucket -> Object -> PutObjectOptions
-> ObjectData Minio -> Minio ETag
putObjectInternal ::
Bucket ->
Object ->
PutObjectOptions ->
ObjectData Minio ->
Minio ETag
putObjectInternal b o opts (ODStream src sizeMay) = do
case sizeMay of
-- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing src
-- got file size, so check for single/multipart upload
Just size ->
if | size <= 64 * oneMiB -> do
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
if
| size <= 64 * oneMiB -> do
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
putObjectInternal b o opts (ODFile fp sizeMay) = do
hResE <- withNewHandle fp $ \h ->
liftM2 (,) (isHandleSeekable h) (getFileSize h)
(isSeekable, handleSizeMay) <- either (const $ return (False, Nothing)) return
hResE
(isSeekable, handleSizeMay) <-
either
(const $ return (False, Nothing))
return
hResE
-- prefer given size to queried size.
let finalSizeMay = listToMaybe $ catMaybes [sizeMay, handleSizeMay]
@ -88,18 +91,25 @@ putObjectInternal b o opts (ODFile fp sizeMay) = do
case finalSizeMay of
-- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing $ CB.sourceFile fp
-- got file size, so check for single/multipart upload
Just size ->
if | size <= 64 * oneMiB -> either throwIO return =<<
withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| isSeekable -> parallelMultipartUpload b o opts fp size
| otherwise -> sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp
if
| size <= 64 * oneMiB ->
either throwIO return
=<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| isSeekable -> parallelMultipartUpload b o opts fp size
| otherwise ->
sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp
parallelMultipartUpload :: Bucket -> Object -> PutObjectOptions
-> FilePath -> Int64 -> Minio ETag
parallelMultipartUpload ::
Bucket ->
Object ->
PutObjectOptions ->
FilePath ->
Int64 ->
Minio ETag
parallelMultipartUpload b o opts filePath size = do
-- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -109,15 +119,17 @@ parallelMultipartUpload b o opts filePath size = do
let threads = fromMaybe 10 $ pooNumThreads opts
-- perform upload with 'threads' threads
uploadedPartsE <- limitedMapConcurrently (fromIntegral threads)
(uploadPart uploadId) partSizeInfo
uploadedPartsE <-
limitedMapConcurrently
(fromIntegral threads)
(uploadPart uploadId)
partSizeInfo
-- if there were any errors, rethrow exception.
mapM_ throwIO $ lefts uploadedPartsE
-- if we get here, all parts were successfully uploaded.
completeMultipartUpload b o uploadId $ rights uploadedPartsE
where
uploadPart uploadId (partNum, offset, sz) =
withNewHandle filePath $ \h -> do
@ -125,10 +137,13 @@ parallelMultipartUpload b o opts filePath size = do
putObjectPart b o uploadId partNum [] payload
-- | Upload multipart object from conduit source sequentially
sequentialMultipartUpload :: Bucket -> Object -> PutObjectOptions
-> Maybe Int64
-> C.ConduitM () ByteString Minio ()
-> Minio ETag
sequentialMultipartUpload ::
Bucket ->
Object ->
PutObjectOptions ->
Maybe Int64 ->
C.ConduitM () ByteString Minio () ->
Minio ETag
sequentialMultipartUpload b o opts sizeMay src = do
-- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -136,22 +151,23 @@ sequentialMultipartUpload b o opts sizeMay src = do
-- upload parts in loop
let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay
(pnums, _, sizes) = List.unzip3 partSizes
uploadedParts <- C.runConduit
$ src
C..| chunkBSConduit (map fromIntegral sizes)
C..| CL.map PayloadBS
C..| uploadPart' uploadId pnums
C..| CC.sinkList
uploadedParts <-
C.runConduit $
src
C..| chunkBSConduit (map fromIntegral sizes)
C..| CL.map PayloadBS
C..| uploadPart' uploadId pnums
C..| CC.sinkList
-- complete multipart upload
completeMultipartUpload b o uploadId uploadedParts
where
uploadPart' _ [] = return ()
uploadPart' uid (pn:pns) = do
uploadPart' uid (pn : pns) = do
payloadMay <- C.await
case payloadMay of
Nothing -> return ()
Just payload -> do pinfo <- lift $ putObjectPart b o uid pn [] payload
C.yield pinfo
uploadPart' uid pns
Just payload -> do
pinfo <- lift $ putObjectPart b o uid pn [] payload
C.yield pinfo
uploadPart' uid pns

View File

@ -15,150 +15,162 @@
--
module Network.Minio.S3API
(
Region
, getLocation
( Region,
getLocation,
-- * Listing buckets
--------------------
, getService
-- * Listing buckets
--------------------
getService,
-- * Listing objects
--------------------
, ListObjectsResult(..)
, ListObjectsV1Result(..)
, listObjects'
, listObjectsV1'
-- * Listing objects
--------------------
ListObjectsResult (..),
ListObjectsV1Result (..),
listObjects',
listObjectsV1',
-- * Retrieving buckets
, headBucket
-- * Retrieving buckets
headBucket,
-- * Retrieving objects
-----------------------
, getObject'
, headObject
-- * Retrieving objects
-----------------------
getObject',
headObject,
-- * Creating buckets and objects
---------------------------------
, putBucket
, ETag
, maxSinglePutObjectSizeBytes
, putObjectSingle'
, putObjectSingle
, copyObjectSingle
-- * Creating buckets and objects
---------------------------------
putBucket,
ETag,
maxSinglePutObjectSizeBytes,
putObjectSingle',
putObjectSingle,
copyObjectSingle,
-- * Multipart Upload APIs
--------------------------
, UploadId
, PartTuple
, Payload(..)
, PartNumber
, newMultipartUpload
, putObjectPart
, copyObjectPart
, completeMultipartUpload
, abortMultipartUpload
, ListUploadsResult(..)
, listIncompleteUploads'
, ListPartsResult(..)
, listIncompleteParts'
-- * Multipart Upload APIs
--------------------------
UploadId,
PartTuple,
Payload (..),
PartNumber,
newMultipartUpload,
putObjectPart,
copyObjectPart,
completeMultipartUpload,
abortMultipartUpload,
ListUploadsResult (..),
listIncompleteUploads',
ListPartsResult (..),
listIncompleteParts',
-- * Deletion APIs
--------------------------
, deleteBucket
, deleteObject
-- * Deletion APIs
--------------------------
deleteBucket,
deleteObject,
-- * Presigned Operations
-----------------------------
, module Network.Minio.PresignedOperations
-- * Presigned Operations
-----------------------------
module Network.Minio.PresignedOperations,
-- ** Bucket Policies
, getBucketPolicy
, setBucketPolicy
-- ** Bucket Policies
getBucketPolicy,
setBucketPolicy,
-- * Bucket Notifications
-------------------------
, Notification(..)
, NotificationConfig(..)
, Arn
, Event(..)
, Filter(..)
, FilterKey(..)
, FilterRules(..)
, FilterRule(..)
, getBucketNotification
, putBucketNotification
, removeAllBucketNotification
) where
-- * Bucket Notifications
-------------------------
Notification (..),
NotificationConfig (..),
Arn,
Event (..),
Filter (..),
FilterKey (..),
FilterRules (..),
FilterRule (..),
getBucketNotification,
putBucketNotification,
removeAllBucketNotification,
)
where
import qualified Data.ByteString as BS
import qualified Data.Text as T
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Status (status404)
import UnliftIO (Handler (Handler))
import Lib.Prelude
import Network.Minio.API
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.PresignedOperations
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import qualified Data.ByteString as BS
import qualified Data.Text as T
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Status (status404)
import Network.Minio.API
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.PresignedOperations
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (Handler (Handler))
-- | Fetch all buckets from the service.
getService :: Minio [BucketInfo]
getService = do
resp <- executeRequest $ defaultS3ReqInfo {
riNeedsLocation = False
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riNeedsLocation = False
}
parseListBuckets $ NC.responseBody resp
-- Parse headers from getObject and headObject calls.
parseGetObjectHeaders :: Object -> [HT.Header] -> Maybe ObjectInfo
parseGetObjectHeaders object headers =
let metadataPairs = getMetadata headers
userMetadata = getUserMetadataMap metadataPairs
metadata = getNonUserMetadataMap metadataPairs
in ObjectInfo <$> Just object
<*> getLastModifiedHeader headers
<*> getETagHeader headers
<*> getContentLength headers
<*> Just userMetadata
<*> Just metadata
let metadataPairs = getMetadata headers
userMetadata = getUserMetadataMap metadataPairs
metadata = getNonUserMetadataMap metadataPairs
in ObjectInfo <$> Just object
<*> getLastModifiedHeader headers
<*> getETagHeader headers
<*> getContentLength headers
<*> Just userMetadata
<*> Just metadata
-- | GET an object from the service and return parsed ObjectInfo and a
-- conduit source for the object content
getObject' :: Bucket -> Object -> HT.Query -> [HT.Header]
-> Minio GetObjectResponse
getObject' ::
Bucket ->
Object ->
HT.Query ->
[HT.Header] ->
Minio GetObjectResponse
getObject' bucket object queryParams headers = do
resp <- mkStreamRequest reqInfo
let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp
objInfo <- maybe (throwIO MErrVInvalidObjectInfoResponse) return
objInfoMaybe
return $ GetObjectResponse { gorObjectInfo = objInfo
, gorObjectStream = NC.responseBody resp
}
resp <- mkStreamRequest reqInfo
let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp
objInfo <-
maybe
(throwIO MErrVInvalidObjectInfoResponse)
return
objInfoMaybe
return $
GetObjectResponse
{ gorObjectInfo = objInfo,
gorObjectStream = NC.responseBody resp
}
where
reqInfo = defaultS3ReqInfo { riBucket = Just bucket
, riObject = Just object
, riQueryParams = queryParams
, riHeaders = headers
}
reqInfo =
defaultS3ReqInfo
{ riBucket = Just bucket,
riObject = Just object,
riQueryParams = queryParams,
riHeaders = headers
}
-- | Creates a bucket via a PUT bucket call.
putBucket :: Bucket -> Region -> Minio ()
putBucket bucket location = do
ns <- asks getSvcNamespace
void $ executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riPayload = PayloadBS $ mkCreateBucketConfig ns location
, riNeedsLocation = False
}
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riPayload = PayloadBS $ mkCreateBucketConfig ns location,
riNeedsLocation = False
}
-- | Single PUT object size.
maxSinglePutObjectSizeBytes :: Int64
@ -172,315 +184,429 @@ putObjectSingle' :: Bucket -> Object -> [HT.Header] -> ByteString -> Minio ETag
putObjectSingle' bucket object headers bs = do
let size = fromIntegral (BS.length bs)
-- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $
throwIO $ MErrVSinglePUTSizeExceeded size
when (size > maxSinglePutObjectSizeBytes)
$ throwIO
$ MErrVSinglePUTSizeExceeded size
let payload = mkStreamingPayload $ PayloadBS bs
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riHeaders = headers
, riPayload = payload
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riHeaders = headers,
riPayload = payload
}
let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders
maybe
(throwIO MErrVETagHeaderNotFound)
return etag
return
etag
-- | PUT an object into the service. This function performs a single
-- PUT object call, and so can only transfer objects upto 5GiB.
putObjectSingle :: Bucket -> Object -> [HT.Header] -> Handle -> Int64
-> Int64 -> Minio ETag
putObjectSingle ::
Bucket ->
Object ->
[HT.Header] ->
Handle ->
Int64 ->
Int64 ->
Minio ETag
putObjectSingle bucket object headers h offset size = do
-- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $
throwIO $ MErrVSinglePUTSizeExceeded size
when (size > maxSinglePutObjectSizeBytes)
$ throwIO
$ MErrVSinglePUTSizeExceeded size
-- content-length header is automatically set by library.
let payload = mkStreamingPayload $ PayloadH h offset size
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riHeaders = headers
, riPayload = payload
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riHeaders = headers,
riPayload = payload
}
let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders
maybe
(throwIO MErrVETagHeaderNotFound)
return etag
return
etag
-- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextMarker.
listObjectsV1' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int
-> Minio ListObjectsV1Result
listObjectsV1' ::
Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsV1Result
listObjectsV1' bucket prefix nextMarker delimiter maxKeys = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = mkOptionalParams params
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
parseListObjectsV1Response $ NC.responseBody resp
where
params = [
("marker", nextMarker)
, ("prefix", prefix)
, ("delimiter", delimiter)
, ("max-keys", show <$> maxKeys)
params =
[ ("marker", nextMarker),
("prefix", prefix),
("delimiter", delimiter),
("max-keys", show <$> maxKeys)
]
-- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextToken.
listObjects' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int
-> Minio ListObjectsResult
listObjects' ::
Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsResult
listObjects' bucket prefix nextToken delimiter maxKeys = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = mkOptionalParams params
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
parseListObjectsResponse $ NC.responseBody resp
where
params = [
("list-type", Just "2")
, ("continuation_token", nextToken)
, ("prefix", prefix)
, ("delimiter", delimiter)
, ("max-keys", show <$> maxKeys)
params =
[ ("list-type", Just "2"),
("continuation_token", nextToken),
("prefix", prefix),
("delimiter", delimiter),
("max-keys", show <$> maxKeys)
]
-- | DELETE a bucket from the service.
deleteBucket :: Bucket -> Minio ()
deleteBucket bucket = void $
executeRequest $
defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
}
deleteBucket bucket =
void
$ executeRequest
$ defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket
}
-- | DELETE an object from the service.
deleteObject :: Bucket -> Object -> Minio ()
deleteObject bucket object = void $
executeRequest $
defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
, riObject = Just object
}
deleteObject bucket object =
void
$ executeRequest
$ defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket,
riObject = Just object
}
-- | Create a new multipart upload.
newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
newMultipartUpload bucket object headers = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPost
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = [("uploads", Nothing)]
, riHeaders = headers
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPost,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = [("uploads", Nothing)],
riHeaders = headers
}
parseNewMultipartUpload $ NC.responseBody resp
-- | PUT a part of an object as part of a multipart upload.
putObjectPart :: Bucket -> Object -> UploadId -> PartNumber -> [HT.Header]
-> Payload -> Minio PartTuple
putObjectPart ::
Bucket ->
Object ->
UploadId ->
PartNumber ->
[HT.Header] ->
Payload ->
Minio PartTuple
putObjectPart bucket object uploadId partNumber headers payload = do
-- transform payload to conduit to enable streaming signature
let payload' = mkStreamingPayload payload
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
, riHeaders = headers
, riPayload = payload'
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params,
riHeaders = headers,
riPayload = payload'
}
let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders
maybe
(throwIO MErrVETagHeaderNotFound)
(return . (partNumber, )) etag
(return . (partNumber,))
etag
where
params = [
("uploadId", Just uploadId)
, ("partNumber", Just $ show partNumber)
params =
[ ("uploadId", Just uploadId),
("partNumber", Just $ show partNumber)
]
srcInfoToHeaders :: SourceInfo -> [HT.Header]
srcInfoToHeaders srcInfo = ("x-amz-copy-source",
toS $ T.concat ["/", srcBucket srcInfo,
"/", srcObject srcInfo]
) : rangeHdr ++ zip names values
srcInfoToHeaders srcInfo =
( "x-amz-copy-source",
toUtf8 $
T.concat
[ "/",
srcBucket srcInfo,
"/",
srcObject srcInfo
]
)
: rangeHdr
++ zip names values
where
names = ["x-amz-copy-source-if-match", "x-amz-copy-source-if-none-match",
"x-amz-copy-source-if-unmodified-since",
"x-amz-copy-source-if-modified-since"]
values = mapMaybe (fmap encodeUtf8 . (srcInfo &))
[srcIfMatch, srcIfNoneMatch,
fmap formatRFC1123 . srcIfUnmodifiedSince,
fmap formatRFC1123 . srcIfModifiedSince]
rangeHdr = maybe [] (\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])])
$ toByteRange <$> srcRange srcInfo
names =
[ "x-amz-copy-source-if-match",
"x-amz-copy-source-if-none-match",
"x-amz-copy-source-if-unmodified-since",
"x-amz-copy-source-if-modified-since"
]
values =
mapMaybe
(fmap encodeUtf8 . (srcInfo &))
[ srcIfMatch,
srcIfNoneMatch,
fmap formatRFC1123 . srcIfUnmodifiedSince,
fmap formatRFC1123 . srcIfModifiedSince
]
rangeHdr =
maybe [] (\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) $
toByteRange <$> srcRange srcInfo
toByteRange :: (Int64, Int64) -> HT.ByteRange
toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y)
-- | Performs server-side copy of an object or part of an object as an
-- upload part of an ongoing multi-part upload.
copyObjectPart :: DestinationInfo -> SourceInfo -> UploadId
-> PartNumber -> [HT.Header] -> Minio (ETag, UTCTime)
copyObjectPart ::
DestinationInfo ->
SourceInfo ->
UploadId ->
PartNumber ->
[HT.Header] ->
Minio (ETag, UTCTime)
copyObjectPart dstInfo srcInfo uploadId partNumber headers = do
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just $ dstBucket dstInfo
, riObject = Just $ dstObject dstInfo
, riQueryParams = mkOptionalParams params
, riHeaders = headers ++ srcInfoToHeaders srcInfo
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just $ dstBucket dstInfo,
riObject = Just $ dstObject dstInfo,
riQueryParams = mkOptionalParams params,
riHeaders = headers ++ srcInfoToHeaders srcInfo
}
parseCopyObjectResponse $ NC.responseBody resp
where
params = [
("uploadId", Just uploadId)
, ("partNumber", Just $ show partNumber)
params =
[ ("uploadId", Just uploadId),
("partNumber", Just $ show partNumber)
]
-- | Performs server-side copy of an object that is upto 5GiB in
-- size. If the object is greater than 5GiB, this function throws the
-- error returned by the server.
copyObjectSingle :: Bucket -> Object -> SourceInfo -> [HT.Header]
-> Minio (ETag, UTCTime)
copyObjectSingle ::
Bucket ->
Object ->
SourceInfo ->
[HT.Header] ->
Minio (ETag, UTCTime)
copyObjectSingle bucket object srcInfo headers = do
-- validate that srcRange is Nothing for this API.
when (isJust $ srcRange srcInfo) $
throwIO MErrVCopyObjSingleNoRangeAccepted
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riHeaders = headers ++ srcInfoToHeaders srcInfo
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riHeaders = headers ++ srcInfoToHeaders srcInfo
}
parseCopyObjectResponse $ NC.responseBody resp
-- | Complete a multipart upload.
completeMultipartUpload :: Bucket -> Object -> UploadId -> [PartTuple]
-> Minio ETag
completeMultipartUpload ::
Bucket ->
Object ->
UploadId ->
[PartTuple] ->
Minio ETag
completeMultipartUpload bucket object uploadId partTuple = do
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPost
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
, riPayload = PayloadBS $
mkCompleteMultipartUploadRequest partTuple
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPost,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params,
riPayload =
PayloadBS $
mkCompleteMultipartUploadRequest partTuple
}
parseCompleteMultipartUploadResponse $ NC.responseBody resp
where
params = [("uploadId", Just uploadId)]
-- | Abort a multipart upload.
abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
abortMultipartUpload bucket object uploadId = void $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
}
abortMultipartUpload bucket object uploadId =
void
$ executeRequest
$ defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params
}
where
params = [("uploadId", Just uploadId)]
-- | List incomplete multipart uploads.
listIncompleteUploads' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text
-> Maybe Text -> Maybe Int -> Minio ListUploadsResult
listIncompleteUploads' ::
Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListUploadsResult
listIncompleteUploads' bucket prefix delimiter keyMarker uploadIdMarker maxKeys = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = params
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = params
}
parseListUploadsResponse $ NC.responseBody resp
where
-- build query params
params = ("uploads", Nothing) : mkOptionalParams
[ ("prefix", prefix)
, ("delimiter", delimiter)
, ("key-marker", keyMarker)
, ("upload-id-marker", uploadIdMarker)
, ("max-uploads", show <$> maxKeys)
]
params =
("uploads", Nothing)
: mkOptionalParams
[ ("prefix", prefix),
("delimiter", delimiter),
("key-marker", keyMarker),
("upload-id-marker", uploadIdMarker),
("max-uploads", show <$> maxKeys)
]
-- | List parts of an ongoing multipart upload.
listIncompleteParts' :: Bucket -> Object -> UploadId -> Maybe Text
-> Maybe Text -> Minio ListPartsResult
listIncompleteParts' ::
Bucket ->
Object ->
UploadId ->
Maybe Text ->
Maybe Text ->
Minio ListPartsResult
listIncompleteParts' bucket object uploadId maxParts partNumMarker = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params
}
parseListPartsResponse $ NC.responseBody resp
where
-- build optional query params
params = [
("uploadId", Just uploadId)
, ("part-number-marker", partNumMarker)
, ("max-parts", maxParts)
params =
[ ("uploadId", Just uploadId),
("part-number-marker", partNumMarker),
("max-parts", maxParts)
]
-- | Get metadata of an object.
headObject :: Bucket -> Object -> [HT.Header] -> Minio ObjectInfo
headObject bucket object reqHeaders = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodHead
, riBucket = Just bucket
, riObject = Just object
, riHeaders = reqHeaders
}
maybe (throwIO MErrVInvalidObjectInfoResponse) return $
parseGetObjectHeaders object $ NC.responseHeaders resp
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodHead,
riBucket = Just bucket,
riObject = Just object,
riHeaders = reqHeaders
}
maybe (throwIO MErrVInvalidObjectInfoResponse) return
$ parseGetObjectHeaders object
$ NC.responseHeaders resp
-- | Query the object store if a given bucket exists.
headBucket :: Bucket -> Minio Bool
headBucket bucket = headBucketEx `catches`
[ Handler handleNoSuchBucket
, Handler handleStatus404
]
headBucket bucket =
headBucketEx
`catches` [ Handler handleNoSuchBucket,
Handler handleStatus404
]
where
handleNoSuchBucket :: ServiceErr -> Minio Bool
handleNoSuchBucket e | e == NoSuchBucket = return False
| otherwise = throwIO e
handleNoSuchBucket e
| e == NoSuchBucket = return False
| otherwise = throwIO e
handleStatus404 :: NC.HttpException -> Minio Bool
handleStatus404 e@(NC.HttpExceptionRequest _ (NC.StatusCodeException res _)) =
if NC.responseStatus res == status404
then return False
else throwIO e
then return False
else throwIO e
handleStatus404 e = throwIO e
headBucketEx = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodHead
, riBucket = Just bucket
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodHead,
riBucket = Just bucket
}
return $ NC.responseStatus resp == HT.ok200
-- | Set the notification configuration on a bucket.
putBucketNotification :: Bucket -> Notification -> Minio ()
putBucketNotification bucket ncfg = do
ns <- asks getSvcNamespace
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riQueryParams = [("notification", Nothing)]
, riPayload = PayloadBS $
mkPutNotificationRequest ns ncfg
}
void $ executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riQueryParams = [("notification", Nothing)],
riPayload =
PayloadBS $
mkPutNotificationRequest ns ncfg
}
-- | Retrieve the notification configuration on a bucket.
getBucketNotification :: Bucket -> Minio Notification
getBucketNotification bucket = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = [("notification", Nothing)]
}
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = [("notification", Nothing)]
}
parseNotification $ NC.responseBody resp
-- | Remove all notifications configured on a bucket.
@ -490,11 +616,14 @@ removeAllBucketNotification = flip putBucketNotification defaultNotification
-- | Fetch the policy if any on a bucket.
getBucketPolicy :: Bucket -> Minio Text
getBucketPolicy bucket = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = [("policy", Nothing)]
}
return $ toS $ NC.responseBody resp
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)]
}
return $ decodeUtf8Lenient $ toStrictBS $ NC.responseBody resp
-- | Set a new policy on a bucket.
-- As a special condition if the policy is empty
@ -506,18 +635,22 @@ setBucketPolicy bucket policy = do
else putBucketPolicy bucket policy
-- | Save a new policy on a bucket.
putBucketPolicy :: Bucket -> Text -> Minio()
putBucketPolicy :: Bucket -> Text -> Minio ()
putBucketPolicy bucket policy = do
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riQueryParams = [("policy", Nothing)]
, riPayload = PayloadBS $ encodeUtf8 policy
}
void $ executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)],
riPayload = PayloadBS $ encodeUtf8 policy
}
-- | Delete any policy set on a bucket.
deleteBucketPolicy :: Bucket -> Minio()
deleteBucketPolicy :: Bucket -> Minio ()
deleteBucketPolicy bucket = do
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
, riQueryParams = [("policy", Nothing)]
}
void $ executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)]
}

View File

@ -15,113 +15,103 @@
--
module Network.Minio.SelectAPI
(
( -- | The `selectObjectContent` allows querying CSV, JSON or Parquet
-- format objects in AWS S3 and in MinIO using SQL Select
-- statements. This allows significant reduction of data transfer
-- from object storage for computation-intensive tasks, as relevant
-- data is filtered close to the storage.
selectObjectContent,
SelectRequest,
selectRequest,
-- | The `selectObjectContent` allows querying CSV, JSON or Parquet
-- format objects in AWS S3 and in MinIO using SQL Select
-- statements. This allows significant reduction of data transfer
-- from object storage for computation-intensive tasks, as relevant
-- data is filtered close to the storage.
-- *** Input Serialization
InputSerialization,
defaultCsvInput,
linesJsonInput,
documentJsonInput,
defaultParquetInput,
setInputCSVProps,
CompressionType (..),
setInputCompressionType,
selectObjectContent
-- *** CSV Format details
, SelectRequest
, selectRequest
-- | CSV format options such as delimiters and quote characters are
-- specified using using the functions below. Options are combined
-- monoidally.
CSVProp,
recordDelimiter,
fieldDelimiter,
quoteCharacter,
quoteEscapeCharacter,
commentCharacter,
allowQuotedRecordDelimiter,
FileHeaderInfo (..),
fileHeaderInfo,
QuoteFields (..),
quoteFields,
-- *** Input Serialization
-- *** Output Serialization
OutputSerialization,
defaultCsvOutput,
defaultJsonOutput,
outputCSVFromProps,
outputJSONFromRecordDelimiter,
, InputSerialization
, defaultCsvInput
, linesJsonInput
, documentJsonInput
, defaultParquetInput
, setInputCSVProps
-- *** Progress messages
setRequestProgressEnabled,
, CompressionType(..)
, setInputCompressionType
-- *** Interpreting Select output
-- *** CSV Format details
-- | The conduit returned by `selectObjectContent` returns values of
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
getPayloadBytes,
EventMessage (..),
Progress (..),
Stats,
)
where
-- | CSV format options such as delimiters and quote characters are
-- specified using using the functions below. Options are combined
-- monoidally.
import Conduit ((.|))
import qualified Conduit as C
import qualified Data.Binary as Bin
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (MonadUnliftIO)
, CSVProp
, recordDelimiter
, fieldDelimiter
, quoteCharacter
, quoteEscapeCharacter
, commentCharacter
, allowQuotedRecordDelimiter
, FileHeaderInfo(..)
, fileHeaderInfo
, QuoteFields(..)
, quoteFields
-- *** Output Serialization
, OutputSerialization
, defaultCsvOutput
, defaultJsonOutput
, outputCSVFromProps
, outputJSONFromRecordDelimiter
-- *** Progress messages
, setRequestProgressEnabled
-- *** Interpreting Select output
-- | The conduit returned by `selectObjectContent` returns values of
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
, getPayloadBytes
, EventMessage(..)
, Progress(..)
, Stats
) where
import Conduit ((.|))
import qualified Conduit as C
import qualified Data.Binary as Bin
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
data EventStreamException = ESEPreludeCRCFailed
| ESEMessageCRCFailed
| ESEUnexpectedEndOfStream
| ESEDecodeFail [Char]
| ESEInvalidHeaderType
| ESEInvalidHeaderValueType
| ESEInvalidMessageType
deriving (Eq, Show)
data EventStreamException
= ESEPreludeCRCFailed
| ESEMessageCRCFailed
| ESEUnexpectedEndOfStream
| ESEDecodeFail [Char]
| ESEInvalidHeaderType
| ESEInvalidHeaderValueType
| ESEInvalidMessageType
deriving (Eq, Show)
instance Exception EventStreamException
@ -131,169 +121,174 @@ chunkSize = 32 * 1024
parseBinary :: Bin.Binary a => ByteString -> IO a
parseBinary b = do
case Bin.decodeOrFail $ LB.fromStrict b of
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
Right (_, _, r) -> return r
case Bin.decodeOrFail $ LB.fromStrict b of
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
Right (_, _, r) -> return r
bytesToHeaderName :: Text -> IO MsgHeaderName
bytesToHeaderName t = case t of
":message-type" -> return MessageType
":event-type" -> return EventType
":content-type" -> return ContentType
":error-code" -> return ErrorCode
":message-type" -> return MessageType
":event-type" -> return EventType
":content-type" -> return ContentType
":error-code" -> return ErrorCode
":error-message" -> return ErrorMessage
_ -> throwIO ESEInvalidHeaderType
_ -> throwIO ESEInvalidHeaderType
parseHeaders :: MonadUnliftIO m
=> Word32 -> C.ConduitM ByteString a m [MessageHeader]
parseHeaders ::
MonadUnliftIO m =>
Word32 ->
C.ConduitM ByteString a m [MessageHeader]
parseHeaders 0 = return []
parseHeaders hdrLen = do
bs1 <- readNBytes 1
n :: Word8 <- liftIO $ parseBinary bs1
bs1 <- readNBytes 1
n :: Word8 <- liftIO $ parseBinary bs1
headerKeyBytes <- readNBytes $ fromIntegral n
let headerKey = decodeUtf8Lenient headerKeyBytes
headerName <- liftIO $ bytesToHeaderName headerKey
headerKeyBytes <- readNBytes $ fromIntegral n
let headerKey = decodeUtf8Lenient headerKeyBytes
headerName <- liftIO $ bytesToHeaderName headerKey
bs2 <- readNBytes 1
headerValueType :: Word8 <- liftIO $ parseBinary bs2
when (headerValueType /= 7) $ throwIO ESEInvalidHeaderValueType
bs2 <- readNBytes 1
headerValueType :: Word8 <- liftIO $ parseBinary bs2
when (headerValueType /= 7) $ throwIO ESEInvalidHeaderValueType
bs3 <- readNBytes 2
vLen :: Word16 <- liftIO $ parseBinary bs3
headerValueBytes <- readNBytes $ fromIntegral vLen
let headerValue = decodeUtf8Lenient headerValueBytes
m = (headerName, headerValue)
k = 1 + fromIntegral n + 1 + 2 + fromIntegral vLen
bs3 <- readNBytes 2
vLen :: Word16 <- liftIO $ parseBinary bs3
headerValueBytes <- readNBytes $ fromIntegral vLen
let headerValue = decodeUtf8Lenient headerValueBytes
m = (headerName, headerValue)
k = 1 + fromIntegral n + 1 + 2 + fromIntegral vLen
ms <- parseHeaders (hdrLen - k)
return (m:ms)
ms <- parseHeaders (hdrLen - k)
return (m : ms)
-- readNBytes returns N bytes read from the string and throws an
-- exception if N bytes are not present on the stream.
readNBytes :: MonadUnliftIO m => Int -> C.ConduitM ByteString a m ByteString
readNBytes n = do
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
if B.length b /= n
then throwIO ESEUnexpectedEndOfStream
else return b
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
if B.length b /= n
then throwIO ESEUnexpectedEndOfStream
else return b
crcCheck :: MonadUnliftIO m
=> C.ConduitM ByteString ByteString m ()
crcCheck ::
MonadUnliftIO m =>
C.ConduitM ByteString ByteString m ()
crcCheck = do
b <- readNBytes 12
n :: Word32 <- liftIO $ parseBinary $ B.take 4 b
preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 b
when (crc32 (B.take 8 b) /= preludeCRC) $
throwIO ESEPreludeCRCFailed
b <- readNBytes 12
n :: Word32 <- liftIO $ parseBinary $ B.take 4 b
preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 b
when (crc32 (B.take 8 b) /= preludeCRC) $
throwIO ESEPreludeCRCFailed
-- we do not yield the checksum
C.yield $ B.take 8 b
-- we do not yield the checksum
C.yield $ B.take 8 b
-- 12 bytes have been read off the current message. Now read the
-- next (n-12)-4 bytes and accumulate the checksum, and yield it.
let startCrc = crc32 b
finalCrc <- accumulateYield (fromIntegral n-16) startCrc
-- 12 bytes have been read off the current message. Now read the
-- next (n-12)-4 bytes and accumulate the checksum, and yield it.
let startCrc = crc32 b
finalCrc <- accumulateYield (fromIntegral n -16) startCrc
bs <- readNBytes 4
expectedCrc :: Word32 <- liftIO $ parseBinary bs
bs <- readNBytes 4
expectedCrc :: Word32 <- liftIO $ parseBinary bs
when (finalCrc /= expectedCrc) $
throwIO ESEMessageCRCFailed
when (finalCrc /= expectedCrc) $
throwIO ESEMessageCRCFailed
-- we unconditionally recurse - downstream figures out when to
-- quit reading the stream
crcCheck
-- we unconditionally recurse - downstream figures out when to
-- quit reading the stream
crcCheck
where
accumulateYield n checkSum = do
let toRead = min n chunkSize
b <- readNBytes toRead
let c' = crc32Update checkSum b
n' = n - B.length b
C.yield b
if n' > 0
then accumulateYield n' c'
else return c'
let toRead = min n chunkSize
b <- readNBytes toRead
let c' = crc32Update checkSum b
n' = n - B.length b
C.yield b
if n' > 0
then accumulateYield n' c'
else return c'
handleMessage :: MonadUnliftIO m => C.ConduitT ByteString EventMessage m ()
handleMessage = do
b1 <- readNBytes 4
msgLen :: Word32 <- liftIO $ parseBinary b1
b1 <- readNBytes 4
msgLen :: Word32 <- liftIO $ parseBinary b1
b2 <- readNBytes 4
hdrLen :: Word32 <- liftIO $ parseBinary b2
b2 <- readNBytes 4
hdrLen :: Word32 <- liftIO $ parseBinary b2
hs <- parseHeaders hdrLen
hs <- parseHeaders hdrLen
let payloadLen = msgLen - hdrLen - 16
getHdrVal h = fmap snd . headMay . filter ((h ==) . fst)
eventHdrValue = getHdrVal EventType hs
msgHdrValue = getHdrVal MessageType hs
errCode = getHdrVal ErrorCode hs
errMsg = getHdrVal ErrorMessage hs
case msgHdrValue of
Just "event" -> do
case eventHdrValue of
Just "Records" -> passThrough $ fromIntegral payloadLen
Just "Cont" -> return ()
Just "Progress" -> do
bs <- readNBytes $ fromIntegral payloadLen
progress <- parseSelectProgress bs
C.yield $ ProgressEventMessage progress
Just "Stats" -> do
bs <- readNBytes $ fromIntegral payloadLen
stats <- parseSelectProgress bs
C.yield $ StatsEventMessage stats
Just "End" -> return ()
_ -> throwIO ESEInvalidMessageType
when (eventHdrValue /= Just "End") handleMessage
Just "error" -> do
let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
_ -> throwIO ESEInvalidMessageType
let payloadLen = msgLen - hdrLen - 16
getHdrVal h = fmap snd . headMay . filter ((h ==) . fst)
eventHdrValue = getHdrVal EventType hs
msgHdrValue = getHdrVal MessageType hs
errCode = getHdrVal ErrorCode hs
errMsg = getHdrVal ErrorMessage hs
case msgHdrValue of
Just "event" -> do
case eventHdrValue of
Just "Records" -> passThrough $ fromIntegral payloadLen
Just "Cont" -> return ()
Just "Progress" -> do
bs <- readNBytes $ fromIntegral payloadLen
progress <- parseSelectProgress bs
C.yield $ ProgressEventMessage progress
Just "Stats" -> do
bs <- readNBytes $ fromIntegral payloadLen
stats <- parseSelectProgress bs
C.yield $ StatsEventMessage stats
Just "End" -> return ()
_ -> throwIO ESEInvalidMessageType
when (eventHdrValue /= Just "End") handleMessage
Just "error" -> do
let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
_ -> throwIO ESEInvalidMessageType
where
passThrough 0 = return ()
passThrough n = do
let c = min n chunkSize
b <- readNBytes c
C.yield $ RecordPayloadEventMessage b
passThrough $ n - B.length b
let c = min n chunkSize
b <- readNBytes c
C.yield $ RecordPayloadEventMessage b
passThrough $ n - B.length b
selectProtoConduit :: MonadUnliftIO m
=> C.ConduitT ByteString EventMessage m ()
selectProtoConduit ::
MonadUnliftIO m =>
C.ConduitT ByteString EventMessage m ()
selectProtoConduit = crcCheck .| handleMessage
-- | selectObjectContent calls the SelectRequest on the given
-- object. It returns a Conduit of event messages that can be consumed
-- by the client.
selectObjectContent :: Bucket -> Object -> SelectRequest
-> Minio (C.ConduitT () EventMessage Minio ())
selectObjectContent ::
Bucket ->
Object ->
SelectRequest ->
Minio (C.ConduitT () EventMessage Minio ())
selectObjectContent b o r = do
let reqInfo = defaultS3ReqInfo { riMethod = HT.methodPost
, riBucket = Just b
, riObject = Just o
, riPayload = PayloadBS $ mkSelectRequest r
, riNeedsLocation = False
, riQueryParams = [("select", Nothing), ("select-type", Just "2")]
}
--print $ mkSelectRequest r
resp <- mkStreamRequest reqInfo
return $ NC.responseBody resp .| selectProtoConduit
let reqInfo =
defaultS3ReqInfo
{ riMethod = HT.methodPost,
riBucket = Just b,
riObject = Just o,
riPayload = PayloadBS $ mkSelectRequest r,
riNeedsLocation = False,
riQueryParams = [("select", Nothing), ("select-type", Just "2")]
}
--print $ mkSelectRequest r
resp <- mkStreamRequest reqInfo
return $ NC.responseBody resp .| selectProtoConduit
-- | A helper conduit that returns only the record payload bytes.
getPayloadBytes :: MonadIO m => C.ConduitT EventMessage ByteString m ()
getPayloadBytes = do
evM <- C.await
case evM of
Just v -> do
case v of
RecordPayloadEventMessage b -> C.yield b
RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m
_ -> return ()
getPayloadBytes
Nothing -> return ()
evM <- C.await
case evM of
Just v -> do
case v of
RecordPayloadEventMessage b -> C.yield b
RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m
_ -> return ()
getPayloadBytes
Nothing -> return ()

View File

@ -13,57 +13,62 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE BangPatterns #-}
module Network.Minio.Sign.V4 where
import qualified Conduit as C
import qualified Data.ByteString as B
import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Char8 as B8
import Data.CaseInsensitive (mk)
import qualified Data.CaseInsensitive as CI
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import qualified Data.Time as Time
import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (Header, parseQuery)
import qualified Network.HTTP.Types as H
import Text.Printf (printf)
import Lib.Prelude
import Network.Minio.Data.ByteString
import Network.Minio.Data.Crypto
import Network.Minio.Data.Time
import Network.Minio.Errors
import qualified Conduit as C
import qualified Data.ByteString as B
import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Char8 as B8
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk)
import qualified Data.CaseInsensitive as CI
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import qualified Data.Time as Time
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (Header, parseQuery)
import qualified Network.HTTP.Types as H
import Network.Minio.Data.ByteString
import Network.Minio.Data.Crypto
import Network.Minio.Data.Time
import Network.Minio.Errors
import Text.Printf (printf)
-- these headers are not included in the string to sign when signing a
-- request
ignoredHeaders :: Set.HashSet ByteString
ignoredHeaders = Set.fromList $ map CI.foldedCase
[ H.hAuthorization
, H.hContentType
, H.hUserAgent
]
ignoredHeaders =
Set.fromList $
map
CI.foldedCase
[ H.hAuthorization,
H.hContentType,
H.hUserAgent
]
data SignV4Data = SignV4Data {
sv4SignTime :: UTCTime
, sv4Scope :: ByteString
, sv4CanonicalRequest :: ByteString
, sv4HeadersToSign :: [(ByteString, ByteString)]
, sv4Output :: [(ByteString, ByteString)]
, sv4StringToSign :: ByteString
, sv4SigningKey :: ByteString
} deriving (Show)
data SignV4Data = SignV4Data
{ sv4SignTime :: UTCTime,
sv4Scope :: ByteString,
sv4CanonicalRequest :: ByteString,
sv4HeadersToSign :: [(ByteString, ByteString)],
sv4Output :: [(ByteString, ByteString)],
sv4StringToSign :: ByteString,
sv4SigningKey :: ByteString
}
deriving (Show)
data SignParams = SignParams {
spAccessKey :: Text
, spSecretKey :: Text
, spTimeStamp :: UTCTime
, spRegion :: Maybe Text
, spExpirySecs :: Maybe Int
, spPayloadHash :: Maybe ByteString
} deriving (Show)
data SignParams = SignParams
{ spAccessKey :: Text,
spSecretKey :: Text,
spTimeStamp :: UTCTime,
spRegion :: Maybe Text,
spExpirySecs :: Maybe Int,
spPayloadHash :: Maybe ByteString
}
deriving (Show)
debugPrintSignV4Data :: SignV4Data -> IO ()
debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
@ -79,22 +84,23 @@ debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
B8.putStrLn "END of SignV4Data ========="
where
printBytes b = do
mapM_ (\x -> B.putStr $ B.concat [show x, " "]) $ B.unpack b
mapM_ (\x -> B.putStr $ B.singleton x <> " ") $ B.unpack b
B8.putStrLn ""
mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header
mkAuthHeader accessKey scope signedHeaderKeys sign =
let authValue = B.concat
[ "AWS4-HMAC-SHA256 Credential="
, toS accessKey
, "/"
, scope
, ", SignedHeaders="
, signedHeaderKeys
, ", Signature="
, sign
]
in (H.hAuthorization, authValue)
let authValue =
B.concat
[ "AWS4-HMAC-SHA256 Credential=",
toUtf8 accessKey,
"/",
scope,
", SignedHeaders=",
signedHeaderKeys,
", Signature=",
sign
]
in (H.hAuthorization, authValue)
-- | Given SignParams and request details, including request method,
-- request path, headers, query params and payload hash, generates an
-- updated set of headers or query parameters carrying the AWS v4
-- signature needed to authenticate the request.
--
-- When 'spExpirySecs' is set, the request is being presigned and the
-- auth info is returned as query parameters; otherwise it is returned
-- as headers (with header names folded to lower case).
signV4 :: SignParams -> NC.Request -> [(ByteString, ByteString)]
signV4 !sp !req =
  let region = fromMaybe "" $ spRegion sp
      ts = spTimeStamp sp
      scope = mkScope ts region
      accessKey = toUtf8 $ spAccessKey sp
      secretKey = toUtf8 $ spSecretKey sp
      expiry = spExpirySecs sp
      sha256Hdr =
        ( "x-amz-content-sha256",
          fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
        )
      -- headers to be added to the request
      datePair = ("X-Amz-Date", awsTimeFormatBS ts)
      computedHeaders =
        NC.requestHeaders req
          ++ if isJust expiry
            then []
            else map (\(x, y) -> (mk x, y)) [datePair, sha256Hdr]
      headersToSign = getHeadersToSign computedHeaders
      signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
      -- query-parameters to be added before signing for presigned URLs
      -- (i.e. when `isJust expiry`)
      authQP =
        [ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
          ("X-Amz-Credential", B.concat [accessKey, "/", scope]),
          datePair,
          ("X-Amz-Expires", maybe "" showBS expiry),
          ("X-Amz-SignedHeaders", signedHeaderKeys)
        ]
      finalQP =
        parseQuery (NC.queryString req)
          ++ if isJust expiry
            then (fmap . fmap) Just authQP
            else []
      -- 1. compute canonical request
      canonicalRequest =
        mkCanonicalRequest
          False
          sp
          (NC.setQueryString finalQP req)
          headersToSign
      -- 2. compute string to sign
      stringToSign = mkStringToSign ts scope canonicalRequest
      -- 3.1 compute signing key
      signingKey = mkSigningKey ts region secretKey
      -- 3.2 compute signature
      signature = computeSignature stringToSign signingKey
      -- 4. compute auth header
      authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
      -- finally compute output pairs
      output =
        if isJust expiry
          then ("X-Amz-Signature", signature) : authQP
          else
            [ (\(x, y) -> (CI.foldedCase x, y)) authHeader,
              datePair,
              sha256Hdr
            ]
   in output
-- | Build the v4 credential scope string:
-- @\<yyyymmdd\>/\<region\>/s3/aws4_request@.
mkScope :: UTCTime -> Text -> ByteString
mkScope ts region =
  B.intercalate
    "/"
    [ toUtf8 $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" ts,
      toUtf8 region,
      "s3",
      "aws4_request"
    ]
-- | Select the request headers that participate in signing: drop the
-- ignored header names and normalize the rest to
-- (lower-cased-name, whitespace-stripped-value) pairs.
getHeadersToSign :: [Header] -> [(ByteString, ByteString)]
getHeadersToSign !h =
  filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
    map (\(x, y) -> (CI.foldedCase x, stripBS y)) h
-- | Build the AWS v4 canonical request string from the request method,
-- URI-encoded path, sorted query string, canonical headers, signed
-- header list and payload hash.  The Bool selects the streaming
-- payload placeholder instead of the payload hash.
mkCanonicalRequest ::
  Bool ->
  SignParams ->
  NC.Request ->
  [(ByteString, ByteString)] ->
  ByteString
mkCanonicalRequest !isStreaming !sp !req !headersForSign =
  let canonicalQueryString =
        B.intercalate "&"
          $ map (\(x, y) -> B.concat [x, "=", y])
          $ sort
          $ map
            ( \(x, y) ->
                (uriEncode True x, maybe "" (uriEncode True) y)
            )
          $ parseQuery (NC.queryString req)
      sortedHeaders = sort headersForSign
      canonicalHeaders =
        B.concat $
          map (\(x, y) -> B.concat [x, ":", y, "\n"]) sortedHeaders
      signedHeaders = B.intercalate ";" $ map fst sortedHeaders
      payloadHashStr =
        if isStreaming
          then "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
          else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
   in B.intercalate
        "\n"
        [ NC.method req,
          uriEncode False $ NC.path req,
          canonicalQueryString,
          canonicalHeaders,
          signedHeaders,
          payloadHashStr
        ]
-- | Build the v4 "string to sign": algorithm tag, timestamp, scope and
-- the SHA-256 hash of the canonical request, newline-separated.
mkStringToSign :: UTCTime -> ByteString -> ByteString -> ByteString
mkStringToSign ts !scope !canonicalRequest =
  B.intercalate
    "\n"
    [ "AWS4-HMAC-SHA256",
      awsTimeFormatBS ts,
      scope,
      hashSHA256 canonicalRequest
    ]
-- | Derive the v4 signing key: chained HMAC-SHA256 over the date, the
-- region, the service name ("s3") and the "aws4_request" terminator,
-- seeded with "AWS4" ++ secret key.
mkSigningKey :: UTCTime -> Text -> ByteString -> ByteString
mkSigningKey ts region !secretKey =
  hmacSHA256RawBS "aws4_request"
    . hmacSHA256RawBS "s3"
    . hmacSHA256RawBS (toUtf8 region)
    . hmacSHA256RawBS (awsDateFormatBS ts)
    $ B.concat ["AWS4", secretKey]
-- | Final signature value: hex-encoded HMAC-SHA256 of the
-- string-to-sign under the derived signing key.
computeSignature :: ByteString -> ByteString -> ByteString
computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
@ -233,18 +249,19 @@ computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
-- | Takes a validated Post Policy JSON bytestring, the signing time,
-- and ConnInfo and returns form-data for the POST upload containing
-- just the signature and the encoded post-policy.
signV4PostPolicy ::
  ByteString ->
  SignParams ->
  Map.HashMap Text ByteString
signV4PostPolicy !postPolicyJSON !sp =
  let -- the base64-encoded policy doubles as the string to sign
      stringToSign = Base64.encode postPolicyJSON
      region = fromMaybe "" $ spRegion sp
      signingKey = mkSigningKey (spTimeStamp sp) region $ toUtf8 $ spSecretKey sp
      signature = computeSignature stringToSign signingKey
   in Map.fromList
        [ ("x-amz-signature", signature),
          ("policy", stringToSign)
        ]
-- | Size (64 KiB) of each data chunk used by the v4 streaming
-- ("aws-chunked") payload encoding.
chunkSizeConstant :: Int
chunkSizeConstant = 64 * 1024
@ -252,140 +269,141 @@ chunkSizeConstant = 64 * 1024
-- base16Len computes the number of bytes required to represent @n (> 0)@ in
-- hexadecimal.  For @n == 0@ it returns 0 (callers treat a zero-length
-- chunk specially).
base16Len :: Integral a => a -> Int
base16Len n
  | n == 0 = 0
  | otherwise = 1 + base16Len (n `div` 16)
-- | Compute the total length of the "aws-chunked" encoded stream for a
-- payload of the given length: full 64 KiB chunks, an optional short
-- last chunk, and the final zero-length signature chunk.
signedStreamLength :: Int64 -> Int64
signedStreamLength dataLen =
  let chunkSzInt = fromIntegral chunkSizeConstant
      (numChunks, lastChunkLen) = quotRem dataLen chunkSzInt
      -- Structure of a chunk:
      -- string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
      encodedChunkLen csz = fromIntegral (base16Len csz) + 17 + 64 + 2 + csz + 2
      fullChunkSize = encodedChunkLen chunkSzInt
      lastChunkSize = bool 0 (encodedChunkLen lastChunkLen) $ lastChunkLen > 0
      -- the terminating chunk: "0" + ";chunk-signature=" + sig + CRLF CRLF
      finalChunkSize = 1 + 17 + 64 + 2 + 2
   in numChunks * fullChunkSize + lastChunkSize + finalChunkSize
-- | Sign a streaming upload request (v4 chunked signing).  Computes
-- the seed signature over the request and streaming headers, and
-- returns a function that, given the payload source conduit, yields
-- the final request whose body is the chunk-signed stream.
signV4Stream ::
  Int64 ->
  SignParams ->
  NC.Request ->
  (C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request)
signV4Stream !payloadLength !sp !req =
  let ts = spTimeStamp sp
      -- append "aws-chunked" to any existing content-encoding
      addContentEncoding hs =
        let ceMay = headMay $ filter (\(x, _) -> x == "content-encoding") hs
         in case ceMay of
              Nothing -> ("content-encoding", "aws-chunked") : hs
              Just (_, ce) ->
                ("content-encoding", ce <> ",aws-chunked")
                  : filter (\(x, _) -> x /= "content-encoding") hs
      -- headers to be added to the request
      datePair = ("X-Amz-Date", awsTimeFormatBS ts)
      computedHeaders =
        addContentEncoding $
          datePair : NC.requestHeaders req
      -- headers specific to streaming signature
      signedContentLength = signedStreamLength payloadLength
      streamingHeaders :: [Header]
      streamingHeaders =
        [ ("x-amz-decoded-content-length", showBS payloadLength),
          ("content-length", showBS signedContentLength),
          ("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
        ]
      headersToSign = getHeadersToSign $ computedHeaders ++ streamingHeaders
      signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
      finalQP = parseQuery (NC.queryString req)
      -- 1. Compute Seed Signature
      -- 1.1 Canonical Request
      canonicalReq =
        mkCanonicalRequest
          True
          sp
          (NC.setQueryString finalQP req)
          headersToSign
      region = fromMaybe "" $ spRegion sp
      scope = mkScope ts region
      accessKey = spAccessKey sp
      secretKey = spSecretKey sp
      -- 1.2 String toSign
      stringToSign = mkStringToSign ts scope canonicalReq
      -- 1.3 Compute signature
      -- 1.3.1 compute signing key
      signingKey = mkSigningKey ts region $ toUtf8 secretKey
      -- 1.3.2 Compute signature
      seedSignature = computeSignature stringToSign signingKey
      -- 1.3.3 Compute Auth Header
      authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
      -- 1.4 Updated headers for the request
      finalReqHeaders = authHeader : (computedHeaders ++ streamingHeaders)
      toHexStr n = B8.pack $ printf "%x" n
      (numParts, lastPSize) = payloadLength `quotRem` fromIntegral chunkSizeConstant
      -- Function to compute string to sign for each chunk.
      chunkStrToSign prevSign currChunkHash =
        B.intercalate
          "\n"
          [ "AWS4-HMAC-SHA256-PAYLOAD",
            awsTimeFormatBS ts,
            scope,
            prevSign,
            hashSHA256 "",
            currChunkHash
          ]
      -- Read n bytes from upstream and return a strict bytestring;
      -- throws if upstream ends early.
      mustTakeN n = do
        bs <- LB.toStrict <$> (C.takeCE n C..| C.sinkLazy)
        when (B.length bs /= n) $
          throwIO MErrVStreamingBodyUnexpectedEOF
        return bs
      -- Each chunk's signature is chained off the previous one,
      -- starting from the seed signature.
      signerConduit n lps prevSign =
        -- First case encodes a full chunk of length
        -- 'chunkSizeConstant'.
        if
          | n > 0 -> do
              bs <- mustTakeN chunkSizeConstant
              let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
                  nextSign = computeSignature strToSign signingKey
                  chunkBS =
                    toHexStr chunkSizeConstant
                      <> ";chunk-signature="
                      <> nextSign
                      <> "\r\n"
                      <> bs
                      <> "\r\n"
              C.yield chunkBS
              signerConduit (n - 1) lps nextSign
          -- Second case encodes the last chunk which is smaller than
          -- 'chunkSizeConstant'
          | lps > 0 -> do
              bs <- mustTakeN $ fromIntegral lps
              let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
                  nextSign = computeSignature strToSign signingKey
                  chunkBS =
                    toHexStr lps
                      <> ";chunk-signature="
                      <> nextSign
                      <> "\r\n"
                      <> bs
                      <> "\r\n"
              C.yield chunkBS
              signerConduit 0 0 nextSign
          -- Last case encodes the final signature chunk that has no
          -- data.
          | otherwise -> do
              let strToSign = chunkStrToSign prevSign (hashSHA256 "")
                  nextSign = computeSignature strToSign signingKey
                  lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n"
              C.yield lastChunkBS
   in \src ->
        req
          { NC.requestHeaders = finalReqHeaders,
            NC.requestBody =
              NC.requestBodySource signedContentLength $
                src C..| signerConduit numParts lastPSize seedSignature
          }

View File

@ -16,37 +16,40 @@
module Network.Minio.Utils where
import qualified Conduit as C
import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit.Binary as CB
import qualified Data.HashMap.Strict as H
import qualified Data.List as List
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time (defaultTimeLocale, parseTimeM,
rfc822DateFormat)
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr
import qualified System.IO as IO
import qualified UnliftIO as U
import qualified UnliftIO.Async as A
import qualified UnliftIO.MVar as UM
import qualified Conduit as C
import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit.Binary as CB
import qualified Data.HashMap.Strict as H
import qualified Data.List as List
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time
( defaultTimeLocale,
parseTimeM,
rfc822DateFormat,
)
import Lib.Prelude
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr
import Network.Minio.Data
import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse)
import qualified System.IO as IO
import qualified UnliftIO as U
import qualified UnliftIO.Async as A
import qualified UnliftIO.MVar as UM
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse)
-- | Open the given file for reading inside the resource monad,
-- returning the release key and the handle.  Any IOException raised
-- while opening is rethrown.
allocateReadFile ::
  (MonadUnliftIO m, R.MonadResource m) =>
  FilePath ->
  m (R.ReleaseKey, Handle)
allocateReadFile fp = do
  (rk, hdlE) <- R.allocate (openReadFile fp) cleanup
  either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE
@ -56,30 +59,37 @@ allocateReadFile fp = do
-- | Queries the file size from the handle. Catches any file operation
-- exceptions and returns Nothing instead.
getFileSize ::
  (MonadUnliftIO m, R.MonadResource m) =>
  Handle ->
  m (Maybe Int64)
getFileSize h = do
  resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
  case resE of
    Left (_ :: IOException) -> return Nothing
    Right s -> return $ Just s
-- | Queries if handle is seekable. Catches any file operation
-- exceptions and returns False instead.
isHandleSeekable ::
  (R.MonadResource m, MonadUnliftIO m) =>
  Handle ->
  m Bool
isHandleSeekable h = do
  resE <- liftIO $ try $ IO.hIsSeekable h
  case resE of
    Left (_ :: IOException) -> return False
    Right v -> return v
-- | Helper function that opens a handle to the filepath and performs
-- the given action on it. Exceptions of type MError are caught and
-- returned - both during file handle allocation and when the action
-- is run.
withNewHandle :: (MonadUnliftIO m, R.MonadResource m)
=> FilePath -> (Handle -> m a) -> m (Either IOException a)
withNewHandle ::
(MonadUnliftIO m, R.MonadResource m) =>
FilePath ->
(Handle -> m a) ->
m (Either IOException a)
withNewHandle fp fileAction = do
-- opening a handle can throw MError exception.
handleE <- try $ allocateReadFile fp
@ -103,24 +113,27 @@ getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs
-- | Decode response headers to (name, value) text pairs, keeping the
-- original header-name casing and stripping whitespace from values.
getMetadata :: [HT.Header] -> [(Text, Text)]
getMetadata =
  map (\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y))
-- | If the header is a user-metadata header (per
-- 'userMetadataHeaderNameMaybe'), return the extracted name paired
-- with its value; Nothing otherwise.
toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text)
toMaybeMetadataHeader (k, v) =
  (,v) <$> userMetadataHeaderNameMaybe k
-- | Build a map of only those headers that are NOT user metadata
-- (i.e. not matched by 'userMetadataHeaderNameMaybe').
getNonUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getNonUserMetadataMap =
  H.fromList
    . filter
      ( isNothing
          . userMetadataHeaderNameMaybe
          . fst
      )
-- | This function collects all headers starting with `x-amz-meta-`
-- and strips off this prefix, and returns a map.
getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getUserMetadataMap =
  H.fromList
    . mapMaybe toMaybeMetadataHeader
getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime
getLastModifiedHeader hs = do
@ -132,17 +145,19 @@ getContentLength hs = do
nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs
fst <$> hush (decimal nbs)
-- | Decode a ByteString as UTF-8 text, substituting the Unicode
-- replacement character for invalid bytes instead of throwing.
decodeUtf8Lenient :: ByteString -> Text
decodeUtf8Lenient bytes = decodeUtf8With lenientDecode bytes
-- | True exactly for HTTP 2xx status codes.
isSuccessStatus :: HT.Status -> Bool
isSuccessStatus sts =
  let s = HT.statusCode sts
   in (s >= 200 && s < 300)
httpLbs :: MonadIO m
=> NC.Request -> NC.Manager
-> m (NC.Response LByteString)
httpLbs ::
MonadIO m =>
NC.Request ->
NC.Manager ->
m (NC.Response LByteString)
httpLbs req mgr = do
respE <- liftIO $ tryHttpEx $ NC.httpLbs req mgr
resp <- either throwIO return respE
@ -154,21 +169,25 @@ httpLbs req mgr = do
Just "application/json" -> do
sErr <- parseErrResponseJSON $ NC.responseBody resp
throwIO sErr
_ -> throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) (show resp)
_ ->
throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) (showBS resp)
return resp
where
tryHttpEx :: IO (NC.Response LByteString)
-> IO (Either NC.HttpException (NC.Response LByteString))
tryHttpEx ::
IO (NC.Response LByteString) ->
IO (Either NC.HttpException (NC.Response LByteString))
tryHttpEx = try
contentTypeMay resp = lookupHeader Hdr.hContentType $
NC.responseHeaders resp
contentTypeMay resp =
lookupHeader Hdr.hContentType $
NC.responseHeaders resp
http :: (MonadUnliftIO m, R.MonadResource m)
=> NC.Request -> NC.Manager
-> m (Response (C.ConduitT () ByteString m ()))
http ::
(MonadUnliftIO m, R.MonadResource m) =>
NC.Request ->
NC.Manager ->
m (Response (C.ConduitT () ByteString m ()))
http req mgr = do
respE <- tryHttpEx $ NC.http req mgr
resp <- either throwIO return respE
@ -178,25 +197,30 @@ http req mgr = do
respBody <- C.connect (NC.responseBody resp) CB.sinkLbs
sErr <- parseErrResponse respBody
throwIO sErr
_ -> do
content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) content
NC.StatusCodeException (void resp) content
return resp
where
tryHttpEx :: (MonadUnliftIO m) => m a
-> m (Either NC.HttpException a)
tryHttpEx ::
(MonadUnliftIO m) =>
m a ->
m (Either NC.HttpException a)
tryHttpEx = try
contentTypeMay resp = lookupHeader Hdr.hContentType $
NC.responseHeaders resp
contentTypeMay resp =
lookupHeader Hdr.hContentType $
NC.responseHeaders resp
-- Similar to mapConcurrently but limits the number of threads that
-- can run using a quantity semaphore.
limitedMapConcurrently :: MonadUnliftIO m
=> Int -> (t -> m a) -> [t] -> m [a]
limitedMapConcurrently ::
MonadUnliftIO m =>
Int ->
(t -> m a) ->
[t] ->
m [a]
limitedMapConcurrently 0 _ _ = return []
limitedMapConcurrently count act args = do
t' <- U.newTVarIO count
@ -205,17 +229,15 @@ limitedMapConcurrently count act args = do
where
wThread t arg =
U.bracket_ (waitSem t) (signalSem t) $ act arg
-- quantity semaphore implementation using TVar
waitSem t = U.atomically $ do
v <- U.readTVar t
if v > 0
then U.writeTVar t (v-1)
else U.retrySTM
then U.writeTVar t (v -1)
else U.retrySTM
signalSem t = U.atomically $ do
v <- U.readTVar t
U.writeTVar t (v+1)
U.writeTVar t (v + 1)
-- helper function to 'drop' empty optional parameter.
mkQuery :: Text -> Maybe Text -> Maybe (Text, Text)
mkQuery k mv = (k,) <$> mv
-- helper function to build query parameters that are optional.
-- don't use it with mandatory query params with empty value.
mkOptionalParams :: [(Text, Maybe Text)] -> HT.Query
mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params
-- | Conduit that rechunks bytestrings into the given chunk
-- lengths. Stops after given chunk lengths are yielded. Stops early
-- (yielding any short final chunk) if upstream ends before all chunk
-- lengths are received. Does not throw any errors.
chunkBSConduit :: (Monad m) => [Int] -> C.ConduitM ByteString ByteString m ()
chunkBSConduit [] = return ()
chunkBSConduit (s : ss) = do
  bs <- LB.toStrict <$> (C.takeCE s C..| C.sinkLazy)
  if
    | B.length bs == s -> C.yield bs >> chunkBSConduit ss
    | B.length bs > 0 -> C.yield bs
    | otherwise -> return ()
-- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size = uncurry (List.zip3 [1..]) $
List.unzip $ loop 0 size
selectPartSizes size =
uncurry (List.zip3 [1 ..])
$ List.unzip
$ loop 0 size
where
ceil :: Double -> Int64
ceil = ceiling
partSize = max minPartSize (ceil $ fromIntegral size /
fromIntegral maxMultipartParts)
partSize =
max
minPartSize
( ceil $
fromIntegral size
/ fromIntegral maxMultipartParts
)
m = fromIntegral partSize
loop st sz
| st > sz = []
@ -257,16 +286,16 @@ selectPartSizes size = uncurry (List.zip3 [1..]) $
-- | Look up the cached region for the given bucket, if any.
lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do
  rMVar <- asks mcRegionMap
  rMap <- UM.readMVar rMVar
  return $ H.lookup b rMap
-- | Record the region for the given bucket in the cache.
addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do
  rMVar <- asks mcRegionMap
  UM.modifyMVar_ rMVar $ return . H.insert b region
-- | Drop the cached region entry for the given bucket.
deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do
  rMVar <- asks mcRegionMap
  UM.modifyMVar_ rMVar $ return . H.delete b

View File

@ -15,63 +15,80 @@
--
module Network.Minio.XmlGenerator
  ( mkCreateBucketConfig,
    mkCompleteMultipartUploadRequest,
    mkPutNotificationRequest,
    mkSelectRequest,
  )
where
import qualified Data.ByteString.Lazy as LBS
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import Text.XML
import Lib.Prelude
import Network.Minio.Data
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import Lib.Prelude
import Network.Minio.Data
import Text.XML
-- | Create a bucketConfig request body XML
mkCreateBucketConfig :: Text -> Region -> ByteString
mkCreateBucketConfig ns location = LBS.toStrict $ renderLBS def bucketConfig
  where
    -- elements are namespace-qualified with the given S3 namespace
    s3Element n = Element (s3Name ns n) mempty
    root =
      s3Element
        "CreateBucketConfiguration"
        [ NodeElement $
            s3Element
              "LocationConstraint"
              [NodeContent location]
        ]
    bucketConfig = Document (Prologue [] Nothing []) root []
-- | Create a completeMultipartUpload request body XML
mkCompleteMultipartUploadRequest :: [PartTuple] -> ByteString
mkCompleteMultipartUploadRequest partInfo =
  LBS.toStrict $ renderLBS def cmur
  where
    root =
      Element "CompleteMultipartUpload" mempty $
        map (NodeElement . mkPart) partInfo
    mkPart (n, etag) =
      Element
        "Part"
        mempty
        [ NodeElement $
            Element
              "PartNumber"
              mempty
              [NodeContent $ T.pack $ show n],
          NodeElement $
            Element
              "ETag"
              mempty
              [NodeContent etag]
        ]
    cmur = Document (Prologue [] Nothing []) root []
-- Simplified XML representation without element attributes.
data XNode
  = XNode Text [XNode]
  | XLeaf Text Text
  deriving (Eq, Show)
-- | Render an 'XNode' tree as an XML document, qualifying element
-- names with the given S3 namespace.
toXML :: Text -> XNode -> ByteString
toXML ns node =
  LBS.toStrict $ renderLBS def $
    Document (Prologue [] Nothing []) (xmlNode node) []
  where
    xmlNode :: XNode -> Element
    xmlNode (XNode name nodes) =
      Element (s3Name ns name) mempty $
        map (NodeElement . xmlNode) nodes
    xmlNode (XLeaf name content) =
      Element
        (s3Name ns name)
        mempty
        [NodeContent content]
-- | Types that can be converted to the simplified 'XNode' XML
-- representation.
class ToXNode a where
  toXNode :: a -> XNode
@ -80,24 +97,29 @@ instance ToXNode Event where
toXNode = XLeaf "Event" . show
-- Render the three notification configuration lists (queue, topic,
-- cloud-function) under one NotificationConfiguration element.
instance ToXNode Notification where
  toXNode (Notification qc tc lc) =
    XNode "NotificationConfiguration" $
      map (toXNodesWithArnName "QueueConfiguration" "Queue") qc
        ++ map (toXNodesWithArnName "TopicConfiguration" "Topic") tc
        ++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
-- | Render one notification configuration item using the given outer
-- element name and ARN element name.
toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode
toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) =
  XNode eltName $
    [XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events
      ++ [toXNode fRule]
-- Render a Filter's rules under Filter > S3Key.
instance ToXNode Filter where
  toXNode (Filter (FilterKey (FilterRules rules))) =
    XNode "Filter" [XNode "S3Key" (map getFRXNode rules)]
-- | Render a filter rule as a FilterRule element with Name and Value
-- children.
getFRXNode :: FilterRule -> XNode
getFRXNode (FilterRule n v) =
  XNode
    "FilterRule"
    [ XLeaf "Name" n,
      XLeaf "Value" v
    ]
-- | Serialize a bucket notification configuration as XML, using the
-- given S3 namespace.
mkPutNotificationRequest :: Text -> Notification -> ByteString
mkPutNotificationRequest ns = toXML ns . toXNode
@ -106,60 +128,103 @@ mkSelectRequest :: SelectRequest -> ByteString
-- | Build the XML request body for a SelectObjectContent request from
-- the expression, input/output serialization settings and the optional
-- request-progress flag.
mkSelectRequest :: SelectRequest -> ByteString
mkSelectRequest r = LBS.toStrict $ renderLBS def sr
  where
    sr = Document (Prologue [] Nothing []) root []
    root =
      Element "SelectRequest" mempty $
        [ NodeElement
            ( Element
                "Expression"
                mempty
                [NodeContent $ srExpression r]
            ),
          NodeElement
            ( Element
                "ExpressionType"
                mempty
                [NodeContent $ show $ srExpressionType r]
            ),
          NodeElement
            ( Element "InputSerialization" mempty
                $ inputSerializationNodes
                $ srInputSerialization r
            ),
          NodeElement
            ( Element "OutputSerialization" mempty
                $ outputSerializationNodes
                $ srOutputSerialization r
            )
        ]
          ++ maybe [] reqProgElem (srRequestProgressEnabled r)
    reqProgElem enabled =
      [ NodeElement
          ( Element
              "RequestProgress"
              mempty
              [ NodeElement
                  ( Element
                      "Enabled"
                      mempty
                      [ NodeContent
                          (if enabled then "TRUE" else "FALSE")
                      ]
                  )
              ]
          )
      ]
    inputSerializationNodes is =
      comprTypeNode (isCompressionType is)
        ++ [NodeElement $ formatNode (isFormatInfo is)]
    comprTypeNode (Just c) =
      [ NodeElement $
          Element
            "CompressionType"
            mempty
            [ NodeContent $ case c of
                CompressionTypeNone -> "NONE"
                CompressionTypeGzip -> "GZIP"
                CompressionTypeBzip2 -> "BZIP2"
            ]
      ]
    comprTypeNode Nothing = []
    -- render a (key, value) property as <key>value</key>
    kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v]
    formatNode (InputFormatCSV (CSVProp h)) =
      Element
        "CSV"
        mempty
        (map NodeElement $ map kvElement $ H.toList h)
    formatNode (InputFormatJSON p) =
      Element
        "JSON"
        mempty
        [ NodeElement
            ( Element
                "Type"
                mempty
                [ NodeContent $ case jsonipType p of
                    JSONTypeDocument -> "DOCUMENT"
                    JSONTypeLines -> "LINES"
                ]
            )
        ]
    formatNode InputFormatParquet = Element "Parquet" mempty []
    outputSerializationNodes (OutputSerializationJSON j) =
      [ NodeElement
          ( Element "JSON" mempty
              $ rdElem
              $ jsonopRecordDelimiter j
          )
      ]
    outputSerializationNodes (OutputSerializationCSV (CSVProp h)) =
      [ NodeElement $
          Element
            "CSV"
            mempty
            (map NodeElement $ map kvElement $ H.toList h)
      ]
    rdElem Nothing = []
    rdElem (Just t) =
      [ NodeElement $
          Element
            "RecordDelimiter"
            mempty
            [NodeContent t]
      ]

View File

@ -15,34 +15,32 @@
--
module Network.Minio.XmlParser
( parseListBuckets
, parseLocation
, parseNewMultipartUpload
, parseCompleteMultipartUploadResponse
, parseCopyObjectResponse
, parseListObjectsResponse
, parseListObjectsV1Response
, parseListUploadsResponse
, parseListPartsResponse
, parseErrResponse
, parseNotification
, parseSelectProgress
) where
( parseListBuckets,
parseLocation,
parseNewMultipartUpload,
parseCompleteMultipartUploadResponse,
parseCopyObjectResponse,
parseListObjectsResponse,
parseListObjectsV1Response,
parseListUploadsResponse,
parseListPartsResponse,
parseErrResponse,
parseNotification,
parseSelectProgress,
)
where
import qualified Data.ByteString.Lazy as LB
import qualified Data.HashMap.Strict as H
import Data.List (zip3, zip4, zip6)
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time
import Text.XML
import Text.XML.Cursor hiding (bool)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import qualified Data.HashMap.Strict as H
import Data.List (zip3, zip4, zip6)
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Text.XML
import Text.XML.Cursor hiding (bool)
-- | Represent the time format string returned by S3 API calls.
s3TimeFormat :: [Char]
@ -58,12 +56,14 @@ uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML
parseS3XMLTime :: MonadIO m => Text -> m UTCTime
parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
parseTimeM True defaultTimeLocale s3TimeFormat $ T.unpack t
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return
$ parseTimeM True defaultTimeLocale s3TimeFormat
$ T.unpack t
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr = either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimal numStr =
either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
@ -72,18 +72,18 @@ s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot = either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
parseRoot =
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
-- | Parse the response XML of a list buckets call.
parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo]
parseListBuckets xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let
s3Elem' = s3Elem ns
names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
let s3Elem' = s3Elem ns
names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
times <- mapM parseS3XMLTime timeStrings
return $ zipWith BucketInfo names times
@ -116,41 +116,38 @@ parseCopyObjectResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =
parseCopyObjectResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let
s3Elem' = s3Elem ns
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
let s3Elem' = s3Elem ns
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
mtime <- parseS3XMLTime mtimeStr
return (T.concat $ r $// s3Elem' "ETag" &/ content, mtime)
-- | Parse the response XML of a list objects v1 call.
parseListObjectsV1Response :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
=> LByteString -> m ListObjectsV1Result
parseListObjectsV1Response ::
(MonadReader env m, HasSvcNamespace env, MonadIO m) =>
LByteString ->
m ListObjectsV1Result
parseListObjectsV1Response xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr
let
objects = map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
let objects =
map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
return $ ListObjectsV1Result hasMore nextMarker objects prefixes
@ -159,28 +156,24 @@ parseListObjectsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListObjectsResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr
let
objects = map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
let objects =
map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
return $ ListObjectsResult hasMore nextToken objects prefixes
@ -189,20 +182,18 @@ parseListUploadsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListUploadsResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content
nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content
nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
uploadInitTimes <- mapM parseS3XMLTime uploadInitTimeStr
let
uploads = zip3 uploadKeys uploadIds uploadInitTimes
let uploads = zip3 uploadKeys uploadIds uploadInitTimes
return $ ListUploadsResult hasMore nextKey nextUpload uploads prefixes
@ -210,27 +201,25 @@ parseListPartsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =>
parseListPartsResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
partModTimes <- mapM parseS3XMLTime partModTimeStr
partSizes <- parseDecimals partSizeStr
partNumbers <- parseDecimals partNumberStr
nextPartNum <- parseDecimals $ maybeToList nextPartNumStr
let
partInfos = map (uncurry4 ObjectPartInfo) $
zip4 partNumbers partETags partSizes partModTimes
let partInfos =
map (uncurry4 ObjectPartInfo) $
zip4 partNumbers partETags partSizes partModTimes
return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
@ -250,28 +239,31 @@ parseNotification xmldata = do
<*> (mapM (parseNode ns "Topic") tcfg)
<*> (mapM (parseNode ns "CloudFunction") lcfg)
where
getFilterRule ns c =
let name = T.concat $ c $/ s3Elem ns "Name" &/ content
value = T.concat $ c $/ s3Elem ns "Value" &/ content
in FilterRule name value
in FilterRule name value
parseNode ns arnName nodeData = do
let c = fromNode nodeData
id = T.concat $ c $/ s3Elem ns "Id" &/ content
arn = T.concat $ c $/ s3Elem ns arnName &/ content
events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content
rules = c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key" &/
s3Elem ns "FilterRule" &| getFilterRule ns
return $ NotificationConfig id arn events
(Filter $ FilterKey $ FilterRules rules)
rules =
c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key"
&/ s3Elem ns "FilterRule" &| getFilterRule ns
return $
NotificationConfig
id
arn
events
(Filter $ FilterKey $ FilterRules rules)
parseSelectProgress :: MonadIO m => ByteString -> m Progress
parseSelectProgress xmldata = do
r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/element "BytesProcessed" &/ content
bReturned = T.concat $ r $/element "BytesReturned" &/ content
Progress <$> parseDecimal bScanned
<*> parseDecimal bProcessed
<*> parseDecimal bReturned
r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content
bReturned = T.concat $ r $/ element "BytesReturned" &/ content
Progress <$> parseDecimal bScanned
<*> parseDecimal bProcessed
<*> parseDecimal bReturned

View File

@ -15,7 +15,7 @@
# resolver:
# name: custom-snapshot
# location: "./custom-snapshot.yaml"
resolver: lts-14.6
resolver: lts-16.0
# User packages to be built.
# Various formats can be used as shown in the example below.
@ -39,7 +39,9 @@ packages:
- '.'
# Dependency packages to be pulled from upstream that are not in the resolver
# (e.g., acme-missiles-0.3)
extra-deps: []
extra-deps:
- unliftio-core-0.2.0.1
- protolude-0.3.0
# Override default flag values for local packages and extra-deps
flags: {}

View File

@ -3,10 +3,24 @@
# For more information, please see the documentation at:
# https://docs.haskellstack.org/en/stable/lock_files
packages: []
packages:
- completed:
hackage: unliftio-core-0.2.0.1@sha256:9b3e44ea9aacacbfc35b3b54015af450091916ac3618a41868ebf6546977659a,1082
pantry-tree:
size: 328
sha256: e81c5a1e82ec2cd68cbbbec9cd60567363abe02257fa1370a906f6754b6818b8
original:
hackage: unliftio-core-0.2.0.1
- completed:
hackage: protolude-0.3.0@sha256:8361b811b420585b122a7ba715aa5923834db6e8c36309bf267df2dbf66b95ef,2693
pantry-tree:
size: 1644
sha256: babf32b414f25f790b7a4ce6bae5c960bc51a11a289e7c47335b222e6762560c
original:
hackage: protolude-0.3.0
snapshots:
- completed:
size: 524127
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/14/6.yaml
sha256: dc70dfb45e2c32f54719819bd055f46855dd4b3bd2e58b9f3f38729a2d553fbb
original: lts-14.6
size: 531237
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/16/0.yaml
sha256: 210e15b7043e2783115afe16b0d54914b1611cdaa73f3ca3ca7f8e0847ff54e5
original: lts-16.0

File diff suppressed because it is too large Load Diff

View File

@ -15,88 +15,100 @@
--
module Network.Minio.API.Test
( bucketNameValidityTests
, objectNameValidityTests
, parseServerInfoJSONTest
, parseHealStatusTest
, parseHealStartRespTest
) where
( bucketNameValidityTests,
objectNameValidityTests,
parseServerInfoJSONTest,
parseHealStatusTest,
parseHealStartRespTest,
)
where
import Data.Aeson (eitherDecode)
import Test.Tasty
import Test.Tasty.HUnit
import Lib.Prelude
import Network.Minio.AdminAPI
import Network.Minio.API
import Data.Aeson (eitherDecode)
import Lib.Prelude
import Network.Minio.API
import Network.Minio.AdminAPI
import Test.Tasty
import Test.Tasty.HUnit
assertBool' :: Bool -> Assertion
assertBool' = assertBool "Test failed!"
bucketNameValidityTests :: TestTree
bucketNameValidityTests = testGroup "Bucket Name Validity Tests"
[ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName ""
, testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab"
, testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
, testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD"
, testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2"
, testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-"
, testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg"
, testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1"
, testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea"
, testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d"
, testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
]
bucketNameValidityTests =
testGroup
"Bucket Name Validity Tests"
[ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName "",
testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab",
testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD",
testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2",
testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-",
testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg",
testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1",
testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea",
testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d",
testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
]
objectNameValidityTests :: TestTree
objectNameValidityTests = testGroup "Object Name Validity Tests"
[ testCase "Empty name" $ assertBool' $ not $ isValidObjectName ""
, testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
]
objectNameValidityTests =
testGroup
"Object Name Validity Tests"
[ testCase "Empty name" $ assertBool' $ not $ isValidObjectName "",
testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
]
parseServerInfoJSONTest :: TestTree
parseServerInfoJSONTest = testGroup "Parse MinIO Admin API ServerInfo JSON test" $
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])) testCases
parseServerInfoJSONTest =
testGroup "Parse MinIO Admin API ServerInfo JSON test" $
map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
)
testCases
where
testCases = [ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON)
, ("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON)
, ("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON)
]
testCases =
[ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON),
("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON),
("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON)
]
fsJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":20530,\"Backend\":{\"Type\":1,\"OnlineDisks\":0,\"OfflineDisks\":0,\"StandardSCData\":0,\"StandardSCParity\":0,\"RRSCData\":0,\"RRSCParity\":0,\"Sets\":null}},\"network\":{\"transferred\":808,\"received\":1160},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":5992503019270,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
erasureJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":2,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
invalidJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":42,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
parseHealStatusTest :: TestTree
parseHealStatusTest = testGroup "Parse MinIO Admin API HealStatus JSON test" $
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)) testCases
parseHealStatusTest =
testGroup "Parse MinIO Admin API HealStatus JSON test" $
map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)
)
testCases
where
testCases = [ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON')
, ("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON')
, ("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType)
]
testCases =
[ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON'),
("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON'),
("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType)
]
erasureJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
invalidJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]"
invalidItemType = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"hello\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
parseHealStartRespTest :: TestTree
parseHealStartRespTest = testGroup "Parse MinIO Admin API HealStartResp JSON test" $
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)) testCases
parseHealStartRespTest =
testGroup "Parse MinIO Admin API HealStartResp JSON test" $
map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
)
testCases
where
testCases = [ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON)
, ("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON)
]
testCases =
[ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON),
("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON)
]
hsrJSON = "{\"clientToken\":\"3a3aca49-77dd-4b78-bba7-0978f119b23e\",\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"
missingTokenJSON = "{\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"

View File

@ -15,23 +15,23 @@
--
module Network.Minio.JsonParser.Test
(
jsonParserTests
) where
( jsonParserTests,
)
where
import Test.Tasty
import Test.Tasty.HUnit
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.Errors
import Network.Minio.JsonParser
import Lib.Prelude
import Network.Minio.Errors
import Network.Minio.JsonParser
import Test.Tasty
import Test.Tasty.HUnit
import UnliftIO (MonadUnliftIO)
jsonParserTests :: TestTree
jsonParserTests = testGroup "JSON Parser Tests"
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
]
jsonParserTests =
testGroup
"JSON Parser Tests"
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act
@ -43,22 +43,21 @@ testParseErrResponseJSON :: Assertion
testParseErrResponseJSON = do
-- 1. Test parsing of an invalid error json.
parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
when (isRight parseResE) $
assertFailure $ "Parsing should have failed => " ++ show parseResE
when (isRight parseResE)
$ assertFailure
$ "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(jsondata, sErr) -> do
parseErr <- tryValidationErr $ parseErrResponseJSON jsondata
either assertValidationErr (@?= sErr) parseErr
where
cases = [
-- 2. Test parsing of a valid error json.
("{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records."
)
,
-- 3. Test parsing of a valid, empty Resource.
("{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method."
)
cases =
[ -- 2. Test parsing of a valid error json.
( "{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records."
),
-- 3. Test parsing of a valid, empty Resource.
( "{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method."
)
]

View File

@ -15,18 +15,19 @@
--
module Network.Minio.TestHelpers
( runTestNS
) where
( runTestNS,
)
where
import Network.Minio.Data
import Lib.Prelude
import Network.Minio.Data
import Lib.Prelude
newtype TestNS = TestNS { testNamespace :: Text }
newtype TestNS = TestNS {testNamespace :: Text}
instance HasSvcNamespace TestNS where
getSvcNamespace = testNamespace
runTestNS :: ReaderT TestNS m a -> m a
runTestNS = flip runReaderT $
TestNS "http://s3.amazonaws.com/doc/2006-03-01/"
runTestNS =
flip runReaderT $
TestNS "http://s3.amazonaws.com/doc/2006-03-01/"

View File

@ -15,33 +15,32 @@
--
module Network.Minio.Utils.Test
(
limitedMapConcurrentlyTests
) where
( limitedMapConcurrentlyTests,
)
where
import Test.Tasty
import Test.Tasty.HUnit
import Lib.Prelude
import Network.Minio.Utils
import Lib.Prelude
import Network.Minio.Utils
import Test.Tasty
import Test.Tasty.HUnit
limitedMapConcurrentlyTests :: TestTree
limitedMapConcurrentlyTests = testGroup "limitedMapConcurrently Tests"
[ testCase "Test with various thread counts" testLMC
]
limitedMapConcurrentlyTests =
testGroup
"limitedMapConcurrently Tests"
[ testCase "Test with various thread counts" testLMC
]
testLMC :: Assertion
testLMC = do
let maxNum = 50
-- test with thread count of 1 to 2*maxNum
forM_ [1..(2*maxNum)] $ \threads -> do
res <- limitedMapConcurrently threads compute [1..maxNum]
forM_ [1 .. (2 * maxNum)] $ \threads -> do
res <- limitedMapConcurrently threads compute [1 .. maxNum]
sum res @?= overallResultCheck maxNum
where
-- simple function to run in each thread
compute :: Int -> IO Int
compute n = return $ sum [1..n]
compute n = return $ sum [1 .. n]
-- function to check overall result
overallResultCheck n = sum $ map (\t -> (t * (t+1)) `div` 2) [1..n]
overallResultCheck n = sum $ map (\t -> (t * (t + 1)) `div` 2) [1 .. n]

View File

@ -13,30 +13,31 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlGenerator.Test
( xmlGeneratorTests
) where
( xmlGeneratorTests,
)
where
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.TestHelpers
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser (parseNotification)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.TestHelpers
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser (parseNotification)
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
xmlGeneratorTests :: TestTree
xmlGeneratorTests = testGroup "XML Generator Tests"
[ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig
, testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest
, testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest
, testCase "Test mkSelectRequest" testMkSelectRequest
]
xmlGeneratorTests =
testGroup
"XML Generator Tests"
[ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig,
testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest,
testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest,
testCase "Test mkSelectRequest" testMkSelectRequest
]
testMkCreateBucketConfig :: Assertion
testMkCreateBucketConfig = do
@ -44,100 +45,129 @@ testMkCreateBucketConfig = do
assertEqual "CreateBucketConfiguration xml should match: " expected $
mkCreateBucketConfig ns "EU"
where
expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LocationConstraint>EU</LocationConstraint>\
\</CreateBucketConfiguration>"
expected =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LocationConstraint>EU</LocationConstraint>\
\</CreateBucketConfiguration>"
testMkCompleteMultipartUploadRequest :: Assertion
testMkCompleteMultipartUploadRequest =
assertEqual "completeMultipartUpload xml should match: " expected $
mkCompleteMultipartUploadRequest [(1, "abc")]
mkCompleteMultipartUploadRequest [(1, "abc")]
where
expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUpload>\
\<Part>\
\<PartNumber>1</PartNumber><ETag>abc</ETag>\
\</Part>\
\</CompleteMultipartUpload>"
expected =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUpload>\
\<Part>\
\<PartNumber>1</PartNumber><ETag>abc</ETag>\
\</Part>\
\</CompleteMultipartUpload>"
testMkPutNotificationRequest :: Assertion
testMkPutNotificationRequest =
forM_ cases $ \val -> do
let ns = "http://s3.amazonaws.com/doc/2006-03-01/"
result = toS $ mkPutNotificationRequest ns val
result = fromStrictBS $ mkPutNotificationRequest ns val
ntf <- runExceptT $ runTestNS $ parseNotification result
either (\_ -> assertFailure "XML Parse Error!")
(@?= val) ntf
either
(\_ -> assertFailure "XML Parse Error!")
(@?= val)
ntf
where
cases = [ Notification []
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated] defaultFilter
]
[]
, Notification
[ NotificationConfig
"1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
(Filter $ FilterKey $ FilterRules
[ FilterRule "prefix" "images/"
, FilterRule "suffix" ".jpg"])
, NotificationConfig
"" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated] defaultFilter
]
[ NotificationConfig
"" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject] defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated] defaultFilter
]
]
cases =
[ Notification
[]
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated]
defaultFilter
]
[],
Notification
[ NotificationConfig
"1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
( Filter $ FilterKey $
FilterRules
[ FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"
]
),
NotificationConfig
""
"arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated]
defaultFilter
]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
]
testMkSelectRequest :: Assertion
testMkSelectRequest = mapM_ assertFn cases
where
assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a
cases = [ ( SelectRequest "Select * from S3Object" SQL
(InputSerialization (Just CompressionTypeGzip)
(InputFormatCSV $ fileHeaderInfo FileHeaderIgnore
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
))
(OutputSerializationCSV $ quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
cases =
[ ( SelectRequest
"Select * from S3Object"
SQL
( InputSerialization
(Just CompressionTypeGzip)
( InputFormatCSV $
fileHeaderInfo FileHeaderIgnore
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
)
(Just False)
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter>
)
( OutputSerializationCSV $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
)
(Just False),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter>
</RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
, ( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeGzip $
selectRequest "Select * from S3Object" documentJsonInput
(outputJSONFromRecordDelimiter "\n")
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
),
( setRequestProgressEnabled False
$ setInputCompressionType CompressionTypeGzip
$ selectRequest
"Select * from S3Object"
documentJsonInput
(outputJSONFromRecordDelimiter "\n"),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
, ( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeNone $
selectRequest "Select * from S3Object" defaultParquetInput
(outputCSVFromProps $ quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\"")
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
),
( setRequestProgressEnabled False
$ setInputCompressionType CompressionTypeNone
$ selectRequest
"Select * from S3Object"
defaultParquetInput
( outputCSVFromProps $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
]
)
]

View File

@ -13,39 +13,40 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlParser.Test
( xmlParserTests
) where
( xmlParserTests,
)
where
import qualified Data.HashMap.Strict as H
import Data.Time (fromGregorian)
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.TestHelpers
import Network.Minio.XmlParser
import qualified Data.HashMap.Strict as H
import Data.Time (fromGregorian)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.TestHelpers
import Network.Minio.XmlParser
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import UnliftIO (MonadUnliftIO)
xmlParserTests :: TestTree
xmlParserTests = testGroup "XML Parser Tests"
[ testCase "Test parseLocation" testParseLocation
, testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload
, testCase "Test parseListObjectsResponse" testParseListObjectsResult
, testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result
, testCase "Test parseListUploadsresponse" testParseListIncompleteUploads
, testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse
, testCase "Test parseListPartsResponse" testParseListPartsResponse
, testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse
, testCase "Test parseNotification" testParseNotification
, testCase "Test parseSelectProgress" testParseSelectProgress
]
xmlParserTests =
testGroup
"XML Parser Tests"
[ testCase "Test parseLocation" testParseLocation,
testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload,
testCase "Test parseListObjectsResponse" testParseListObjectsResult,
testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result,
testCase "Test parseListUploadsresponse" testParseListIncompleteUploads,
testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse,
testCase "Test parseListPartsResponse" testParseListPartsResponse,
testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse,
testCase "Test parseNotification" testParseNotification,
testCase "Test parseSelectProgress" testParseSelectProgress
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act
@ -54,232 +55,232 @@ assertValidtionErr :: MErrV -> Assertion
assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e
eitherValidationErr :: Either MErrV a -> (a -> Assertion) -> Assertion
eitherValidationErr (Left e) _ = assertValidtionErr e
eitherValidationErr (Left e) _ = assertValidtionErr e
eitherValidationErr (Right a) f = f a
testParseLocation :: Assertion
testParseLocation = do
-- 1. Test parsing of an invalid location constraint xml.
parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
when (isRight parseResE) $
assertFailure $ "Parsing should have failed => " ++ show parseResE
when (isRight parseResE)
$ assertFailure
$ "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(xmldata, expectedLocation) -> do
parseLocE <- tryValidationErr $ parseLocation xmldata
either assertValidtionErr (@?= expectedLocation) parseLocE
where
cases = [
-- 2. Test parsing of a valid location xml.
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>",
"EU"
)
,
-- 3. Test parsing of a valid, empty location xml.
("<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>",
"us-east-1"
)
cases =
[ -- 2. Test parsing of a valid location xml.
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>",
"EU"
),
-- 3. Test parsing of a valid, empty location xml.
( "<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>",
"us-east-1"
)
]
testParseNewMultipartUpload :: Assertion
testParseNewMultipartUpload = do
forM_ cases $ \(xmldata, expectedUploadId) -> do
parsedUploadIdE <- tryValidationErr $ runTestNS $ parseNewMultipartUpload xmldata
eitherValidationErr parsedUploadIdE (@?= expectedUploadId)
where
cases = [
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\
\</InitiateMultipartUploadResult>",
"VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
),
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\
\</InitiateMultipartUploadResult>",
"EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-"
)
cases =
[ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\
\</InitiateMultipartUploadResult>",
"VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
),
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\
\</InitiateMultipartUploadResult>",
"EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-"
)
]
testParseListObjectsResult :: Assertion
testParseListObjectsResult = do
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextContinuationToken>opaque</NextContinuationToken>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextContinuationToken>opaque</NextContinuationToken>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsResult <- tryValidationErr $ runTestNS $ parseListObjectsResponse xmldata
eitherValidationErr parsedListObjectsResult (@?= expectedListResult)
testParseListObjectsV1Result :: Assertion
testParseListObjectsV1Result = do
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextMarker>my-image1.jpg</NextMarker>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextMarker>my-image1.jpg</NextMarker>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsV1Result <- tryValidationErr $ runTestNS $ parseListObjectsV1Response xmldata
eitherValidationErr parsedListObjectsV1Result (@?= expectedListResult)
testParseListIncompleteUploads :: Assertion
testParseListIncompleteUploads = do
let
xmldata = "<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<KeyMarker/>\
\<UploadIdMarker/>\
\<NextKeyMarker>sample.jpg</NextKeyMarker>\
\<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\
\<Delimiter>/</Delimiter>\
\<Prefix/>\
\<MaxUploads>1000</MaxUploads>\
\<IsTruncated>false</IsTruncated>\
\<Upload>\
\<Key>sample.jpg</Key>\
\<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\
\<Initiator>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<Initiated>2010-11-26T19:24:17.000Z</Initiated>\
\</Upload>\
\<CommonPrefixes>\
\<Prefix>photos/</Prefix>\
\</CommonPrefixes>\
\<CommonPrefixes>\
\<Prefix>videos/</Prefix>\
\</CommonPrefixes>\
\</ListMultipartUploadsResult>"
expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes
uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)]
initTime = UTCTime (fromGregorian 2010 11 26) 69857
prefixes = ["photos/", "videos/"]
let xmldata =
"<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<KeyMarker/>\
\<UploadIdMarker/>\
\<NextKeyMarker>sample.jpg</NextKeyMarker>\
\<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\
\<Delimiter>/</Delimiter>\
\<Prefix/>\
\<MaxUploads>1000</MaxUploads>\
\<IsTruncated>false</IsTruncated>\
\<Upload>\
\<Key>sample.jpg</Key>\
\<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\
\<Initiator>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<Initiated>2010-11-26T19:24:17.000Z</Initiated>\
\</Upload>\
\<CommonPrefixes>\
\<Prefix>photos/</Prefix>\
\</CommonPrefixes>\
\<CommonPrefixes>\
\<Prefix>videos/</Prefix>\
\</CommonPrefixes>\
\</ListMultipartUploadsResult>"
expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes
uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)]
initTime = UTCTime (fromGregorian 2010 11 26) 69857
prefixes = ["photos/", "videos/"]
parsedListUploadsResult <- tryValidationErr $ runTestNS $ parseListUploadsResponse xmldata
eitherValidationErr parsedListUploadsResult (@?= expectedListResult)
testParseCompleteMultipartUploadResponse :: Assertion
testParseCompleteMultipartUploadResponse = do
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\
\<Bucket>Example-Bucket</Bucket>\
\<Key>Example-Object</Key>\
\<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\
\</CompleteMultipartUploadResult>"
expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\""
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\
\<Bucket>Example-Bucket</Bucket>\
\<Key>Example-Object</Key>\
\<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\
\</CompleteMultipartUploadResult>"
expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\""
parsedETagE <- runExceptT $ runTestNS $ parseCompleteMultipartUploadResponse xmldata
eitherValidationErr parsedETagE (@?= expectedETag)
testParseListPartsResponse :: Assertion
testParseListPartsResponse = do
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<Key>example-object</Key>\
\<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\
\<Initiator>\
\<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\
\<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\
\<DisplayName>someName</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<PartNumberMarker>1</PartNumberMarker>\
\<NextPartNumberMarker>3</NextPartNumberMarker>\
\<MaxParts>2</MaxParts>\
\<IsTruncated>true</IsTruncated>\
\<Part>\
\<PartNumber>2</PartNumber>\
\<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
\<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\<Part>\
\<PartNumber>3</PartNumber>\
\<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
\<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\</ListPartsResult>"
expectedListResult = ListPartsResult True (Just 3) [part1, part2]
part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1
modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10
part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<Key>example-object</Key>\
\<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\
\<Initiator>\
\<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\
\<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\
\<DisplayName>someName</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<PartNumberMarker>1</PartNumberMarker>\
\<NextPartNumberMarker>3</NextPartNumberMarker>\
\<MaxParts>2</MaxParts>\
\<IsTruncated>true</IsTruncated>\
\<Part>\
\<PartNumber>2</PartNumber>\
\<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
\<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\<Part>\
\<PartNumber>3</PartNumber>\
\<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
\<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\</ListPartsResult>"
expectedListResult = ListPartsResult True (Just 3) [part1, part2]
part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1
modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10
part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
parsedListPartsResult <- runExceptT $ runTestNS $ parseListPartsResponse xmldata
eitherValidationErr parsedListPartsResult (@?= expectedListResult)
testParseCopyObjectResponse :: Assertion
testParseCopyObjectResponse = do
let
cases = [ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyObjectResult>",
("\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120))
, ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyPartResult>",
("\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120))]
let cases =
[ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyObjectResult>",
( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120
)
),
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyPartResult>",
( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120
)
)
]
forM_ cases $ \(xmldata, (etag, modTime)) -> do
parseResult <- runExceptT $ runTestNS $ parseCopyObjectResponse xmldata
@ -287,73 +288,88 @@ testParseCopyObjectResponse = do
testParseNotification :: Assertion
testParseNotification = do
let
cases = [ ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <TopicConfiguration>\
\ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\
\ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </TopicConfiguration>\
\</NotificationConfiguration>",
Notification []
[ NotificationConfig
let cases =
[ ( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <TopicConfiguration>\
\ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\
\ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </TopicConfiguration>\
\</NotificationConfiguration>",
Notification
[]
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated] defaultFilter
]
[])
, ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <CloudFunctionConfiguration>\
\ <Id>ObjectCreatedEvents</Id>\
\ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </CloudFunctionConfiguration>\
\ <QueueConfiguration>\
\ <Id>1</Id>\
\ <Filter>\
\ <S3Key>\
\ <FilterRule>\
\ <Name>prefix</Name>\
\ <Value>images/</Value>\
\ </FilterRule>\
\ <FilterRule>\
\ <Name>suffix</Name>\
\ <Value>.jpg</Value>\
\ </FilterRule>\
\ </S3Key>\
\ </Filter>\
\ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:Put</Event>\
\ </QueueConfiguration>\
\ <TopicConfiguration>\
\ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ </TopicConfiguration>\
\ <QueueConfiguration>\
\ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </QueueConfiguration>)\
\</NotificationConfiguration>",
Notification [ NotificationConfig
"1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
(Filter $ FilterKey $ FilterRules
[FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"])
, NotificationConfig
"" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated] defaultFilter
]
[ NotificationConfig
"" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject] defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated] defaultFilter
])
]
[ReducedRedundancyLostObject, ObjectCreated]
defaultFilter
]
[]
),
( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <CloudFunctionConfiguration>\
\ <Id>ObjectCreatedEvents</Id>\
\ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </CloudFunctionConfiguration>\
\ <QueueConfiguration>\
\ <Id>1</Id>\
\ <Filter>\
\ <S3Key>\
\ <FilterRule>\
\ <Name>prefix</Name>\
\ <Value>images/</Value>\
\ </FilterRule>\
\ <FilterRule>\
\ <Name>suffix</Name>\
\ <Value>.jpg</Value>\
\ </FilterRule>\
\ </S3Key>\
\ </Filter>\
\ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:Put</Event>\
\ </QueueConfiguration>\
\ <TopicConfiguration>\
\ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ </TopicConfiguration>\
\ <QueueConfiguration>\
\ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </QueueConfiguration>)\
\</NotificationConfiguration>",
Notification
[ NotificationConfig
"1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
( Filter $ FilterKey $
FilterRules
[ FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"
]
),
NotificationConfig
""
"arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated]
defaultFilter
]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
)
]
forM_ cases $ \(xmldata, val) -> do
result <- runExceptT $ runTestNS $ parseNotification xmldata
@ -362,20 +378,25 @@ testParseNotification = do
-- | Tests parsing of both progress and stats
testParseSelectProgress :: Assertion
testParseSelectProgress = do
let cases = [ ([r|<?xml version="1.0" encoding="UTF-8"?>
let cases =
[ ( [r|<?xml version="1.0" encoding="UTF-8"?>
<Progress>
<BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned>
</Progress>|] , Progress 512 1024 1024)
, ([r|<?xml version="1.0" encoding="UTF-8"?>
</Progress>|],
Progress 512 1024 1024
),
( [r|<?xml version="1.0" encoding="UTF-8"?>
<Stats>
<BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned>
</Stats>|], Progress 512 1024 1024)
]
</Stats>|],
Progress 512 1024 1024
)
]
forM_ cases $ \(xmldata, progress) -> do
result <- runExceptT $ parseSelectProgress xmldata
eitherValidationErr result (@?= progress)
forM_ cases $ \(xmldata, progress) -> do
result <- runExceptT $ parseSelectProgress xmldata
eitherValidationErr result (@?= progress)

View File

@ -14,21 +14,18 @@
-- limitations under the License.
--
import Test.Tasty
import Test.Tasty.QuickCheck as QC
import qualified Data.ByteString as B
import qualified Data.List as L
import Lib.Prelude
import Network.Minio.API.Test
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.PutObject
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
import qualified Data.ByteString as B
import qualified Data.List as L
import Lib.Prelude
import Network.Minio.API.Test
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.PutObject
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
import Test.Tasty
import Test.Tasty.QuickCheck as QC
main :: IO ()
main = defaultMain tests
@ -51,82 +48,84 @@ properties = testGroup "Properties" [qcProps] -- [scProps]
-- ]
qcProps :: TestTree
qcProps = testGroup "(checked by QuickCheck)"
[ QC.testProperty "selectPartSizes:" $
\n -> let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
qcProps =
testGroup
"(checked by QuickCheck)"
[ QC.testProperty "selectPartSizes:" $
\n ->
let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
-- check that pns increments from 1.
isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1..]
consPairs [] = []
consPairs [_] = []
consPairs (a:(b:c)) = (a, b):(consPairs (b:c))
isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1 ..]
consPairs [] = []
consPairs [_] = []
consPairs (a : (b : c)) = (a, b) : (consPairs (b : c))
-- check `offs` is monotonically increasing.
isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs
-- check sizes sums to n.
isSumSizeOk = sum sizes == n
-- check sizes are constant except last
isSizesConstantExceptLast =
all (\(a, b) -> a == b) (consPairs $ L.init sizes)
-- check each part except last is at least minPartSize;
-- last part may be 0 only if it is the only part.
nparts = length sizes
isMinPartSizeOk =
if | nparts > 1 -> -- last part can be smaller but > 0
all (>= minPartSize) (take (nparts - 1) sizes) &&
all (\s -> s > 0) (drop (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here.
maybe True (\x -> x >= 0 && x <= minPartSize) $
headMay sizes
| otherwise -> False
in n < 0 ||
(isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk &&
isSizesConstantExceptLast && isMinPartSizeOk)
, QC.testProperty "selectCopyRanges:" $
\(start, end) ->
let (_, pairs) = L.unzip (selectCopyRanges (start, end))
-- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs
-- each pair is >=64MiB except last, and all those parts
-- have same size.
initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs
isPartSizesOk = all (>= minPartSize) initSizes &&
maybe True (\k -> all (== k) initSizes)
(headMay initSizes)
-- returned offsets are contiguous.
fsts = drop 1 $ map fst pairs
snds = take (length pairs - 1) $ map snd pairs
isContParts = length fsts == length snds &&
and (map (\(a, b) -> a == b + 1) $ zip fsts snds)
in start < 0 || start > end ||
(isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts)
, QC.testProperty "mkSSECKey:" $
\w8s -> let bs = B.pack w8s
r = mkSSECKey bs
in case r of
Just _ -> B.length bs == 32
if
| nparts > 1 -> -- last part can be smaller but > 0
all (>= minPartSize) (take (nparts - 1) sizes)
&& all (\s -> s > 0) (drop (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here.
maybe True (\x -> x >= 0 && x <= minPartSize) $
headMay sizes
| otherwise -> False
in n < 0
|| ( isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk
&& isSizesConstantExceptLast
&& isMinPartSizeOk
),
QC.testProperty "selectCopyRanges:" $
\(start, end) ->
let (_, pairs) = L.unzip (selectCopyRanges (start, end))
-- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs
-- each pair is >=64MiB except last, and all those parts
-- have same size.
initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs
isPartSizesOk =
all (>= minPartSize) initSizes
&& maybe
True
(\k -> all (== k) initSizes)
(headMay initSizes)
-- returned offsets are contiguous.
fsts = drop 1 $ map fst pairs
snds = take (length pairs - 1) $ map snd pairs
isContParts =
length fsts == length snds
&& and (map (\(a, b) -> a == b + 1) $ zip fsts snds)
in start < 0 || start > end
|| (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts),
QC.testProperty "mkSSECKey:" $
\w8s ->
let bs = B.pack w8s
r = mkSSECKey bs
in case r of
Just _ -> B.length bs == 32
Nothing -> B.length bs /= 32
]
]
unitTests :: TestTree
unitTests = testGroup "Unit tests" [ xmlGeneratorTests, xmlParserTests
, bucketNameValidityTests
, objectNameValidityTests
, parseServerInfoJSONTest
, parseHealStatusTest
, parseHealStartRespTest
, limitedMapConcurrentlyTests
]
unitTests =
testGroup
"Unit tests"
[ xmlGeneratorTests,
xmlParserTests,
bucketNameValidityTests,
objectNameValidityTests,
parseServerInfoJSONTest,
parseHealStatusTest,
parseHealStartRespTest,
limitedMapConcurrentlyTests
]