Update code formatting and update dependencies (unliftio, protolude) (#152)

* Format code with ormolu

* Use latest unliftio-core

* Use latest protolude
This commit is contained in:
Aditya Manthramurthy 2020-06-14 10:06:41 -07:00 committed by GitHub
parent ce23f7322a
commit 23fecbb469
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
58 changed files with 4875 additions and 4059 deletions

View File

@ -47,7 +47,7 @@ jobs:
- name: Install dependencies - name: Install dependencies
run: | run: |
cabal v2-update cabal v2-update
cabal v2-build --only-dependencies --enable-tests --enable-benchmarks --verbose=2 cabal v2-build --only-dependencies --enable-tests --enable-benchmarks
- name: Build - name: Build
run: cabal v2-build --enable-tests --enable-benchmarks all run: cabal v2-build --enable-tests --enable-benchmarks all
- name: Run tests - name: Run tests

View File

@ -15,4 +15,5 @@
-- --
import Distribution.Simple import Distribution.Simple
main = defaultMain main = defaultMain

View File

@ -16,20 +16,17 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO) import Control.Monad.IO.Class (liftIO)
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "missingbucket" let bucket = "missingbucket"
@ -39,5 +36,5 @@ main = do
liftIO $ putStrLn $ "Does " ++ show bucket ++ " exist? - " ++ show foundBucket liftIO $ putStrLn $ "Does " ++ show bucket ++ " exist? - " ++ show foundBucket
case res1 of case res1 of
Left e -> putStrLn $ "bucketExists failed." ++ show e Left e -> putStrLn $ "bucketExists failed." ++ show e
Right () -> return () Right () -> return ()

View File

@ -16,42 +16,40 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import UnliftIO.Exception (catch, throwIO) import Network.Minio
import UnliftIO.Exception (catch, throwIO)
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "test"
bucket = "test"
object = "obj" object = "obj"
objectCopy = "obj-copy" objectCopy = "obj-copy"
localFile = "/etc/lsb-release" localFile = "/etc/lsb-release"
res1 <- runMinio minioPlayCI $ do res1 <- runMinio minioPlayCI $ do
-- 1. Make a bucket; Catch BucketAlreadyOwnedByYou exception. -- 1. Make a bucket; Catch BucketAlreadyOwnedByYou exception.
catch (makeBucket bucket Nothing) ( catch
\e -> case e of (makeBucket bucket Nothing)
BucketAlreadyOwnedByYou -> return () ( \e -> case e of
_ -> throwIO e BucketAlreadyOwnedByYou -> return ()
_ -> throwIO e
) )
-- 2. Upload a file to bucket/object. -- 2. Upload a file to bucket/object.
fPutObject bucket object localFile defaultPutObjectOptions fPutObject bucket object localFile defaultPutObjectOptions
-- 3. Copy bucket/object to bucket/objectCopy. -- 3. Copy bucket/object to bucket/objectCopy.
copyObject defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy} copyObject
defaultSourceInfo { srcBucket = bucket , srcObject = object } defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy}
defaultSourceInfo {srcBucket = bucket, srcObject = object}
case res1 of case res1 of
Left e -> putStrLn $ "copyObject failed." ++ show e Left e -> putStrLn $ "copyObject failed." ++ show e
Right () -> putStrLn "copyObject succeeded." Right () -> putStrLn "copyObject succeeded."

View File

@ -16,40 +16,40 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ScopedTypeVariables #-}
import Network.Minio
import Data.Monoid ((<>)) import Data.Monoid ((<>))
import Data.Text (pack) import Data.Text (pack)
import Options.Applicative import Network.Minio
import System.FilePath.Posix import Options.Applicative
import UnliftIO (throwIO, try) import System.FilePath.Posix
import UnliftIO (throwIO, try)
import Prelude import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
-- optparse-applicative package based command-line parsing. -- optparse-applicative package based command-line parsing.
fileNameArgs :: Parser FilePath fileNameArgs :: Parser FilePath
fileNameArgs = strArgument fileNameArgs =
(metavar "FILENAME" strArgument
<> help "Name of file to upload to AWS S3 or a MinIO server") ( metavar "FILENAME"
<> help "Name of file to upload to AWS S3 or a MinIO server"
)
cmdParser :: ParserInfo FilePath cmdParser :: ParserInfo FilePath
cmdParser = info cmdParser =
(helper <*> fileNameArgs) info
(fullDesc (helper <*> fileNameArgs)
<> progDesc "FileUploader" ( fullDesc
<> header <> progDesc "FileUploader"
"FileUploader - a simple file-uploader program using minio-hs") <> header
"FileUploader - a simple file-uploader program using minio-hs"
)
main :: IO () main :: IO ()
main = do main = do
@ -64,12 +64,12 @@ main = do
bErr <- try $ makeBucket bucket Nothing bErr <- try $ makeBucket bucket Nothing
case bErr of case bErr of
Left BucketAlreadyOwnedByYou -> return () Left BucketAlreadyOwnedByYou -> return ()
Left e -> throwIO e Left e -> throwIO e
Right _ -> return () Right _ -> return ()
-- Upload filepath to bucket; object is derived from filepath. -- Upload filepath to bucket; object is derived from filepath.
fPutObject bucket object filepath defaultPutObjectOptions fPutObject bucket object filepath defaultPutObjectOptions
case res of case res of
Left e -> putStrLn $ "file upload failed due to " ++ (show e) Left e -> putStrLn $ "file upload failed due to " ++ (show e)
Right () -> putStrLn "file upload succeeded." Right () -> putStrLn "file upload succeeded."

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <-
runMinio minioPlayCI $
getConfig getConfig
print res print res

View File

@ -16,31 +16,26 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import Network.Minio
import Prelude import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "my-bucket"
bucket = "my-bucket"
object = "my-object" object = "my-object"
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
src <- getObject bucket object defaultGetObjectOptions src <- getObject bucket object defaultGetObjectOptions
C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object" C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object"
case res of case res of
Left e -> putStrLn $ "getObject failed." ++ (show e) Left e -> putStrLn $ "getObject failed." ++ (show e)
Right _ -> putStrLn "getObject succeeded." Right _ -> putStrLn "getObject succeeded."

View File

@ -16,28 +16,25 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.S3API
import Prelude import Network.Minio
import Network.Minio.S3API
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "test"
bucket = "test"
object = "passwd" object = "passwd"
res <- runMinio minioPlayCI $ res <-
headObject bucket object [] runMinio minioPlayCI $
headObject bucket object []
case res of case res of
Left e -> putStrLn $ "headObject failed." ++ show e Left e -> putStrLn $ "headObject failed." ++ show e
Right objInfo -> putStrLn $ "headObject succeeded." ++ show objInfo Right objInfo -> putStrLn $ "headObject succeeded." ++ show objInfo

View File

@ -16,19 +16,23 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <- runMinio minioPlayCI $
do do
hsr <- startHeal Nothing Nothing HealOpts { hoRecursive = True hsr <-
, hoDryRun = False startHeal
} Nothing
Nothing
HealOpts
{ hoRecursive = True,
hoDryRun = False
}
getHealStatus Nothing Nothing (hsrClientToken hsr) getHealStatus Nothing Nothing (hsrClientToken hsr)
print res print res

View File

@ -16,19 +16,17 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO) import Control.Monad.IO.Class (liftIO)
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
-- This example list buckets that belongs to the user and returns -- This example list buckets that belongs to the user and returns
-- region of the first bucket returned. -- region of the first bucket returned.

View File

@ -16,38 +16,36 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Conduit import Conduit
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "test"
bucket = "test"
-- Performs a recursive listing of incomplete uploads under bucket "test" -- Performs a recursive listing of incomplete uploads under bucket "test"
-- on a local minio server. -- on a local minio server.
res <- runMinio minioPlayCI $ res <-
runConduit $ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v)) runMinio minioPlayCI
$ runConduit
$ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
print res print res
{- {-
Following is the output of the above program on a local MinIO server. Following is the output of the above program on a local MinIO server.
Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz" Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz"
, uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2" , uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2"
, uiInitTime = 2017-03-01 10:16:25.698 UTC , uiInitTime = 2017-03-01 10:16:25.698 UTC
, uiSize = 17731794 , uiSize = 17731794
} }
] ]
-} -}

View File

@ -16,33 +16,31 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Conduit
import Prelude
import Conduit
import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "test"
bucket = "test"
-- Performs a recursive listing of all objects under bucket "test" -- Performs a recursive listing of all objects under bucket "test"
-- on play.min.io. -- on play.min.io.
res <- runMinio minioPlayCI $ res <-
runConduit $ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v)) runMinio minioPlayCI
$ runConduit
$ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
print res print res
{-
Following is the output of the above program on a local MinIO server.
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}] {-
-} Following is the output of the above program on a local MinIO server.
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
-}

View File

@ -16,24 +16,21 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "my-bucket" let bucket = "my-bucket"
res <- runMinio minioPlayCI $ res <-
-- N B the region provided for makeBucket is optional. runMinio minioPlayCI $
makeBucket bucket (Just "us-east-1") -- N B the region provided for makeBucket is optional.
makeBucket bucket (Just "us-east-1")
print res print res

View File

@ -16,34 +16,32 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO) import Control.Monad.IO.Class (liftIO)
import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original) import Data.CaseInsensitive (original)
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Text.Encoding as E import qualified Data.Text.Encoding as E
import Network.Minio
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "my-bucket"
bucket = "my-bucket" object = "my-object"
object = "my-object" kb15 = 15 * 1024
kb15 = 15*1024 -- Set query parameter to modify content disposition response
-- header
-- Set query parameter to modify content disposition response queryParam =
-- header [ ( "response-content-disposition",
queryParam = [("response-content-disposition", Just "attachment; filename=\"your-filename.txt\""
Just "attachment; filename=\"your-filename.txt\"")] )
]
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..." liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..."
@ -61,23 +59,28 @@ main = do
-- Generate a URL with 7 days expiry time - note that the headers -- Generate a URL with 7 days expiry time - note that the headers
-- used above must be added to the request with the signed URL -- used above must be added to the request with the signed URL
-- generated. -- generated.
url <- presignedGetObjectUrl "my-bucket" "my-object" (7*24*3600) url <-
queryParam headers presignedGetObjectUrl
"my-bucket"
"my-object"
(7 * 24 * 3600)
queryParam
headers
return (headers, etag, url) return (headers, etag, url)
case res of case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right (headers, _, url) -> do Right (headers, _, url) -> do
-- We generate a curl command to demonstrate usage of the signed -- We generate a curl command to demonstrate usage of the signed
-- URL. -- URL.
let let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"] curlCmd =
curlCmd = B.intercalate " " $ B.intercalate " " $
["curl --fail"] ++ map hdrOpt headers ++ ["curl --fail"] ++ map hdrOpt headers
["-o /tmp/myfile", B.concat ["'", url, "'"]] ++ ["-o /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ "The following curl command would use the presigned " ++ putStrLn $
"URL to fetch the object and write it to \"/tmp/myfile\":" "The following curl command would use the presigned "
++ "URL to fetch the object and write it to \"/tmp/myfile\":"
B.putStrLn curlCmd B.putStrLn curlCmd

View File

@ -16,47 +16,43 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as Char8 import qualified Data.ByteString.Char8 as Char8
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.Text.Encoding as Enc import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time import qualified Data.Time as Time
import Network.Minio
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
now <- Time.getCurrentTime now <- Time.getCurrentTime
let let bucket = "my-bucket"
bucket = "my-bucket" object = "photos/my-object"
object = "photos/my-object" -- set an expiration time of 10 days
expireTime = Time.addUTCTime (3600 * 24 * 10) now
-- set an expiration time of 10 days -- create a policy with expiration time and conditions - since the
expireTime = Time.addUTCTime (3600 * 24 * 10) now -- conditions are validated, newPostPolicy returns an Either value
policyE =
-- create a policy with expiration time and conditions - since the newPostPolicy
-- conditions are validated, newPostPolicy returns an Either value expireTime
policyE = newPostPolicy expireTime [ -- set the object name condition
[ -- set the object name condition ppCondKey object,
ppCondKey object -- set the bucket name condition
-- set the bucket name condition ppCondBucket bucket,
, ppCondBucket bucket -- set the size range of object as 1B to 10MiB
-- set the size range of object as 1B to 10MiB ppCondContentLengthRange 1 (10 * 1024 * 1024),
, ppCondContentLengthRange 1 (10*1024*1024) -- set content type as jpg image
-- set content type as jpg image ppCondContentType "image/jpeg",
, ppCondContentType "image/jpeg" -- on success set the server response code to 200
-- on success set the server response code to 200 ppCondSuccessActionStatus 200
, ppCondSuccessActionStatus 200 ]
]
case policyE of case policyE of
Left err -> putStrLn $ show err Left err -> putStrLn $ show err
@ -66,11 +62,16 @@ main = do
-- a curl command is output to demonstrate using the generated -- a curl command is output to demonstrate using the generated
-- URL and form-data -- URL and form-data
let let formFn (k, v) =
formFn (k, v) = B.concat ["-F ", Enc.encodeUtf8 k, "=", B.concat
"'", v, "'"] [ "-F ",
formOptions = B.intercalate " " $ map formFn $ H.toList formData Enc.encodeUtf8 k,
"=",
"'",
v,
"'"
]
formOptions = B.intercalate " " $ map formFn $ H.toList formData
return $ B.intercalate " " $ return $ B.intercalate " " $
["curl", formOptions, "-F file=@/tmp/photo.jpg", url] ["curl", formOptions, "-F file=@/tmp/photo.jpg", url]

View File

@ -16,44 +16,42 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original) import Data.CaseInsensitive (original)
import Network.Minio
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let -- Use headers to set user-metadata - note that this header will
-- Use headers to set user-metadata - note that this header will -- need to be set when the URL is used to make an upload.
-- need to be set when the URL is used to make an upload. headers =
headers = [("x-amz-meta-url-creator", [ ( "x-amz-meta-url-creator",
"minio-hs-presigned-put-example")] "minio-hs-presigned-put-example"
)
]
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
-- generate a URL with 7 days expiry time -- generate a URL with 7 days expiry time
presignedPutObjectUrl "my-bucket" "my-object" (7*24*3600) headers presignedPutObjectUrl "my-bucket" "my-object" (7 * 24 * 3600) headers
case res of case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right url -> do Right url -> do
-- We generate a curl command to demonstrate usage of the signed -- We generate a curl command to demonstrate usage of the signed
-- URL. -- URL.
let let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"] curlCmd =
curlCmd = B.intercalate " " $ B.intercalate " " $
["curl "] ++ map hdrOpt headers ++ ["curl "] ++ map hdrOpt headers
["-T /tmp/myfile", B.concat ["'", url, "'"]] ++ ["-T /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ "The following curl command would use the presigned " ++ putStrLn $
"URL to upload the file at \"/tmp/myfile\":" "The following curl command would use the presigned "
++ "URL to upload the file at \"/tmp/myfile\":"
B.putStrLn curlCmd B.putStrLn curlCmd

View File

@ -16,39 +16,36 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import Network.Minio
import Prelude import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "test"
bucket = "test"
object = "obj" object = "obj"
localFile = "/etc/lsb-release" localFile = "/etc/lsb-release"
kb15 = 15 * 1024 kb15 = 15 * 1024
-- Eg 1. Upload a stream of repeating "a" using putObject with default options. -- Eg 1. Upload a stream of repeating "a" using putObject with default options.
res1 <- runMinio minioPlayCI $ res1 <-
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions runMinio minioPlayCI $
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
case res1 of case res1 of
Left e -> putStrLn $ "putObject failed." ++ show e Left e -> putStrLn $ "putObject failed." ++ show e
Right () -> putStrLn "putObject succeeded." Right () -> putStrLn "putObject succeeded."
-- Eg 2. Upload a file using fPutObject with default options. -- Eg 2. Upload a file using fPutObject with default options.
res2 <- runMinio minioPlayCI $ res2 <-
fPutObject bucket object localFile defaultPutObjectOptions runMinio minioPlayCI $
fPutObject bucket object localFile defaultPutObjectOptions
case res2 of case res2 of
Left e -> putStrLn $ "fPutObject failed." ++ show e Left e -> putStrLn $ "fPutObject failed." ++ show e
Right () -> putStrLn "fPutObject succeeded." Right () -> putStrLn "fPutObject succeeded."

View File

@ -16,23 +16,18 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "my-bucket"
bucket = "my-bucket"
res <- runMinio minioPlayCI $ removeBucket bucket res <- runMinio minioPlayCI $ removeBucket bucket
print res print res

View File

@ -16,27 +16,24 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let let bucket = "mybucket"
bucket = "mybucket" object = "myobject"
object = "myobject"
res <- runMinio minioPlayCI $ res <-
removeIncompleteUpload bucket object runMinio minioPlayCI $
removeIncompleteUpload bucket object
case res of case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -16,20 +16,19 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Prelude import Prelude
main :: IO () main :: IO ()
main = do main = do
let let bucket = "mybucket"
bucket = "mybucket" object = "myobject"
object = "myobject"
res <- runMinio minioPlayCI $ res <-
removeObject bucket object runMinio minioPlayCI $
removeObject bucket object
case res of case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -16,34 +16,32 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Conduit as C import qualified Conduit as C
import Control.Monad (when) import Control.Monad (when)
import Network.Minio
import Prelude import Prelude
main :: IO () main :: IO ()
main = do main = do
let bucket = "selectbucket" let bucket = "selectbucket"
object = "1.csv" object = "1.csv"
content = "Name,Place,Temperature\n" content =
<> "James,San Jose,76\n" "Name,Place,Temperature\n"
<> "Alicia,San Leandro,88\n" <> "James,San Jose,76\n"
<> "Mark,San Carlos,90\n" <> "Alicia,San Leandro,88\n"
<> "Mark,San Carlos,90\n"
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
exists <- bucketExists bucket
when (not exists) $
makeBucket bucket Nothing
exists <- bucketExists bucket C.liftIO $ putStrLn "Uploading csv object"
when (not exists) $ putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions
makeBucket bucket Nothing
C.liftIO $ putStrLn "Uploading csv object" let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput
putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput print res
res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <-
getServerInfo runMinio minioPlayCI $
getServerInfo
print res print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <-
serviceSendAction ServiceActionRestart runMinio minioPlayCI $
serviceSendAction ServiceActionRestart
print res print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <-
serviceSendAction ServiceActionStop runMinio minioPlayCI $
serviceSendAction ServiceActionStop
print res print res

View File

@ -16,15 +16,15 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <-
serviceStatus runMinio minioPlayCI $
serviceStatus
print res print res

View File

@ -16,12 +16,11 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do

View File

@ -56,7 +56,7 @@ common base-settings
, Network.Minio.XmlParser , Network.Minio.XmlParser
, Network.Minio.JsonParser , Network.Minio.JsonParser
build-depends: base >= 4.7 && < 5 build-depends: base >= 4.7 && < 5
, protolude >= 0.2 && < 0.3 , protolude >= 0.3 && < 0.4
, aeson >= 1.2 , aeson >= 1.2
, base64-bytestring >= 1.0 , base64-bytestring >= 1.0
, binary >= 0.8.5.0 , binary >= 0.8.5.0
@ -83,8 +83,8 @@ common base-settings
, text >= 1.2 , text >= 1.2
, time >= 1.8 , time >= 1.8
, transformers >= 0.5 , transformers >= 0.5
, unliftio >= 0.2 , unliftio >= 0.2 && < 0.3
, unliftio-core >= 0.1 && < 0.2 , unliftio-core >= 0.2 && < 0.3
, unordered-containers >= 0.2 , unordered-containers >= 0.2
, xml-conduit >= 1.8 , xml-conduit >= 1.8

View File

@ -15,19 +15,45 @@
-- --
module Lib.Prelude module Lib.Prelude
( module Exports ( module Exports,
, both both,
) where showBS,
toStrictBS,
fromStrictBS,
)
where
import Protolude as Exports hiding (catch, catches, import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT)
throwIO, try) import qualified Data.ByteString.Lazy as LB
import Data.Time as Exports
import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT) ( UTCTime (..),
import Data.Time as Exports (UTCTime (..), diffUTCTime,
diffUTCTime) )
import UnliftIO as Exports (catch, catches, throwIO, import Protolude as Exports hiding
try) ( Handler,
catch,
catches,
throwIO,
try,
yield,
)
import UnliftIO as Exports
( Handler,
catch,
catches,
throwIO,
try,
)
-- | Apply a function on both elements of a pair -- | Apply a function on both elements of a pair
both :: (a -> b) -> (a, a) -> (b, b) both :: (a -> b) -> (a, a) -> (b, b)
both f (a, b) = (f a, f b) both f (a, b) = (f a, f b)
showBS :: Show a => a -> ByteString
showBS a = toUtf8 (show a :: Text)
toStrictBS :: LByteString -> ByteString
toStrictBS = LB.toStrict
fromStrictBS :: ByteString -> LByteString
fromStrictBS = LB.fromStrict

View File

@ -22,218 +22,217 @@
-- --
-- Types and functions to conveniently access S3 compatible object -- Types and functions to conveniently access S3 compatible object
-- storage servers like MinIO. -- storage servers like MinIO.
module Network.Minio module Network.Minio
( ( -- * Credentials
-- * Credentials Credentials (..),
Credentials (..)
-- ** Credential providers -- ** Credential providers
-- | Run actions that retrieve 'Credentials' from the environment or
-- files or other custom sources.
, Provider
, fromAWSConfigFile
, fromAWSEnv
, fromMinioEnv
, findFirst
-- * Connecting to object storage -- | Run actions that retrieve 'Credentials' from the environment or
, ConnectInfo -- files or other custom sources.
, setRegion Provider,
, setCreds fromAWSConfigFile,
, setCredsFrom fromAWSEnv,
, isConnectInfoSecure fromMinioEnv,
, disableTLSCertValidation findFirst,
, MinioConn
, mkMinioConn
-- ** Connection helpers -- * Connecting to object storage
-- | These are helpers to construct 'ConnectInfo' values for common ConnectInfo,
-- cases. setRegion,
, minioPlayCI setCreds,
, awsCI setCredsFrom,
, gcsCI isConnectInfoSecure,
disableTLSCertValidation,
MinioConn,
mkMinioConn,
-- * Minio Monad -- ** Connection helpers
----------------
-- | The Minio Monad provides connection-reuse, bucket-location
-- caching, resource management and simpler error handling
-- functionality. All actions on object storage are performed within
-- this Monad.
, Minio
, runMinioWith
, runMinio
, runMinioResWith
, runMinioRes
-- * Bucket Operations -- | These are helpers to construct 'ConnectInfo' values for common
-- cases.
minioPlayCI,
awsCI,
gcsCI,
-- ** Creation, removal and querying -- * Minio Monad
, Bucket ----------------
, makeBucket
, removeBucket
, bucketExists
, Region
, getLocation
-- ** Listing buckets -- | The Minio Monad provides connection-reuse, bucket-location
, BucketInfo(..) -- caching, resource management and simpler error handling
, listBuckets -- functionality. All actions on object storage are performed within
-- this Monad.
Minio,
runMinioWith,
runMinio,
runMinioResWith,
runMinioRes,
-- ** Listing objects -- * Bucket Operations
, listObjects
, listObjectsV1
, ListItem(..)
, ObjectInfo -- ** Creation, removal and querying
, oiObject Bucket,
, oiModTime makeBucket,
, oiETag removeBucket,
, oiSize bucketExists,
, oiUserMetadata Region,
, oiMetadata getLocation,
-- ** Listing incomplete uploads -- ** Listing buckets
, listIncompleteUploads BucketInfo (..),
, UploadId listBuckets,
, UploadInfo(..)
, listIncompleteParts
, ObjectPartInfo(..)
-- ** Bucket Notifications -- ** Listing objects
, getBucketNotification listObjects,
, putBucketNotification listObjectsV1,
, removeAllBucketNotification ListItem (..),
, Notification(..) ObjectInfo,
, defaultNotification oiObject,
, NotificationConfig(..) oiModTime,
, Arn oiETag,
, Event(..) oiSize,
, Filter(..) oiUserMetadata,
, defaultFilter oiMetadata,
, FilterKey(..)
, defaultFilterKey
, FilterRules(..)
, defaultFilterRules
, FilterRule(..)
-- * Object Operations -- ** Listing incomplete uploads
, Object listIncompleteUploads,
UploadId,
UploadInfo (..),
listIncompleteParts,
ObjectPartInfo (..),
-- ** File-based operations -- ** Bucket Notifications
, fGetObject getBucketNotification,
, fPutObject putBucketNotification,
removeAllBucketNotification,
Notification (..),
defaultNotification,
NotificationConfig (..),
Arn,
Event (..),
Filter (..),
defaultFilter,
FilterKey (..),
defaultFilterKey,
FilterRules (..),
defaultFilterRules,
FilterRule (..),
-- ** Conduit-based streaming operations -- * Object Operations
, putObject Object,
, PutObjectOptions
, defaultPutObjectOptions
, pooContentType
, pooContentEncoding
, pooContentDisposition
, pooContentLanguage
, pooCacheControl
, pooStorageClass
, pooUserMetadata
, pooNumThreads
, pooSSE
, getObject -- ** File-based operations
, GetObjectOptions fGetObject,
, defaultGetObjectOptions fPutObject,
, gooRange
, gooIfMatch
, gooIfNoneMatch
, gooIfModifiedSince
, gooIfUnmodifiedSince
, gooSSECKey
, GetObjectResponse
, gorObjectInfo
, gorObjectStream
-- ** Server-side object copying -- ** Conduit-based streaming operations
, copyObject putObject,
, SourceInfo PutObjectOptions,
, defaultSourceInfo defaultPutObjectOptions,
, srcBucket pooContentType,
, srcObject pooContentEncoding,
, srcRange pooContentDisposition,
, srcIfMatch pooContentLanguage,
, srcIfNoneMatch pooCacheControl,
, srcIfModifiedSince pooStorageClass,
, srcIfUnmodifiedSince pooUserMetadata,
, DestinationInfo pooNumThreads,
, defaultDestinationInfo pooSSE,
, dstBucket getObject,
, dstObject GetObjectOptions,
defaultGetObjectOptions,
gooRange,
gooIfMatch,
gooIfNoneMatch,
gooIfModifiedSince,
gooIfUnmodifiedSince,
gooSSECKey,
GetObjectResponse,
gorObjectInfo,
gorObjectStream,
-- ** Querying object info -- ** Server-side object copying
, statObject copyObject,
SourceInfo,
defaultSourceInfo,
srcBucket,
srcObject,
srcRange,
srcIfMatch,
srcIfNoneMatch,
srcIfModifiedSince,
srcIfUnmodifiedSince,
DestinationInfo,
defaultDestinationInfo,
dstBucket,
dstObject,
-- ** Object removal operations -- ** Querying object info
, removeObject statObject,
, removeIncompleteUpload
-- ** Select Object Content with SQL -- ** Object removal operations
, module Network.Minio.SelectAPI removeObject,
removeIncompleteUpload,
-- * Server-Side Encryption Helpers -- ** Select Object Content with SQL
, mkSSECKey module Network.Minio.SelectAPI,
, SSECKey
, SSE(..)
-- * Presigned Operations -- * Server-Side Encryption Helpers
, presignedPutObjectUrl mkSSECKey,
, presignedGetObjectUrl SSECKey,
, presignedHeadObjectUrl SSE (..),
, UrlExpiry
-- ** POST (browser) upload helpers -- * Presigned Operations
-- | Please see presignedPutObjectUrl,
-- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html presignedGetObjectUrl,
-- for detailed information. presignedHeadObjectUrl,
, newPostPolicy UrlExpiry,
, presignedPostPolicy
, showPostPolicy
, PostPolicy
, PostPolicyError(..)
-- *** Post Policy condition helpers -- ** POST (browser) upload helpers
, PostPolicyCondition
, ppCondBucket
, ppCondContentLengthRange
, ppCondContentType
, ppCondKey
, ppCondKeyStartsWith
, ppCondSuccessActionStatus
-- * Error handling -- | Please see
-- | Data types representing various errors that may occur while -- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-- working with an object storage service. -- for detailed information.
, MinioErr(..) newPostPolicy,
, MErrV(..) presignedPostPolicy,
, ServiceErr(..) showPostPolicy,
PostPolicy,
PostPolicyError (..),
) where -- *** Post Policy condition helpers
PostPolicyCondition,
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
-- * Error handling
-- | Data types representing various errors that may occur while
-- working with an object storage service.
MinioErr (..),
MErrV (..),
ServiceErr (..),
)
where
{- {-
This module exports the high-level MinIO API for object storage. This module exports the high-level MinIO API for object storage.
-} -}
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import Lib.Prelude
import Lib.Prelude import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.CopyObject import Network.Minio.Errors
import Network.Minio.Data import Network.Minio.ListOps
import Network.Minio.Errors import Network.Minio.PutObject
import Network.Minio.ListOps import Network.Minio.S3API
import Network.Minio.PutObject import Network.Minio.SelectAPI
import Network.Minio.S3API import Network.Minio.Utils
import Network.Minio.SelectAPI
import Network.Minio.Utils
-- | Lists buckets. -- | Lists buckets.
listBuckets :: Minio [BucketInfo] listBuckets :: Minio [BucketInfo]
@ -248,8 +247,12 @@ fGetObject bucket object fp opts = do
C.connect (gorObjectStream src) $ CB.sinkFileCautious fp C.connect (gorObjectStream src) $ CB.sinkFileCautious fp
-- | Upload the given file to the given object. -- | Upload the given file to the given object.
fPutObject :: Bucket -> Object -> FilePath fPutObject ::
-> PutObjectOptions -> Minio () Bucket ->
Object ->
FilePath ->
PutObjectOptions ->
Minio ()
fPutObject bucket object f opts = fPutObject bucket object f opts =
void $ putObjectInternal bucket object opts $ ODFile f Nothing void $ putObjectInternal bucket object opts $ ODFile f Nothing
@ -257,8 +260,13 @@ fPutObject bucket object f opts =
-- known; this helps the library select optimal part sizes to perform -- known; this helps the library select optimal part sizes to perform
-- a multipart upload. If not specified, it is assumed that the object -- a multipart upload. If not specified, it is assumed that the object
-- can be potentially 5TiB and selects multipart sizes appropriately. -- can be potentially 5TiB and selects multipart sizes appropriately.
putObject :: Bucket -> Object -> C.ConduitM () ByteString Minio () putObject ::
-> Maybe Int64 -> PutObjectOptions -> Minio () Bucket ->
Object ->
C.ConduitM () ByteString Minio () ->
Maybe Int64 ->
PutObjectOptions ->
Minio ()
putObject bucket object src sizeMay opts = putObject bucket object src sizeMay opts =
void $ putObjectInternal bucket object opts $ ODStream src sizeMay void $ putObjectInternal bucket object opts $ ODStream src sizeMay
@ -268,18 +276,25 @@ putObject bucket object src sizeMay opts =
-- copy operation if the new object is to be greater than 5GiB in -- copy operation if the new object is to be greater than 5GiB in
-- size. -- size.
copyObject :: DestinationInfo -> SourceInfo -> Minio () copyObject :: DestinationInfo -> SourceInfo -> Minio ()
copyObject dstInfo srcInfo = void $ copyObjectInternal (dstBucket dstInfo) copyObject dstInfo srcInfo =
(dstObject dstInfo) srcInfo void $
copyObjectInternal
(dstBucket dstInfo)
(dstObject dstInfo)
srcInfo
-- | Remove an object from the object store. -- | Remove an object from the object store.
removeObject :: Bucket -> Object -> Minio () removeObject :: Bucket -> Object -> Minio ()
removeObject = deleteObject removeObject = deleteObject
-- | Get an object from the object store. -- | Get an object from the object store.
getObject :: Bucket -> Object -> GetObjectOptions getObject ::
-> Minio GetObjectResponse Bucket ->
Object ->
GetObjectOptions ->
Minio GetObjectResponse
getObject bucket object opts = getObject bucket object opts =
getObject' bucket object [] $ gooToHeaders opts getObject' bucket object [] $ gooToHeaders opts
-- | Get an object's metadata from the object store. It accepts the -- | Get an object's metadata from the object store. It accepts the
-- same options as GetObject. -- same options as GetObject.
@ -309,6 +324,8 @@ bucketExists = headBucket
-- | Removes an ongoing multipart upload of an object. -- | Removes an ongoing multipart upload of an object.
removeIncompleteUpload :: Bucket -> Object -> Minio () removeIncompleteUpload :: Bucket -> Object -> Minio ()
removeIncompleteUpload bucket object = do removeIncompleteUpload bucket object = do
uploads <- C.runConduit $ listIncompleteUploads bucket (Just object) False uploads <-
C..| CC.sinkList C.runConduit $
listIncompleteUploads bucket (Just object) False
C..| CC.sinkList
mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads) mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads)

View File

@ -15,169 +15,187 @@
-- --
module Network.Minio.API module Network.Minio.API
( connect ( connect,
, S3ReqInfo(..) S3ReqInfo (..),
, runMinio runMinio,
, executeRequest executeRequest,
, mkStreamRequest mkStreamRequest,
, getLocation getLocation,
isValidBucketName,
checkBucketNameValidity,
isValidObjectName,
checkObjectNameValidity,
)
where
, isValidBucketName import Control.Retry
, checkBucketNameValidity ( fullJitterBackoff,
, isValidObjectName limitRetriesByCumulativeDelay,
, checkObjectNameValidity retrying,
) where )
import qualified Data.ByteString as B
import Control.Retry (fullJitterBackoff, import qualified Data.Char as C
limitRetriesByCumulativeDelay, import qualified Data.Conduit as C
retrying) import qualified Data.HashMap.Strict as H
import qualified Data.ByteString as B import qualified Data.Text as T
import qualified Data.Char as C import qualified Data.Time.Clock as Time
import qualified Data.Conduit as C import Lib.Prelude
import qualified Data.HashMap.Strict as H import Network.HTTP.Conduit (Response)
import qualified Data.Text as T import qualified Network.HTTP.Conduit as NC
import qualified Data.Time.Clock as Time import qualified Network.HTTP.Types as HT
import Network.HTTP.Conduit (Response) import Network.HTTP.Types.Header (hHost)
import qualified Network.HTTP.Conduit as NC import Network.Minio.APICommon
import qualified Network.HTTP.Types as HT import Network.Minio.Data
import Network.HTTP.Types.Header (hHost) import Network.Minio.Errors
import Network.Minio.Sign.V4
import Lib.Prelude import Network.Minio.Utils
import Network.Minio.XmlParser
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
import Network.Minio.XmlParser
-- | Fetch bucket location (region) -- | Fetch bucket location (region)
getLocation :: Bucket -> Minio Region getLocation :: Bucket -> Minio Region
getLocation bucket = do getLocation bucket = do
resp <- executeRequest $ defaultS3ReqInfo { resp <-
riBucket = Just bucket executeRequest $
, riQueryParams = [("location", Nothing)] defaultS3ReqInfo
, riNeedsLocation = False { riBucket = Just bucket,
} riQueryParams = [("location", Nothing)],
riNeedsLocation = False
}
parseLocation $ NC.responseBody resp parseLocation $ NC.responseBody resp
-- | Looks for region in RegionMap and updates it using getLocation if -- | Looks for region in RegionMap and updates it using getLocation if
-- absent. -- absent.
discoverRegion :: S3ReqInfo -> Minio (Maybe Region) discoverRegion :: S3ReqInfo -> Minio (Maybe Region)
discoverRegion ri = runMaybeT $ do discoverRegion ri = runMaybeT $ do
bucket <- MaybeT $ return $ riBucket ri bucket <- MaybeT $ return $ riBucket ri
regionMay <- lift $ lookupRegionCache bucket regionMay <- lift $ lookupRegionCache bucket
maybe (do maybe
l <- lift $ getLocation bucket ( do
lift $ addToRegionCache bucket l l <- lift $ getLocation bucket
return l lift $ addToRegionCache bucket l
) return regionMay return l
)
return
regionMay
getRegion :: S3ReqInfo -> Minio (Maybe Region) getRegion :: S3ReqInfo -> Minio (Maybe Region)
getRegion ri = do getRegion ri = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
-- getService/makeBucket/getLocation -- don't need location -- getService/makeBucket/getLocation -- don't need location
if | not $ riNeedsLocation ri -> if
return $ Just $ connectRegion ci | not $ riNeedsLocation ri ->
return $ Just $ connectRegion ci
-- if autodiscovery of location is disabled by user -- if autodiscovery of location is disabled by user
| not $ connectAutoDiscoverRegion ci -> | not $ connectAutoDiscoverRegion ci ->
return $ Just $ connectRegion ci return $ Just $ connectRegion ci
-- discover the region for the request
-- discover the region for the request | otherwise -> discoverRegion ri
| otherwise -> discoverRegion ri
getRegionHost :: Region -> Minio Text getRegionHost :: Region -> Minio Text
getRegionHost r = do getRegionHost r = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
if "amazonaws.com" `T.isSuffixOf` connectHost ci if "amazonaws.com" `T.isSuffixOf` connectHost ci
then maybe (throwIO $ MErrVRegionNotSupported r) then
return (H.lookup r awsRegionMap) maybe
(throwIO $ MErrVRegionNotSupported r)
return
(H.lookup r awsRegionMap)
else return $ connectHost ci else return $ connectHost ci
buildRequest :: S3ReqInfo -> Minio NC.Request buildRequest :: S3ReqInfo -> Minio NC.Request
buildRequest ri = do buildRequest ri = do
maybe (return ()) checkBucketNameValidity $ riBucket ri maybe (return ()) checkBucketNameValidity $ riBucket ri
maybe (return ()) checkObjectNameValidity $ riObject ri maybe (return ()) checkObjectNameValidity $ riObject ri
ci <- asks mcConnInfo ci <- asks mcConnInfo
regionMay <- getRegion ri regionMay <- getRegion ri
regionHost <- maybe (return $ connectHost ci) getRegionHost regionMay regionHost <- maybe (return $ connectHost ci) getRegionHost regionMay
let ri' = ri { riHeaders = hostHeader : riHeaders ri let ri' =
, riRegion = regionMay ri
} { riHeaders = hostHeader : riHeaders ri,
ci' = ci { connectHost = regionHost } riRegion = regionMay
hostHeader = (hHost, getHostAddr ci') }
ci' = ci {connectHost = regionHost}
hostHeader = (hHost, getHostAddr ci')
-- Does not contain body and auth info.
baseRequest =
NC.defaultRequest
{ NC.method = riMethod ri',
NC.secure = connectIsSecure ci',
NC.host = encodeUtf8 $ connectHost ci',
NC.port = connectPort ci',
NC.path = getS3Path (riBucket ri') (riObject ri'),
NC.requestHeaders = riHeaders ri',
NC.queryString = HT.renderQuery False $ riQueryParams ri'
}
-- Does not contain body and auth info. timeStamp <- liftIO Time.getCurrentTime
baseRequest = NC.defaultRequest
{ NC.method = riMethod ri'
, NC.secure = connectIsSecure ci'
, NC.host = encodeUtf8 $ connectHost ci'
, NC.port = connectPort ci'
, NC.path = getS3Path (riBucket ri') (riObject ri')
, NC.requestHeaders = riHeaders ri'
, NC.queryString = HT.renderQuery False $ riQueryParams ri'
}
timeStamp <- liftIO Time.getCurrentTime let sp =
SignParams
(connectAccessKey ci')
(connectSecretKey ci')
timeStamp
(riRegion ri')
Nothing
Nothing
let sp = SignParams (connectAccessKey ci') (connectSecretKey ci') -- Cases to handle:
timeStamp (riRegion ri') Nothing Nothing --
-- 1. Connection is secure: use unsigned payload
--
-- 2. Insecure connection, streaming signature is enabled via use of
-- conduit payload: use streaming signature for request.
--
-- 3. Insecure connection, non-conduit payload: compute payload
-- sha256hash, buffer request in memory and perform request.
-- Cases to handle: -- case 2 from above.
-- if
-- 1. Connection is secure: use unsigned payload | isStreamingPayload (riPayload ri')
-- && (not $ connectIsSecure ci') -> do
-- 2. Insecure connection, streaming signature is enabled via use of (pLen, pSrc) <- case riPayload ri of
-- conduit payload: use streaming signature for request. PayloadC l src -> return (l, src)
-- _ -> throwIO MErrVUnexpectedPayload
-- 3. Insecure connection, non-conduit payload: compute payload let reqFn = signV4Stream pLen sp baseRequest
-- sha256hash, buffer request in memory and perform request. return $ reqFn pSrc
| otherwise -> do
-- case 2 from above. -- case 1 described above.
if | isStreamingPayload (riPayload ri') && sp' <-
(not $ connectIsSecure ci') -> do if
(pLen, pSrc) <- case riPayload ri of | connectIsSecure ci' -> return sp
PayloadC l src -> return (l, src) -- case 3 described above.
_ -> throwIO MErrVUnexpectedPayload | otherwise -> do
let reqFn = signV4Stream pLen sp baseRequest pHash <- getPayloadSHA256Hash $ riPayload ri'
return $ reqFn pSrc return $ sp {spPayloadHash = Just pHash}
| otherwise -> do
-- case 1 described above.
sp' <- if | connectIsSecure ci' -> return sp
-- case 3 described above.
| otherwise -> do
pHash <- getPayloadSHA256Hash $ riPayload ri'
return $ sp { spPayloadHash = Just pHash }
let signHeaders = signV4 sp' baseRequest
return $ baseRequest
{ NC.requestHeaders =
NC.requestHeaders baseRequest ++
mkHeaderFromPairs signHeaders
, NC.requestBody = getRequestBody (riPayload ri')
}
let signHeaders = signV4 sp' baseRequest
return $
baseRequest
{ NC.requestHeaders =
NC.requestHeaders baseRequest
++ mkHeaderFromPairs signHeaders,
NC.requestBody = getRequestBody (riPayload ri')
}
retryAPIRequest :: Minio a -> Minio a retryAPIRequest :: Minio a -> Minio a
retryAPIRequest apiCall = do retryAPIRequest apiCall = do
resE <- retrying retryPolicy (const shouldRetry) $ resE <-
const $ try apiCall retrying retryPolicy (const shouldRetry)
$ const
$ try apiCall
either throwIO return resE either throwIO return resE
where where
-- Retry using the full-jitter backoff method for up to 10 mins -- Retry using the full-jitter backoff method for up to 10 mins
-- total -- total
retryPolicy = limitRetriesByCumulativeDelay tenMins retryPolicy =
$ fullJitterBackoff oneMilliSecond limitRetriesByCumulativeDelay tenMins $
fullJitterBackoff oneMilliSecond
oneMilliSecond = 1000 -- in microseconds oneMilliSecond = 1000 -- in microseconds
tenMins = 10 * 60 * 1000000 -- in microseconds tenMins = 10 * 60 * 1000000 -- in microseconds
-- retry on connection related failure -- retry on connection related failure
@ -189,23 +207,23 @@ retryAPIRequest apiCall = do
-- API request failed with a retryable exception -- API request failed with a retryable exception
Left httpExn@(NC.HttpExceptionRequest _ exn) -> Left httpExn@(NC.HttpExceptionRequest _ exn) ->
case (exn :: NC.HttpExceptionContent) of case (exn :: NC.HttpExceptionContent) of
NC.ResponseTimeout -> return True NC.ResponseTimeout -> return True
NC.ConnectionTimeout -> return True NC.ConnectionTimeout -> return True
NC.ConnectionFailure _ -> return True NC.ConnectionFailure _ -> return True
-- We received an unexpected exception -- We received an unexpected exception
_ -> throwIO httpExn _ -> throwIO httpExn
-- We received an unexpected exception -- We received an unexpected exception
Left someOtherExn -> throwIO someOtherExn Left someOtherExn -> throwIO someOtherExn
executeRequest :: S3ReqInfo -> Minio (Response LByteString) executeRequest :: S3ReqInfo -> Minio (Response LByteString)
executeRequest ri = do executeRequest ri = do
req <- buildRequest ri req <- buildRequest ri
mgr <- asks mcConnManager mgr <- asks mcConnManager
retryAPIRequest $ httpLbs req mgr retryAPIRequest $ httpLbs req mgr
mkStreamRequest :: S3ReqInfo mkStreamRequest ::
-> Minio (Response (C.ConduitM () ByteString Minio ())) S3ReqInfo ->
Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest ri = do mkStreamRequest ri = do
req <- buildRequest ri req <- buildRequest ri
mgr <- asks mcConnManager mgr <- asks mcConnManager
@ -214,35 +232,43 @@ mkStreamRequest ri = do
-- Bucket name validity check according to AWS rules. -- Bucket name validity check according to AWS rules.
isValidBucketName :: Bucket -> Bool isValidBucketName :: Bucket -> Bool
isValidBucketName bucket = isValidBucketName bucket =
not (or [ len < 3 || len > 63 not
, or (map labelCheck labels) ( or
, or (map labelCharsCheck labels) [ len < 3 || len > 63,
, isIPCheck or (map labelCheck labels),
]) or (map labelCharsCheck labels),
isIPCheck
]
)
where where
len = T.length bucket len = T.length bucket
labels = T.splitOn "." bucket labels = T.splitOn "." bucket
-- does label `l` fail basic checks of length and start/end? -- does label `l` fail basic checks of length and start/end?
labelCheck l = T.length l == 0 || T.head l == '-' || T.last l == '-' labelCheck l = T.length l == 0 || T.head l == '-' || T.last l == '-'
-- does label `l` have non-allowed characters? -- does label `l` have non-allowed characters?
labelCharsCheck l = isJust $ T.find (\x -> not (C.isAsciiLower x || labelCharsCheck l =
x == '-' || isJust $
C.isDigit x)) l T.find
( \x ->
not
( C.isAsciiLower x
|| x == '-'
|| C.isDigit x
)
)
l
-- does label `l` have non-digit characters? -- does label `l` have non-digit characters?
labelNonDigits l = isJust $ T.find (not . C.isDigit) l labelNonDigits l = isJust $ T.find (not . C.isDigit) l
labelAsNums = map (not . labelNonDigits) labels labelAsNums = map (not . labelNonDigits) labels
-- check if bucket name looks like an IP -- check if bucket name looks like an IP
isIPCheck = and labelAsNums && length labelAsNums == 4 isIPCheck = and labelAsNums && length labelAsNums == 4
-- Throws exception iff bucket name is invalid according to AWS rules. -- Throws exception iff bucket name is invalid according to AWS rules.
checkBucketNameValidity :: MonadIO m => Bucket -> m () checkBucketNameValidity :: MonadIO m => Bucket -> m ()
checkBucketNameValidity bucket = checkBucketNameValidity bucket =
when (not $ isValidBucketName bucket) $ when (not $ isValidBucketName bucket)
throwIO $ MErrVInvalidBucketName bucket $ throwIO
$ MErrVInvalidBucketName bucket
isValidObjectName :: Object -> Bool isValidObjectName :: Object -> Bool
isValidObjectName object = isValidObjectName object =
@ -250,5 +276,6 @@ isValidObjectName object =
checkObjectNameValidity :: MonadIO m => Object -> m () checkObjectNameValidity :: MonadIO m => Object -> m ()
checkObjectNameValidity object = checkObjectNameValidity object =
when (not $ isValidObjectName object) $ when (not $ isValidObjectName object)
throwIO $ MErrVInvalidObjectName object $ throwIO
$ MErrVInvalidObjectName object

View File

@ -16,37 +16,38 @@
module Network.Minio.APICommon where module Network.Minio.APICommon where
import qualified Conduit as C import qualified Conduit as C
import qualified Data.ByteString as BS import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.Conduit.Binary (sourceHandleRange) import Data.Conduit.Binary (sourceHandleRange)
import qualified Network.HTTP.Conduit as NC import Lib.Prelude
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Lib.Prelude import Network.Minio.Data
import Network.Minio.Data.Crypto
import Network.Minio.Data import Network.Minio.Errors
import Network.Minio.Data.Crypto
import Network.Minio.Errors
sha256Header :: ByteString -> HT.Header sha256Header :: ByteString -> HT.Header
sha256Header = ("x-amz-content-sha256", ) sha256Header = ("x-amz-content-sha256",)
-- | This function throws an error if the payload is a conduit (as it -- | This function throws an error if the payload is a conduit (as it
-- will not be possible to re-read the conduit after it is consumed). -- will not be possible to re-read the conduit after it is consumed).
getPayloadSHA256Hash :: Payload -> Minio ByteString getPayloadSHA256Hash :: Payload -> Minio ByteString
getPayloadSHA256Hash (PayloadBS bs) = return $ hashSHA256 bs getPayloadSHA256Hash (PayloadBS bs) = return $ hashSHA256 bs
getPayloadSHA256Hash (PayloadH h off size) = hashSHA256FromSource $ getPayloadSHA256Hash (PayloadH h off size) =
sourceHandleRange h hashSHA256FromSource $
(return . fromIntegral $ off) sourceHandleRange
(return . fromIntegral $ size) h
(return . fromIntegral $ off)
(return . fromIntegral $ size)
getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload
getRequestBody :: Payload -> NC.RequestBody getRequestBody :: Payload -> NC.RequestBody
getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs
getRequestBody (PayloadH h off size) = getRequestBody (PayloadH h off size) =
NC.requestBodySource (fromIntegral size) $ NC.requestBodySource (fromIntegral size) $
sourceHandleRange h sourceHandleRange
h
(return . fromIntegral $ off) (return . fromIntegral $ off)
(return . fromIntegral $ size) (return . fromIntegral $ size)
getRequestBody (PayloadC n src) = NC.requestBodySource n src getRequestBody (PayloadC n src) = NC.requestBodySource n src
@ -55,14 +56,17 @@ mkStreamingPayload :: Payload -> Payload
mkStreamingPayload payload = mkStreamingPayload payload =
case payload of case payload of
PayloadBS bs -> PayloadBS bs ->
PayloadC (fromIntegral $ BS.length bs) PayloadC
(fromIntegral $ BS.length bs)
(C.sourceLazy $ LB.fromStrict bs) (C.sourceLazy $ LB.fromStrict bs)
PayloadH h off len -> PayloadH h off len ->
PayloadC len $ sourceHandleRange h PayloadC len $
(return . fromIntegral $ off) sourceHandleRange
(return . fromIntegral $ len) h
(return . fromIntegral $ off)
(return . fromIntegral $ len)
_ -> payload _ -> payload
isStreamingPayload :: Payload -> Bool isStreamingPayload :: Payload -> Bool
isStreamingPayload (PayloadC _ _) = True isStreamingPayload (PayloadC _ _) = True
isStreamingPayload _ = False isStreamingPayload _ = False

View File

@ -16,270 +16,304 @@
module Network.Minio.AdminAPI module Network.Minio.AdminAPI
( -- * MinIO Admin API ( -- * MinIO Admin API
-------------------- --------------------
-- | Provides MinIO admin API and related types. It is in -- | Provides MinIO admin API and related types. It is in
-- experimental state. -- experimental state.
DriveInfo(..) DriveInfo (..),
, ErasureInfo(..) ErasureInfo (..),
, Backend(..) Backend (..),
, ConnStats(..) ConnStats (..),
, HttpStats(..) HttpStats (..),
, ServerProps(..) ServerProps (..),
, CountNAvgTime(..) CountNAvgTime (..),
, StorageClass(..) StorageClass (..),
, StorageInfo(..) StorageInfo (..),
, SIData(..) SIData (..),
, ServerInfo(..) ServerInfo (..),
, getServerInfo getServerInfo,
HealOpts (..),
HealResultItem (..),
HealStatus (..),
HealStartResp (..),
startHeal,
forceStartHeal,
getHealStatus,
SetConfigResult (..),
NodeSummary (..),
setConfig,
getConfig,
ServerVersion (..),
ServiceStatus (..),
serviceStatus,
ServiceAction (..),
serviceSendAction,
)
where
, HealOpts(..) import Data.Aeson
, HealResultItem(..) ( (.:),
, HealStatus(..) (.:?),
, HealStartResp(..) (.=),
, startHeal FromJSON,
, forceStartHeal ToJSON,
, getHealStatus Value (Object),
eitherDecode,
, SetConfigResult(..) object,
, NodeSummary(..) pairs,
, setConfig parseJSON,
, getConfig toEncoding,
toJSON,
, ServerVersion(..) withObject,
, ServiceStatus(..) withText,
, serviceStatus )
import qualified Data.Aeson as A
, ServiceAction(..) import Data.Aeson.Types (typeMismatch)
, serviceSendAction import qualified Data.ByteString as B
) where import qualified Data.ByteString.Lazy as LBS
import qualified Data.Text as T
import Data.Aeson (FromJSON, ToJSON, Value (Object), import Data.Time (NominalDiffTime, getCurrentTime)
eitherDecode, object, pairs, import Lib.Prelude
parseJSON, toEncoding, toJSON, import Network.HTTP.Conduit (Response)
withObject, withText, (.:), (.:?), import qualified Network.HTTP.Conduit as NC
(.=)) import qualified Network.HTTP.Types as HT
import qualified Data.Aeson as A import Network.HTTP.Types.Header (hHost)
import Data.Aeson.Types (typeMismatch) import Network.Minio.APICommon
import qualified Data.ByteString as B import Network.Minio.Data
import qualified Data.ByteString.Lazy as LBS import Network.Minio.Errors
import qualified Data.Text as T import Network.Minio.Sign.V4
import Data.Time (NominalDiffTime, getCurrentTime) import Network.Minio.Utils
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
data DriveInfo = DriveInfo data DriveInfo = DriveInfo
{ diUuid :: Text { diUuid :: Text,
, diEndpoint :: Text diEndpoint :: Text,
, diState :: Text diState :: Text
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON DriveInfo where instance FromJSON DriveInfo where
parseJSON = withObject "DriveInfo" $ \v -> DriveInfo parseJSON = withObject "DriveInfo" $ \v ->
<$> v .: "uuid" DriveInfo
<*> v .: "endpoint" <$> v .: "uuid"
<*> v .: "state" <*> v .: "endpoint"
<*> v .: "state"
data StorageClass = StorageClass data StorageClass = StorageClass
{ scParity :: Int { scParity :: Int,
, scData :: Int scData :: Int
} deriving (Eq, Show) }
deriving (Eq, Show)
data ErasureInfo = ErasureInfo data ErasureInfo = ErasureInfo
{ eiOnlineDisks :: Int { eiOnlineDisks :: Int,
, eiOfflineDisks :: Int eiOfflineDisks :: Int,
, eiStandard :: StorageClass eiStandard :: StorageClass,
, eiReducedRedundancy :: StorageClass eiReducedRedundancy :: StorageClass,
, eiSets :: [[DriveInfo]] eiSets :: [[DriveInfo]]
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON ErasureInfo where instance FromJSON ErasureInfo where
parseJSON = withObject "ErasureInfo" $ \v -> do parseJSON = withObject "ErasureInfo" $ \v -> do
onlineDisks <- v .: "OnlineDisks" onlineDisks <- v .: "OnlineDisks"
offlineDisks <- v .: "OfflineDisks" offlineDisks <- v .: "OfflineDisks"
stdClass <- StorageClass stdClass <-
<$> v .: "StandardSCData" StorageClass
<*> v .: "StandardSCParity" <$> v .: "StandardSCData"
rrClass <- StorageClass <*> v .: "StandardSCParity"
<$> v .: "RRSCData" rrClass <-
<*> v .: "RRSCParity" StorageClass
sets <- v .: "Sets" <$> v .: "RRSCData"
return $ ErasureInfo onlineDisks offlineDisks stdClass rrClass sets <*> v .: "RRSCParity"
sets <- v .: "Sets"
return $ ErasureInfo onlineDisks offlineDisks stdClass rrClass sets
data Backend = BackendFS data Backend
| BackendErasure ErasureInfo = BackendFS
deriving (Eq, Show) | BackendErasure ErasureInfo
deriving (Eq, Show)
instance FromJSON Backend where instance FromJSON Backend where
parseJSON = withObject "Backend" $ \v -> do parseJSON = withObject "Backend" $ \v -> do
typ <- v .: "Type" typ <- v .: "Type"
case typ :: Int of case typ :: Int of
1 -> return BackendFS 1 -> return BackendFS
2 -> BackendErasure <$> parseJSON (Object v) 2 -> BackendErasure <$> parseJSON (Object v)
_ -> typeMismatch "BackendType" (Object v) _ -> typeMismatch "BackendType" (Object v)
data ConnStats = ConnStats data ConnStats = ConnStats
{ csTransferred :: Int64 { csTransferred :: Int64,
, csReceived :: Int64 csReceived :: Int64
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON ConnStats where instance FromJSON ConnStats where
parseJSON = withObject "ConnStats" $ \v -> ConnStats parseJSON = withObject "ConnStats" $ \v ->
<$> v .: "transferred" ConnStats
<*> v .: "received" <$> v .: "transferred"
<*> v .: "received"
data ServerProps = ServerProps data ServerProps = ServerProps
{ spUptime :: NominalDiffTime { spUptime :: NominalDiffTime,
, spVersion :: Text spVersion :: Text,
, spCommitId :: Text spCommitId :: Text,
, spRegion :: Text spRegion :: Text,
, spSqsArns :: [Text] spSqsArns :: [Text]
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON ServerProps where instance FromJSON ServerProps where
parseJSON = withObject "SIServer" $ \v -> do parseJSON = withObject "SIServer" $ \v -> do
uptimeNs <- v .: "uptime" uptimeNs <- v .: "uptime"
let uptime = uptimeNs / 1e9 let uptime = uptimeNs / 1e9
ver <- v .: "version" ver <- v .: "version"
commitId <- v .: "commitID" commitId <- v .: "commitID"
region <- v .: "region" region <- v .: "region"
arn <- v .: "sqsARN" arn <- v .: "sqsARN"
return $ ServerProps uptime ver commitId region arn return $ ServerProps uptime ver commitId region arn
data StorageInfo = StorageInfo data StorageInfo = StorageInfo
{ siUsed :: Int64 { siUsed :: Int64,
, siBackend :: Backend siBackend :: Backend
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON StorageInfo where instance FromJSON StorageInfo where
parseJSON = withObject "StorageInfo" $ \v -> StorageInfo parseJSON = withObject "StorageInfo" $ \v ->
<$> v .: "Used" StorageInfo
<*> v .: "Backend" <$> v .: "Used"
<*> v .: "Backend"
data CountNAvgTime = CountNAvgTime data CountNAvgTime = CountNAvgTime
{ caCount :: Int64 { caCount :: Int64,
, caAvgDuration :: Text caAvgDuration :: Text
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON CountNAvgTime where instance FromJSON CountNAvgTime where
parseJSON = withObject "CountNAvgTime" $ \v -> CountNAvgTime parseJSON = withObject "CountNAvgTime" $ \v ->
<$> v .: "count" CountNAvgTime
<*> v .: "avgDuration" <$> v .: "count"
<*> v .: "avgDuration"
data HttpStats = HttpStats data HttpStats = HttpStats
{ hsTotalHeads :: CountNAvgTime { hsTotalHeads :: CountNAvgTime,
, hsSuccessHeads :: CountNAvgTime hsSuccessHeads :: CountNAvgTime,
, hsTotalGets :: CountNAvgTime hsTotalGets :: CountNAvgTime,
, hsSuccessGets :: CountNAvgTime hsSuccessGets :: CountNAvgTime,
, hsTotalPuts :: CountNAvgTime hsTotalPuts :: CountNAvgTime,
, hsSuccessPuts :: CountNAvgTime hsSuccessPuts :: CountNAvgTime,
, hsTotalPosts :: CountNAvgTime hsTotalPosts :: CountNAvgTime,
, hsSuccessPosts :: CountNAvgTime hsSuccessPosts :: CountNAvgTime,
, hsTotalDeletes :: CountNAvgTime hsTotalDeletes :: CountNAvgTime,
, hsSuccessDeletes :: CountNAvgTime hsSuccessDeletes :: CountNAvgTime
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON HttpStats where instance FromJSON HttpStats where
parseJSON = withObject "HttpStats" $ \v -> HttpStats parseJSON = withObject "HttpStats" $ \v ->
<$> v .: "totalHEADs" HttpStats
<*> v .: "successHEADs" <$> v .: "totalHEADs"
<*> v .: "totalGETs" <*> v .: "successHEADs"
<*> v .: "successGETs" <*> v .: "totalGETs"
<*> v .: "totalPUTs" <*> v .: "successGETs"
<*> v .: "successPUTs" <*> v .: "totalPUTs"
<*> v .: "totalPOSTs" <*> v .: "successPUTs"
<*> v .: "successPOSTs" <*> v .: "totalPOSTs"
<*> v .: "totalDELETEs" <*> v .: "successPOSTs"
<*> v .: "successDELETEs" <*> v .: "totalDELETEs"
<*> v .: "successDELETEs"
data SIData = SIData data SIData = SIData
{ sdStorage :: StorageInfo { sdStorage :: StorageInfo,
, sdConnStats :: ConnStats sdConnStats :: ConnStats,
, sdHttpStats :: HttpStats sdHttpStats :: HttpStats,
, sdProps :: ServerProps sdProps :: ServerProps
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON SIData where instance FromJSON SIData where
parseJSON = withObject "SIData" $ \v -> SIData parseJSON = withObject "SIData" $ \v ->
<$> v .: "storage" SIData
<*> v .: "network" <$> v .: "storage"
<*> v .: "http" <*> v .: "network"
<*> v .: "server" <*> v .: "http"
<*> v .: "server"
data ServerInfo = ServerInfo data ServerInfo = ServerInfo
{ siError :: Text { siError :: Text,
, siAddr :: Text siAddr :: Text,
, siData :: SIData siData :: SIData
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON ServerInfo where instance FromJSON ServerInfo where
parseJSON = withObject "ServerInfo" $ \v -> ServerInfo parseJSON = withObject "ServerInfo" $ \v ->
<$> v .: "error" ServerInfo
<*> v .: "addr" <$> v .: "error"
<*> v .: "data" <*> v .: "addr"
<*> v .: "data"
data ServerVersion = ServerVersion data ServerVersion = ServerVersion
{ svVersion :: Text { svVersion :: Text,
, svCommitId :: Text svCommitId :: Text
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON ServerVersion where instance FromJSON ServerVersion where
parseJSON = withObject "ServerVersion" $ \v -> ServerVersion parseJSON = withObject "ServerVersion" $ \v ->
ServerVersion
<$> v .: "version" <$> v .: "version"
<*> v .: "commitID" <*> v .: "commitID"
data ServiceStatus = ServiceStatus data ServiceStatus = ServiceStatus
{ ssVersion :: ServerVersion { ssVersion :: ServerVersion,
, ssUptime :: NominalDiffTime ssUptime :: NominalDiffTime
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON ServiceStatus where instance FromJSON ServiceStatus where
parseJSON = withObject "ServiceStatus" $ \v -> do parseJSON = withObject "ServiceStatus" $ \v -> do
serverVersion <- v .: "serverVersion" serverVersion <- v .: "serverVersion"
uptimeNs <- v .: "uptime" uptimeNs <- v .: "uptime"
let uptime = uptimeNs / 1e9 let uptime = uptimeNs / 1e9
return $ ServiceStatus serverVersion uptime return $ ServiceStatus serverVersion uptime
data ServiceAction = ServiceActionRestart data ServiceAction
| ServiceActionStop = ServiceActionRestart
deriving (Eq, Show) | ServiceActionStop
deriving (Eq, Show)
instance ToJSON ServiceAction where instance ToJSON ServiceAction where
toJSON a = object [ "action" .= serviceActionToText a ] toJSON a = object ["action" .= serviceActionToText a]
serviceActionToText :: ServiceAction -> Text serviceActionToText :: ServiceAction -> Text
serviceActionToText a = case a of serviceActionToText a = case a of
ServiceActionRestart -> "restart" ServiceActionRestart -> "restart"
ServiceActionStop -> "stop" ServiceActionStop -> "stop"
adminPath :: ByteString adminPath :: ByteString
adminPath = "/minio/admin" adminPath = "/minio/admin"
data HealStartResp = HealStartResp data HealStartResp = HealStartResp
{ hsrClientToken :: Text { hsrClientToken :: Text,
, hsrClientAddr :: Text hsrClientAddr :: Text,
, hsrStartTime :: UTCTime hsrStartTime :: UTCTime
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON HealStartResp where instance FromJSON HealStartResp where
parseJSON = withObject "HealStartResp" $ \v -> HealStartResp parseJSON = withObject "HealStartResp" $ \v ->
<$> v .: "clientToken" HealStartResp
<*> v .: "clientAddress" <$> v .: "clientToken"
<*> v .: "startTime" <*> v .: "clientAddress"
<*> v .: "startTime"
data HealOpts = HealOpts data HealOpts = HealOpts
{ hoRecursive :: Bool { hoRecursive :: Bool,
, hoDryRun :: Bool hoDryRun :: Bool
} deriving (Eq, Show) }
deriving (Eq, Show)
instance ToJSON HealOpts where instance ToJSON HealOpts where
toJSON (HealOpts r d) = toJSON (HealOpts r d) =
@ -288,197 +322,234 @@ instance ToJSON HealOpts where
pairs ("recursive" .= r <> "dryRun" .= d) pairs ("recursive" .= r <> "dryRun" .= d)
instance FromJSON HealOpts where instance FromJSON HealOpts where
parseJSON = withObject "HealOpts" $ \v -> HealOpts parseJSON = withObject "HealOpts" $ \v ->
HealOpts
<$> v .: "recursive" <$> v .: "recursive"
<*> v .: "dryRun" <*> v .: "dryRun"
data HealItemType = HealItemMetadata data HealItemType
| HealItemBucket = HealItemMetadata
| HealItemBucketMetadata | HealItemBucket
| HealItemObject | HealItemBucketMetadata
deriving (Eq, Show) | HealItemObject
deriving (Eq, Show)
instance FromJSON HealItemType where instance FromJSON HealItemType where
parseJSON = withText "HealItemType" $ \v -> case v of parseJSON = withText "HealItemType" $ \v -> case v of
"metadata" -> return HealItemMetadata "metadata" -> return HealItemMetadata
"bucket" -> return HealItemBucket "bucket" -> return HealItemBucket
"object" -> return HealItemObject "object" -> return HealItemObject
"bucket-metadata" -> return HealItemBucketMetadata "bucket-metadata" -> return HealItemBucketMetadata
_ -> typeMismatch "HealItemType" (A.String v) _ -> typeMismatch "HealItemType" (A.String v)
data NodeSummary = NodeSummary data NodeSummary = NodeSummary
{ nsName :: Text { nsName :: Text,
, nsErrSet :: Bool nsErrSet :: Bool,
, nsErrMessage :: Text nsErrMessage :: Text
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON NodeSummary where instance FromJSON NodeSummary where
parseJSON = withObject "NodeSummary" $ \v -> NodeSummary parseJSON = withObject "NodeSummary" $ \v ->
<$> v .: "name" NodeSummary
<*> v .: "errSet" <$> v .: "name"
<*> v .: "errMsg" <*> v .: "errSet"
<*> v .: "errMsg"
data SetConfigResult = SetConfigResult data SetConfigResult = SetConfigResult
{ scrStatus :: Bool { scrStatus :: Bool,
, scrNodeSummary :: [NodeSummary] scrNodeSummary :: [NodeSummary]
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON SetConfigResult where instance FromJSON SetConfigResult where
parseJSON = withObject "SetConfigResult" $ \v -> SetConfigResult parseJSON = withObject "SetConfigResult" $ \v ->
<$> v .: "status" SetConfigResult
<*> v .: "nodeResults" <$> v .: "status"
<*> v .: "nodeResults"
data HealResultItem = HealResultItem data HealResultItem = HealResultItem
{ hriResultIdx :: Int { hriResultIdx :: Int,
, hriType :: HealItemType hriType :: HealItemType,
, hriBucket :: Bucket hriBucket :: Bucket,
, hriObject :: Object hriObject :: Object,
, hriDetail :: Text hriDetail :: Text,
, hriParityBlocks :: Maybe Int hriParityBlocks :: Maybe Int,
, hriDataBlocks :: Maybe Int hriDataBlocks :: Maybe Int,
, hriDiskCount :: Int hriDiskCount :: Int,
, hriSetCount :: Int hriSetCount :: Int,
, hriObjectSize :: Int hriObjectSize :: Int,
, hriBefore :: [DriveInfo] hriBefore :: [DriveInfo],
, hriAfter :: [DriveInfo] hriAfter :: [DriveInfo]
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON HealResultItem where instance FromJSON HealResultItem where
parseJSON = withObject "HealResultItem" $ \v -> HealResultItem parseJSON = withObject "HealResultItem" $ \v ->
<$> v .: "resultId" HealResultItem
<*> v .: "type" <$> v .: "resultId"
<*> v .: "bucket" <*> v .: "type"
<*> v .: "object" <*> v .: "bucket"
<*> v .: "detail" <*> v .: "object"
<*> v .:? "parityBlocks" <*> v .: "detail"
<*> v .:? "dataBlocks" <*> v .:? "parityBlocks"
<*> v .: "diskCount" <*> v .:? "dataBlocks"
<*> v .: "setCount" <*> v .: "diskCount"
<*> v .: "objectSize" <*> v .: "setCount"
<*> (do before <- v .: "before" <*> v .: "objectSize"
before .: "drives") <*> ( do
<*> (do after <- v .: "after" before <- v .: "before"
after .: "drives") before .: "drives"
)
<*> ( do
after <- v .: "after"
after .: "drives"
)
data HealStatus = HealStatus data HealStatus = HealStatus
{ hsSummary :: Text { hsSummary :: Text,
, hsStartTime :: UTCTime hsStartTime :: UTCTime,
, hsSettings :: HealOpts hsSettings :: HealOpts,
, hsNumDisks :: Int hsNumDisks :: Int,
, hsFailureDetail :: Maybe Text hsFailureDetail :: Maybe Text,
, hsItems :: Maybe [HealResultItem] hsItems :: Maybe [HealResultItem]
} deriving (Eq, Show) }
deriving (Eq, Show)
instance FromJSON HealStatus where instance FromJSON HealStatus where
parseJSON = withObject "HealStatus" $ \v -> HealStatus parseJSON = withObject "HealStatus" $ \v ->
<$> v .: "Summary" HealStatus
<*> v .: "StartTime" <$> v .: "Summary"
<*> v .: "Settings" <*> v .: "StartTime"
<*> v .: "NumDisks" <*> v .: "Settings"
<*> v .:? "Detail" <*> v .: "NumDisks"
<*> v .: "Items" <*> v .:? "Detail"
<*> v .: "Items"
healPath :: Maybe Bucket -> Maybe Text -> ByteString healPath :: Maybe Bucket -> Maybe Text -> ByteString
healPath bucket prefix = do healPath bucket prefix = do
if (isJust bucket) if (isJust bucket)
then encodeUtf8 $ "v1/heal/" <> fromMaybe "" bucket <> "/" then
<> fromMaybe "" prefix encodeUtf8 $
"v1/heal/" <> fromMaybe "" bucket <> "/"
<> fromMaybe "" prefix
else encodeUtf8 $ "v1/heal/" else encodeUtf8 $ "v1/heal/"
-- | Get server version and uptime. -- | Get server version and uptime.
serviceStatus :: Minio ServiceStatus serviceStatus :: Minio ServiceStatus
serviceStatus = do serviceStatus = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodGet rsp <-
, ariPayload = PayloadBS B.empty executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = "v1/service" { ariMethod = HT.methodGet,
, ariHeaders = [] ariPayload = PayloadBS B.empty,
, ariQueryParams = [] ariPayloadHash = Nothing,
} ariPath = "v1/service",
ariHeaders = [],
ariQueryParams = []
}
let rspBS = NC.responseBody rsp let rspBS = NC.responseBody rsp
case eitherDecode rspBS of case eitherDecode rspBS of
Right ss -> return ss Right ss -> return ss
Left err -> throwIO $ MErrVJsonParse $ T.pack err Left err -> throwIO $ MErrVJsonParse $ T.pack err
-- | Send service restart or stop action to MinIO server. -- | Send service restart or stop action to MinIO server.
serviceSendAction :: ServiceAction -> Minio () serviceSendAction :: ServiceAction -> Minio ()
serviceSendAction action = do serviceSendAction action = do
let payload = PayloadBS $ LBS.toStrict $ A.encode action let payload = PayloadBS $ LBS.toStrict $ A.encode action
void $ executeAdminRequest AdminReqInfo { ariMethod = HT.methodPost void $
, ariPayload = payload executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = "v1/service" { ariMethod = HT.methodPost,
, ariHeaders = [] ariPayload = payload,
, ariQueryParams = [] ariPayloadHash = Nothing,
} ariPath = "v1/service",
ariHeaders = [],
ariQueryParams = []
}
-- | Get the current config file from server. -- | Get the current config file from server.
getConfig :: Minio ByteString getConfig :: Minio ByteString
getConfig = do getConfig = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodGet rsp <-
, ariPayload = PayloadBS B.empty executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = "v1/config" { ariMethod = HT.methodGet,
, ariHeaders = [] ariPayload = PayloadBS B.empty,
, ariQueryParams = [] ariPayloadHash = Nothing,
} ariPath = "v1/config",
return $ LBS.toStrict $ NC.responseBody rsp ariHeaders = [],
ariQueryParams = []
}
return $ LBS.toStrict $ NC.responseBody rsp
-- | Set a new config to the server. -- | Set a new config to the server.
setConfig :: ByteString -> Minio SetConfigResult setConfig :: ByteString -> Minio SetConfigResult
setConfig config = do setConfig config = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodPut rsp <-
, ariPayload = PayloadBS config executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = "v1/config" { ariMethod = HT.methodPut,
, ariHeaders = [] ariPayload = PayloadBS config,
, ariQueryParams = [] ariPayloadHash = Nothing,
} ariPath = "v1/config",
ariHeaders = [],
ariQueryParams = []
}
let rspBS = NC.responseBody rsp let rspBS = NC.responseBody rsp
case eitherDecode rspBS of case eitherDecode rspBS of
Right scr -> return scr Right scr -> return scr
Left err -> throwIO $ MErrVJsonParse $ T.pack err Left err -> throwIO $ MErrVJsonParse $ T.pack err
-- | Get the progress of currently running heal task, this API should be -- | Get the progress of currently running heal task, this API should be
-- invoked right after `startHeal`. `token` is obtained after `startHeal` -- invoked right after `startHeal`. `token` is obtained after `startHeal`
-- which should be used to get the heal status. -- which should be used to get the heal status.
getHealStatus :: Maybe Bucket -> Maybe Text -> Text -> Minio HealStatus getHealStatus :: Maybe Bucket -> Maybe Text -> Text -> Minio HealStatus
getHealStatus bucket prefix token = do getHealStatus bucket prefix token = do
when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath
let qparams = HT.queryTextToQuery [("clientToken", Just token)] let qparams = HT.queryTextToQuery [("clientToken", Just token)]
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodPost rsp <-
, ariPayload = PayloadBS B.empty executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = healPath bucket prefix { ariMethod = HT.methodPost,
, ariHeaders = [] ariPayload = PayloadBS B.empty,
, ariQueryParams = qparams ariPayloadHash = Nothing,
} ariPath = healPath bucket prefix,
let rspBS = NC.responseBody rsp ariHeaders = [],
case eitherDecode rspBS of ariQueryParams = qparams
Right hs -> return hs }
Left err -> throwIO $ MErrVJsonParse $ T.pack err let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right hs -> return hs
Left err -> throwIO $ MErrVJsonParse $ T.pack err
doHeal :: Maybe Bucket -> Maybe Text -> HealOpts -> Bool -> Minio HealStartResp doHeal :: Maybe Bucket -> Maybe Text -> HealOpts -> Bool -> Minio HealStartResp
doHeal bucket prefix opts forceStart = do doHeal bucket prefix opts forceStart = do
when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath when (isNothing bucket && isJust prefix) $ throwIO MErrVInvalidHealPath
let payload = PayloadBS $ LBS.toStrict $ A.encode opts let payload = PayloadBS $ LBS.toStrict $ A.encode opts
let qparams = bool [] (HT.queryTextToQuery [("forceStart", Just "true")]) let qparams =
forceStart bool
[]
(HT.queryTextToQuery [("forceStart", Just "true")])
forceStart
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodPost rsp <-
, ariPayload = payload executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = healPath bucket prefix { ariMethod = HT.methodPost,
, ariHeaders = [] ariPayload = payload,
, ariQueryParams = qparams ariPayloadHash = Nothing,
} ariPath = healPath bucket prefix,
ariHeaders = [],
ariQueryParams = qparams
}
let rspBS = NC.responseBody rsp let rspBS = NC.responseBody rsp
case eitherDecode rspBS of case eitherDecode rspBS of
Right hsr -> return hsr Right hsr -> return hsr
Left err -> throwIO $ MErrVJsonParse $ T.pack err Left err -> throwIO $ MErrVJsonParse $ T.pack err
-- | Start a heal sequence that scans data under given (possible empty) -- | Start a heal sequence that scans data under given (possible empty)
-- `bucket` and `prefix`. The `recursive` bool turns on recursive -- `bucket` and `prefix`. The `recursive` bool turns on recursive
@ -500,60 +571,75 @@ forceStartHeal bucket prefix opts = doHeal bucket prefix opts True
-- properties, storage information, network statistics, etc. -- properties, storage information, network statistics, etc.
getServerInfo :: Minio [ServerInfo] getServerInfo :: Minio [ServerInfo]
getServerInfo = do getServerInfo = do
rsp <- executeAdminRequest AdminReqInfo { ariMethod = HT.methodGet rsp <-
, ariPayload = PayloadBS B.empty executeAdminRequest
, ariPayloadHash = Nothing AdminReqInfo
, ariPath = "v1/info" { ariMethod = HT.methodGet,
, ariHeaders = [] ariPayload = PayloadBS B.empty,
, ariQueryParams = [] ariPayloadHash = Nothing,
} ariPath = "v1/info",
let rspBS = NC.responseBody rsp ariHeaders = [],
case eitherDecode rspBS of ariQueryParams = []
Right si -> return si }
Left err -> throwIO $ MErrVJsonParse $ T.pack err let rspBS = NC.responseBody rsp
case eitherDecode rspBS of
Right si -> return si
Left err -> throwIO $ MErrVJsonParse $ T.pack err
executeAdminRequest :: AdminReqInfo -> Minio (Response LByteString) executeAdminRequest :: AdminReqInfo -> Minio (Response LByteString)
executeAdminRequest ari = do executeAdminRequest ari = do
req <- buildAdminRequest ari req <- buildAdminRequest ari
mgr <- asks mcConnManager mgr <- asks mcConnManager
httpLbs req mgr httpLbs req mgr
buildAdminRequest :: AdminReqInfo -> Minio NC.Request buildAdminRequest :: AdminReqInfo -> Minio NC.Request
buildAdminRequest areq = do buildAdminRequest areq = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
sha256Hash <- if | connectIsSecure ci -> sha256Hash <-
-- if secure connection if
return "UNSIGNED-PAYLOAD" | connectIsSecure ci ->
-- if secure connection
return "UNSIGNED-PAYLOAD"
-- otherwise compute sha256
| otherwise -> getPayloadSHA256Hash (ariPayload areq)
-- otherwise compute sha256 timeStamp <- liftIO getCurrentTime
| otherwise -> getPayloadSHA256Hash (ariPayload areq)
timeStamp <- liftIO getCurrentTime let hostHeader = (hHost, getHostAddr ci)
newAreq =
areq
{ ariPayloadHash = Just sha256Hash,
ariHeaders =
hostHeader
: sha256Header sha256Hash
: ariHeaders areq
}
signReq = toRequest ci newAreq
sp =
SignParams
(connectAccessKey ci)
(connectSecretKey ci)
timeStamp
Nothing
Nothing
(ariPayloadHash newAreq)
signHeaders = signV4 sp signReq
let hostHeader = (hHost, getHostAddr ci) -- Update signReq with Authorization header containing v4 signature
newAreq = areq { ariPayloadHash = Just sha256Hash return
, ariHeaders = hostHeader signReq
: sha256Header sha256Hash { NC.requestHeaders = ariHeaders newAreq ++ mkHeaderFromPairs signHeaders
: ariHeaders areq }
}
signReq = toRequest ci newAreq
sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
timeStamp Nothing Nothing (ariPayloadHash newAreq)
signHeaders = signV4 sp signReq
-- Update signReq with Authorization header containing v4 signature
return signReq {
NC.requestHeaders = ariHeaders newAreq ++ mkHeaderFromPairs signHeaders
}
where where
toRequest :: ConnectInfo -> AdminReqInfo -> NC.Request toRequest :: ConnectInfo -> AdminReqInfo -> NC.Request
toRequest ci aReq = NC.defaultRequest toRequest ci aReq =
{ NC.method = ariMethod aReq NC.defaultRequest
, NC.secure = connectIsSecure ci { NC.method = ariMethod aReq,
, NC.host = encodeUtf8 $ connectHost ci NC.secure = connectIsSecure ci,
, NC.port = connectPort ci NC.host = encodeUtf8 $ connectHost ci,
, NC.path = B.intercalate "/" [adminPath, ariPath aReq] NC.port = connectPort ci,
, NC.requestHeaders = ariHeaders aReq NC.path = B.intercalate "/" [adminPath, ariPath aReq],
, NC.queryString = HT.renderQuery False $ ariQueryParams aReq NC.requestHeaders = ariHeaders aReq,
, NC.requestBody = getRequestBody (ariPayload aReq) NC.queryString = HT.renderQuery False $ ariQueryParams aReq,
NC.requestBody = getRequestBody (ariPayload aReq)
} }

View File

@ -16,19 +16,19 @@
module Network.Minio.CopyObject where module Network.Minio.CopyObject where
import qualified Data.List as List import qualified Data.List as List
import Lib.Prelude
import Lib.Prelude import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Data import Network.Minio.S3API
import Network.Minio.Errors import Network.Minio.Utils
import Network.Minio.S3API
import Network.Minio.Utils
-- | Copy an object using single or multipart copy strategy. -- | Copy an object using single or multipart copy strategy.
copyObjectInternal :: Bucket -> Object -> SourceInfo copyObjectInternal ::
-> Minio ETag Bucket ->
Object ->
SourceInfo ->
Minio ETag
copyObjectInternal b' o srcInfo = do copyObjectInternal b' o srcInfo = do
let sBucket = srcBucket srcInfo let sBucket = srcBucket srcInfo
sObject = srcObject srcInfo sObject = srcObject srcInfo
@ -43,27 +43,35 @@ copyObjectInternal b' o srcInfo = do
startOffset = fst range startOffset = fst range
endOffset = snd range endOffset = snd range
when (isJust rangeMay && when
or [startOffset < 0, endOffset < startOffset, ( isJust rangeMay
endOffset >= fromIntegral srcSize]) $ && or
throwIO $ MErrVInvalidSrcObjByteRange range [ startOffset < 0,
endOffset < startOffset,
endOffset >= fromIntegral srcSize
]
)
$ throwIO
$ MErrVInvalidSrcObjByteRange range
-- 1. If sz > 64MiB (minPartSize) use multipart copy, OR -- 1. If sz > 64MiB (minPartSize) use multipart copy, OR
-- 2. If startOffset /= 0 use multipart copy -- 2. If startOffset /= 0 use multipart copy
let destSize = (\(a, b) -> b - a + 1 ) $ let destSize =
maybe (0, srcSize - 1) identity rangeMay (\(a, b) -> b - a + 1) $
maybe (0, srcSize - 1) identity rangeMay
if destSize > minPartSize || (endOffset - startOffset + 1 /= srcSize) if destSize > minPartSize || (endOffset - startOffset + 1 /= srcSize)
then multiPartCopyObject b' o srcInfo srcSize then multiPartCopyObject b' o srcInfo srcSize
else fst <$> copyObjectSingle b' o srcInfo {srcRange = Nothing} []
else fst <$> copyObjectSingle b' o srcInfo{srcRange = Nothing} []
-- | Given the input byte range of the source object, compute the -- | Given the input byte range of the source object, compute the
-- splits for a multipart copy object procedure. Minimum part size -- splits for a multipart copy object procedure. Minimum part size
-- used is minPartSize. -- used is minPartSize.
selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))] selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
selectCopyRanges (st, end) = zip pns $ selectCopyRanges (st, end) =
map (\(x, y) -> (st + x, st + x + y - 1)) $ zip startOffsets partSizes zip pns
$ map (\(x, y) -> (st + x, st + x + y - 1))
$ zip startOffsets partSizes
where where
size = end - st + 1 size = end - st + 1
(pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size (pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
@ -71,22 +79,30 @@ selectCopyRanges (st, end) = zip pns $
-- | Perform a multipart copy object action. Since we cannot verify -- | Perform a multipart copy object action. Since we cannot verify
-- existing parts based on the source object, there is no resuming -- existing parts based on the source object, there is no resuming
-- copy action support. -- copy action support.
multiPartCopyObject :: Bucket -> Object -> SourceInfo -> Int64 multiPartCopyObject ::
-> Minio ETag Bucket ->
Object ->
SourceInfo ->
Int64 ->
Minio ETag
multiPartCopyObject b o cps srcSize = do multiPartCopyObject b o cps srcSize = do
uid <- newMultipartUpload b o [] uid <- newMultipartUpload b o []
let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps
partRanges = selectCopyRanges byteRange partRanges = selectCopyRanges byteRange
partSources = map (\(x, (start, end)) -> (x, cps {srcRange = Just (start, end) })) partSources =
partRanges map
dstInfo = defaultDestinationInfo { dstBucket = b, dstObject = o} (\(x, (start, end)) -> (x, cps {srcRange = Just (start, end)}))
partRanges
dstInfo = defaultDestinationInfo {dstBucket = b, dstObject = o}
copiedParts <- limitedMapConcurrently 10 copiedParts <-
(\(pn, cps') -> do limitedMapConcurrently
(etag, _) <- copyObjectPart dstInfo cps' uid pn [] 10
return (pn, etag) ( \(pn, cps') -> do
) (etag, _) <- copyObjectPart dstInfo cps' uid pn []
partSources return (pn, etag)
)
partSources
completeMultipartUpload b o uid copiedParts completeMultipartUpload b o uid copiedParts

File diff suppressed because it is too large Load Diff

View File

@ -13,23 +13,22 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE FlexibleInstances #-}
module Network.Minio.Data.ByteString module Network.Minio.Data.ByteString
( ( stripBS,
stripBS UriEncodable (..),
, UriEncodable(..) )
) where where
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Builder as BB import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Char8 as BC8 import qualified Data.ByteString.Char8 as BC8
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.Char (isSpace, toUpper, isAsciiUpper, isAsciiLower, isDigit) import Data.Char (isAsciiLower, isAsciiUpper)
import qualified Data.Text as T import qualified Data.Text as T
import Numeric (showHex) import Lib.Prelude
import Numeric (showHex)
import Lib.Prelude
stripBS :: ByteString -> ByteString stripBS :: ByteString -> ByteString
stripBS = BC8.dropWhile isSpace . fst . BC8.spanEnd isSpace stripBS = BC8.dropWhile isSpace . fst . BC8.spanEnd isSpace
@ -40,7 +39,7 @@ class UriEncodable s where
instance UriEncodable [Char] where instance UriEncodable [Char] where
uriEncode encodeSlash payload = uriEncode encodeSlash payload =
LB.toStrict $ BB.toLazyByteString $ mconcat $ LB.toStrict $ BB.toLazyByteString $ mconcat $
map (`uriEncodeChar` encodeSlash) payload map (`uriEncodeChar` encodeSlash) payload
instance UriEncodable ByteString where instance UriEncodable ByteString where
-- assumes that uriEncode is passed ASCII encoded strings. -- assumes that uriEncode is passed ASCII encoded strings.
@ -59,16 +58,17 @@ uriEncodeChar '/' True = BB.byteString "%2F"
uriEncodeChar '/' False = BB.char7 '/' uriEncodeChar '/' False = BB.char7 '/'
uriEncodeChar ch _ uriEncodeChar ch _
| isAsciiUpper ch | isAsciiUpper ch
|| isAsciiLower ch || isAsciiLower ch
|| isDigit ch || isDigit ch
|| (ch == '_') || (ch == '_')
|| (ch == '-') || (ch == '-')
|| (ch == '.') || (ch == '.')
|| (ch == '~') = BB.char7 ch || (ch == '~') =
BB.char7 ch
| otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch | otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch
where where
f :: Word8 -> BB.Builder f :: Word8 -> BB.Builder
f n = BB.char7 '%' <> BB.string7 hexStr f n = BB.char7 '%' <> BB.string7 hexStr
where where
hexStr = map toUpper $ showHex q $ showHex r "" hexStr = map toUpper $ showHex q $ showHex r ""
(q, r) = divMod (fromIntegral n) (16::Word8) (q, r) = divMod (fromIntegral n) (16 :: Word8)

View File

@ -15,31 +15,31 @@
-- --
module Network.Minio.Data.Crypto module Network.Minio.Data.Crypto
( ( hashSHA256,
hashSHA256 hashSHA256FromSource,
, hashSHA256FromSource hashMD5,
hashMD5ToBase64,
hashMD5FromSource,
hmacSHA256,
hmacSHA256RawBS,
digestToBS,
digestToBase16,
encodeToBase64,
)
where
, hashMD5 import Crypto.Hash
, hashMD5ToBase64 ( Digest,
, hashMD5FromSource MD5 (..),
SHA256 (..),
, hmacSHA256 hashWith,
, hmacSHA256RawBS )
, digestToBS import Crypto.Hash.Conduit (sinkHash)
, digestToBase16 import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
, encodeToBase64 import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
) where import qualified Data.Conduit as C
import Lib.Prelude
import Crypto.Hash (Digest, MD5 (..), SHA256 (..),
hashWith)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C
import Lib.Prelude
hashSHA256 :: ByteString -> ByteString hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256 hashSHA256 = digestToBase16 . hashWith SHA256

View File

@ -15,20 +15,18 @@
-- --
module Network.Minio.Data.Time module Network.Minio.Data.Time
( ( awsTimeFormat,
awsTimeFormat awsTimeFormatBS,
, awsTimeFormatBS awsDateFormat,
, awsDateFormat awsDateFormatBS,
, awsDateFormatBS awsParseTime,
, awsParseTime iso8601TimeFormat,
, iso8601TimeFormat )
) where where
import Data.ByteString.Char8 (pack)
import Data.ByteString.Char8 (pack)
import qualified Data.Time as Time import qualified Data.Time as Time
import Lib.Prelude
import Lib.Prelude
awsTimeFormat :: UTCTime -> [Char] awsTimeFormat :: UTCTime -> [Char]
awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ" awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"

View File

@ -16,74 +16,75 @@
module Network.Minio.Errors where module Network.Minio.Errors where
import Control.Exception import Control.Exception
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import Lib.Prelude
--------------------------------- ---------------------------------
-- Errors -- Errors
--------------------------------- ---------------------------------
-- | Various validation errors -- | Various validation errors
data MErrV = MErrVSinglePUTSizeExceeded Int64 data MErrV
| MErrVPutSizeExceeded Int64 = MErrVSinglePUTSizeExceeded Int64
| MErrVETagHeaderNotFound | MErrVPutSizeExceeded Int64
| MErrVInvalidObjectInfoResponse | MErrVETagHeaderNotFound
| MErrVInvalidSrcObjSpec Text | MErrVInvalidObjectInfoResponse
| MErrVInvalidSrcObjByteRange (Int64, Int64) | MErrVInvalidSrcObjSpec Text
| MErrVCopyObjSingleNoRangeAccepted | MErrVInvalidSrcObjByteRange (Int64, Int64)
| MErrVRegionNotSupported Text | MErrVCopyObjSingleNoRangeAccepted
| MErrVXmlParse Text | MErrVRegionNotSupported Text
| MErrVInvalidBucketName Text | MErrVXmlParse Text
| MErrVInvalidObjectName Text | MErrVInvalidBucketName Text
| MErrVInvalidUrlExpiry Int | MErrVInvalidObjectName Text
| MErrVJsonParse Text | MErrVInvalidUrlExpiry Int
| MErrVInvalidHealPath | MErrVJsonParse Text
| MErrVMissingCredentials | MErrVInvalidHealPath
| MErrVInvalidEncryptionKeyLength | MErrVMissingCredentials
| MErrVStreamingBodyUnexpectedEOF | MErrVInvalidEncryptionKeyLength
| MErrVUnexpectedPayload | MErrVStreamingBodyUnexpectedEOF
| MErrVUnexpectedPayload
deriving (Show, Eq) deriving (Show, Eq)
instance Exception MErrV instance Exception MErrV
-- | Errors returned by S3 compatible service -- | Errors returned by S3 compatible service
data ServiceErr = BucketAlreadyExists data ServiceErr
| BucketAlreadyOwnedByYou = BucketAlreadyExists
| NoSuchBucket | BucketAlreadyOwnedByYou
| InvalidBucketName | NoSuchBucket
| NoSuchKey | InvalidBucketName
| SelectErr Text Text | NoSuchKey
| ServiceErr Text Text | SelectErr Text Text
| ServiceErr Text Text
deriving (Show, Eq) deriving (Show, Eq)
instance Exception ServiceErr instance Exception ServiceErr
toServiceErr :: Text -> Text -> ServiceErr toServiceErr :: Text -> Text -> ServiceErr
toServiceErr "NoSuchKey" _ = NoSuchKey toServiceErr "NoSuchKey" _ = NoSuchKey
toServiceErr "NoSuchBucket" _ = NoSuchBucket toServiceErr "NoSuchBucket" _ = NoSuchBucket
toServiceErr "InvalidBucketName" _ = InvalidBucketName toServiceErr "InvalidBucketName" _ = InvalidBucketName
toServiceErr "BucketAlreadyOwnedByYou" _ = BucketAlreadyOwnedByYou toServiceErr "BucketAlreadyOwnedByYou" _ = BucketAlreadyOwnedByYou
toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists
toServiceErr code message = ServiceErr code message toServiceErr code message = ServiceErr code message
-- | Errors thrown by the library -- | Errors thrown by the library
data MinioErr = MErrHTTP NC.HttpException data MinioErr
| MErrIO IOException = MErrHTTP NC.HttpException
| MErrService ServiceErr | MErrIO IOException
| MErrValidation MErrV | MErrService ServiceErr
| MErrValidation MErrV
deriving (Show) deriving (Show)
instance Eq MinioErr where instance Eq MinioErr where
MErrHTTP _ == MErrHTTP _ = True MErrHTTP _ == MErrHTTP _ = True
MErrHTTP _ == _ = False MErrHTTP _ == _ = False
MErrIO _ == MErrIO _ = True MErrIO _ == MErrIO _ = True
MErrIO _ == _ = False MErrIO _ == _ = False
MErrService a == MErrService b = a == b MErrService a == MErrService b = a == b
MErrService _ == _ = False MErrService _ == _ = False
MErrValidation a == MErrValidation b = a == b MErrValidation a == MErrValidation b = a == b
MErrValidation _ == _ = False MErrValidation _ == _ = False
instance Exception MinioErr instance Exception MinioErr

View File

@ -15,28 +15,35 @@
-- --
module Network.Minio.JsonParser module Network.Minio.JsonParser
( ( parseErrResponseJSON,
parseErrResponseJSON )
) where where
import Data.Aeson (FromJSON, eitherDecode, parseJSON, import Data.Aeson
withObject, (.:)) ( (.:),
import qualified Data.Text as T FromJSON,
eitherDecode,
parseJSON,
withObject,
)
import qualified Data.Text as T
import Lib.Prelude
import Network.Minio.Errors
import Lib.Prelude data AdminErrJSON = AdminErrJSON
{ aeCode :: Text,
aeMessage :: Text
}
deriving (Eq, Show)
import Network.Minio.Errors
data AdminErrJSON = AdminErrJSON { aeCode :: Text
, aeMessage :: Text
} deriving (Eq, Show)
instance FromJSON AdminErrJSON where instance FromJSON AdminErrJSON where
parseJSON = withObject "AdminErrJSON" $ \v -> AdminErrJSON parseJSON = withObject "AdminErrJSON" $ \v ->
<$> v .: "Code" AdminErrJSON
<*> v .: "Message" <$> v .: "Code"
<*> v .: "Message"
parseErrResponseJSON :: (MonadIO m) => LByteString -> m ServiceErr parseErrResponseJSON :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponseJSON jsondata = parseErrResponseJSON jsondata =
case eitherDecode jsondata of case eitherDecode jsondata of
Right aErr -> return $ toServiceErr (aeCode aErr) (aeMessage aErr) Right aErr -> return $ toServiceErr (aeCode aErr) (aeMessage aErr)
Left err -> throwIO $ MErrVJsonParse $ T.pack err Left err -> throwIO $ MErrVJsonParse $ T.pack err

View File

@ -16,20 +16,19 @@
module Network.Minio.ListOps where module Network.Minio.ListOps where
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL import qualified Data.Conduit.List as CL
import Lib.Prelude
import Lib.Prelude import Network.Minio.Data
import Network.Minio.S3API
import Network.Minio.Data
import Network.Minio.S3API
-- | Represents a list output item - either an object or an object -- | Represents a list output item - either an object or an object
-- prefix (i.e. a directory). -- prefix (i.e. a directory).
data ListItem = ListItemObject ObjectInfo data ListItem
| ListItemPrefix Text = ListItemObject ObjectInfo
deriving (Show, Eq) | ListItemPrefix Text
deriving (Show, Eq)
-- | @'listObjects' bucket prefix recurse@ lists objects in a bucket -- | @'listObjects' bucket prefix recurse@ lists objects in a bucket
-- similar to a file system tree traversal. -- similar to a file system tree traversal.
@ -48,73 +47,99 @@ listObjects bucket prefix recurse = loop Nothing
where where
loop :: Maybe Text -> C.ConduitM () ListItem Minio () loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop nextToken = do loop nextToken = do
let let delimiter = bool (Just "/") Nothing recurse
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects res CL.sourceList $ map ListItemObject $ lorObjects res
unless recurse $ unless recurse
CL.sourceList $ map ListItemPrefix $ lorCPrefixes res $ CL.sourceList
$ map ListItemPrefix
$ lorCPrefixes res
when (lorHasMore res) $ when (lorHasMore res) $
loop (lorNextToken res) loop (lorNextToken res)
-- | Lists objects - similar to @listObjects@, however uses the older -- | Lists objects - similar to @listObjects@, however uses the older
-- V1 AWS S3 API. Prefer @listObjects@ to this. -- V1 AWS S3 API. Prefer @listObjects@ to this.
listObjectsV1 :: Bucket -> Maybe Text -> Bool listObjectsV1 ::
-> C.ConduitM () ListItem Minio () Bucket ->
Maybe Text ->
Bool ->
C.ConduitM () ListItem Minio ()
listObjectsV1 bucket prefix recurse = loop Nothing listObjectsV1 bucket prefix recurse = loop Nothing
where where
loop :: Maybe Text -> C.ConduitM () ListItem Minio () loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop nextMarker = do loop nextMarker = do
let let delimiter = bool (Just "/") Nothing recurse
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects' res CL.sourceList $ map ListItemObject $ lorObjects' res
unless recurse $ unless recurse
CL.sourceList $ map ListItemPrefix $ lorCPrefixes' res $ CL.sourceList
$ map ListItemPrefix
$ lorCPrefixes' res
when (lorHasMore' res) $ when (lorHasMore' res) $
loop (lorNextMarker res) loop (lorNextMarker res)
-- | List incomplete uploads in a bucket matching the given prefix. If -- | List incomplete uploads in a bucket matching the given prefix. If
-- recurse is set to True incomplete uploads for the given prefix are -- recurse is set to True incomplete uploads for the given prefix are
-- recursively listed. -- recursively listed.
listIncompleteUploads :: Bucket -> Maybe Text -> Bool listIncompleteUploads ::
-> C.ConduitM () UploadInfo Minio () Bucket ->
Maybe Text ->
Bool ->
C.ConduitM () UploadInfo Minio ()
listIncompleteUploads bucket prefix recurse = loop Nothing Nothing listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
where where
loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio () loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio ()
loop nextKeyMarker nextUploadIdMarker = do loop nextKeyMarker nextUploadIdMarker = do
let let delimiter = bool (Just "/") Nothing recurse
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listIncompleteUploads' bucket prefix delimiter res <-
nextKeyMarker nextUploadIdMarker Nothing lift $
listIncompleteUploads'
bucket
prefix
delimiter
nextKeyMarker
nextUploadIdMarker
Nothing
aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
partInfos <- C.runConduit $ listIncompleteParts bucket uKey uId partInfos <-
C..| CC.sinkList C.runConduit $
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
CL.sourceList $ CL.sourceList
map (\((uKey, uId, uInitTime), size) -> $ map
UploadInfo uKey uId uInitTime size ( \((uKey, uId, uInitTime), size) ->
) $ zip (lurUploads res) aggrSizes UploadInfo uKey uId uInitTime size
)
$ zip (lurUploads res) aggrSizes
when (lurHasMore res) $ when (lurHasMore res) $
loop (lurNextKey res) (lurNextUpload res) loop (lurNextKey res) (lurNextUpload res)
-- | List object parts of an ongoing multipart upload for given -- | List object parts of an ongoing multipart upload for given
-- bucket, object and uploadId. -- bucket, object and uploadId.
listIncompleteParts :: Bucket -> Object -> UploadId listIncompleteParts ::
-> C.ConduitM () ObjectPartInfo Minio () Bucket ->
Object ->
UploadId ->
C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts bucket object uploadId = loop Nothing listIncompleteParts bucket object uploadId = loop Nothing
where where
loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio () loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio ()
loop nextPartMarker = do loop nextPartMarker = do
res <- lift $ listIncompleteParts' bucket object uploadId Nothing res <-
nextPartMarker lift $
listIncompleteParts'
bucket
object
uploadId
Nothing
nextPartMarker
CL.sourceList $ lprParts res CL.sourceList $ lprParts res
when (lprHasMore res) $ when (lprHasMore res) $
loop (show <$> lprNextPart res) loop (show <$> lprNextPart res)

View File

@ -15,43 +15,40 @@
-- --
module Network.Minio.PresignedOperations module Network.Minio.PresignedOperations
( UrlExpiry ( UrlExpiry,
, makePresignedUrl makePresignedUrl,
, presignedPutObjectUrl presignedPutObjectUrl,
, presignedGetObjectUrl presignedGetObjectUrl,
, presignedHeadObjectUrl presignedHeadObjectUrl,
PostPolicyCondition (..),
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
PostPolicy (..),
PostPolicyError (..),
newPostPolicy,
showPostPolicy,
presignedPostPolicy,
)
where
, PostPolicyCondition(..) import Data.Aeson ((.=))
, ppCondBucket import qualified Data.Aeson as Json
, ppCondContentLengthRange import Data.ByteString.Builder (byteString, toLazyByteString)
, ppCondContentType import qualified Data.HashMap.Strict as H
, ppCondKey import qualified Data.Text as T
, ppCondKeyStartsWith import qualified Data.Time as Time
, ppCondSuccessActionStatus import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
, PostPolicy(..) import qualified Network.HTTP.Types as HT
, PostPolicyError(..) import Network.HTTP.Types.Header (hHost)
, newPostPolicy import Network.Minio.Data
, showPostPolicy import Network.Minio.Data.Time
, presignedPostPolicy import Network.Minio.Errors
) where import Network.Minio.Sign.V4
import Data.Aeson ((.=))
import qualified Data.Aeson as Json
import Data.ByteString.Builder (byteString, toLazyByteString)
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time as Time
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
-- | Generate a presigned URL. This function allows for advanced usage -- | Generate a presigned URL. This function allows for advanced usage
-- - for simple cases prefer the `presigned*Url` functions. -- - for simple cases prefer the `presigned*Url` functions.
@ -61,42 +58,56 @@ import Network.Minio.Sign.V4
-- --
-- All extra query parameters or headers are signed, and therefore are -- All extra query parameters or headers are signed, and therefore are
-- required to be sent when the generated URL is actually used. -- required to be sent when the generated URL is actually used.
makePresignedUrl :: UrlExpiry -> HT.Method -> Maybe Bucket -> Maybe Object makePresignedUrl ::
-> Maybe Region -> HT.Query -> HT.RequestHeaders UrlExpiry ->
-> Minio ByteString HT.Method ->
Maybe Bucket ->
Maybe Object ->
Maybe Region ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
when (expiry > 7*24*3600 || expiry < 0) $ when (expiry > 7 * 24 * 3600 || expiry < 0)
throwIO $ MErrVInvalidUrlExpiry expiry $ throwIO
$ MErrVInvalidUrlExpiry expiry
ci <- asks mcConnInfo ci <- asks mcConnInfo
let let hostHeader = (hHost, getHostAddr ci)
hostHeader = (hHost, getHostAddr ci) req =
req = NC.defaultRequest { NC.defaultRequest
NC.method = method { NC.method = method,
, NC.secure = connectIsSecure ci NC.secure = connectIsSecure ci,
, NC.host = encodeUtf8 $ connectHost ci NC.host = encodeUtf8 $ connectHost ci,
, NC.port = connectPort ci NC.port = connectPort ci,
, NC.path = getS3Path bucket object NC.path = getS3Path bucket object,
, NC.requestHeaders = hostHeader : extraHeaders NC.requestHeaders = hostHeader : extraHeaders,
, NC.queryString = HT.renderQuery True extraQuery NC.queryString = HT.renderQuery True extraQuery
} }
ts <- liftIO Time.getCurrentTime ts <- liftIO Time.getCurrentTime
let sp = SignParams (connectAccessKey ci) (connectSecretKey ci) let sp =
ts region (Just expiry) Nothing SignParams
(connectAccessKey ci)
(connectSecretKey ci)
ts
region
(Just expiry)
Nothing
signPairs = signV4 sp req signPairs = signV4 sp req
qpToAdd = (fmap . fmap) Just signPairs qpToAdd = (fmap . fmap) Just signPairs
queryStr = HT.renderQueryBuilder True queryStr =
((HT.parseQuery $ NC.queryString req) ++ qpToAdd) HT.renderQueryBuilder
True
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
return $ toS $ toLazyByteString $ scheme return $ toStrictBS $ toLazyByteString $
<> byteString (getHostAddr ci) scheme
<> byteString (getS3Path bucket object) <> byteString (getHostAddr ci)
<> queryStr <> byteString (getS3Path bucket object)
<> queryStr
-- | Generate a URL with authentication signature to PUT (upload) an -- | Generate a URL with authentication signature to PUT (upload) an
-- object. Any extra headers if passed, are signed, and so they are -- object. Any extra headers if passed, are signed, and so they are
@ -105,12 +116,22 @@ makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
-- --
-- For a list of possible headers to pass, please refer to the PUT -- For a list of possible headers to pass, please refer to the PUT
-- object REST API AWS S3 documentation. -- object REST API AWS S3 documentation.
presignedPutObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.RequestHeaders presignedPutObjectUrl ::
-> Minio ByteString Bucket ->
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedPutObjectUrl bucket object expirySeconds extraHeaders = do presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo) region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl expirySeconds HT.methodPut makePresignedUrl
(Just bucket) (Just object) region [] extraHeaders expirySeconds
HT.methodPut
(Just bucket)
(Just object)
region
[]
extraHeaders
-- | Generate a URL with authentication signature to GET (download) an -- | Generate a URL with authentication signature to GET (download) an
-- object. All extra query parameters and headers passed here will be -- object. All extra query parameters and headers passed here will be
@ -121,12 +142,23 @@ presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
-- --
-- For a list of possible request parameters and headers, please refer -- For a list of possible request parameters and headers, please refer
-- to the GET object REST API AWS S3 documentation. -- to the GET object REST API AWS S3 documentation.
presignedGetObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.Query presignedGetObjectUrl ::
-> HT.RequestHeaders -> Minio ByteString Bucket ->
Object ->
UrlExpiry ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo) region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl expirySeconds HT.methodGet makePresignedUrl
(Just bucket) (Just object) region extraQuery extraHeaders expirySeconds
HT.methodGet
(Just bucket)
(Just object)
region
extraQuery
extraHeaders
-- | Generate a URL with authentication signature to make a HEAD -- | Generate a URL with authentication signature to make a HEAD
-- request on an object. This is used to fetch metadata about an -- request on an object. This is used to fetch metadata about an
@ -135,18 +167,29 @@ presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
-- --
-- For a list of possible headers to pass, please refer to the HEAD -- For a list of possible headers to pass, please refer to the HEAD
-- object REST API AWS S3 documentation. -- object REST API AWS S3 documentation.
presignedHeadObjectUrl :: Bucket -> Object -> UrlExpiry presignedHeadObjectUrl ::
-> HT.RequestHeaders -> Minio ByteString Bucket ->
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedHeadObjectUrl bucket object expirySeconds extraHeaders = do presignedHeadObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo) region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl expirySeconds HT.methodHead makePresignedUrl
(Just bucket) (Just object) region [] extraHeaders expirySeconds
HT.methodHead
(Just bucket)
(Just object)
region
[]
extraHeaders
-- | Represents individual conditions in a Post Policy document. -- | Represents individual conditions in a Post Policy document.
data PostPolicyCondition = PPCStartsWith Text Text data PostPolicyCondition
| PPCEquals Text Text = PPCStartsWith Text Text
| PPCRange Text Int64 Int64 | PPCEquals Text Text
deriving (Show, Eq) | PPCRange Text Int64 Int64
deriving (Show, Eq)
instance Json.ToJSON PostPolicyCondition where instance Json.ToJSON PostPolicyCondition where
toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v] toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v]
@ -160,25 +203,28 @@ instance Json.ToJSON PostPolicyCondition where
Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal] Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
-- | A PostPolicy is required to perform uploads via browser forms. -- | A PostPolicy is required to perform uploads via browser forms.
data PostPolicy = PostPolicy { data PostPolicy = PostPolicy
expiration :: UTCTime { expiration :: UTCTime,
, conditions :: [PostPolicyCondition] conditions :: [PostPolicyCondition]
} deriving (Show, Eq) }
deriving (Show, Eq)
instance Json.ToJSON PostPolicy where instance Json.ToJSON PostPolicy where
toJSON (PostPolicy e c) = toJSON (PostPolicy e c) =
Json.object $ [ "expiration" .= iso8601TimeFormat e Json.object $
, "conditions" .= c [ "expiration" .= iso8601TimeFormat e,
] "conditions" .= c
]
toEncoding (PostPolicy e c) = toEncoding (PostPolicy e c) =
Json.pairs ("expiration" .= iso8601TimeFormat e <> "conditions" .= c) Json.pairs ("expiration" .= iso8601TimeFormat e <> "conditions" .= c)
-- | Possible validation errors when creating a PostPolicy. -- | Possible validation errors when creating a PostPolicy.
data PostPolicyError = PPEKeyNotSpecified data PostPolicyError
| PPEBucketNotSpecified = PPEKeyNotSpecified
| PPEConditionKeyEmpty | PPEBucketNotSpecified
| PPERangeInvalid | PPEConditionKeyEmpty
deriving (Eq, Show) | PPERangeInvalid
deriving (Eq, Show)
-- | Set the bucket name that the upload should use. -- | Set the bucket name that the upload should use.
ppCondBucket :: Bucket -> PostPolicyCondition ppCondBucket :: Bucket -> PostPolicyCondition
@ -186,8 +232,10 @@ ppCondBucket = PPCEquals "bucket"
-- | Set the content length range constraint with minimum and maximum -- | Set the content length range constraint with minimum and maximum
-- byte count values. -- byte count values.
ppCondContentLengthRange :: Int64 -> Int64 ppCondContentLengthRange ::
-> PostPolicyCondition Int64 ->
Int64 ->
PostPolicyCondition
ppCondContentLengthRange = PPCRange "content-length-range" ppCondContentLengthRange = PPCRange "content-length-range"
-- | Set the content-type header for the upload. -- | Set the content-type header for the upload.
@ -210,83 +258,91 @@ ppCondSuccessActionStatus n =
-- | This function creates a PostPolicy after validating its -- | This function creates a PostPolicy after validating its
-- arguments. -- arguments.
newPostPolicy :: UTCTime -> [PostPolicyCondition] newPostPolicy ::
-> Either PostPolicyError PostPolicy UTCTime ->
[PostPolicyCondition] ->
Either PostPolicyError PostPolicy
newPostPolicy expirationTime conds newPostPolicy expirationTime conds
-- object name condition must be present -- object name condition must be present
| not $ any (keyEquals "key") conds = | not $ any (keyEquals "key") conds =
Left PPEKeyNotSpecified Left PPEKeyNotSpecified
-- bucket name condition must be present -- bucket name condition must be present
| not $ any (keyEquals "bucket") conds = | not $ any (keyEquals "bucket") conds =
Left PPEBucketNotSpecified Left PPEBucketNotSpecified
-- a condition with an empty key is invalid -- a condition with an empty key is invalid
| any (keyEquals "") conds || any isEmptyRangeKey conds = | any (keyEquals "") conds || any isEmptyRangeKey conds =
Left PPEConditionKeyEmpty Left PPEConditionKeyEmpty
-- invalid range check -- invalid range check
| any isInvalidRange conds = | any isInvalidRange conds =
Left PPERangeInvalid Left PPERangeInvalid
-- all good! -- all good!
| otherwise = | otherwise =
return $ PostPolicy expirationTime conds return $ PostPolicy expirationTime conds
where where
keyEquals k' (PPCStartsWith k _) = k == k' keyEquals k' (PPCStartsWith k _) = k == k'
keyEquals k' (PPCEquals k _) = k == k' keyEquals k' (PPCEquals k _) = k == k'
keyEquals _ _ = False keyEquals _ _ = False
isEmptyRangeKey (PPCRange k _ _) = k == "" isEmptyRangeKey (PPCRange k _ _) = k == ""
isEmptyRangeKey _ = False isEmptyRangeKey _ = False
isInvalidRange (PPCRange _ mi ma) = mi < 0 || mi > ma isInvalidRange (PPCRange _ mi ma) = mi < 0 || mi > ma
isInvalidRange _ = False isInvalidRange _ = False
-- | Convert Post Policy to a string (e.g. for printing). -- | Convert Post Policy to a string (e.g. for printing).
showPostPolicy :: PostPolicy -> ByteString showPostPolicy :: PostPolicy -> ByteString
showPostPolicy = toS . Json.encode showPostPolicy = toStrictBS . Json.encode
-- | Generate a presigned URL and POST policy to upload files via a -- | Generate a presigned URL and POST policy to upload files via a
-- browser. On success, this function returns a URL and POST -- browser. On success, this function returns a URL and POST
-- form-data. -- form-data.
presignedPostPolicy :: PostPolicy presignedPostPolicy ::
-> Minio (ByteString, H.HashMap Text ByteString) PostPolicy ->
Minio (ByteString, H.HashMap Text ByteString)
presignedPostPolicy p = do presignedPostPolicy p = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
signTime <- liftIO $ Time.getCurrentTime signTime <- liftIO $ Time.getCurrentTime
let let extraConditions =
extraConditions = [ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime),
[ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime) PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256",
, PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256" PPCEquals
, PPCEquals "x-amz-credential" "x-amz-credential"
(T.intercalate "/" [connectAccessKey ci, ( T.intercalate
decodeUtf8 $ mkScope signTime region]) "/"
] [ connectAccessKey ci,
ppWithCreds = p { decodeUtf8 $ mkScope signTime region
conditions = conditions p ++ extraConditions ]
} )
sp = SignParams (connectAccessKey ci) (connectSecretKey ci) ]
signTime (Just $ connectRegion ci) Nothing Nothing ppWithCreds =
signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp p
{ conditions = conditions p ++ extraConditions
}
-- compute form-data sp =
mkPair (PPCStartsWith k v) = Just (k, v) SignParams
mkPair (PPCEquals k v) = Just (k, v) (connectAccessKey ci)
mkPair _ = Nothing (connectSecretKey ci)
formFromPolicy = H.map toS $ H.fromList $ catMaybes $ signTime
mkPair <$> conditions ppWithCreds (Just $ connectRegion ci)
formData = formFromPolicy `H.union` signData Nothing
Nothing
-- compute POST upload URL signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp
bucket = H.lookupDefault "" "bucket" formData -- compute form-data
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci mkPair (PPCStartsWith k v) = Just (k, v)
region = connectRegion ci mkPair (PPCEquals k v) = Just (k, v)
mkPair _ = Nothing
url = toS $ toLazyByteString $ scheme <> byteString (getHostAddr ci) <> formFromPolicy =
byteString "/" <> byteString (toS bucket) <> byteString "/" H.map toUtf8 $ H.fromList $ catMaybes $
mkPair <$> conditions ppWithCreds
formData = formFromPolicy `H.union` signData
-- compute POST upload URL
bucket = H.lookupDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
region = connectRegion ci
url =
toStrictBS $ toLazyByteString $
scheme <> byteString (getHostAddr ci)
<> byteString "/"
<> byteString bucket
<> byteString "/"
return (url, formData) return (url, formData)

View File

@ -15,29 +15,24 @@
-- --
module Network.Minio.PutObject module Network.Minio.PutObject
( ( putObjectInternal,
putObjectInternal ObjectData (..),
, ObjectData(..) selectPartSizes,
, selectPartSizes )
) where where
import Conduit (takeC)
import Conduit (takeC) import qualified Conduit as C
import qualified Conduit as C import qualified Data.ByteString.Lazy as LBS
import qualified Data.ByteString.Lazy as LBS import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL import qualified Data.Conduit.List as CL
import qualified Data.List as List import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Lib.Prelude import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Data import Network.Minio.Utils
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | A data-type to represent the source data for an object. A -- | A data-type to represent the source data for an object. A
-- file-path or a producer-conduit may be provided. -- file-path or a producer-conduit may be provided.
@ -50,37 +45,45 @@ import Network.Minio.Utils
-- the input - if it is not provided, upload will continue until the -- the input - if it is not provided, upload will continue until the
-- stream ends or the object reaches `maxObjectSize` size. -- stream ends or the object reaches `maxObjectSize` size.
data ObjectData m data ObjectData m
= ODFile FilePath (Maybe Int64) -- ^ Takes filepath and optional = -- | Takes filepath and optional
-- size. -- size.
| ODStream (C.ConduitM () ByteString m ()) (Maybe Int64) -- ^ Pass ODFile FilePath (Maybe Int64)
-- size | -- | Pass
-- (bytes) -- size
-- if -- (bytes)
-- known. -- if
-- known.
ODStream (C.ConduitM () ByteString m ()) (Maybe Int64)
-- | Put an object from ObjectData. This high-level API handles -- | Put an object from ObjectData. This high-level API handles
-- objects of all sizes, and even if the object size is unknown. -- objects of all sizes, and even if the object size is unknown.
putObjectInternal :: Bucket -> Object -> PutObjectOptions putObjectInternal ::
-> ObjectData Minio -> Minio ETag Bucket ->
Object ->
PutObjectOptions ->
ObjectData Minio ->
Minio ETag
putObjectInternal b o opts (ODStream src sizeMay) = do putObjectInternal b o opts (ODStream src sizeMay) = do
case sizeMay of case sizeMay of
-- unable to get size, so assume non-seekable file -- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing src Nothing -> sequentialMultipartUpload b o opts Nothing src
-- got file size, so check for single/multipart upload -- got file size, so check for single/multipart upload
Just size -> Just size ->
if | size <= 64 * oneMiB -> do if
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs | size <= 64 * oneMiB -> do
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
| otherwise -> sequentialMultipartUpload b o opts (Just size) src | size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
putObjectInternal b o opts (ODFile fp sizeMay) = do putObjectInternal b o opts (ODFile fp sizeMay) = do
hResE <- withNewHandle fp $ \h -> hResE <- withNewHandle fp $ \h ->
liftM2 (,) (isHandleSeekable h) (getFileSize h) liftM2 (,) (isHandleSeekable h) (getFileSize h)
(isSeekable, handleSizeMay) <- either (const $ return (False, Nothing)) return (isSeekable, handleSizeMay) <-
hResE either
(const $ return (False, Nothing))
return
hResE
-- prefer given size to queried size. -- prefer given size to queried size.
let finalSizeMay = listToMaybe $ catMaybes [sizeMay, handleSizeMay] let finalSizeMay = listToMaybe $ catMaybes [sizeMay, handleSizeMay]
@ -88,18 +91,25 @@ putObjectInternal b o opts (ODFile fp sizeMay) = do
case finalSizeMay of case finalSizeMay of
-- unable to get size, so assume non-seekable file -- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing $ CB.sourceFile fp Nothing -> sequentialMultipartUpload b o opts Nothing $ CB.sourceFile fp
-- got file size, so check for single/multipart upload -- got file size, so check for single/multipart upload
Just size -> Just size ->
if | size <= 64 * oneMiB -> either throwIO return =<< if
withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size) | size <= 64 * oneMiB ->
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size either throwIO return
| isSeekable -> parallelMultipartUpload b o opts fp size =<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
| otherwise -> sequentialMultipartUpload b o opts (Just size) $ | size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
CB.sourceFile fp | isSeekable -> parallelMultipartUpload b o opts fp size
| otherwise ->
sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp
parallelMultipartUpload :: Bucket -> Object -> PutObjectOptions parallelMultipartUpload ::
-> FilePath -> Int64 -> Minio ETag Bucket ->
Object ->
PutObjectOptions ->
FilePath ->
Int64 ->
Minio ETag
parallelMultipartUpload b o opts filePath size = do parallelMultipartUpload b o opts filePath size = do
-- get a new upload id. -- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts) uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -109,15 +119,17 @@ parallelMultipartUpload b o opts filePath size = do
let threads = fromMaybe 10 $ pooNumThreads opts let threads = fromMaybe 10 $ pooNumThreads opts
-- perform upload with 'threads' threads -- perform upload with 'threads' threads
uploadedPartsE <- limitedMapConcurrently (fromIntegral threads) uploadedPartsE <-
(uploadPart uploadId) partSizeInfo limitedMapConcurrently
(fromIntegral threads)
(uploadPart uploadId)
partSizeInfo
-- if there were any errors, rethrow exception. -- if there were any errors, rethrow exception.
mapM_ throwIO $ lefts uploadedPartsE mapM_ throwIO $ lefts uploadedPartsE
-- if we get here, all parts were successfully uploaded. -- if we get here, all parts were successfully uploaded.
completeMultipartUpload b o uploadId $ rights uploadedPartsE completeMultipartUpload b o uploadId $ rights uploadedPartsE
where where
uploadPart uploadId (partNum, offset, sz) = uploadPart uploadId (partNum, offset, sz) =
withNewHandle filePath $ \h -> do withNewHandle filePath $ \h -> do
@ -125,10 +137,13 @@ parallelMultipartUpload b o opts filePath size = do
putObjectPart b o uploadId partNum [] payload putObjectPart b o uploadId partNum [] payload
-- | Upload multipart object from conduit source sequentially -- | Upload multipart object from conduit source sequentially
sequentialMultipartUpload :: Bucket -> Object -> PutObjectOptions sequentialMultipartUpload ::
-> Maybe Int64 Bucket ->
-> C.ConduitM () ByteString Minio () Object ->
-> Minio ETag PutObjectOptions ->
Maybe Int64 ->
C.ConduitM () ByteString Minio () ->
Minio ETag
sequentialMultipartUpload b o opts sizeMay src = do sequentialMultipartUpload b o opts sizeMay src = do
-- get a new upload id. -- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts) uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -136,22 +151,23 @@ sequentialMultipartUpload b o opts sizeMay src = do
-- upload parts in loop -- upload parts in loop
let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay
(pnums, _, sizes) = List.unzip3 partSizes (pnums, _, sizes) = List.unzip3 partSizes
uploadedParts <- C.runConduit uploadedParts <-
$ src C.runConduit $
C..| chunkBSConduit (map fromIntegral sizes) src
C..| CL.map PayloadBS C..| chunkBSConduit (map fromIntegral sizes)
C..| uploadPart' uploadId pnums C..| CL.map PayloadBS
C..| CC.sinkList C..| uploadPart' uploadId pnums
C..| CC.sinkList
-- complete multipart upload -- complete multipart upload
completeMultipartUpload b o uploadId uploadedParts completeMultipartUpload b o uploadId uploadedParts
where where
uploadPart' _ [] = return () uploadPart' _ [] = return ()
uploadPart' uid (pn:pns) = do uploadPart' uid (pn : pns) = do
payloadMay <- C.await payloadMay <- C.await
case payloadMay of case payloadMay of
Nothing -> return () Nothing -> return ()
Just payload -> do pinfo <- lift $ putObjectPart b o uid pn [] payload Just payload -> do
C.yield pinfo pinfo <- lift $ putObjectPart b o uid pn [] payload
uploadPart' uid pns C.yield pinfo
uploadPart' uid pns

View File

@ -15,150 +15,162 @@
-- --
module Network.Minio.S3API module Network.Minio.S3API
( ( Region,
Region getLocation,
, getLocation
-- * Listing buckets -- * Listing buckets
-------------------- --------------------
, getService getService,
-- * Listing objects -- * Listing objects
-------------------- --------------------
, ListObjectsResult(..) ListObjectsResult (..),
, ListObjectsV1Result(..) ListObjectsV1Result (..),
, listObjects' listObjects',
, listObjectsV1' listObjectsV1',
-- * Retrieving buckets -- * Retrieving buckets
, headBucket headBucket,
-- * Retrieving objects -- * Retrieving objects
----------------------- -----------------------
, getObject' getObject',
, headObject headObject,
-- * Creating buckets and objects -- * Creating buckets and objects
--------------------------------- ---------------------------------
, putBucket putBucket,
, ETag ETag,
, maxSinglePutObjectSizeBytes maxSinglePutObjectSizeBytes,
, putObjectSingle' putObjectSingle',
, putObjectSingle putObjectSingle,
, copyObjectSingle copyObjectSingle,
-- * Multipart Upload APIs -- * Multipart Upload APIs
-------------------------- --------------------------
, UploadId UploadId,
, PartTuple PartTuple,
, Payload(..) Payload (..),
, PartNumber PartNumber,
, newMultipartUpload newMultipartUpload,
, putObjectPart putObjectPart,
, copyObjectPart copyObjectPart,
, completeMultipartUpload completeMultipartUpload,
, abortMultipartUpload abortMultipartUpload,
, ListUploadsResult(..) ListUploadsResult (..),
, listIncompleteUploads' listIncompleteUploads',
, ListPartsResult(..) ListPartsResult (..),
, listIncompleteParts' listIncompleteParts',
-- * Deletion APIs -- * Deletion APIs
-------------------------- --------------------------
, deleteBucket deleteBucket,
, deleteObject deleteObject,
-- * Presigned Operations -- * Presigned Operations
----------------------------- -----------------------------
, module Network.Minio.PresignedOperations module Network.Minio.PresignedOperations,
-- ** Bucket Policies -- ** Bucket Policies
, getBucketPolicy getBucketPolicy,
, setBucketPolicy setBucketPolicy,
-- * Bucket Notifications -- * Bucket Notifications
------------------------- -------------------------
, Notification(..) Notification (..),
, NotificationConfig(..) NotificationConfig (..),
, Arn Arn,
, Event(..) Event (..),
, Filter(..) Filter (..),
, FilterKey(..) FilterKey (..),
, FilterRules(..) FilterRules (..),
, FilterRule(..) FilterRule (..),
, getBucketNotification getBucketNotification,
, putBucketNotification putBucketNotification,
, removeAllBucketNotification removeAllBucketNotification,
) where )
where
import qualified Data.ByteString as BS import qualified Data.ByteString as BS
import qualified Data.Text as T import qualified Data.Text as T
import qualified Network.HTTP.Conduit as NC import Lib.Prelude
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types.Status (status404) import qualified Network.HTTP.Types as HT
import UnliftIO (Handler (Handler)) import Network.HTTP.Types.Status (status404)
import Network.Minio.API
import Lib.Prelude import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.API import Network.Minio.Errors
import Network.Minio.APICommon import Network.Minio.PresignedOperations
import Network.Minio.Data import Network.Minio.Utils
import Network.Minio.Errors import Network.Minio.XmlGenerator
import Network.Minio.PresignedOperations import Network.Minio.XmlParser
import Network.Minio.Utils import UnliftIO (Handler (Handler))
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
-- | Fetch all buckets from the service. -- | Fetch all buckets from the service.
getService :: Minio [BucketInfo] getService :: Minio [BucketInfo]
getService = do getService = do
resp <- executeRequest $ defaultS3ReqInfo { resp <-
riNeedsLocation = False executeRequest $
} defaultS3ReqInfo
{ riNeedsLocation = False
}
parseListBuckets $ NC.responseBody resp parseListBuckets $ NC.responseBody resp
-- Parse headers from getObject and headObject calls. -- Parse headers from getObject and headObject calls.
parseGetObjectHeaders :: Object -> [HT.Header] -> Maybe ObjectInfo parseGetObjectHeaders :: Object -> [HT.Header] -> Maybe ObjectInfo
parseGetObjectHeaders object headers = parseGetObjectHeaders object headers =
let metadataPairs = getMetadata headers let metadataPairs = getMetadata headers
userMetadata = getUserMetadataMap metadataPairs userMetadata = getUserMetadataMap metadataPairs
metadata = getNonUserMetadataMap metadataPairs metadata = getNonUserMetadataMap metadataPairs
in ObjectInfo <$> Just object in ObjectInfo <$> Just object
<*> getLastModifiedHeader headers <*> getLastModifiedHeader headers
<*> getETagHeader headers <*> getETagHeader headers
<*> getContentLength headers <*> getContentLength headers
<*> Just userMetadata <*> Just userMetadata
<*> Just metadata <*> Just metadata
-- | GET an object from the service and return parsed ObjectInfo and a -- | GET an object from the service and return parsed ObjectInfo and a
-- conduit source for the object content -- conduit source for the object content
getObject' :: Bucket -> Object -> HT.Query -> [HT.Header] getObject' ::
-> Minio GetObjectResponse Bucket ->
Object ->
HT.Query ->
[HT.Header] ->
Minio GetObjectResponse
getObject' bucket object queryParams headers = do getObject' bucket object queryParams headers = do
resp <- mkStreamRequest reqInfo resp <- mkStreamRequest reqInfo
let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp
objInfo <- maybe (throwIO MErrVInvalidObjectInfoResponse) return objInfo <-
objInfoMaybe maybe
return $ GetObjectResponse { gorObjectInfo = objInfo (throwIO MErrVInvalidObjectInfoResponse)
, gorObjectStream = NC.responseBody resp return
} objInfoMaybe
return $
GetObjectResponse
{ gorObjectInfo = objInfo,
gorObjectStream = NC.responseBody resp
}
where where
reqInfo = defaultS3ReqInfo { riBucket = Just bucket reqInfo =
, riObject = Just object defaultS3ReqInfo
, riQueryParams = queryParams { riBucket = Just bucket,
, riHeaders = headers riObject = Just object,
} riQueryParams = queryParams,
riHeaders = headers
}
-- | Creates a bucket via a PUT bucket call. -- | Creates a bucket via a PUT bucket call.
putBucket :: Bucket -> Region -> Minio () putBucket :: Bucket -> Region -> Minio ()
putBucket bucket location = do putBucket bucket location = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
void $ executeRequest $ void $ executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut defaultS3ReqInfo
, riBucket = Just bucket { riMethod = HT.methodPut,
, riPayload = PayloadBS $ mkCreateBucketConfig ns location riBucket = Just bucket,
, riNeedsLocation = False riPayload = PayloadBS $ mkCreateBucketConfig ns location,
} riNeedsLocation = False
}
-- | Single PUT object size. -- | Single PUT object size.
maxSinglePutObjectSizeBytes :: Int64 maxSinglePutObjectSizeBytes :: Int64
@ -172,315 +184,429 @@ putObjectSingle' :: Bucket -> Object -> [HT.Header] -> ByteString -> Minio ETag
putObjectSingle' bucket object headers bs = do putObjectSingle' bucket object headers bs = do
let size = fromIntegral (BS.length bs) let size = fromIntegral (BS.length bs)
-- check length is within single PUT object size. -- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $ when (size > maxSinglePutObjectSizeBytes)
throwIO $ MErrVSinglePUTSizeExceeded size $ throwIO
$ MErrVSinglePUTSizeExceeded size
let payload = mkStreamingPayload $ PayloadBS bs let payload = mkStreamingPayload $ PayloadBS bs
resp <- executeRequest $ resp <-
defaultS3ReqInfo { riMethod = HT.methodPut executeRequest $
, riBucket = Just bucket defaultS3ReqInfo
, riObject = Just object { riMethod = HT.methodPut,
, riHeaders = headers riBucket = Just bucket,
, riPayload = payload riObject = Just object,
} riHeaders = headers,
riPayload = payload
}
let rheaders = NC.responseHeaders resp let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders etag = getETagHeader rheaders
maybe maybe
(throwIO MErrVETagHeaderNotFound) (throwIO MErrVETagHeaderNotFound)
return etag return
etag
-- | PUT an object into the service. This function performs a single -- | PUT an object into the service. This function performs a single
-- PUT object call, and so can only transfer objects upto 5GiB. -- PUT object call, and so can only transfer objects upto 5GiB.
putObjectSingle :: Bucket -> Object -> [HT.Header] -> Handle -> Int64 putObjectSingle ::
-> Int64 -> Minio ETag Bucket ->
Object ->
[HT.Header] ->
Handle ->
Int64 ->
Int64 ->
Minio ETag
putObjectSingle bucket object headers h offset size = do putObjectSingle bucket object headers h offset size = do
-- check length is within single PUT object size. -- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $ when (size > maxSinglePutObjectSizeBytes)
throwIO $ MErrVSinglePUTSizeExceeded size $ throwIO
$ MErrVSinglePUTSizeExceeded size
-- content-length header is automatically set by library. -- content-length header is automatically set by library.
let payload = mkStreamingPayload $ PayloadH h offset size let payload = mkStreamingPayload $ PayloadH h offset size
resp <- executeRequest $ resp <-
defaultS3ReqInfo { riMethod = HT.methodPut executeRequest $
, riBucket = Just bucket defaultS3ReqInfo
, riObject = Just object { riMethod = HT.methodPut,
, riHeaders = headers riBucket = Just bucket,
, riPayload = payload riObject = Just object,
} riHeaders = headers,
riPayload = payload
}
let rheaders = NC.responseHeaders resp let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders etag = getETagHeader rheaders
maybe maybe
(throwIO MErrVETagHeaderNotFound) (throwIO MErrVETagHeaderNotFound)
return etag return
etag
-- | List objects in a bucket matching prefix up to delimiter, -- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextMarker. -- starting from nextMarker.
listObjectsV1' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int listObjectsV1' ::
-> Minio ListObjectsV1Result Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsV1Result
listObjectsV1' bucket prefix nextMarker delimiter maxKeys = do listObjectsV1' bucket prefix nextMarker delimiter maxKeys = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet resp <-
, riBucket = Just bucket executeRequest $
, riQueryParams = mkOptionalParams params defaultS3ReqInfo
} { riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
parseListObjectsV1Response $ NC.responseBody resp parseListObjectsV1Response $ NC.responseBody resp
where where
params = [ params =
("marker", nextMarker) [ ("marker", nextMarker),
, ("prefix", prefix) ("prefix", prefix),
, ("delimiter", delimiter) ("delimiter", delimiter),
, ("max-keys", show <$> maxKeys) ("max-keys", show <$> maxKeys)
] ]
-- | List objects in a bucket matching prefix up to delimiter, -- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextToken. -- starting from nextToken.
listObjects' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int listObjects' ::
-> Minio ListObjectsResult Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsResult
listObjects' bucket prefix nextToken delimiter maxKeys = do listObjects' bucket prefix nextToken delimiter maxKeys = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet resp <-
, riBucket = Just bucket executeRequest $
, riQueryParams = mkOptionalParams params defaultS3ReqInfo
} { riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
parseListObjectsResponse $ NC.responseBody resp parseListObjectsResponse $ NC.responseBody resp
where where
params = [ params =
("list-type", Just "2") [ ("list-type", Just "2"),
, ("continuation_token", nextToken) ("continuation_token", nextToken),
, ("prefix", prefix) ("prefix", prefix),
, ("delimiter", delimiter) ("delimiter", delimiter),
, ("max-keys", show <$> maxKeys) ("max-keys", show <$> maxKeys)
] ]
-- | DELETE a bucket from the service. -- | DELETE a bucket from the service.
deleteBucket :: Bucket -> Minio () deleteBucket :: Bucket -> Minio ()
deleteBucket bucket = void $ deleteBucket bucket =
executeRequest $ void
defaultS3ReqInfo { riMethod = HT.methodDelete $ executeRequest
, riBucket = Just bucket $ defaultS3ReqInfo
} { riMethod = HT.methodDelete,
riBucket = Just bucket
}
-- | DELETE an object from the service. -- | DELETE an object from the service.
deleteObject :: Bucket -> Object -> Minio () deleteObject :: Bucket -> Object -> Minio ()
deleteObject bucket object = void $ deleteObject bucket object =
executeRequest $ void
defaultS3ReqInfo { riMethod = HT.methodDelete $ executeRequest
, riBucket = Just bucket $ defaultS3ReqInfo
, riObject = Just object { riMethod = HT.methodDelete,
} riBucket = Just bucket,
riObject = Just object
}
-- | Create a new multipart upload. -- | Create a new multipart upload.
newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
newMultipartUpload bucket object headers = do newMultipartUpload bucket object headers = do
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPost resp <-
, riBucket = Just bucket executeRequest $
, riObject = Just object defaultS3ReqInfo
, riQueryParams = [("uploads", Nothing)] { riMethod = HT.methodPost,
, riHeaders = headers riBucket = Just bucket,
} riObject = Just object,
riQueryParams = [("uploads", Nothing)],
riHeaders = headers
}
parseNewMultipartUpload $ NC.responseBody resp parseNewMultipartUpload $ NC.responseBody resp
-- | Upload a single part of an ongoing multipart upload. Returns the
-- (part number, ETag) pair on success; throws
-- 'MErrVETagHeaderNotFound' if the server response lacks an ETag
-- header.
putObjectPart ::
  Bucket ->
  Object ->
  UploadId ->
  PartNumber ->
  [HT.Header] ->
  Payload ->
  Minio PartTuple
putObjectPart bucket object uploadId partNumber headers payload = do
  -- wrap the payload as a conduit so a streaming signature can be used
  let streamingPayload = mkStreamingPayload payload
      reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPut,
            riBucket = Just bucket,
            riObject = Just object,
            riQueryParams = mkOptionalParams params,
            riHeaders = headers,
            riPayload = streamingPayload
          }
  resp <- executeRequest reqInfo
  let etagMay = getETagHeader $ NC.responseHeaders resp
  case etagMay of
    Nothing -> throwIO MErrVETagHeaderNotFound
    Just etag -> return (partNumber, etag)
  where
    params =
      [ ("uploadId", Just uploadId),
        ("partNumber", Just $ show partNumber)
      ]
-- | Render a 'SourceInfo' as the @x-amz-copy-source*@ request headers
-- used by the server-side copy APIs: the copy-source path itself, an
-- optional byte-range header, and any conditional-copy headers that
-- are set on the source.
srcInfoToHeaders :: SourceInfo -> [HT.Header]
srcInfoToHeaders srcInfo = copySrcHdr : rangeHdr ++ condHdrs
  where
    copySrcHdr =
      ( "x-amz-copy-source",
        toUtf8 $
          T.concat
            [ "/",
              srcBucket srcInfo,
              "/",
              srcObject srcInfo
            ]
      )
    names =
      [ "x-amz-copy-source-if-match",
        "x-amz-copy-source-if-none-match",
        "x-amz-copy-source-if-unmodified-since",
        "x-amz-copy-source-if-modified-since"
      ]
    -- extract each conditional value that is present on the source
    values =
      mapMaybe
        (fmap encodeUtf8 . (srcInfo &))
        [ srcIfMatch,
          srcIfNoneMatch,
          fmap formatRFC1123 . srcIfUnmodifiedSince,
          fmap formatRFC1123 . srcIfModifiedSince
        ]
    condHdrs = zip names values
    rangeHdr =
      maybe [] (\r -> [("x-amz-copy-source-range", HT.renderByteRanges [r])]) $
        toByteRange <$> srcRange srcInfo
-- | Convert an inclusive (start, end) offset pair into an HTTP byte
-- range suitable for a Range-style header.
toByteRange :: (Int64, Int64) -> HT.ByteRange
toByteRange (start, end) =
  HT.ByteRangeFromTo (fromIntegral start) (fromIntegral end)
-- | Server-side copy of an object (or a range of it) into a part of
-- an ongoing multipart upload. Returns the new part's ETag and its
-- last-modified time as reported by the server.
copyObjectPart ::
  DestinationInfo ->
  SourceInfo ->
  UploadId ->
  PartNumber ->
  [HT.Header] ->
  Minio (ETag, UTCTime)
copyObjectPart dstInfo srcInfo uploadId partNumber headers = do
  let reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPut,
            riBucket = Just $ dstBucket dstInfo,
            riObject = Just $ dstObject dstInfo,
            riQueryParams = mkOptionalParams params,
            -- copy-source headers go after any caller-supplied headers
            riHeaders = headers ++ srcInfoToHeaders srcInfo
          }
  resp <- executeRequest reqInfo
  parseCopyObjectResponse $ NC.responseBody resp
  where
    params =
      [ ("uploadId", Just uploadId),
        ("partNumber", Just $ show partNumber)
      ]
-- | Server-side copy of an object of up to 5GiB in a single request.
-- Throws 'MErrVCopyObjSingleNoRangeAccepted' if a source byte range is
-- set (ranges are only valid for part-wise copies), and otherwise
-- surfaces any error returned by the server.
copyObjectSingle ::
  Bucket ->
  Object ->
  SourceInfo ->
  [HT.Header] ->
  Minio (ETag, UTCTime)
copyObjectSingle bucket object srcInfo headers = do
  -- a byte range is not meaningful for a whole-object copy
  when (isJust $ srcRange srcInfo) $
    throwIO MErrVCopyObjSingleNoRangeAccepted
  let reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPut,
            riBucket = Just bucket,
            riObject = Just object,
            riHeaders = headers ++ srcInfoToHeaders srcInfo
          }
  resp <- executeRequest reqInfo
  parseCopyObjectResponse $ NC.responseBody resp
-- | Finish a multipart upload by sending the list of uploaded parts
-- to the server. Returns the ETag of the assembled object.
completeMultipartUpload ::
  Bucket ->
  Object ->
  UploadId ->
  [PartTuple] ->
  Minio ETag
completeMultipartUpload bucket object uploadId partTuple = do
  let body = mkCompleteMultipartUploadRequest partTuple
      reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPost,
            riBucket = Just bucket,
            riObject = Just object,
            riQueryParams = mkOptionalParams params,
            riPayload = PayloadBS body
          }
  resp <- executeRequest reqInfo
  parseCompleteMultipartUploadResponse $ NC.responseBody resp
  where
    params = [("uploadId", Just uploadId)]
-- | Abort an ongoing multipart upload, discarding any parts uploaded
-- so far. The server response body is ignored.
abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
abortMultipartUpload bucket object uploadId =
  void $
    executeRequest $
      defaultS3ReqInfo
        { riMethod = HT.methodDelete,
          riBucket = Just bucket,
          riObject = Just object,
          riQueryParams = mkOptionalParams params
        }
  where
    params = [("uploadId", Just uploadId)]
-- | Fetch one page of incomplete multipart uploads in a bucket,
-- filtered by the optional prefix/delimiter and paginated via the
-- key/upload-id markers and max-keys limit.
listIncompleteUploads' ::
  Bucket ->
  Maybe Text ->
  Maybe Text ->
  Maybe Text ->
  Maybe Text ->
  Maybe Int ->
  Minio ListUploadsResult
listIncompleteUploads' bucket prefix delimiter keyMarker uploadIdMarker maxKeys = do
  resp <-
    executeRequest $
      defaultS3ReqInfo
        { riMethod = HT.methodGet,
          riBucket = Just bucket,
          riQueryParams = queryParams
        }
  parseListUploadsResponse $ NC.responseBody resp
  where
    -- "uploads" is mandatory; the rest are included only when set
    queryParams =
      ("uploads", Nothing)
        : mkOptionalParams
          [ ("prefix", prefix),
            ("delimiter", delimiter),
            ("key-marker", keyMarker),
            ("upload-id-marker", uploadIdMarker),
            ("max-uploads", show <$> maxKeys)
          ]
-- | Fetch one page of already-uploaded parts of an ongoing multipart
-- upload, paginated via the part-number marker and max-parts limit.
listIncompleteParts' ::
  Bucket ->
  Object ->
  UploadId ->
  Maybe Text ->
  Maybe Text ->
  Minio ListPartsResult
listIncompleteParts' bucket object uploadId maxParts partNumMarker = do
  resp <-
    executeRequest $
      defaultS3ReqInfo
        { riMethod = HT.methodGet,
          riBucket = Just bucket,
          riObject = Just object,
          riQueryParams = mkOptionalParams queryParams
        }
  parseListPartsResponse $ NC.responseBody resp
  where
    -- optional query parameters; uploadId is always present
    queryParams =
      [ ("uploadId", Just uploadId),
        ("part-number-marker", partNumMarker),
        ("max-parts", maxParts)
      ]
-- | Retrieve object metadata via a HEAD request. Throws
-- 'MErrVInvalidObjectInfoResponse' when the response headers cannot
-- be parsed into an 'ObjectInfo'.
headObject :: Bucket -> Object -> [HT.Header] -> Minio ObjectInfo
headObject bucket object reqHeaders = do
  resp <-
    executeRequest $
      defaultS3ReqInfo
        { riMethod = HT.methodHead,
          riBucket = Just bucket,
          riObject = Just object,
          riHeaders = reqHeaders
        }
  case parseGetObjectHeaders object $ NC.responseHeaders resp of
    Nothing -> throwIO MErrVInvalidObjectInfoResponse
    Just objectInfo -> return objectInfo
-- | Check whether a bucket exists. A @NoSuchBucket@ service error or
-- an HTTP 404 status is translated to 'False'; any other failure is
-- re-thrown.
headBucket :: Bucket -> Minio Bool
headBucket bucket =
  headBucketEx
    `catches` [ Handler handleNoSuchBucket,
                Handler handleStatus404
              ]
  where
    handleNoSuchBucket :: ServiceErr -> Minio Bool
    handleNoSuchBucket err =
      if err == NoSuchBucket
        then return False
        else throwIO err
    handleStatus404 :: NC.HttpException -> Minio Bool
    handleStatus404 e@(NC.HttpExceptionRequest _ (NC.StatusCodeException res _))
      | NC.responseStatus res == status404 = return False
      | otherwise = throwIO e
    handleStatus404 e = throwIO e
    -- issue the HEAD request and report whether the server said 200 OK
    headBucketEx = do
      resp <-
        executeRequest $
          defaultS3ReqInfo
            { riMethod = HT.methodHead,
              riBucket = Just bucket
            }
      return $ NC.responseStatus resp == HT.ok200
-- | Replace the notification configuration on a bucket with the given
-- one. The XML payload is generated under the service namespace read
-- from the connection environment.
putBucketNotification :: Bucket -> Notification -> Minio ()
putBucketNotification bucket ncfg = do
  ns <- asks getSvcNamespace
  let reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPut,
            riBucket = Just bucket,
            riQueryParams = [("notification", Nothing)],
            riPayload = PayloadBS $ mkPutNotificationRequest ns ncfg
          }
  void $ executeRequest reqInfo
-- | Fetch and parse the notification configuration currently set on
-- a bucket.
getBucketNotification :: Bucket -> Minio Notification
getBucketNotification bucket = do
  resp <-
    executeRequest $
      defaultS3ReqInfo
        { riMethod = HT.methodGet,
          riBucket = Just bucket,
          riQueryParams = [("notification", Nothing)]
        }
  parseNotification $ NC.responseBody resp
-- | Remove all notifications configured on a bucket. -- | Remove all notifications configured on a bucket.
@ -490,11 +616,14 @@ removeAllBucketNotification = flip putBucketNotification defaultNotification
-- | Fetch the access policy document set on a bucket, decoded
-- leniently from the UTF-8 response body.
getBucketPolicy :: Bucket -> Minio Text
getBucketPolicy bucket = do
  resp <-
    executeRequest $
      defaultS3ReqInfo
        { riMethod = HT.methodGet,
          riBucket = Just bucket,
          riQueryParams = [("policy", Nothing)]
        }
  return $ decodeUtf8Lenient $ toStrictBS $ NC.responseBody resp
-- | Set a new policy on a bucket. -- | Set a new policy on a bucket.
-- As a special condition if the policy is empty -- As a special condition if the policy is empty
@ -506,18 +635,22 @@ setBucketPolicy bucket policy = do
else putBucketPolicy bucket policy else putBucketPolicy bucket policy
-- | Store a new access policy document on a bucket. The policy text
-- is sent UTF-8 encoded as the request body.
putBucketPolicy :: Bucket -> Text -> Minio ()
putBucketPolicy bucket policy = do
  let reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPut,
            riBucket = Just bucket,
            riQueryParams = [("policy", Nothing)],
            riPayload = PayloadBS $ encodeUtf8 policy
          }
  void $ executeRequest reqInfo
-- | Remove any access policy set on a bucket.
deleteBucketPolicy :: Bucket -> Minio ()
deleteBucketPolicy bucket = do
  let reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodDelete,
            riBucket = Just bucket,
            riQueryParams = [("policy", Nothing)]
          }
  void $ executeRequest reqInfo

View File

@ -15,113 +15,103 @@
-- --
module Network.Minio.SelectAPI module Network.Minio.SelectAPI
( ( -- | The `selectObjectContent` allows querying CSV, JSON or Parquet
-- format objects in AWS S3 and in MinIO using SQL Select
-- statements. This allows significant reduction of data transfer
-- from object storage for computation-intensive tasks, as relevant
-- data is filtered close to the storage.
selectObjectContent,
SelectRequest,
selectRequest,
-- | The `selectObjectContent` allows querying CSV, JSON or Parquet -- *** Input Serialization
-- format objects in AWS S3 and in MinIO using SQL Select InputSerialization,
-- statements. This allows significant reduction of data transfer defaultCsvInput,
-- from object storage for computation-intensive tasks, as relevant linesJsonInput,
-- data is filtered close to the storage. documentJsonInput,
defaultParquetInput,
setInputCSVProps,
CompressionType (..),
setInputCompressionType,
selectObjectContent -- *** CSV Format details
, SelectRequest -- | CSV format options such as delimiters and quote characters are
, selectRequest -- specified using using the functions below. Options are combined
-- monoidally.
CSVProp,
recordDelimiter,
fieldDelimiter,
quoteCharacter,
quoteEscapeCharacter,
commentCharacter,
allowQuotedRecordDelimiter,
FileHeaderInfo (..),
fileHeaderInfo,
QuoteFields (..),
quoteFields,
-- *** Input Serialization -- *** Output Serialization
OutputSerialization,
defaultCsvOutput,
defaultJsonOutput,
outputCSVFromProps,
outputJSONFromRecordDelimiter,
, InputSerialization -- *** Progress messages
, defaultCsvInput setRequestProgressEnabled,
, linesJsonInput
, documentJsonInput
, defaultParquetInput
, setInputCSVProps
, CompressionType(..) -- *** Interpreting Select output
, setInputCompressionType
-- *** CSV Format details -- | The conduit returned by `selectObjectContent` returns values of
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
getPayloadBytes,
EventMessage (..),
Progress (..),
Stats,
)
where
-- | CSV format options such as delimiters and quote characters are import Conduit ((.|))
-- specified using using the functions below. Options are combined import qualified Conduit as C
-- monoidally. import qualified Data.Binary as Bin
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (MonadUnliftIO)
-- | Failure modes encountered while decoding the binary event-stream
-- protocol used by S3 Select responses.
data EventStreamException
  = -- | CRC of the 8-byte message prelude did not match
    ESEPreludeCRCFailed
  | -- | CRC of the full message did not match
    ESEMessageCRCFailed
  | -- | stream ended before the expected number of bytes was read
    ESEUnexpectedEndOfStream
  | -- | a binary field failed to decode; carries the decoder's message
    ESEDecodeFail [Char]
  | ESEInvalidHeaderType
  | ESEInvalidHeaderValueType
  | ESEInvalidMessageType
  deriving (Eq, Show)

instance Exception EventStreamException
@ -131,169 +121,174 @@ chunkSize = 32 * 1024
-- | Decode a value of a 'Bin.Binary' instance from a strict
-- 'ByteString', throwing 'ESEDecodeFail' with the decoder's message
-- on failure.
--
-- Fix: dropped the redundant @do@ wrapping the lone @case@ expression
-- (hlint: "Redundant do").
parseBinary :: Bin.Binary a => ByteString -> IO a
parseBinary b =
  case Bin.decodeOrFail $ LB.fromStrict b of
    Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
    Right (_, _, r) -> return r
-- | Map a wire-format header key (e.g. @":event-type"@) to its
-- 'MsgHeaderName'; throws 'ESEInvalidHeaderType' for any unknown key.
bytesToHeaderName :: Text -> IO MsgHeaderName
bytesToHeaderName ":message-type" = return MessageType
bytesToHeaderName ":event-type" = return EventType
bytesToHeaderName ":content-type" = return ContentType
bytesToHeaderName ":error-code" = return ErrorCode
bytesToHeaderName ":error-message" = return ErrorMessage
bytesToHeaderName _ = throwIO ESEInvalidHeaderType
-- | Consume and decode @hdrLen@ bytes of event-stream headers from
-- the upstream conduit. Each header is: 1-byte key length, key bytes,
-- 1-byte value type (must be 7, i.e. a string), 2-byte value length,
-- value bytes. Recurses until the declared header length is used up.
parseHeaders ::
  MonadUnliftIO m =>
  Word32 ->
  C.ConduitM ByteString a m [MessageHeader]
parseHeaders 0 = return []
parseHeaders hdrLen = do
  keyLenBytes <- readNBytes 1
  keyLen :: Word8 <- liftIO $ parseBinary keyLenBytes
  keyBytes <- readNBytes $ fromIntegral keyLen
  headerName <- liftIO $ bytesToHeaderName $ decodeUtf8Lenient keyBytes
  valTypeBytes <- readNBytes 1
  valType :: Word8 <- liftIO $ parseBinary valTypeBytes
  -- only string-typed (7) header values are valid in this protocol
  when (valType /= 7) $ throwIO ESEInvalidHeaderValueType
  valLenBytes <- readNBytes 2
  valLen :: Word16 <- liftIO $ parseBinary valLenBytes
  valBytes <- readNBytes $ fromIntegral valLen
  let header = (headerName, decodeUtf8Lenient valBytes)
      -- bytes consumed by this header record
      consumed = 1 + fromIntegral keyLen + 1 + 2 + fromIntegral valLen
  rest <- parseHeaders (hdrLen - consumed)
  return (header : rest)
-- | Read exactly @n@ bytes from the upstream conduit, throwing
-- 'ESEUnexpectedEndOfStream' if fewer are available.
readNBytes :: MonadUnliftIO m => Int -> C.ConduitM ByteString a m ByteString
readNBytes n = do
  taken <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
  if B.length taken == n
    then return taken
    else throwIO ESEUnexpectedEndOfStream
-- | Verify the CRC32 checksums of each event-stream message while
-- re-yielding its bytes (minus the checksums) downstream. Throws
-- 'ESEPreludeCRCFailed' or 'ESEMessageCRCFailed' on mismatch; loops
-- forever — downstream decides when to stop consuming.
crcCheck ::
  MonadUnliftIO m =>
  C.ConduitM ByteString ByteString m ()
crcCheck = do
  -- prelude: total length (4) + header length (4) + prelude CRC (4)
  prelude <- readNBytes 12
  totalLen :: Word32 <- liftIO $ parseBinary $ B.take 4 prelude
  preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 prelude
  when (crc32 (B.take 8 prelude) /= preludeCRC) $
    throwIO ESEPreludeCRCFailed
  -- forward the prelude without its CRC field
  C.yield $ B.take 8 prelude
  -- 12 bytes consumed so far; stream the remaining body (excluding
  -- the trailing 4-byte message CRC) while accumulating its checksum
  finalCrc <- accumulateYield (fromIntegral totalLen - 16) (crc32 prelude)
  crcBytes <- readNBytes 4
  expectedCrc :: Word32 <- liftIO $ parseBinary crcBytes
  when (finalCrc /= expectedCrc) $
    throwIO ESEMessageCRCFailed
  crcCheck
  where
    accumulateYield remaining checkSum = do
      chunk <- readNBytes $ min remaining chunkSize
      let checkSum' = crc32Update checkSum chunk
          remaining' = remaining - B.length chunk
      C.yield chunk
      if remaining' > 0
        then accumulateYield remaining' checkSum'
        else return checkSum'
-- | Decode event-stream messages (already CRC-checked by 'crcCheck')
-- into 'EventMessage' values. Record payloads are streamed through in
-- chunks; Progress/Stats payloads are parsed as XML; an "End" event or
-- a request-level error terminates the loop.
handleMessage :: MonadUnliftIO m => C.ConduitT ByteString EventMessage m ()
handleMessage = do
  msgLenBytes <- readNBytes 4
  msgLen :: Word32 <- liftIO $ parseBinary msgLenBytes
  hdrLenBytes <- readNBytes 4
  hdrLen :: Word32 <- liftIO $ parseBinary hdrLenBytes
  hs <- parseHeaders hdrLen
  -- payload = total length minus the 16 bytes of framing overhead
  let payloadLen = msgLen - hdrLen - 16
      getHdrVal h = fmap snd . headMay . filter ((h ==) . fst)
      eventHdrValue = getHdrVal EventType hs
      msgHdrValue = getHdrVal MessageType hs
      errCode = getHdrVal ErrorCode hs
      errMsg = getHdrVal ErrorMessage hs
  case msgHdrValue of
    Just "event" -> do
      case eventHdrValue of
        Just "Records" -> passThrough $ fromIntegral payloadLen
        Just "Cont" -> return ()
        Just "Progress" -> do
          bs <- readNBytes $ fromIntegral payloadLen
          progress <- parseSelectProgress bs
          C.yield $ ProgressEventMessage progress
        Just "Stats" -> do
          bs <- readNBytes $ fromIntegral payloadLen
          stats <- parseSelectProgress bs
          C.yield $ StatsEventMessage stats
        Just "End" -> return ()
        _ -> throwIO ESEInvalidMessageType
      -- keep reading messages until the terminal "End" event
      when (eventHdrValue /= Just "End") handleMessage
    Just "error" -> do
      let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
      maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
    _ -> throwIO ESEInvalidMessageType
  where
    -- stream a record payload downstream in chunkSize pieces
    passThrough 0 = return ()
    passThrough n = do
      b <- readNBytes $ min n chunkSize
      C.yield $ RecordPayloadEventMessage b
      passThrough $ n - B.length b
-- | Full decode pipeline for the S3 Select response stream: CRC
-- verification followed by message decoding.
selectProtoConduit ::
  MonadUnliftIO m =>
  C.ConduitT ByteString EventMessage m ()
selectProtoConduit = crcCheck .| handleMessage
-- | Run the given 'SelectRequest' against an object and return a
-- conduit of decoded 'EventMessage's for the client to consume.
selectObjectContent ::
  Bucket ->
  Object ->
  SelectRequest ->
  Minio (C.ConduitT () EventMessage Minio ())
selectObjectContent b o r = do
  let reqInfo =
        defaultS3ReqInfo
          { riMethod = HT.methodPost,
            riBucket = Just b,
            riObject = Just o,
            riPayload = PayloadBS $ mkSelectRequest r,
            -- location lookup is not needed for this call
            riNeedsLocation = False,
            riQueryParams = [("select", Nothing), ("select-type", Just "2")]
          }
  resp <- mkStreamRequest reqInfo
  return $ NC.responseBody resp .| selectProtoConduit
-- | Helper conduit that passes through only record payload bytes,
-- raises request-level errors as 'SelectErr', and drops all other
-- event messages (progress, stats, ...).
getPayloadBytes :: MonadIO m => C.ConduitT EventMessage ByteString m ()
getPayloadBytes = do
  evM <- C.await
  case evM of
    Nothing -> return ()
    Just ev -> do
      case ev of
        RecordPayloadEventMessage b -> C.yield b
        RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m
        _ -> return ()
      getPayloadBytes

View File

@ -13,57 +13,62 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE BangPatterns #-}
module Network.Minio.Sign.V4 where module Network.Minio.Sign.V4 where
import qualified Conduit as C import qualified Conduit as C
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Base64 as Base64 import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Char8 as B8 import qualified Data.ByteString.Char8 as B8
import Data.CaseInsensitive (mk) import qualified Data.ByteString.Lazy as LB
import qualified Data.CaseInsensitive as CI import Data.CaseInsensitive (mk)
import qualified Data.HashMap.Strict as Map import qualified Data.CaseInsensitive as CI
import qualified Data.HashSet as Set import qualified Data.HashMap.Strict as Map
import qualified Data.Time as Time import qualified Data.HashSet as Set
import qualified Network.HTTP.Conduit as NC import qualified Data.Time as Time
import Network.HTTP.Types (Header, parseQuery) import Lib.Prelude
import qualified Network.HTTP.Types as H import qualified Network.HTTP.Conduit as NC
import Text.Printf (printf) import Network.HTTP.Types (Header, parseQuery)
import qualified Network.HTTP.Types as H
import Lib.Prelude import Network.Minio.Data.ByteString
import Network.Minio.Data.Crypto
import Network.Minio.Data.ByteString import Network.Minio.Data.Time
import Network.Minio.Data.Crypto import Network.Minio.Errors
import Network.Minio.Data.Time import Text.Printf (printf)
import Network.Minio.Errors
-- Headers that are excluded when computing the string to sign for a
-- request.
ignoredHeaders :: Set.HashSet ByteString
ignoredHeaders =
  Set.fromList
    [ CI.foldedCase h
      | h <- [H.hAuthorization, H.hContentType, H.hUserAgent]
    ]
-- | Intermediate values computed while signing a request; kept
-- together for debugging (see 'debugPrintSignV4Data').
data SignV4Data = SignV4Data
  { sv4SignTime :: UTCTime,
    sv4Scope :: ByteString,
    sv4CanonicalRequest :: ByteString,
    sv4HeadersToSign :: [(ByteString, ByteString)],
    sv4Output :: [(ByteString, ByteString)],
    sv4StringToSign :: ByteString,
    sv4SigningKey :: ByteString
  }
  deriving (Show)
-- | Inputs to the V4 signing routines: credentials, the signing time,
-- an optional region, an optional presigned-URL expiry in seconds,
-- and an optional payload SHA-256.
data SignParams = SignParams
  { spAccessKey :: Text,
    spSecretKey :: Text,
    spTimeStamp :: UTCTime,
    spRegion :: Maybe Text,
    spExpirySecs :: Maybe Int,
    spPayloadHash :: Maybe ByteString
  }
  deriving (Show)
debugPrintSignV4Data :: SignV4Data -> IO () debugPrintSignV4Data :: SignV4Data -> IO ()
debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
@ -79,22 +84,23 @@ debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
B8.putStrLn "END of SignV4Data =========" B8.putStrLn "END of SignV4Data ========="
where where
printBytes b = do printBytes b = do
mapM_ (\x -> B.putStr $ B.concat [show x, " "]) $ B.unpack b mapM_ (\x -> B.putStr $ B.singleton x <> " ") $ B.unpack b
B8.putStrLn "" B8.putStrLn ""
-- | Build the @Authorization@ header for a V4-signed request from the
-- access key, the credential scope, the @;@-separated list of signed
-- header names, and the computed signature.
mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header
mkAuthHeader accessKey scope signedHeaderKeys sign = (H.hAuthorization, value)
  where
    value =
      "AWS4-HMAC-SHA256 Credential="
        <> toUtf8 accessKey
        <> "/"
        <> scope
        <> ", SignedHeaders="
        <> signedHeaderKeys
        <> ", Signature="
        <> sign
-- | Given SignParams and request details, including request method, -- | Given SignParams and request details, including request method,
-- request path, headers, query params and payload hash, generates an -- request path, headers, query params and payload hash, generates an
@ -110,122 +116,132 @@ mkAuthHeader accessKey scope signedHeaderKeys sign =
-- the request. -- the request.
signV4 :: SignParams -> NC.Request -> [(ByteString, ByteString)]
signV4 !sp !req = output
  where
    region = fromMaybe "" (spRegion sp)
    ts = spTimeStamp sp
    scope = mkScope ts region
    accessKey = toUtf8 (spAccessKey sp)
    secretKey = toUtf8 (spSecretKey sp)
    expiry = spExpirySecs sp
    sha256Hdr =
      ( "x-amz-content-sha256",
        fromMaybe "UNSIGNED-PAYLOAD" (spPayloadHash sp)
      )
    -- Headers added to the request; skipped when presigning (i.e. when
    -- an expiry is given), as those values travel as query parameters.
    datePair = ("X-Amz-Date", awsTimeFormatBS ts)
    computedHeaders =
      NC.requestHeaders req
        ++ if isJust expiry
          then []
          else map (\(x, y) -> (mk x, y)) [datePair, sha256Hdr]
    headersToSign = getHeadersToSign computedHeaders
    signedHeaderKeys = B.intercalate ";" (sort (map fst headersToSign))
    -- Query parameters added before signing, used only for presigned
    -- URLs (i.e. when `isJust expiry`).
    authQP =
      [ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
        ("X-Amz-Credential", B.concat [accessKey, "/", scope]),
        datePair,
        ("X-Amz-Expires", maybe "" showBS expiry),
        ("X-Amz-SignedHeaders", signedHeaderKeys)
      ]
    finalQP =
      parseQuery (NC.queryString req)
        ++ if isJust expiry
          then (fmap . fmap) Just authQP
          else []
    -- Step 1: canonical request.
    canonicalRequest =
      mkCanonicalRequest
        False
        sp
        (NC.setQueryString finalQP req)
        headersToSign
    -- Step 2: string to sign.
    stringToSign = mkStringToSign ts scope canonicalRequest
    -- Step 3: signing key and signature.
    signingKey = mkSigningKey ts region secretKey
    signature = computeSignature stringToSign signingKey
    -- Step 4: Authorization header.
    authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
    -- Output pairs: query parameters for presigned URLs, headers
    -- otherwise.
    output =
      if isJust expiry
        then ("X-Amz-Signature", signature) : authQP
        else
          [ (\(x, y) -> (CI.foldedCase x, y)) authHeader,
            datePair,
            sha256Hdr
          ]
-- | Credential scope string: @\<yyyymmdd\>/\<region\>/s3/aws4_request@.
mkScope :: UTCTime -> Text -> ByteString
mkScope ts region =
  toUtf8 (Time.formatTime Time.defaultTimeLocale "%Y%m%d" ts)
    <> "/"
    <> toUtf8 region
    <> "/s3/aws4_request"
-- | Fold header names to lower case, strip whitespace from values, and
-- drop the headers in 'ignoredHeaders'.
getHeadersToSign :: [Header] -> [(ByteString, ByteString)]
getHeadersToSign !h =
  [ (name, stripBS v)
    | (k, v) <- h,
      let name = CI.foldedCase k,
      not (Set.member name ignoredHeaders)
  ]
-- | Build the canonical request string for V4 signing. When the first
-- argument is True, the streaming payload sentinel is used as the
-- payload hash instead of the value from 'SignParams'.
mkCanonicalRequest ::
  Bool ->
  SignParams ->
  NC.Request ->
  [(ByteString, ByteString)] ->
  ByteString
mkCanonicalRequest !isStreaming !sp !req !headersForSign =
  B.intercalate
    "\n"
    [ NC.method req,
      uriEncode False (NC.path req),
      canonicalQueryString,
      canonicalHeaders,
      signedHeaders,
      payloadHashStr
    ]
  where
    encodePair (k, v) = (uriEncode True k, maybe "" (uriEncode True) v)
    canonicalQueryString =
      B.intercalate "&" $
        map (\(k, v) -> B.concat [k, "=", v]) $
          sort $
            map encodePair $
              parseQuery (NC.queryString req)
    sortedHeaders = sort headersForSign
    canonicalHeaders =
      B.concat (map (\(k, v) -> B.concat [k, ":", v, "\n"]) sortedHeaders)
    signedHeaders = B.intercalate ";" (map fst sortedHeaders)
    payloadHashStr
      | isStreaming = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
      | otherwise = fromMaybe "UNSIGNED-PAYLOAD" (spPayloadHash sp)
-- | The V4 "string to sign": algorithm tag, request time, credential
-- scope, and the SHA-256 of the canonical request, newline-separated.
mkStringToSign :: UTCTime -> ByteString -> ByteString -> ByteString
mkStringToSign ts !scope !canonicalRequest =
  "AWS4-HMAC-SHA256"
    <> "\n"
    <> awsTimeFormatBS ts
    <> "\n"
    <> scope
    <> "\n"
    <> hashSHA256 canonicalRequest
-- | Derive the V4 signing key by chaining HMAC-SHA256 over the date,
-- region, service name and the "aws4_request" terminator.
mkSigningKey :: UTCTime -> Text -> ByteString -> ByteString
mkSigningKey ts region !secretKey =
  let dateKey = hmacSHA256RawBS (awsDateFormatBS ts) (B.concat ["AWS4", secretKey])
      dateRegionKey = hmacSHA256RawBS (toUtf8 region) dateKey
      dateRegionServiceKey = hmacSHA256RawBS "s3" dateRegionKey
   in hmacSHA256RawBS "aws4_request" dateRegionServiceKey
-- | HMAC-SHA256 the string-to-sign with the signing key and render the
-- digest as lower-case hex.
computeSignature :: ByteString -> ByteString -> ByteString
computeSignature !toSign !key = digestToBase16 (hmacSHA256 toSign key)
@ -233,18 +249,19 @@ computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
-- | Takes a validated Post Policy JSON bytestring and sign parameters,
-- and returns form-data for the POST upload containing just the
-- signature and the base64-encoded post-policy.
signV4PostPolicy ::
  ByteString ->
  SignParams ->
  Map.HashMap Text ByteString
signV4PostPolicy !postPolicyJSON !sp =
  Map.fromList
    [ ("x-amz-signature", signature),
      ("policy", stringToSign)
    ]
  where
    stringToSign = Base64.encode postPolicyJSON
    region = fromMaybe "" (spRegion sp)
    signingKey = mkSigningKey (spTimeStamp sp) region (toUtf8 (spSecretKey sp))
    signature = computeSignature stringToSign signingKey
-- | Size in bytes (64 KiB) of each data chunk in the streaming-payload
-- signing scheme.
chunkSizeConstant :: Int
chunkSizeConstant = 65536
@ -252,140 +269,141 @@ chunkSizeConstant = 64 * 1024
-- base16Len computes the number of hex digits required to represent
-- @n (> 0)@; returns 0 for an input of 0.
base16Len :: Integral a => a -> Int
base16Len = go 0
  where
    go acc m
      | m == 0 = acc
      | otherwise = go (acc + 1) (m `div` 16)
-- | Length of the chunk-signed streaming body for a payload of the
-- given size: the full chunks, an optional short final data chunk, and
-- the empty terminating chunk, each with its signature framing.
signedStreamLength :: Int64 -> Int64
signedStreamLength dataLen =
  numChunks * fullChunkSize + lastChunkSize + finalChunkSize
  where
    chunkSzInt = fromIntegral chunkSizeConstant
    (numChunks, lastChunkLen) = quotRem dataLen chunkSzInt
    -- Chunk layout:
    -- hex(chunk-size) + ";chunk-signature=" + signature + "\r\n" + chunk-data + "\r\n"
    encodedChunkLen csz = fromIntegral (base16Len csz) + 17 + 64 + 2 + csz + 2
    fullChunkSize = encodedChunkLen chunkSzInt
    lastChunkSize = if lastChunkLen > 0 then encodedChunkLen lastChunkLen else 0
    finalChunkSize = 1 + 17 + 64 + 2 + 2
-- | Sign a request with the AWS streaming-payload scheme
-- (@STREAMING-AWS4-HMAC-SHA256-PAYLOAD@). Returns a function that,
-- given the payload source conduit, yields the request with signing
-- headers set and a body that emits chunk-signed data.
signV4Stream ::
  Int64 ->
  SignParams ->
  NC.Request ->
  (C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request)
signV4Stream !payloadLength !sp !req =
  \src ->
    req
      { NC.requestHeaders = finalReqHeaders,
        NC.requestBody =
          NC.requestBodySource signedContentLength $
            src C..| signerConduit numParts lastPSize seedSignature
      }
  where
    ts = spTimeStamp sp
    -- Append "aws-chunked" to any existing content-encoding header, or
    -- add one if absent.
    addContentEncoding hs =
      case filter (\(x, _) -> x == "content-encoding") hs of
        [] -> ("content-encoding", "aws-chunked") : hs
        (_, ce) : _ ->
          ("content-encoding", ce <> ",aws-chunked")
            : filter (\(x, _) -> x /= "content-encoding") hs
    -- Headers added to the request.
    datePair = ("X-Amz-Date", awsTimeFormatBS ts)
    computedHeaders = addContentEncoding (datePair : NC.requestHeaders req)
    -- Headers specific to the streaming signature.
    signedContentLength = signedStreamLength payloadLength
    streamingHeaders :: [Header]
    streamingHeaders =
      [ ("x-amz-decoded-content-length", showBS payloadLength),
        ("content-length", showBS signedContentLength),
        ("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
      ]
    headersToSign = getHeadersToSign (computedHeaders ++ streamingHeaders)
    signedHeaderKeys = B.intercalate ";" (sort (map fst headersToSign))
    finalQP = parseQuery (NC.queryString req)
    -- 1. Seed signature: canonical request, string to sign, signing
    -- key, signature, Authorization header.
    canonicalReq =
      mkCanonicalRequest
        True
        sp
        (NC.setQueryString finalQP req)
        headersToSign
    region = fromMaybe "" (spRegion sp)
    scope = mkScope ts region
    accessKey = spAccessKey sp
    secretKey = spSecretKey sp
    stringToSign = mkStringToSign ts scope canonicalReq
    signingKey = mkSigningKey ts region (toUtf8 secretKey)
    seedSignature = computeSignature stringToSign signingKey
    authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
    -- Final headers for the outgoing request.
    finalReqHeaders = authHeader : (computedHeaders ++ streamingHeaders)
    toHexStr n = B8.pack (printf "%x" n)
    (numParts, lastPSize) = payloadLength `quotRem` fromIntegral chunkSizeConstant
    -- String to sign for one chunk, chained to the previous chunk's
    -- signature.
    chunkStrToSign prevSign currChunkHash =
      B.intercalate
        "\n"
        [ "AWS4-HMAC-SHA256-PAYLOAD",
          awsTimeFormatBS ts,
          scope,
          prevSign,
          hashSHA256 "",
          currChunkHash
        ]
    -- Read exactly n bytes from upstream as a strict bytestring;
    -- throws if the stream ends early.
    mustTakeN n = do
      bs <- LB.toStrict <$> (C.takeCE n C..| C.sinkLazy)
      when (B.length bs /= n) $
        throwIO MErrVStreamingBodyUnexpectedEOF
      return bs
    -- Emit the chunk-signed body: the full-size chunks, then the short
    -- last chunk (if any), then the empty terminating chunk.
    signerConduit n lps prevSign
      | n > 0 = do
          bs <- mustTakeN chunkSizeConstant
          let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
              nextSign = computeSignature strToSign signingKey
              chunkBS =
                toHexStr chunkSizeConstant
                  <> ";chunk-signature="
                  <> nextSign
                  <> "\r\n"
                  <> bs
                  <> "\r\n"
          C.yield chunkBS
          signerConduit (n - 1) lps nextSign
      | lps > 0 = do
          bs <- mustTakeN (fromIntegral lps)
          let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
              nextSign = computeSignature strToSign signingKey
              chunkBS =
                toHexStr lps
                  <> ";chunk-signature="
                  <> nextSign
                  <> "\r\n"
                  <> bs
                  <> "\r\n"
          C.yield chunkBS
          signerConduit 0 0 nextSign
      | otherwise = do
          let strToSign = chunkStrToSign prevSign (hashSHA256 "")
              nextSign = computeSignature strToSign signingKey
          C.yield ("0;chunk-signature=" <> nextSign <> "\r\n\r\n")

View File

@ -16,37 +16,40 @@
module Network.Minio.Utils where module Network.Minio.Utils where
import qualified Conduit as C import qualified Conduit as C
import Control.Monad.IO.Unlift (MonadUnliftIO) import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original) import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.List as List import qualified Data.List as List
import qualified Data.Text as T import qualified Data.Text as T
import Data.Text.Read (decimal) import Data.Text.Read (decimal)
import Data.Time (defaultTimeLocale, parseTimeM, import Data.Time
rfc822DateFormat) ( defaultTimeLocale,
import Network.HTTP.Conduit (Response) parseTimeM,
import qualified Network.HTTP.Conduit as NC rfc822DateFormat,
import qualified Network.HTTP.Types as HT )
import qualified Network.HTTP.Types.Header as Hdr import Lib.Prelude
import qualified System.IO as IO import Network.HTTP.Conduit (Response)
import qualified UnliftIO as U import qualified Network.HTTP.Conduit as NC
import qualified UnliftIO.Async as A import qualified Network.HTTP.Types as HT
import qualified UnliftIO.MVar as UM import qualified Network.HTTP.Types.Header as Hdr
import Network.Minio.Data
import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse)
import qualified System.IO as IO
import qualified UnliftIO as U
import qualified UnliftIO.Async as A
import qualified UnliftIO.MVar as UM
import Lib.Prelude allocateReadFile ::
(MonadUnliftIO m, R.MonadResource m) =>
import Network.Minio.Data FilePath ->
import Network.Minio.Data.ByteString m (R.ReleaseKey, Handle)
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse)
allocateReadFile :: (MonadUnliftIO m, R.MonadResource m)
=> FilePath -> m (R.ReleaseKey, Handle)
allocateReadFile fp = do allocateReadFile fp = do
(rk, hdlE) <- R.allocate (openReadFile fp) cleanup (rk, hdlE) <- R.allocate (openReadFile fp) cleanup
either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE
@ -56,30 +59,37 @@ allocateReadFile fp = do
-- | Queries the file size from the handle. Catches any file operation
-- exceptions and returns Nothing instead.
getFileSize ::
  (MonadUnliftIO m, R.MonadResource m) =>
  Handle ->
  m (Maybe Int64)
getFileSize h = do
  resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
  return $ case resE of
    Left (_ :: IOException) -> Nothing
    Right sz -> Just sz
-- | Queries if handle is seekable. Catches any file operation
-- exceptions and returns False instead.
isHandleSeekable ::
  (R.MonadResource m, MonadUnliftIO m) =>
  Handle ->
  m Bool
isHandleSeekable h = do
  resE <- liftIO $ try $ IO.hIsSeekable h
  return $ case resE of
    Left (_ :: IOException) -> False
    Right seekable -> seekable
-- | Helper function that opens a handle to the filepath and performs -- | Helper function that opens a handle to the filepath and performs
-- the given action on it. Exceptions of type MError are caught and -- the given action on it. Exceptions of type MError are caught and
-- returned - both during file handle allocation and when the action -- returned - both during file handle allocation and when the action
-- is run. -- is run.
withNewHandle :: (MonadUnliftIO m, R.MonadResource m) withNewHandle ::
=> FilePath -> (Handle -> m a) -> m (Either IOException a) (MonadUnliftIO m, R.MonadResource m) =>
FilePath ->
(Handle -> m a) ->
m (Either IOException a)
withNewHandle fp fileAction = do withNewHandle fp fileAction = do
-- opening a handle can throw MError exception. -- opening a handle can throw MError exception.
handleE <- try $ allocateReadFile fp handleE <- try $ allocateReadFile fp
@ -103,24 +113,27 @@ getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs
-- Decode raw response headers into (name, value) Text pairs, with
-- whitespace stripped from the values.
getMetadata :: [HT.Header] -> [(Text, Text)]
getMetadata hs =
  [ (decodeUtf8Lenient (original k), decodeUtf8Lenient (stripBS v))
    | (k, v) <- hs
  ]
-- Keep only user-metadata headers: succeeds when the key names a
-- user-metadata header (per 'userMetadataHeaderNameMaybe'), pairing
-- the transformed key with the original value.
toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text)
toMaybeMetadataHeader (k, v) =
  fmap (\name -> (name, v)) (userMetadataHeaderNameMaybe k)
-- Collect the headers that are NOT user-metadata headers (per
-- 'userMetadataHeaderNameMaybe') into a map.
getNonUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getNonUserMetadataMap hdrs =
  H.fromList
    [ kv
      | kv@(k, _) <- hdrs,
        isNothing (userMetadataHeaderNameMaybe k)
    ]
-- | This function collects all headers starting with `x-amz-meta-`
-- and strips off this prefix, and returns a map.
getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getUserMetadataMap hdrs =
  H.fromList (mapMaybe toMaybeMetadataHeader hdrs)
getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime
getLastModifiedHeader hs = do getLastModifiedHeader hs = do
@ -132,17 +145,19 @@ getContentLength hs = do
nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs
fst <$> hush (decimal nbs) fst <$> hush (decimal nbs)
-- Decode UTF-8, replacing invalid byte sequences instead of throwing.
decodeUtf8Lenient :: ByteString -> Text
decodeUtf8Lenient bs = decodeUtf8With lenientDecode bs
-- True exactly for 2xx HTTP status codes.
isSuccessStatus :: HT.Status -> Bool
isSuccessStatus sts = code >= 200 && code < 300
  where
    code = HT.statusCode sts
httpLbs :: MonadIO m httpLbs ::
=> NC.Request -> NC.Manager MonadIO m =>
-> m (NC.Response LByteString) NC.Request ->
NC.Manager ->
m (NC.Response LByteString)
httpLbs req mgr = do httpLbs req mgr = do
respE <- liftIO $ tryHttpEx $ NC.httpLbs req mgr respE <- liftIO $ tryHttpEx $ NC.httpLbs req mgr
resp <- either throwIO return respE resp <- either throwIO return respE
@ -154,21 +169,25 @@ httpLbs req mgr = do
Just "application/json" -> do Just "application/json" -> do
sErr <- parseErrResponseJSON $ NC.responseBody resp sErr <- parseErrResponseJSON $ NC.responseBody resp
throwIO sErr throwIO sErr
_ ->
_ -> throwIO $ NC.HttpExceptionRequest req $ throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) (show resp) NC.StatusCodeException (void resp) (showBS resp)
return resp return resp
where where
tryHttpEx :: IO (NC.Response LByteString) tryHttpEx ::
-> IO (Either NC.HttpException (NC.Response LByteString)) IO (NC.Response LByteString) ->
IO (Either NC.HttpException (NC.Response LByteString))
tryHttpEx = try tryHttpEx = try
contentTypeMay resp = lookupHeader Hdr.hContentType $ contentTypeMay resp =
NC.responseHeaders resp lookupHeader Hdr.hContentType $
NC.responseHeaders resp
http :: (MonadUnliftIO m, R.MonadResource m) http ::
=> NC.Request -> NC.Manager (MonadUnliftIO m, R.MonadResource m) =>
-> m (Response (C.ConduitT () ByteString m ())) NC.Request ->
NC.Manager ->
m (Response (C.ConduitT () ByteString m ()))
http req mgr = do http req mgr = do
respE <- tryHttpEx $ NC.http req mgr respE <- tryHttpEx $ NC.http req mgr
resp <- either throwIO return respE resp <- either throwIO return respE
@ -178,25 +197,30 @@ http req mgr = do
respBody <- C.connect (NC.responseBody resp) CB.sinkLbs respBody <- C.connect (NC.responseBody resp) CB.sinkLbs
sErr <- parseErrResponse respBody sErr <- parseErrResponse respBody
throwIO sErr throwIO sErr
_ -> do _ -> do
content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
throwIO $ NC.HttpExceptionRequest req $ throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) content NC.StatusCodeException (void resp) content
return resp return resp
where where
tryHttpEx :: (MonadUnliftIO m) => m a tryHttpEx ::
-> m (Either NC.HttpException a) (MonadUnliftIO m) =>
m a ->
m (Either NC.HttpException a)
tryHttpEx = try tryHttpEx = try
contentTypeMay resp = lookupHeader Hdr.hContentType $ contentTypeMay resp =
NC.responseHeaders resp lookupHeader Hdr.hContentType $
NC.responseHeaders resp
-- Similar to mapConcurrently but limits the number of threads that -- Similar to mapConcurrently but limits the number of threads that
-- can run using a quantity semaphore. -- can run using a quantity semaphore.
limitedMapConcurrently :: MonadUnliftIO m limitedMapConcurrently ::
=> Int -> (t -> m a) -> [t] -> m [a] MonadUnliftIO m =>
Int ->
(t -> m a) ->
[t] ->
m [a]
limitedMapConcurrently 0 _ _ = return [] limitedMapConcurrently 0 _ _ = return []
limitedMapConcurrently count act args = do limitedMapConcurrently count act args = do
t' <- U.newTVarIO count t' <- U.newTVarIO count
@ -205,17 +229,15 @@ limitedMapConcurrently count act args = do
where where
wThread t arg = wThread t arg =
U.bracket_ (waitSem t) (signalSem t) $ act arg U.bracket_ (waitSem t) (signalSem t) $ act arg
-- quantity semaphore implementation using TVar -- quantity semaphore implementation using TVar
waitSem t = U.atomically $ do waitSem t = U.atomically $ do
v <- U.readTVar t v <- U.readTVar t
if v > 0 if v > 0
then U.writeTVar t (v-1) then U.writeTVar t (v -1)
else U.retrySTM else U.retrySTM
signalSem t = U.atomically $ do signalSem t = U.atomically $ do
v <- U.readTVar t v <- U.readTVar t
U.writeTVar t (v+1) U.writeTVar t (v + 1)
-- helper function to 'drop' empty optional parameter. -- helper function to 'drop' empty optional parameter.
mkQuery :: Text -> Maybe Text -> Maybe (Text, Text) mkQuery :: Text -> Maybe Text -> Maybe (Text, Text)
@ -224,7 +246,7 @@ mkQuery k mv = (k,) <$> mv
-- helper function to build query parameters that are optional.
-- don't use it with mandatory query params with empty value.
mkOptionalParams :: [(Text, Maybe Text)] -> HT.Query
mkOptionalParams params = HT.toQuery (map (uncurry mkQuery) params)
-- | Conduit that rechunks bytestrings into the given chunk
-- lengths. Stops after given chunk lengths are yielded. Stops if
-- upstream runs out early, yielding whatever partial data was
-- received. Does not throw any errors.
chunkBSConduit :: (Monad m) => [Int] -> C.ConduitM ByteString ByteString m ()
chunkBSConduit [] = return ()
chunkBSConduit (sz : rest) = do
  chunk <- LB.toStrict <$> (C.takeCE sz C..| C.sinkLazy)
  let got = B.length chunk
  if got == sz
    then C.yield chunk >> chunkBSConduit rest
    else when (got > 0) $ C.yield chunk
-- | Select part sizes - the logic is that the minimum part-size will -- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB. -- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)] selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size = uncurry (List.zip3 [1..]) $ selectPartSizes size =
List.unzip $ loop 0 size uncurry (List.zip3 [1 ..])
$ List.unzip
$ loop 0 size
where where
ceil :: Double -> Int64 ceil :: Double -> Int64
ceil = ceiling ceil = ceiling
partSize = max minPartSize (ceil $ fromIntegral size / partSize =
fromIntegral maxMultipartParts) max
minPartSize
( ceil $
fromIntegral size
/ fromIntegral maxMultipartParts
)
m = fromIntegral partSize m = fromIntegral partSize
loop st sz loop st sz
| st > sz = [] | st > sz = []
@ -257,16 +286,16 @@ selectPartSizes size = uncurry (List.zip3 [1..]) $
lookupRegionCache :: Bucket -> Minio (Maybe Region) lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do lookupRegionCache b = do
rMVar <- asks mcRegionMap rMVar <- asks mcRegionMap
rMap <- UM.readMVar rMVar rMap <- UM.readMVar rMVar
return $ H.lookup b rMap return $ H.lookup b rMap
addToRegionCache :: Bucket -> Region -> Minio () addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do addToRegionCache b region = do
rMVar <- asks mcRegionMap rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.insert b region UM.modifyMVar_ rMVar $ return . H.insert b region
deleteFromRegionCache :: Bucket -> Minio () deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do deleteFromRegionCache b = do
rMVar <- asks mcRegionMap rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.delete b UM.modifyMVar_ rMVar $ return . H.delete b

View File

@ -15,63 +15,80 @@
-- --
module Network.Minio.XmlGenerator module Network.Minio.XmlGenerator
( mkCreateBucketConfig ( mkCreateBucketConfig,
, mkCompleteMultipartUploadRequest mkCompleteMultipartUploadRequest,
, mkPutNotificationRequest mkPutNotificationRequest,
, mkSelectRequest mkSelectRequest,
) where )
where
import qualified Data.ByteString.Lazy as LBS import qualified Data.ByteString.Lazy as LBS
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.Text as T import qualified Data.Text as T
import Text.XML import Lib.Prelude
import Network.Minio.Data
import Lib.Prelude import Text.XML
import Network.Minio.Data
-- | Create a bucketConfig request body XML -- | Create a bucketConfig request body XML
mkCreateBucketConfig :: Text -> Region -> ByteString mkCreateBucketConfig :: Text -> Region -> ByteString
mkCreateBucketConfig ns location = LBS.toStrict $ renderLBS def bucketConfig mkCreateBucketConfig ns location = LBS.toStrict $ renderLBS def bucketConfig
where where
s3Element n = Element (s3Name ns n) mempty s3Element n = Element (s3Name ns n) mempty
root = s3Element "CreateBucketConfiguration" root =
[ NodeElement $ s3Element "LocationConstraint" s3Element
[ NodeContent location] "CreateBucketConfiguration"
[ NodeElement $
s3Element
"LocationConstraint"
[NodeContent location]
] ]
bucketConfig = Document (Prologue [] Nothing []) root [] bucketConfig = Document (Prologue [] Nothing []) root []
-- | Create a completeMultipartUpload request body XML -- | Create a completeMultipartUpload request body XML
mkCompleteMultipartUploadRequest :: [PartTuple] -> ByteString mkCompleteMultipartUploadRequest :: [PartTuple] -> ByteString
mkCompleteMultipartUploadRequest partInfo = mkCompleteMultipartUploadRequest partInfo =
LBS.toStrict $ renderLBS def cmur LBS.toStrict $ renderLBS def cmur
where where
root = Element "CompleteMultipartUpload" mempty $ root =
map (NodeElement . mkPart) partInfo Element "CompleteMultipartUpload" mempty $
mkPart (n, etag) = Element "Part" mempty map (NodeElement . mkPart) partInfo
[ NodeElement $ Element "PartNumber" mempty mkPart (n, etag) =
[NodeContent $ T.pack $ show n] Element
, NodeElement $ Element "ETag" mempty "Part"
[NodeContent etag] mempty
] [ NodeElement $
Element
"PartNumber"
mempty
[NodeContent $ T.pack $ show n],
NodeElement $
Element
"ETag"
mempty
[NodeContent etag]
]
cmur = Document (Prologue [] Nothing []) root [] cmur = Document (Prologue [] Nothing []) root []
-- Simplified XML representation without element attributes. -- Simplified XML representation without element attributes.
data XNode = XNode Text [XNode] data XNode
| XLeaf Text Text = XNode Text [XNode]
| XLeaf Text Text
deriving (Eq, Show) deriving (Eq, Show)
toXML :: Text -> XNode -> ByteString toXML :: Text -> XNode -> ByteString
toXML ns node = LBS.toStrict $ renderLBS def $ toXML ns node =
Document (Prologue [] Nothing []) (xmlNode node) [] LBS.toStrict $ renderLBS def $
Document (Prologue [] Nothing []) (xmlNode node) []
where where
xmlNode :: XNode -> Element xmlNode :: XNode -> Element
xmlNode (XNode name nodes) = Element (s3Name ns name) mempty $ xmlNode (XNode name nodes) =
map (NodeElement . xmlNode) nodes Element (s3Name ns name) mempty $
xmlNode (XLeaf name content) = Element (s3Name ns name) mempty map (NodeElement . xmlNode) nodes
[NodeContent content] xmlNode (XLeaf name content) =
Element
(s3Name ns name)
mempty
[NodeContent content]
class ToXNode a where class ToXNode a where
toXNode :: a -> XNode toXNode :: a -> XNode
@ -80,24 +97,29 @@ instance ToXNode Event where
toXNode = XLeaf "Event" . show toXNode = XLeaf "Event" . show
instance ToXNode Notification where instance ToXNode Notification where
toXNode (Notification qc tc lc) = XNode "NotificationConfiguration" $ toXNode (Notification qc tc lc) =
map (toXNodesWithArnName "QueueConfiguration" "Queue") qc ++ XNode "NotificationConfiguration" $
map (toXNodesWithArnName "TopicConfiguration" "Topic") tc ++ map (toXNodesWithArnName "QueueConfiguration" "Queue") qc
map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc ++ map (toXNodesWithArnName "TopicConfiguration" "Topic") tc
++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode
toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) = toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) =
XNode eltName $ [XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events ++ XNode eltName $
[toXNode fRule] [XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events
++ [toXNode fRule]
instance ToXNode Filter where instance ToXNode Filter where
toXNode (Filter (FilterKey (FilterRules rules))) = toXNode (Filter (FilterKey (FilterRules rules))) =
XNode "Filter" [XNode "S3Key" (map getFRXNode rules)] XNode "Filter" [XNode "S3Key" (map getFRXNode rules)]
getFRXNode :: FilterRule -> XNode getFRXNode :: FilterRule -> XNode
getFRXNode (FilterRule n v) = XNode "FilterRule" [ XLeaf "Name" n getFRXNode (FilterRule n v) =
, XLeaf "Value" v XNode
] "FilterRule"
[ XLeaf "Name" n,
XLeaf "Value" v
]
mkPutNotificationRequest :: Text -> Notification -> ByteString mkPutNotificationRequest :: Text -> Notification -> ByteString
mkPutNotificationRequest ns = toXML ns . toXNode mkPutNotificationRequest ns = toXML ns . toXNode
@ -106,60 +128,103 @@ mkSelectRequest :: SelectRequest -> ByteString
mkSelectRequest r = LBS.toStrict $ renderLBS def sr mkSelectRequest r = LBS.toStrict $ renderLBS def sr
where where
sr = Document (Prologue [] Nothing []) root [] sr = Document (Prologue [] Nothing []) root []
root = Element "SelectRequest" mempty $ root =
[ NodeElement (Element "Expression" mempty Element "SelectRequest" mempty $
[NodeContent $ srExpression r]) [ NodeElement
, NodeElement (Element "ExpressionType" mempty ( Element
[NodeContent $ show $ srExpressionType r]) "Expression"
, NodeElement (Element "InputSerialization" mempty $ mempty
inputSerializationNodes $ srInputSerialization r) [NodeContent $ srExpression r]
, NodeElement (Element "OutputSerialization" mempty $ ),
outputSerializationNodes $ srOutputSerialization r) NodeElement
] ++ maybe [] reqProgElem (srRequestProgressEnabled r) ( Element
reqProgElem enabled = [NodeElement "ExpressionType"
(Element "RequestProgress" mempty mempty
[NodeElement [NodeContent $ show $ srExpressionType r]
(Element "Enabled" mempty ),
[NodeContent NodeElement
(if enabled then "TRUE" else "FALSE")] ( Element "InputSerialization" mempty
) $ inputSerializationNodes
] $ srInputSerialization r
) ),
] NodeElement
inputSerializationNodes is = comprTypeNode (isCompressionType is) ++ ( Element "OutputSerialization" mempty
[NodeElement $ formatNode (isFormatInfo is)] $ outputSerializationNodes
comprTypeNode (Just c) = [NodeElement $ Element "CompressionType" mempty $ srOutputSerialization r
[NodeContent $ case c of )
CompressionTypeNone -> "NONE" ]
CompressionTypeGzip -> "GZIP" ++ maybe [] reqProgElem (srRequestProgressEnabled r)
CompressionTypeBzip2 -> "BZIP2" reqProgElem enabled =
] [ NodeElement
] ( Element
"RequestProgress"
mempty
[ NodeElement
( Element
"Enabled"
mempty
[ NodeContent
(if enabled then "TRUE" else "FALSE")
]
)
]
)
]
inputSerializationNodes is =
comprTypeNode (isCompressionType is)
++ [NodeElement $ formatNode (isFormatInfo is)]
comprTypeNode (Just c) =
[ NodeElement $
Element
"CompressionType"
mempty
[ NodeContent $ case c of
CompressionTypeNone -> "NONE"
CompressionTypeGzip -> "GZIP"
CompressionTypeBzip2 -> "BZIP2"
]
]
comprTypeNode Nothing = [] comprTypeNode Nothing = []
kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v] kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v]
formatNode (InputFormatCSV (CSVProp h)) = formatNode (InputFormatCSV (CSVProp h)) =
Element "CSV" mempty Element
(map NodeElement $ map kvElement $ H.toList h) "CSV"
mempty
(map NodeElement $ map kvElement $ H.toList h)
formatNode (InputFormatJSON p) = formatNode (InputFormatJSON p) =
Element "JSON" mempty Element
[NodeElement "JSON"
(Element "Type" mempty mempty
[NodeContent $ case jsonipType p of [ NodeElement
JSONTypeDocument -> "DOCUMENT" ( Element
JSONTypeLines -> "LINES" "Type"
] mempty
) [ NodeContent $ case jsonipType p of
] JSONTypeDocument -> "DOCUMENT"
JSONTypeLines -> "LINES"
]
)
]
formatNode InputFormatParquet = Element "Parquet" mempty [] formatNode InputFormatParquet = Element "Parquet" mempty []
outputSerializationNodes (OutputSerializationJSON j) = outputSerializationNodes (OutputSerializationJSON j) =
[NodeElement (Element "JSON" mempty $ [ NodeElement
rdElem $ jsonopRecordDelimiter j)] ( Element "JSON" mempty
$ rdElem
$ jsonopRecordDelimiter j
)
]
outputSerializationNodes (OutputSerializationCSV (CSVProp h)) = outputSerializationNodes (OutputSerializationCSV (CSVProp h)) =
[NodeElement $ Element "CSV" mempty [ NodeElement $
(map NodeElement $ map kvElement $ H.toList h)] Element
"CSV"
mempty
(map NodeElement $ map kvElement $ H.toList h)
]
rdElem Nothing = [] rdElem Nothing = []
rdElem (Just t) = [NodeElement $ Element "RecordDelimiter" mempty rdElem (Just t) =
[NodeContent t]] [ NodeElement $
Element
"RecordDelimiter"
mempty
[NodeContent t]
]

View File

@ -15,34 +15,32 @@
-- --
module Network.Minio.XmlParser module Network.Minio.XmlParser
( parseListBuckets ( parseListBuckets,
, parseLocation parseLocation,
, parseNewMultipartUpload parseNewMultipartUpload,
, parseCompleteMultipartUploadResponse parseCompleteMultipartUploadResponse,
, parseCopyObjectResponse parseCopyObjectResponse,
, parseListObjectsResponse parseListObjectsResponse,
, parseListObjectsV1Response parseListObjectsV1Response,
, parseListUploadsResponse parseListUploadsResponse,
, parseListPartsResponse parseListPartsResponse,
, parseErrResponse parseErrResponse,
, parseNotification parseNotification,
, parseSelectProgress parseSelectProgress,
) where )
where
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import Data.List (zip3, zip4, zip6) import Data.List (zip3, zip4, zip6)
import qualified Data.Text as T import qualified Data.Text as T
import Data.Text.Read (decimal) import Data.Text.Read (decimal)
import Data.Time import Data.Time
import Text.XML import Lib.Prelude
import Text.XML.Cursor hiding (bool) import Network.Minio.Data
import Network.Minio.Errors
import Lib.Prelude import Text.XML
import Text.XML.Cursor hiding (bool)
import Network.Minio.Data
import Network.Minio.Errors
-- | Represent the time format string returned by S3 API calls. -- | Represent the time format string returned by S3 API calls.
s3TimeFormat :: [Char] s3TimeFormat :: [Char]
@ -58,12 +56,14 @@ uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML -- | Parse time strings from XML
parseS3XMLTime :: MonadIO m => Text -> m UTCTime parseS3XMLTime :: MonadIO m => Text -> m UTCTime
parseS3XMLTime t = parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $ maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return
parseTimeM True defaultTimeLocale s3TimeFormat $ T.unpack t $ parseTimeM True defaultTimeLocale s3TimeFormat
$ T.unpack t
parseDecimal :: (MonadIO m, Integral a) => Text -> m a parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr = either (throwIO . MErrVXmlParse . show) return $ parseDecimal numStr =
fst <$> decimal numStr either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a] parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal parseDecimals numStr = forM numStr parseDecimal
@ -72,18 +72,18 @@ s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot = either (throwIO . MErrVXmlParse . show) (return . fromDocument) parseRoot =
. parseLBS def either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
-- | Parse the response XML of a list buckets call. -- | Parse the response XML of a list buckets call.
parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo] parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo]
parseListBuckets xmldata = do parseListBuckets xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let let s3Elem' = s3Elem ns
s3Elem' = s3Elem ns names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content
names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
times <- mapM parseS3XMLTime timeStrings times <- mapM parseS3XMLTime timeStrings
return $ zipWith BucketInfo names times return $ zipWith BucketInfo names times
@ -116,41 +116,38 @@ parseCopyObjectResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =
parseCopyObjectResponse xmldata = do parseCopyObjectResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let let s3Elem' = s3Elem ns
s3Elem' = s3Elem ns mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
mtime <- parseS3XMLTime mtimeStr mtime <- parseS3XMLTime mtimeStr
return (T.concat $ r $// s3Elem' "ETag" &/ content, mtime) return (T.concat $ r $// s3Elem' "ETag" &/ content, mtime)
-- | Parse the response XML of a list objects v1 call. -- | Parse the response XML of a list objects v1 call.
parseListObjectsV1Response :: (MonadReader env m, HasSvcNamespace env, MonadIO m) parseListObjectsV1Response ::
=> LByteString -> m ListObjectsV1Result (MonadReader env m, HasSvcNamespace env, MonadIO m) =>
LByteString ->
m ListObjectsV1Result
parseListObjectsV1Response xmldata = do parseListObjectsV1Response xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let let s3Elem' = s3Elem ns
s3Elem' = s3Elem ns hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content -- many empty Text for the zip4 below to work as intended.
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content etags = etagsList ++ repeat ""
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr sizes <- parseDecimals sizeStr
let let objects =
objects = map (uncurry6 ObjectInfo) $ map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty) zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
return $ ListObjectsV1Result hasMore nextMarker objects prefixes return $ ListObjectsV1Result hasMore nextMarker objects prefixes
@ -159,28 +156,24 @@ parseListObjectsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListObjectsResponse xmldata = do parseListObjectsResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let let s3Elem' = s3Elem ns
s3Elem' = s3Elem ns hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content -- many empty Text for the zip4 below to work as intended.
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content etags = etagsList ++ repeat ""
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr sizes <- parseDecimals sizeStr
let let objects =
objects = map (uncurry6 ObjectInfo) $ map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty) zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
return $ ListObjectsResult hasMore nextToken objects prefixes return $ ListObjectsResult hasMore nextToken objects prefixes
@ -189,20 +182,18 @@ parseListUploadsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListUploadsResponse xmldata = do parseListUploadsResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let let s3Elem' = s3Elem ns
s3Elem' = s3Elem ns hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content
nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content
nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
uploadInitTimes <- mapM parseS3XMLTime uploadInitTimeStr uploadInitTimes <- mapM parseS3XMLTime uploadInitTimeStr
let let uploads = zip3 uploadKeys uploadIds uploadInitTimes
uploads = zip3 uploadKeys uploadIds uploadInitTimes
return $ ListUploadsResult hasMore nextKey nextUpload uploads prefixes return $ ListUploadsResult hasMore nextKey nextUpload uploads prefixes
@ -210,27 +201,25 @@ parseListPartsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =>
parseListPartsResponse xmldata = do parseListPartsResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let let s3Elem' = s3Elem ns
s3Elem' = s3Elem ns hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content
nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
partModTimes <- mapM parseS3XMLTime partModTimeStr partModTimes <- mapM parseS3XMLTime partModTimeStr
partSizes <- parseDecimals partSizeStr partSizes <- parseDecimals partSizeStr
partNumbers <- parseDecimals partNumberStr partNumbers <- parseDecimals partNumberStr
nextPartNum <- parseDecimals $ maybeToList nextPartNumStr nextPartNum <- parseDecimals $ maybeToList nextPartNumStr
let let partInfos =
partInfos = map (uncurry4 ObjectPartInfo) $ map (uncurry4 ObjectPartInfo) $
zip4 partNumbers partETags partSizes partModTimes zip4 partNumbers partETags partSizes partModTimes
return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do parseErrResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
@ -250,28 +239,31 @@ parseNotification xmldata = do
<*> (mapM (parseNode ns "Topic") tcfg) <*> (mapM (parseNode ns "Topic") tcfg)
<*> (mapM (parseNode ns "CloudFunction") lcfg) <*> (mapM (parseNode ns "CloudFunction") lcfg)
where where
getFilterRule ns c = getFilterRule ns c =
let name = T.concat $ c $/ s3Elem ns "Name" &/ content let name = T.concat $ c $/ s3Elem ns "Name" &/ content
value = T.concat $ c $/ s3Elem ns "Value" &/ content value = T.concat $ c $/ s3Elem ns "Value" &/ content
in FilterRule name value in FilterRule name value
parseNode ns arnName nodeData = do parseNode ns arnName nodeData = do
let c = fromNode nodeData let c = fromNode nodeData
id = T.concat $ c $/ s3Elem ns "Id" &/ content id = T.concat $ c $/ s3Elem ns "Id" &/ content
arn = T.concat $ c $/ s3Elem ns arnName &/ content arn = T.concat $ c $/ s3Elem ns arnName &/ content
events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content
rules = c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key" &/ rules =
s3Elem ns "FilterRule" &| getFilterRule ns c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key"
return $ NotificationConfig id arn events &/ s3Elem ns "FilterRule" &| getFilterRule ns
(Filter $ FilterKey $ FilterRules rules) return $
NotificationConfig
id
arn
events
(Filter $ FilterKey $ FilterRules rules)
parseSelectProgress :: MonadIO m => ByteString -> m Progress parseSelectProgress :: MonadIO m => ByteString -> m Progress
parseSelectProgress xmldata = do parseSelectProgress xmldata = do
r <- parseRoot $ LB.fromStrict xmldata r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/element "BytesProcessed" &/ content bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content
bReturned = T.concat $ r $/element "BytesReturned" &/ content bReturned = T.concat $ r $/ element "BytesReturned" &/ content
Progress <$> parseDecimal bScanned Progress <$> parseDecimal bScanned
<*> parseDecimal bProcessed <*> parseDecimal bProcessed
<*> parseDecimal bReturned <*> parseDecimal bReturned

View File

@ -15,7 +15,7 @@
# resolver: # resolver:
# name: custom-snapshot # name: custom-snapshot
# location: "./custom-snapshot.yaml" # location: "./custom-snapshot.yaml"
resolver: lts-14.6 resolver: lts-16.0
# User packages to be built. # User packages to be built.
# Various formats can be used as shown in the example below. # Various formats can be used as shown in the example below.
@ -39,7 +39,9 @@ packages:
- '.' - '.'
# Dependency packages to be pulled from upstream that are not in the resolver # Dependency packages to be pulled from upstream that are not in the resolver
# (e.g., acme-missiles-0.3) # (e.g., acme-missiles-0.3)
extra-deps: [] extra-deps:
- unliftio-core-0.2.0.1
- protolude-0.3.0
# Override default flag values for local packages and extra-deps # Override default flag values for local packages and extra-deps
flags: {} flags: {}

View File

@ -3,10 +3,24 @@
# For more information, please see the documentation at: # For more information, please see the documentation at:
# https://docs.haskellstack.org/en/stable/lock_files # https://docs.haskellstack.org/en/stable/lock_files
packages: [] packages:
- completed:
hackage: unliftio-core-0.2.0.1@sha256:9b3e44ea9aacacbfc35b3b54015af450091916ac3618a41868ebf6546977659a,1082
pantry-tree:
size: 328
sha256: e81c5a1e82ec2cd68cbbbec9cd60567363abe02257fa1370a906f6754b6818b8
original:
hackage: unliftio-core-0.2.0.1
- completed:
hackage: protolude-0.3.0@sha256:8361b811b420585b122a7ba715aa5923834db6e8c36309bf267df2dbf66b95ef,2693
pantry-tree:
size: 1644
sha256: babf32b414f25f790b7a4ce6bae5c960bc51a11a289e7c47335b222e6762560c
original:
hackage: protolude-0.3.0
snapshots: snapshots:
- completed: - completed:
size: 524127 size: 531237
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/14/6.yaml url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/16/0.yaml
sha256: dc70dfb45e2c32f54719819bd055f46855dd4b3bd2e58b9f3f38729a2d553fbb sha256: 210e15b7043e2783115afe16b0d54914b1611cdaa73f3ca3ca7f8e0847ff54e5
original: lts-14.6 original: lts-16.0

File diff suppressed because it is too large Load Diff

View File

@ -15,88 +15,100 @@
-- --
module Network.Minio.API.Test module Network.Minio.API.Test
( bucketNameValidityTests ( bucketNameValidityTests,
, objectNameValidityTests objectNameValidityTests,
, parseServerInfoJSONTest parseServerInfoJSONTest,
, parseHealStatusTest parseHealStatusTest,
, parseHealStartRespTest parseHealStartRespTest,
) where )
where
import Data.Aeson (eitherDecode) import Data.Aeson (eitherDecode)
import Test.Tasty import Lib.Prelude
import Test.Tasty.HUnit import Network.Minio.API
import Network.Minio.AdminAPI
import Lib.Prelude import Test.Tasty
import Test.Tasty.HUnit
import Network.Minio.AdminAPI
import Network.Minio.API
assertBool' :: Bool -> Assertion assertBool' :: Bool -> Assertion
assertBool' = assertBool "Test failed!" assertBool' = assertBool "Test failed!"
bucketNameValidityTests :: TestTree bucketNameValidityTests :: TestTree
bucketNameValidityTests = testGroup "Bucket Name Validity Tests" bucketNameValidityTests =
[ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName "" testGroup
, testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab" "Bucket Name Validity Tests"
, testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" [ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName "",
, testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD" testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab",
, testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2" testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
, testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-" testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD",
, testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg" testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2",
, testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1" testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-",
, testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea" testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg",
, testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d" testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1",
, testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d" testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea",
] testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d",
testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
]
objectNameValidityTests :: TestTree objectNameValidityTests :: TestTree
objectNameValidityTests = testGroup "Object Name Validity Tests" objectNameValidityTests =
[ testCase "Empty name" $ assertBool' $ not $ isValidObjectName "" testGroup
, testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国" "Object Name Validity Tests"
] [ testCase "Empty name" $ assertBool' $ not $ isValidObjectName "",
testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
]
parseServerInfoJSONTest :: TestTree parseServerInfoJSONTest :: TestTree
parseServerInfoJSONTest = testGroup "Parse MinIO Admin API ServerInfo JSON test" $ parseServerInfoJSONTest =
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $ testGroup "Parse MinIO Admin API ServerInfo JSON test" $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])) testCases map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
)
testCases
where where
testCases = [ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON) testCases =
, ("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON) [ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON),
, ("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON) ("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON),
] ("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON)
]
fsJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":20530,\"Backend\":{\"Type\":1,\"OnlineDisks\":0,\"OfflineDisks\":0,\"StandardSCData\":0,\"StandardSCParity\":0,\"RRSCData\":0,\"RRSCParity\":0,\"Sets\":null}},\"network\":{\"transferred\":808,\"received\":1160},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":5992503019270,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]" fsJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":20530,\"Backend\":{\"Type\":1,\"OnlineDisks\":0,\"OfflineDisks\":0,\"StandardSCData\":0,\"StandardSCParity\":0,\"RRSCData\":0,\"RRSCParity\":0,\"Sets\":null}},\"network\":{\"transferred\":808,\"received\":1160},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":5992503019270,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
erasureJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":2,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]" erasureJSON = 
"[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":2,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
invalidJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":42,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]" invalidJSON = 
"[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":42,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
parseHealStatusTest :: TestTree parseHealStatusTest :: TestTree
parseHealStatusTest = testGroup "Parse MinIO Admin API HealStatus JSON test" $ parseHealStatusTest =
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $ testGroup "Parse MinIO Admin API HealStatus JSON test" $
tfn (eitherDecode tVal :: Either [Char] HealStatus)) testCases map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)
)
testCases
where where
testCases = [ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON') testCases =
, ("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON') [ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON'),
, ("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType) ("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON'),
] ("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType)
]
erasureJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}" erasureJSON' = 
"{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
invalidJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]" invalidJSON' = 
"{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]"
invalidItemType = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"hello\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}" invalidItemType = 
"{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"hello\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
parseHealStartRespTest :: TestTree parseHealStartRespTest :: TestTree
parseHealStartRespTest = testGroup "Parse MinIO Admin API HealStartResp JSON test" $ parseHealStartRespTest =
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $ testGroup "Parse MinIO Admin API HealStartResp JSON test" $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)) testCases map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
)
testCases
where where
testCases = [ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON) testCases =
, ("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON) [ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON),
] ("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON)
]
hsrJSON = "{\"clientToken\":\"3a3aca49-77dd-4b78-bba7-0978f119b23e\",\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}" hsrJSON = "{\"clientToken\":\"3a3aca49-77dd-4b78-bba7-0978f119b23e\",\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"
missingTokenJSON = "{\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}" missingTokenJSON = "{\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"

View File

@ -15,23 +15,23 @@
-- --
module Network.Minio.JsonParser.Test module Network.Minio.JsonParser.Test
( ( jsonParserTests,
jsonParserTests )
) where where
import Test.Tasty import Lib.Prelude
import Test.Tasty.HUnit import Network.Minio.Errors
import UnliftIO (MonadUnliftIO) import Network.Minio.JsonParser
import Test.Tasty
import Lib.Prelude import Test.Tasty.HUnit
import UnliftIO (MonadUnliftIO)
import Network.Minio.Errors
import Network.Minio.JsonParser
jsonParserTests :: TestTree jsonParserTests :: TestTree
jsonParserTests = testGroup "JSON Parser Tests" jsonParserTests =
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON testGroup
] "JSON Parser Tests"
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a) tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act tryValidationErr act = try act
@ -43,22 +43,21 @@ testParseErrResponseJSON :: Assertion
testParseErrResponseJSON = do testParseErrResponseJSON = do
-- 1. Test parsing of an invalid error json. -- 1. Test parsing of an invalid error json.
parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON" parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
when (isRight parseResE) $ when (isRight parseResE)
assertFailure $ "Parsing should have failed => " ++ show parseResE $ assertFailure
$ "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(jsondata, sErr) -> do forM_ cases $ \(jsondata, sErr) -> do
parseErr <- tryValidationErr $ parseErrResponseJSON jsondata parseErr <- tryValidationErr $ parseErrResponseJSON jsondata
either assertValidationErr (@?= sErr) parseErr either assertValidationErr (@?= sErr) parseErr
where where
cases = [ cases =
-- 2. Test parsing of a valid error json. [ -- 2. Test parsing of a valid error json.
("{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}", ( "{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records." ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records."
) ),
, -- 3. Test parsing of a valid, empty Resource.
-- 3. Test parsing of a valid, empty Resource. ( "{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
("{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}", ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method."
ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method." )
)
] ]

View File

@ -15,18 +15,19 @@
-- --
module Network.Minio.TestHelpers module Network.Minio.TestHelpers
( runTestNS ( runTestNS,
) where )
where
import Network.Minio.Data import Lib.Prelude
import Network.Minio.Data
import Lib.Prelude newtype TestNS = TestNS {testNamespace :: Text}
newtype TestNS = TestNS { testNamespace :: Text }
instance HasSvcNamespace TestNS where instance HasSvcNamespace TestNS where
getSvcNamespace = testNamespace getSvcNamespace = testNamespace
runTestNS :: ReaderT TestNS m a -> m a runTestNS :: ReaderT TestNS m a -> m a
runTestNS = flip runReaderT $ runTestNS =
TestNS "http://s3.amazonaws.com/doc/2006-03-01/" flip runReaderT $
TestNS "http://s3.amazonaws.com/doc/2006-03-01/"

View File

@ -15,33 +15,32 @@
-- --
module Network.Minio.Utils.Test module Network.Minio.Utils.Test
( ( limitedMapConcurrentlyTests,
limitedMapConcurrentlyTests )
) where where
import Test.Tasty import Lib.Prelude
import Test.Tasty.HUnit import Network.Minio.Utils
import Test.Tasty
import Lib.Prelude import Test.Tasty.HUnit
import Network.Minio.Utils
limitedMapConcurrentlyTests :: TestTree limitedMapConcurrentlyTests :: TestTree
limitedMapConcurrentlyTests = testGroup "limitedMapConcurrently Tests" limitedMapConcurrentlyTests =
[ testCase "Test with various thread counts" testLMC testGroup
] "limitedMapConcurrently Tests"
[ testCase "Test with various thread counts" testLMC
]
testLMC :: Assertion testLMC :: Assertion
testLMC = do testLMC = do
let maxNum = 50 let maxNum = 50
-- test with thread count of 1 to 2*maxNum -- test with thread count of 1 to 2*maxNum
forM_ [1..(2*maxNum)] $ \threads -> do forM_ [1 .. (2 * maxNum)] $ \threads -> do
res <- limitedMapConcurrently threads compute [1..maxNum] res <- limitedMapConcurrently threads compute [1 .. maxNum]
sum res @?= overallResultCheck maxNum sum res @?= overallResultCheck maxNum
where where
-- simple function to run in each thread -- simple function to run in each thread
compute :: Int -> IO Int compute :: Int -> IO Int
compute n = return $ sum [1..n] compute n = return $ sum [1 .. n]
-- function to check overall result -- function to check overall result
overallResultCheck n = sum $ map (\t -> (t * (t+1)) `div` 2) [1..n] overallResultCheck n = sum $ map (\t -> (t * (t + 1)) `div` 2) [1 .. n]

View File

@ -13,30 +13,31 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE QuasiQuotes #-} {-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlGenerator.Test module Network.Minio.XmlGenerator.Test
( xmlGeneratorTests ( xmlGeneratorTests,
) where )
where
import Test.Tasty import Lib.Prelude
import Test.Tasty.HUnit import Network.Minio.Data
import Text.RawString.QQ (r) import Network.Minio.TestHelpers
import Network.Minio.XmlGenerator
import Lib.Prelude import Network.Minio.XmlParser (parseNotification)
import Test.Tasty
import Network.Minio.Data import Test.Tasty.HUnit
import Network.Minio.TestHelpers import Text.RawString.QQ (r)
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser (parseNotification)
xmlGeneratorTests :: TestTree xmlGeneratorTests :: TestTree
xmlGeneratorTests = testGroup "XML Generator Tests" xmlGeneratorTests =
[ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig testGroup
, testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest "XML Generator Tests"
, testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest [ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig,
, testCase "Test mkSelectRequest" testMkSelectRequest testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest,
] testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest,
testCase "Test mkSelectRequest" testMkSelectRequest
]
testMkCreateBucketConfig :: Assertion testMkCreateBucketConfig :: Assertion
testMkCreateBucketConfig = do testMkCreateBucketConfig = do
@ -44,100 +45,129 @@ testMkCreateBucketConfig = do
assertEqual "CreateBucketConfiguration xml should match: " expected $ assertEqual "CreateBucketConfiguration xml should match: " expected $
mkCreateBucketConfig ns "EU" mkCreateBucketConfig ns "EU"
where where
expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ expected =
\<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint>EU</LocationConstraint>\ \<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\</CreateBucketConfiguration>" \<LocationConstraint>EU</LocationConstraint>\
\</CreateBucketConfiguration>"
testMkCompleteMultipartUploadRequest :: Assertion testMkCompleteMultipartUploadRequest :: Assertion
testMkCompleteMultipartUploadRequest = testMkCompleteMultipartUploadRequest =
assertEqual "completeMultipartUpload xml should match: " expected $ assertEqual "completeMultipartUpload xml should match: " expected $
mkCompleteMultipartUploadRequest [(1, "abc")] mkCompleteMultipartUploadRequest [(1, "abc")]
where where
expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ expected =
\<CompleteMultipartUpload>\ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<Part>\ \<CompleteMultipartUpload>\
\<PartNumber>1</PartNumber><ETag>abc</ETag>\ \<Part>\
\</Part>\ \<PartNumber>1</PartNumber><ETag>abc</ETag>\
\</CompleteMultipartUpload>" \</Part>\
\</CompleteMultipartUpload>"
testMkPutNotificationRequest :: Assertion testMkPutNotificationRequest :: Assertion
testMkPutNotificationRequest = testMkPutNotificationRequest =
forM_ cases $ \val -> do forM_ cases $ \val -> do
let ns = "http://s3.amazonaws.com/doc/2006-03-01/" let ns = "http://s3.amazonaws.com/doc/2006-03-01/"
result = toS $ mkPutNotificationRequest ns val result = fromStrictBS $ mkPutNotificationRequest ns val
ntf <- runExceptT $ runTestNS $ parseNotification result ntf <- runExceptT $ runTestNS $ parseNotification result
either (\_ -> assertFailure "XML Parse Error!") either
(@?= val) ntf (\_ -> assertFailure "XML Parse Error!")
(@?= val)
ntf
where where
cases = [ Notification [] cases =
[ NotificationConfig [ Notification
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4" []
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2" [ NotificationConfig
[ReducedRedundancyLostObject, ObjectCreated] defaultFilter "YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
] "arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[] [ReducedRedundancyLostObject, ObjectCreated]
, Notification defaultFilter
[ NotificationConfig ]
"1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue" [],
[ObjectCreatedPut] Notification
(Filter $ FilterKey $ FilterRules [ NotificationConfig
[ FilterRule "prefix" "images/" "1"
, FilterRule "suffix" ".jpg"]) "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
, NotificationConfig [ObjectCreatedPut]
"" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue" ( Filter $ FilterKey $
[ObjectCreated] defaultFilter FilterRules
] [ FilterRule "prefix" "images/",
[ NotificationConfig FilterRule "suffix" ".jpg"
"" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2" ]
[ReducedRedundancyLostObject] defaultFilter ),
] NotificationConfig
[ NotificationConfig ""
"ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated] defaultFilter [ObjectCreated]
] defaultFilter
] ]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
]
testMkSelectRequest :: Assertion testMkSelectRequest :: Assertion
testMkSelectRequest = mapM_ assertFn cases testMkSelectRequest = mapM_ assertFn cases
where where
assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a
cases = [ ( SelectRequest "Select * from S3Object" SQL cases =
(InputSerialization (Just CompressionTypeGzip) [ ( SelectRequest
(InputFormatCSV $ fileHeaderInfo FileHeaderIgnore "Select * from S3Object"
<> recordDelimiter "\n" SQL
<> fieldDelimiter "," ( InputSerialization
<> quoteCharacter "\"" (Just CompressionTypeGzip)
<> quoteEscapeCharacter "\"" ( InputFormatCSV $
)) fileHeaderInfo FileHeaderIgnore
(OutputSerializationCSV $ quoteFields QuoteFieldsAsNeeded <> recordDelimiter "\n"
<> recordDelimiter "\n" <> fieldDelimiter ","
<> fieldDelimiter "," <> quoteCharacter "\""
<> quoteCharacter "\"" <> quoteEscapeCharacter "\""
<> quoteEscapeCharacter "\""
) )
(Just False) )
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter> ( OutputSerializationCSV $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
)
(Just False),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter>
</RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter> </RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
) ),
, ( setRequestProgressEnabled False $ ( setRequestProgressEnabled False
setInputCompressionType CompressionTypeGzip $ $ setInputCompressionType CompressionTypeGzip
selectRequest "Select * from S3Object" documentJsonInput $ selectRequest
(outputJSONFromRecordDelimiter "\n") "Select * from S3Object"
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter> documentJsonInput
(outputJSONFromRecordDelimiter "\n"),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
) ),
, ( setRequestProgressEnabled False $ ( setRequestProgressEnabled False
setInputCompressionType CompressionTypeNone $ $ setInputCompressionType CompressionTypeNone
selectRequest "Select * from S3Object" defaultParquetInput $ selectRequest
(outputCSVFromProps $ quoteFields QuoteFieldsAsNeeded "Select * from S3Object"
<> recordDelimiter "\n" defaultParquetInput
<> fieldDelimiter "," ( outputCSVFromProps $
<> quoteCharacter "\"" quoteFields QuoteFieldsAsNeeded
<> quoteEscapeCharacter "\"") <> recordDelimiter "\n"
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter> <> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
) )
] ]

View File

@ -13,39 +13,40 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE QuasiQuotes #-} {-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlParser.Test module Network.Minio.XmlParser.Test
( xmlParserTests ( xmlParserTests,
) where )
where
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import Data.Time (fromGregorian) import Data.Time (fromGregorian)
import Test.Tasty import Lib.Prelude
import Test.Tasty.HUnit import Network.Minio.Data
import Text.RawString.QQ (r) import Network.Minio.Errors
import UnliftIO (MonadUnliftIO) import Network.Minio.TestHelpers
import Network.Minio.XmlParser
import Lib.Prelude import Test.Tasty
import Test.Tasty.HUnit
import Network.Minio.Data import Text.RawString.QQ (r)
import Network.Minio.Errors import UnliftIO (MonadUnliftIO)
import Network.Minio.TestHelpers
import Network.Minio.XmlParser
xmlParserTests :: TestTree xmlParserTests :: TestTree
xmlParserTests = testGroup "XML Parser Tests" xmlParserTests =
[ testCase "Test parseLocation" testParseLocation testGroup
, testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload "XML Parser Tests"
, testCase "Test parseListObjectsResponse" testParseListObjectsResult [ testCase "Test parseLocation" testParseLocation,
, testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload,
, testCase "Test parseListUploadsresponse" testParseListIncompleteUploads testCase "Test parseListObjectsResponse" testParseListObjectsResult,
, testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result,
, testCase "Test parseListPartsResponse" testParseListPartsResponse testCase "Test parseListUploadsresponse" testParseListIncompleteUploads,
, testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse,
, testCase "Test parseNotification" testParseNotification testCase "Test parseListPartsResponse" testParseListPartsResponse,
, testCase "Test parseSelectProgress" testParseSelectProgress testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse,
] testCase "Test parseNotification" testParseNotification,
testCase "Test parseSelectProgress" testParseSelectProgress
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a) tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act tryValidationErr act = try act
@ -54,232 +55,232 @@ assertValidtionErr :: MErrV -> Assertion
assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e
eitherValidationErr :: Either MErrV a -> (a -> Assertion) -> Assertion eitherValidationErr :: Either MErrV a -> (a -> Assertion) -> Assertion
eitherValidationErr (Left e) _ = assertValidtionErr e eitherValidationErr (Left e) _ = assertValidtionErr e
eitherValidationErr (Right a) f = f a eitherValidationErr (Right a) f = f a
testParseLocation :: Assertion testParseLocation :: Assertion
testParseLocation = do testParseLocation = do
-- 1. Test parsing of an invalid location constraint xml. -- 1. Test parsing of an invalid location constraint xml.
parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml" parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
when (isRight parseResE) $ when (isRight parseResE)
assertFailure $ "Parsing should have failed => " ++ show parseResE $ assertFailure
$ "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(xmldata, expectedLocation) -> do forM_ cases $ \(xmldata, expectedLocation) -> do
parseLocE <- tryValidationErr $ parseLocation xmldata parseLocE <- tryValidationErr $ parseLocation xmldata
either assertValidtionErr (@?= expectedLocation) parseLocE either assertValidtionErr (@?= expectedLocation) parseLocE
where where
cases = [ cases =
-- 2. Test parsing of a valid location xml. [ -- 2. Test parsing of a valid location xml.
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>", \<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>",
"EU" "EU"
) ),
, -- 3. Test parsing of a valid, empty location xml.
-- 3. Test parsing of a valid, empty location xml. ( "<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>",
("<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>", "us-east-1"
"us-east-1" )
)
] ]
testParseNewMultipartUpload :: Assertion testParseNewMultipartUpload :: Assertion
testParseNewMultipartUpload = do testParseNewMultipartUpload = do
forM_ cases $ \(xmldata, expectedUploadId) -> do forM_ cases $ \(xmldata, expectedUploadId) -> do
parsedUploadIdE <- tryValidationErr $ runTestNS $ parseNewMultipartUpload xmldata parsedUploadIdE <- tryValidationErr $ runTestNS $ parseNewMultipartUpload xmldata
eitherValidationErr parsedUploadIdE (@?= expectedUploadId) eitherValidationErr parsedUploadIdE (@?= expectedUploadId)
where where
cases = [ cases =
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ [ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\ \ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\ \ <Key>example-object</Key>\
\ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\ \ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\
\</InitiateMultipartUploadResult>", \</InitiateMultipartUploadResult>",
"VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA" "VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
), ),
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\ \ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\ \ <Key>example-object</Key>\
\ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\ \ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\
\</InitiateMultipartUploadResult>", \</InitiateMultipartUploadResult>",
"EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-" "EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-"
) )
] ]
testParseListObjectsResult :: Assertion testParseListObjectsResult :: Assertion
testParseListObjectsResult = do testParseListObjectsResult = do
let let xmldata =
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\ \<Name>bucket</Name>\
\<Prefix/>\ \<Prefix/>\
\<NextContinuationToken>opaque</NextContinuationToken>\ \<NextContinuationToken>opaque</NextContinuationToken>\
\<KeyCount>1000</KeyCount>\ \<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\ \<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\ \<IsTruncated>true</IsTruncated>\
\<Contents>\ \<Contents>\
\<Key>my-image.jpg</Key>\ \<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\ \<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\ \<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\ \<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\</Contents>\ \</Contents>\
\</ListBucketResult>" \</ListBucketResult>"
expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
expectedListResult = ListObjectsResult True (Just "opaque") [object1] [] object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsResult <- tryValidationErr $ runTestNS $ parseListObjectsResponse xmldata parsedListObjectsResult <- tryValidationErr $ runTestNS $ parseListObjectsResponse xmldata
eitherValidationErr parsedListObjectsResult (@?= expectedListResult) eitherValidationErr parsedListObjectsResult (@?= expectedListResult)
testParseListObjectsV1Result :: Assertion testParseListObjectsV1Result :: Assertion
testParseListObjectsV1Result = do testParseListObjectsV1Result = do
let let xmldata =
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\ \<Name>bucket</Name>\
\<Prefix/>\ \<Prefix/>\
\<NextMarker>my-image1.jpg</NextMarker>\ \<NextMarker>my-image1.jpg</NextMarker>\
\<KeyCount>1000</KeyCount>\ \<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\ \<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\ \<IsTruncated>true</IsTruncated>\
\<Contents>\ \<Contents>\
\<Key>my-image.jpg</Key>\ \<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\ \<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\ \<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\ \<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\</Contents>\ \</Contents>\
\</ListBucketResult>" \</ListBucketResult>"
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] [] object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsV1Result <- tryValidationErr $ runTestNS $ parseListObjectsV1Response xmldata parsedListObjectsV1Result <- tryValidationErr $ runTestNS $ parseListObjectsV1Response xmldata
eitherValidationErr parsedListObjectsV1Result (@?= expectedListResult) eitherValidationErr parsedListObjectsV1Result (@?= expectedListResult)
testParseListIncompleteUploads :: Assertion testParseListIncompleteUploads :: Assertion
testParseListIncompleteUploads = do testParseListIncompleteUploads = do
let let xmldata =
xmldata = "<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ "<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\ \<Bucket>example-bucket</Bucket>\
\<KeyMarker/>\ \<KeyMarker/>\
\<UploadIdMarker/>\ \<UploadIdMarker/>\
\<NextKeyMarker>sample.jpg</NextKeyMarker>\ \<NextKeyMarker>sample.jpg</NextKeyMarker>\
\<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\ \<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\
\<Delimiter>/</Delimiter>\ \<Delimiter>/</Delimiter>\
\<Prefix/>\ \<Prefix/>\
\<MaxUploads>1000</MaxUploads>\ \<MaxUploads>1000</MaxUploads>\
\<IsTruncated>false</IsTruncated>\ \<IsTruncated>false</IsTruncated>\
\<Upload>\ \<Upload>\
\<Key>sample.jpg</Key>\ \<Key>sample.jpg</Key>\
\<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\ \<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\
\<Initiator>\ \<Initiator>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\ \<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\ \<DisplayName>s3-nickname</DisplayName>\
\</Initiator>\ \</Initiator>\
\<Owner>\ \<Owner>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\ \<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\ \<DisplayName>s3-nickname</DisplayName>\
\</Owner>\ \</Owner>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\<Initiated>2010-11-26T19:24:17.000Z</Initiated>\ \<Initiated>2010-11-26T19:24:17.000Z</Initiated>\
\</Upload>\ \</Upload>\
\<CommonPrefixes>\ \<CommonPrefixes>\
\<Prefix>photos/</Prefix>\ \<Prefix>photos/</Prefix>\
\</CommonPrefixes>\ \</CommonPrefixes>\
\<CommonPrefixes>\ \<CommonPrefixes>\
\<Prefix>videos/</Prefix>\ \<Prefix>videos/</Prefix>\
\</CommonPrefixes>\ \</CommonPrefixes>\
\</ListMultipartUploadsResult>" \</ListMultipartUploadsResult>"
expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes
uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)] uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)]
initTime = UTCTime (fromGregorian 2010 11 26) 69857 initTime = UTCTime (fromGregorian 2010 11 26) 69857
prefixes = ["photos/", "videos/"] prefixes = ["photos/", "videos/"]
parsedListUploadsResult <- tryValidationErr $ runTestNS $ parseListUploadsResponse xmldata parsedListUploadsResult <- tryValidationErr $ runTestNS $ parseListUploadsResponse xmldata
eitherValidationErr parsedListUploadsResult (@?= expectedListResult) eitherValidationErr parsedListUploadsResult (@?= expectedListResult)
testParseCompleteMultipartUploadResponse :: Assertion testParseCompleteMultipartUploadResponse :: Assertion
testParseCompleteMultipartUploadResponse = do testParseCompleteMultipartUploadResponse = do
let let xmldata =
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\ \<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\
\<Bucket>Example-Bucket</Bucket>\ \<Bucket>Example-Bucket</Bucket>\
\<Key>Example-Object</Key>\ \<Key>Example-Object</Key>\
\<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\ \<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\
\</CompleteMultipartUploadResult>" \</CompleteMultipartUploadResult>"
expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\"" expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\""
parsedETagE <- runExceptT $ runTestNS $ parseCompleteMultipartUploadResponse xmldata parsedETagE <- runExceptT $ runTestNS $ parseCompleteMultipartUploadResponse xmldata
eitherValidationErr parsedETagE (@?= expectedETag) eitherValidationErr parsedETagE (@?= expectedETag)
testParseListPartsResponse :: Assertion testParseListPartsResponse :: Assertion
testParseListPartsResponse = do testParseListPartsResponse = do
let let xmldata =
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\ \<Bucket>example-bucket</Bucket>\
\<Key>example-object</Key>\ \<Key>example-object</Key>\
\<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\ \<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\
\<Initiator>\ \<Initiator>\
\<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\ \<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\
\<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\ \<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
\</Initiator>\ \</Initiator>\
\<Owner>\ \<Owner>\
\<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\ \<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\
\<DisplayName>someName</DisplayName>\ \<DisplayName>someName</DisplayName>\
\</Owner>\ \</Owner>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\<PartNumberMarker>1</PartNumberMarker>\ \<PartNumberMarker>1</PartNumberMarker>\
\<NextPartNumberMarker>3</NextPartNumberMarker>\ \<NextPartNumberMarker>3</NextPartNumberMarker>\
\<MaxParts>2</MaxParts>\ \<MaxParts>2</MaxParts>\
\<IsTruncated>true</IsTruncated>\ \<IsTruncated>true</IsTruncated>\
\<Part>\ \<Part>\
\<PartNumber>2</PartNumber>\ \<PartNumber>2</PartNumber>\
\<LastModified>2010-11-10T20:48:34.000Z</LastModified>\ \<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
\<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\ \<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\
\<Size>10485760</Size>\ \<Size>10485760</Size>\
\</Part>\ \</Part>\
\<Part>\ \<Part>\
\<PartNumber>3</PartNumber>\ \<PartNumber>3</PartNumber>\
\<LastModified>2010-11-10T20:48:33.000Z</LastModified>\ \<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
\<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\ \<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\
\<Size>10485760</Size>\ \<Size>10485760</Size>\
\</Part>\ \</Part>\
\</ListPartsResult>" \</ListPartsResult>"
expectedListResult = ListPartsResult True (Just 3) [part1, part2]
expectedListResult = ListPartsResult True (Just 3) [part1, part2] part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1
part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1 modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10
modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10 part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2
part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2 modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
parsedListPartsResult <- runExceptT $ runTestNS $ parseListPartsResponse xmldata parsedListPartsResult <- runExceptT $ runTestNS $ parseListPartsResponse xmldata
eitherValidationErr parsedListPartsResult (@?= expectedListResult) eitherValidationErr parsedListPartsResult (@?= expectedListResult)
testParseCopyObjectResponse :: Assertion testParseCopyObjectResponse :: Assertion
testParseCopyObjectResponse = do testParseCopyObjectResponse = do
let let cases =
cases = [ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ [ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\ \<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\ \<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyObjectResult>", \</CopyObjectResult>",
("\"9b2cf535f27731c974343645a3985328\"", ( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120)) UTCTime (fromGregorian 2009 10 28) 81120
, ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ )
\<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ ),
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\ \<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\</CopyPartResult>", \<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
("\"9b2cf535f27731c974343645a3985328\"", \<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
UTCTime (fromGregorian 2009 10 28) 81120))] \</CopyPartResult>",
( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120
)
)
]
forM_ cases $ \(xmldata, (etag, modTime)) -> do forM_ cases $ \(xmldata, (etag, modTime)) -> do
parseResult <- runExceptT $ runTestNS $ parseCopyObjectResponse xmldata parseResult <- runExceptT $ runTestNS $ parseCopyObjectResponse xmldata
@ -287,73 +288,88 @@ testParseCopyObjectResponse = do
testParseNotification :: Assertion testParseNotification :: Assertion
testParseNotification = do testParseNotification = do
let let cases =
cases = [ ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ [ ( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <TopicConfiguration>\ \ <TopicConfiguration>\
\ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\ \ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\
\ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\ \ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\ \ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Event>s3:ObjectCreated:*</Event>\ \ <Event>s3:ObjectCreated:*</Event>\
\ </TopicConfiguration>\ \ </TopicConfiguration>\
\</NotificationConfiguration>", \</NotificationConfiguration>",
Notification [] Notification
[ NotificationConfig []
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4" "YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2" "arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated] defaultFilter [ReducedRedundancyLostObject, ObjectCreated]
] defaultFilter
[]) ]
, ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ []
\ <CloudFunctionConfiguration>\ ),
\ <Id>ObjectCreatedEvents</Id>\ ( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\ \ <CloudFunctionConfiguration>\
\ <Event>s3:ObjectCreated:*</Event>\ \ <Id>ObjectCreatedEvents</Id>\
\ </CloudFunctionConfiguration>\ \ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\
\ <QueueConfiguration>\ \ <Event>s3:ObjectCreated:*</Event>\
\ <Id>1</Id>\ \ </CloudFunctionConfiguration>\
\ <Filter>\ \ <QueueConfiguration>\
\ <S3Key>\ \ <Id>1</Id>\
\ <FilterRule>\ \ <Filter>\
\ <Name>prefix</Name>\ \ <S3Key>\
\ <Value>images/</Value>\ \ <FilterRule>\
\ </FilterRule>\ \ <Name>prefix</Name>\
\ <FilterRule>\ \ <Value>images/</Value>\
\ <Name>suffix</Name>\ \ </FilterRule>\
\ <Value>.jpg</Value>\ \ <FilterRule>\
\ </FilterRule>\ \ <Name>suffix</Name>\
\ </S3Key>\ \ <Value>.jpg</Value>\
\ </Filter>\ \ </FilterRule>\
\ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\ \ </S3Key>\
\ <Event>s3:ObjectCreated:Put</Event>\ \ </Filter>\
\ </QueueConfiguration>\ \ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\
\ <TopicConfiguration>\ \ <Event>s3:ObjectCreated:Put</Event>\
\ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\ \ </QueueConfiguration>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\ \ <TopicConfiguration>\
\ </TopicConfiguration>\ \ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\
\ <QueueConfiguration>\ \ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\ \ </TopicConfiguration>\
\ <Event>s3:ObjectCreated:*</Event>\ \ <QueueConfiguration>\
\ </QueueConfiguration>)\ \ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\
\</NotificationConfiguration>", \ <Event>s3:ObjectCreated:*</Event>\
Notification [ NotificationConfig \ </QueueConfiguration>)\
"1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue" \</NotificationConfiguration>",
[ObjectCreatedPut] Notification
(Filter $ FilterKey $ FilterRules [ NotificationConfig
[FilterRule "prefix" "images/", "1"
FilterRule "suffix" ".jpg"]) "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
, NotificationConfig [ObjectCreatedPut]
"" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue" ( Filter $ FilterKey $
[ObjectCreated] defaultFilter FilterRules
] [ FilterRule "prefix" "images/",
[ NotificationConfig FilterRule "suffix" ".jpg"
"" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2" ]
[ReducedRedundancyLostObject] defaultFilter ),
] NotificationConfig
[ NotificationConfig ""
"ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated] defaultFilter [ObjectCreated]
]) defaultFilter
] ]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
)
]
forM_ cases $ \(xmldata, val) -> do forM_ cases $ \(xmldata, val) -> do
result <- runExceptT $ runTestNS $ parseNotification xmldata result <- runExceptT $ runTestNS $ parseNotification xmldata
@ -362,20 +378,25 @@ testParseNotification = do
-- | Tests parsing of both progress and stats -- | Tests parsing of both progress and stats
testParseSelectProgress :: Assertion testParseSelectProgress :: Assertion
testParseSelectProgress = do testParseSelectProgress = do
let cases = [ ([r|<?xml version="1.0" encoding="UTF-8"?> let cases =
[ ( [r|<?xml version="1.0" encoding="UTF-8"?>
<Progress> <Progress>
<BytesScanned>512</BytesScanned> <BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed> <BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned> <BytesReturned>1024</BytesReturned>
</Progress>|] , Progress 512 1024 1024) </Progress>|],
, ([r|<?xml version="1.0" encoding="UTF-8"?> Progress 512 1024 1024
),
( [r|<?xml version="1.0" encoding="UTF-8"?>
<Stats> <Stats>
<BytesScanned>512</BytesScanned> <BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed> <BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned> <BytesReturned>1024</BytesReturned>
</Stats>|], Progress 512 1024 1024) </Stats>|],
] Progress 512 1024 1024
)
]
forM_ cases $ \(xmldata, progress) -> do forM_ cases $ \(xmldata, progress) -> do
result <- runExceptT $ parseSelectProgress xmldata result <- runExceptT $ parseSelectProgress xmldata
eitherValidationErr result (@?= progress) eitherValidationErr result (@?= progress)

View File

@ -14,21 +14,18 @@
-- limitations under the License. -- limitations under the License.
-- --
import Test.Tasty import qualified Data.ByteString as B
import Test.Tasty.QuickCheck as QC import qualified Data.List as L
import Lib.Prelude
import qualified Data.ByteString as B import Network.Minio.API.Test
import qualified Data.List as L import Network.Minio.CopyObject
import Network.Minio.Data
import Lib.Prelude import Network.Minio.PutObject
import Network.Minio.Utils.Test
import Network.Minio.API.Test import Network.Minio.XmlGenerator.Test
import Network.Minio.CopyObject import Network.Minio.XmlParser.Test
import Network.Minio.Data import Test.Tasty
import Network.Minio.PutObject import Test.Tasty.QuickCheck as QC
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
main :: IO () main :: IO ()
main = defaultMain tests main = defaultMain tests
@ -51,82 +48,84 @@ properties = testGroup "Properties" [qcProps] -- [scProps]
-- ] -- ]
qcProps :: TestTree qcProps :: TestTree
qcProps = testGroup "(checked by QuickCheck)" qcProps =
[ QC.testProperty "selectPartSizes:" $ testGroup
\n -> let (pns, offs, sizes) = L.unzip3 (selectPartSizes n) "(checked by QuickCheck)"
[ QC.testProperty "selectPartSizes:" $
\n ->
let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
-- check that pns increments from 1. -- check that pns increments from 1.
isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1..] isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1 ..]
consPairs [] = []
consPairs [] = [] consPairs [_] = []
consPairs [_] = [] consPairs (a : (b : c)) = (a, b) : (consPairs (b : c))
consPairs (a:(b:c)) = (a, b):(consPairs (b:c))
-- check `offs` is monotonically increasing. -- check `offs` is monotonically increasing.
isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs
-- check sizes sums to n. -- check sizes sums to n.
isSumSizeOk = sum sizes == n isSumSizeOk = sum sizes == n
-- check sizes are constant except last -- check sizes are constant except last
isSizesConstantExceptLast = isSizesConstantExceptLast =
all (\(a, b) -> a == b) (consPairs $ L.init sizes) all (\(a, b) -> a == b) (consPairs $ L.init sizes)
-- check each part except last is at least minPartSize; -- check each part except last is at least minPartSize;
-- last part may be 0 only if it is the only part. -- last part may be 0 only if it is the only part.
nparts = length sizes nparts = length sizes
isMinPartSizeOk = isMinPartSizeOk =
if | nparts > 1 -> -- last part can be smaller but > 0 if
all (>= minPartSize) (take (nparts - 1) sizes) && | nparts > 1 -> -- last part can be smaller but > 0
all (\s -> s > 0) (drop (nparts - 1) sizes) all (>= minPartSize) (take (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here. && all (\s -> s > 0) (drop (nparts - 1) sizes)
maybe True (\x -> x >= 0 && x <= minPartSize) $ | nparts == 1 -> -- size may be 0 here.
headMay sizes maybe True (\x -> x >= 0 && x <= minPartSize) $
| otherwise -> False headMay sizes
| otherwise -> False
in n < 0 || in n < 0
(isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk && || ( isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk
isSizesConstantExceptLast && isMinPartSizeOk) && isSizesConstantExceptLast
&& isMinPartSizeOk
, QC.testProperty "selectCopyRanges:" $ ),
\(start, end) -> QC.testProperty "selectCopyRanges:" $
let (_, pairs) = L.unzip (selectCopyRanges (start, end)) \(start, end) ->
let (_, pairs) = L.unzip (selectCopyRanges (start, end))
-- is last part's snd offset end? -- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start -- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs
-- each pair is >=64MiB except last, and all those parts
-- each pair is >=64MiB except last, and all those parts -- have same size.
-- have same size. initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs
initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs isPartSizesOk =
isPartSizesOk = all (>= minPartSize) initSizes && all (>= minPartSize) initSizes
maybe True (\k -> all (== k) initSizes) && maybe
(headMay initSizes) True
(\k -> all (== k) initSizes)
-- returned offsets are contiguous. (headMay initSizes)
fsts = drop 1 $ map fst pairs -- returned offsets are contiguous.
snds = take (length pairs - 1) $ map snd pairs fsts = drop 1 $ map fst pairs
isContParts = length fsts == length snds && snds = take (length pairs - 1) $ map snd pairs
and (map (\(a, b) -> a == b + 1) $ zip fsts snds) isContParts =
length fsts == length snds
in start < 0 || start > end || && and (map (\(a, b) -> a == b + 1) $ zip fsts snds)
(isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts) in start < 0 || start > end
|| (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts),
, QC.testProperty "mkSSECKey:" $ QC.testProperty "mkSSECKey:" $
\w8s -> let bs = B.pack w8s \w8s ->
r = mkSSECKey bs let bs = B.pack w8s
in case r of r = mkSSECKey bs
Just _ -> B.length bs == 32 in case r of
Just _ -> B.length bs == 32
Nothing -> B.length bs /= 32 Nothing -> B.length bs /= 32
] ]
unitTests :: TestTree unitTests :: TestTree
unitTests = testGroup "Unit tests" [ xmlGeneratorTests, xmlParserTests unitTests =
, bucketNameValidityTests testGroup
, objectNameValidityTests "Unit tests"
, parseServerInfoJSONTest [ xmlGeneratorTests,
, parseHealStatusTest xmlParserTests,
, parseHealStartRespTest bucketNameValidityTests,
, limitedMapConcurrentlyTests objectNameValidityTests,
] parseServerInfoJSONTest,
parseHealStatusTest,
parseHealStartRespTest,
limitedMapConcurrentlyTests
]