Compare commits


No commits in common. "master" and "v1.3.0" have entirely different histories.

72 changed files with 4812 additions and 7267 deletions


@ -1,266 +0,0 @@
name: CI
# Trigger the workflow on push or pull request, but only for the master branch
on:
pull_request:
branches: [master]
push:
branches: [master]
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
# Env vars for tests
env:
MINIO_ACCESS_KEY: minio
MINIO_SECRET_KEY: minio123
MINIO_LOCAL: 1
jobs:
ormolu:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: haskell-actions/run-ormolu@v15
with:
version: "0.5.0.1"
hlint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: "Set up HLint"
uses: haskell-actions/hlint-setup@v2
with:
version: "3.5"
- name: "Run HLint"
uses: haskell-actions/hlint-run@v2
with:
path: '["src/", "test/", "examples"]'
fail-on: warning
cabal:
name: ${{ matrix.os }} / ghc-${{ matrix.ghc }} / cabal-${{ matrix.cabal }}
runs-on: ${{ matrix.os }}
needs: ormolu
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
cabal: ["3.8", "latest"]
ghc:
- "9.8"
- "9.6"
- "9.4"
- "9.2"
- "9.0"
- "8.10"
exclude:
# macos llvm issue for versions less than 9.2
- os: macos-latest
ghc: "8.10"
- os: macos-latest
ghc: "9.0"
# Cabal 3.8 only supports GHC < 9.6
- cabal: "3.8"
ghc: "9.6"
- cabal: "3.8"
ghc: "9.8"
steps:
- uses: actions/checkout@v4
if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
- uses: haskell-actions/setup@v2
id: setup
name: Setup Haskell
with:
ghc-version: ${{ matrix.ghc }}
cabal-version: ${{ matrix.cabal }}
cabal-update: true
- name: Configure
run: |
cabal configure --enable-tests --enable-benchmarks --test-show-details=direct -fexamples -fdev -flive-test
cabal build all --dry-run
# The last step generates dist-newstyle/cache/plan.json for the cache key.
- name: Restore cached dependencies
uses: actions/cache/restore@v4
id: cache
env:
key: ${{ runner.os }}-ghc-${{ steps.setup.outputs.ghc-version }}-cabal-${{ steps.setup.outputs.cabal-version }}
with:
path: ${{ steps.setup.outputs.cabal-store }}
key: ${{ env.key }}-plan-${{ hashFiles('**/plan.json') }}
restore-keys: ${{ env.key }}-
- name: Install dependencies
# If we had an exact cache hit, the dependencies will be up to date.
if: steps.cache.outputs.cache-hit != 'true'
run: cabal build all --only-dependencies
# Cache dependencies already here, so that we do not have to rebuild them should the subsequent steps fail.
- name: Save cached dependencies
uses: actions/cache/save@v4
# If we had an exact cache hit, trying to save the cache would error because of key clash.
if: steps.cache.outputs.cache-hit != 'true'
with:
path: ${{ steps.setup.outputs.cabal-store }}
key: ${{ steps.cache.outputs.cache-primary-key }}
- name: Build
run: |
cabal build all
- name: Setup TLS certs for MinIO for testing (Linux)
if: matrix.os == 'ubuntu-latest'
run: |
mkdir -p /tmp/minio /tmp/minio-config/certs
cp test/cert/* /tmp/minio-config/certs/
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
## TLS setup is currently disabled for macOS due to issues with trusting the certificate on macOS.
- name: Setup TLS certs for MinIO for testing (MacOS)
if: matrix.os == 'macos-latest'
run: |
mkdir -p /tmp/minio /tmp/minio-config/certs
cp test/cert/* /tmp/minio-config/certs/
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
# sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
- name: Setup MinIO for testing (Windows)
if: matrix.os == 'windows-latest'
run: |
New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
- name: Test (Linux)
if: matrix.os == 'ubuntu-latest'
env:
MINIO_SECURE: 1
run: |
/tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
ghc --version
cabal --version
cabal test all
- name: Test (MacOS)
if: matrix.os == 'macos-latest'
# # Leave MINIO_SECURE unset to disable TLS in tests.
# env:
# MINIO_SECURE: 1
run: |
/tmp/minio/minio server --quiet data1 data2 data3 data4 2>&1 > minio.log &
ghc --version
cabal --version
cabal test all
- name: Test (Windows)
if: matrix.os == 'windows-latest'
env:
MINIO_SECURE: 1
run: |
Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
ghc --version
cabal --version
cabal test all
stack:
name: stack / ghc ${{ matrix.ghc }}
runs-on: ${{ matrix.os }}
env:
MINIO_SECURE: 1
strategy:
matrix:
ghc:
- "8.10.7"
- "9.0.2"
- "9.2.8"
- "9.4.8"
- "9.6.5"
- "9.8.2"
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4
if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
- uses: haskell-actions/setup@v2
with:
ghc-version: ${{ matrix.ghc }}
enable-stack: true
stack-version: "latest"
- uses: actions/cache@v4
name: Cache ~/.stack
with:
path: ~/.stack
key: ${{ runner.os }}-stack-global-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}
restore-keys: |
${{ runner.os }}-stack-global-
- uses: actions/cache@v4
name: Cache .stack-work
with:
path: .stack-work
key: ${{ runner.os }}-stack-work-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}-${{ hashFiles('**/*.hs') }}
restore-keys: |
${{ runner.os }}-stack-work-
- name: Install dependencies
run: |
stack --version
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --only-dependencies
- name: Build
run: |
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples --flag minio-hs:live-test --flag minio-hs:dev
- name: Setup MinIO for testing (Linux)
if: matrix.os == 'ubuntu-latest'
run: |
mkdir -p /tmp/minio /tmp/minio-config/certs
cp test/cert/* /tmp/minio-config/certs/
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
- name: Setup MinIO for testing (MacOS)
if: matrix.os == 'macos-latest'
run: |
mkdir -p /tmp/minio /tmp/minio-config/certs
cp test/cert/* /tmp/minio-config/certs/
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
- name: Setup MinIO for testing (Windows)
if: matrix.os == 'windows-latest'
run: |
New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
- name: Test (Non-Windows)
if: matrix.os != 'windows-latest'
run: |
/tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
ghc --version
stack --version
stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev
- name: Test (Windows)
if: matrix.os == 'windows-latest'
run: |
Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
ghc --version
cabal --version
stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev
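The `MINIO_LOCAL`, `MINIO_SECURE` and credential variables above only take effect through the test suite. Below is a minimal, hypothetical sketch of how a test harness could consume them, assuming the `def` value and the `ConnectInfo` record fields documented in the API guide later in this diff; the helper name `testConnectInfo` is made up for illustration.

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import           Data.Default       (def)
import           Network.Minio
import           Prelude
import           System.Environment (lookupEnv)

-- Choose a ConnectInfo for the test run from the workflow's env vars.
testConnectInfo :: IO ConnectInfo
testConnectInfo = do
  local  <- lookupEnv "MINIO_LOCAL"
  secure <- lookupEnv "MINIO_SECURE"
  return $ case local of
    -- MINIO_LOCAL set: use the server the workflow starts on localhost.
    Just _ ->
      def
        { connectHost      = "localhost",
          connectPort      = 9000,
          connectAccessKey = "minio",
          connectSecretkey = "minio123",
          connectIsSecure  = secure == Just "1"
        }
    -- Otherwise fall back to the public Play server.
    Nothing -> minioPlayCI

main :: IO ()
main = do
  ci  <- testConnectInfo
  res <- runMinio ci listBuckets
  print res
```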

.stylish-haskell.yaml Normal file

@ -0,0 +1,233 @@
# stylish-haskell configuration file
# ==================================
# The stylish-haskell tool is mainly configured by specifying steps. These steps
# are a list, so they have an order, and one specific step may appear more than
# once (if needed). Each file is processed by these steps in the given order.
steps:
# Convert some ASCII sequences to their Unicode equivalents. This is disabled
# by default.
# - unicode_syntax:
# # In order to make this work, we also need to insert the UnicodeSyntax
# # language pragma. If this flag is set to true, we insert it when it's
# # not already present. You may want to disable it if you configure
# # language extensions using some other method than pragmas. Default:
# # true.
# add_language_pragma: true
# Align the right hand side of some elements. This is quite conservative
# and only applies to statements where each element occupies a single
# line.
- simple_align:
cases: true
top_level_patterns: true
records: true
# Import cleanup
- imports:
# There are different ways we can align names and lists.
#
# - global: Align the import names and import list throughout the entire
# file.
#
# - file: Like global, but don't add padding when there are no qualified
# imports in the file.
#
# - group: Only align the imports per group (a group is formed by adjacent
# import lines).
#
# - none: Do not perform any alignment.
#
# Default: global.
align: global
# The following options affect only import list alignment.
#
# List align has following options:
#
# - after_alias: Import list is aligned with end of import including
# 'as' and 'hiding' keywords.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# > init, last, length)
#
# - with_alias: Import list is aligned with start of alias or hiding.
#
# > import qualified Data.List as List (concat, foldl, foldr, head,
# > init, last, length)
#
# - new_line: Import list starts always on new line.
#
# > import qualified Data.List as List
# > (concat, foldl, foldr, head, init, last, length)
#
# Default: after_alias
list_align: after_alias
# Right-pad the module names to align imports in a group:
#
# - true: a little more readable
#
# > import qualified Data.List as List (concat, foldl, foldr,
# > init, last, length)
# > import qualified Data.List.Extra as List (concat, foldl, foldr,
# > init, last, length)
#
# - false: diff-safe
#
# > import qualified Data.List as List (concat, foldl, foldr, init,
# > last, length)
# > import qualified Data.List.Extra as List (concat, foldl, foldr,
# > init, last, length)
#
# Default: true
pad_module_names: true
# Long list align style takes effect when import is too long. This is
# determined by 'columns' setting.
#
# - inline: This option will put as much specs on same line as possible.
#
# - new_line: Import list will start on new line.
#
# - new_line_multiline: Import list will start on new line when it's
# short enough to fit to single line. Otherwise it'll be multiline.
#
# - multiline: One line per import list entry.
# Type with constructor list acts like single import.
#
# > import qualified Data.Map as M
# > ( empty
# > , singleton
# > , ...
# > , delete
# > )
#
# Default: inline
long_list_align: inline
# Align empty list (importing instances)
#
# Empty list align has following options
#
# - inherit: inherit list_align setting
#
# - right_after: () is right after the module name:
#
# > import Vector.Instances ()
#
# Default: inherit
empty_list_align: inherit
# List padding determines indentation of import list on lines after import.
# This option affects 'long_list_align'.
#
# - <integer>: constant value
#
# - module_name: align under start of module name.
# Useful for 'file' and 'group' align settings.
list_padding: 4
# Separate lists option affects formatting of import list for type
# or class. The only difference is single space between type and list
# of constructors, selectors and class functions.
#
# - true: There is single space between Foldable type and list of it's
# functions.
#
# > import Data.Foldable (Foldable (fold, foldl, foldMap))
#
# - false: There is no space between Foldable type and list of it's
# functions.
#
# > import Data.Foldable (Foldable(fold, foldl, foldMap))
#
# Default: true
separate_lists: true
# Space surround option affects formatting of import lists on a single
# line. The only difference is single space after the initial
# parenthesis and a single space before the terminal parenthesis.
#
# - true: There is single space associated with the enclosing
# parenthesis.
#
# > import Data.Foo ( foo )
#
# - false: There is no space associated with the enclosing parenthesis
#
# > import Data.Foo (foo)
#
# Default: false
space_surround: false
# Language pragmas
- language_pragmas:
# We can generate different styles of language pragma lists.
#
# - vertical: Vertical-spaced language pragmas, one per line.
#
# - compact: A more compact style.
#
# - compact_line: Similar to compact, but wrap each line with
# `{-#LANGUAGE #-}'.
#
# Default: vertical.
style: vertical
# Align affects alignment of closing pragma brackets.
#
# - true: Brackets are aligned in same column.
#
# - false: Brackets are not aligned together. There is only one space
# between actual import and closing bracket.
#
# Default: true
align: true
# stylish-haskell can detect redundancy of some language pragmas. If this
# is set to true, it will remove those redundant pragmas. Default: true.
remove_redundant: true
# Replace tabs by spaces. This is disabled by default.
# - tabs:
# # Number of spaces to use for each tab. Default: 8, as specified by the
# # Haskell report.
# spaces: 8
# Remove trailing whitespace
- trailing_whitespace: {}
# A common setting is the number of columns (parts of) code will be wrapped
# to. Different steps take this into account. Default: 80.
columns: 80
# By default, line endings are converted according to the OS. You can override
# preferred format here.
#
# - native: Native newline format. CRLF on Windows, LF on other OSes.
#
# - lf: Convert to LF ("\n").
#
# - crlf: Convert to CRLF ("\r\n").
#
# Default: native.
newline: native
# Sometimes, language extensions are specified in a cabal file or from the
# command line instead of using language pragmas in the file. stylish-haskell
# needs to be aware of these, so it can parse the file correctly.
#
# No language extensions are enabled by default.
language_extensions:
- BangPatterns
- FlexibleContexts
- FlexibleInstances
- MultiParamTypeClasses
- MultiWayIf
- NoImplicitPrelude
- OverloadedStrings
- RankNTypes
- ScopedTypeVariables
- TupleSections
- TypeFamilies
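To make the effect of these settings concrete, here is a small hypothetical module laid out the way this configuration formats code: aligned `LANGUAGE` pragma brackets, globally aligned imports with the list after the alias, and `simple_align` applied to record fields and case arms. The module itself is purely illustrative.

``` haskell
{-# LANGUAGE OverloadedStrings   #-}
{-# LANGUAGE ScopedTypeVariables #-}

module Main (main) where

import           Data.Text    (Text)
import qualified Data.Text.IO as TIO

-- 'simple_align: records' lines up field names and types.
data Endpoint = Endpoint
    { epHost   :: Text
    , epPort   :: Int
    , epSecure :: Bool
    }

-- 'simple_align: cases' lines up the right-hand sides of the case arms.
describe :: Endpoint -> Text
describe ep = case epSecure ep of
  True  -> "https://" <> epHost ep
  False -> "http://" <> epHost ep

main :: IO ()
main = TIO.putStrLn (describe (Endpoint "play.min.io" 9000 True))
```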

.travis.yml Normal file

@ -0,0 +1,52 @@
sudo: true
language: haskell
git:
depth: 5
cabal: "2.4"
cache:
directories:
- "$HOME/.cabal/store"
- "$HOME/.stack"
- "$TRAVIS_BUILD_DIR/.stack-work"
matrix:
include:
# Cabal
- ghc: 8.2.2
- ghc: 8.4.4
- ghc: 8.6.5
# Stack
- ghc: 8.6.5
env: STACK_YAML="$TRAVIS_BUILD_DIR/stack.yaml"
install:
- |
if [ -z "$STACK_YAML" ]; then
ghc --version
cabal --version
cabal new-update
cabal new-build --enable-tests --enable-benchmarks
else
# install stack
curl -sSL https://get.haskellstack.org/ | sh
# build project with stack
stack --version
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks
fi
script:
- |
if [ -z "$STACK_YAML" ]; then
cabal new-test --enable-tests
else
stack test --system-ghc
fi
notifications:
email: false


@ -1,73 +1,6 @@
Changelog
==========
## Version 1.7.0 -- Unreleased
* Fix data type `EventMessage` to not export partial fields (#179)
* Bump up min bound on time dep and fix deprecation warnings (#181)
* Add `dev` flag to cabal for building with warnings as errors (#182)
* Fix AWS region map (#185)
* Fix XML generator tests (#187)
* Add support for STS Assume Role API (#188)
### Breaking changes in 1.7.0
* `Credentials` type has been removed. Use `CredentialValue` instead.
* `Provider` type has been replaced with `CredentialLoader`.
* `EventMessage` data type is updated.
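A hedged sketch of what the credential migration looks like in code: a static `CredentialValue` (replacing the removed `Credentials` type) feeding the new STS Assume Role API. Type and function names are taken from the `examples/AssumeRole` program further down in this diff; the key strings are placeholders.

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import Network.Minio
import Prelude

main :: IO ()
main = do
  -- Static credential in the new CredentialValue form.
  let creds = CredentialValue "your-access-key" "your-secret-key" Nothing
      -- Exchange it for temporary credentials via STS Assume Role.
      role =
        STSAssumeRole creds $
          defaultSTSAssumeRoleOptions {saroEndpoint = Just "https://play.min.io"}
  (temporary, expiry) <- requestSTSCredential role
  print (credentialValueText temporary, expiry)
```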
## Version 1.6.0
* HLint fixes - some types were changed to newtype (#173)
* Fix XML generation test for S3 SELECT (#161)
* Use region specific endpoints for AWS S3 in presigned Urls (#164)
* Replace protolude with relude and build with GHC 9.0.2 (#168)
* Support aeson 2 (#169)
* CI updates and code formatting changes with ormolu 0.5.0.0
## Version 1.5.3
* Fix windows build
* Fix support for Yandex Storage (#147)
* Fix for HEAD requests to S3/Minio (#155)
* Bump up some dependencies, new code formatting, Github CI, example fixes and other minor improvements.
## Version 1.5.2
* Fix region `us-west-2` for AWS S3 (#139)
* Build examples in CI
* Disable live-server tests by default, but run them in CI
* Drop support for GHC 8.2.x
## Version 1.5.1
* Add support for GHC 8.8
## Version 1.5.0
* Switch to faster map data type - all previous usage of
Data.Map.Strict and Data.Set is replaced with Data.HashMap.Strict
and Data.HashSet.
* Add `oiUserMetadata` to parse and return user metadata stored with
an object.
* Add `GetObjectResponse` data type for the value returned by
`getObject`. It now contains parsed ObjectInfo along with the
conduit of object bytes.
## Version 1.4.0
* Expose runMinioRes and runMinioResWith (#129)
* Improve Haddocks (#127)
* Fix list objects APIs to return directory prefixes when run with
recurse set to False (#126)
* Use streaming signature for streaming payloads when on an insecure
connection (#123)
## Version 1.3.1
* Add TLS helpers to check if server uses TLS, and to disable
certificate validation for easier testing (#121)
## Version 1.3.0
* Retry requests that timeout using full-jitter backoff (#119)

CNAME

@ -1 +0,0 @@
minio-hs.min.io

README.md

@ -1,65 +1,53 @@
# MinIO Haskell Client SDK for Amazon S3 Compatible Cloud Storage [![CI](https://github.com/minio/minio-hs/actions/workflows/ci.yml/badge.svg)](https://github.com/minio/minio-hs/actions/workflows/ci.yml)[![Hackage](https://img.shields.io/hackage/v/minio-hs.svg)](https://hackage.haskell.org/package/minio-hs)[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
# MinIO Client SDK for Haskell [![Build Status](https://travis-ci.org/minio/minio-hs.svg?branch=master)](https://travis-ci.org/minio/minio-hs)[![Hackage](https://img.shields.io/hackage/v/minio-hs.svg)](https://hackage.haskell.org/package/minio-hs)[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and any Amazon S3 compatible object storage.
The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and Amazon S3 compatible object storage server.
This guide assumes that you have a working [Haskell development environment](https://www.haskell.org/downloads/).
## Minimum Requirements
- The Haskell [stack](https://docs.haskellstack.org/en/stable/README/)
## Installation
### Add to your project
```sh
git clone https://github.com/minio/minio-hs.git
Simply add `minio-hs` to your project's `.cabal` dependencies section or if you are using hpack, to your `package.yaml` file as usual.
cd minio-hs/
### Try it out in a [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop)
#### For a cabal based environment
Download the library source and change to the extracted directory:
``` sh
$ cabal get minio-hs
$ cd minio-hs-1.6.0/ # directory name could be different
stack install
```
Then load the `ghci` REPL environment with the library and browse the available APIs:
``` sh
$ cabal repl
ghci> :browse Network.Minio
```
#### For a stack based environment
From your home folder or any non-haskell project directory, just run:
Tests can be run with:
```sh
stack install minio-hs
stack test
```
Then start an interpreter session and browse the available APIs with:
Some of the tests use the remote MinIO Play server at
`https://play.min.io:9000` by default. For library development,
using this remote server may be slow. To run the tests against a
locally running MinIO live server at `http://localhost:9000`, just set
the environment variable `MINIO_LOCAL` to any value (and unset it to
switch back to Play).
Documentation can be locally built with:
```sh
$ stack ghci
> :browse Network.Minio
stack haddock
```
## Examples
The [examples](https://github.com/minio/minio-hs/tree/master/examples) folder contains many examples that you can try out and use to learn and to help with developing your own projects.
### Quick-Start Example - File Uploader
This example program connects to a MinIO object storage server, makes a bucket on the server and then uploads a file to the bucket.
We will use the MinIO server running at https://play.min.io in this example. Feel free to use this service for testing and development. Access credentials are present in the library and are open to the public.
## Quick-Start Example - File Uploader
### FileUploader.hs
``` haskell
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs --package optparse-applicative --package filepath
-- stack --resolver lts-11.1 runghc --package minio-hs --package optparse-applicative --package filepath
--
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -88,7 +76,7 @@ import UnliftIO (throwIO, try)
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
@ -118,19 +106,16 @@ main = do
res <- runMinio minioPlayCI $ do
-- Make a bucket; catch bucket already exists exception if thrown.
bErr <- try $ makeBucket bucket Nothing
-- If the bucket already exists, we would get a specific
-- `ServiceErr` exception thrown.
case bErr of
Left BucketAlreadyOwnedByYou -> return ()
Left e -> throwIO e
Right _ -> return ()
Left (MErrService BucketAlreadyOwnedByYou) -> return ()
Left e -> throwIO e
Right _ -> return ()
-- Upload filepath to bucket; object name is derived from filepath.
fPutObject bucket object filepath defaultPutObjectOptions
-- Upload filepath to bucket; object is derived from filepath.
fPutObject bucket object filepath def
case res of
Left e -> putStrLn $ "file upload failed due to " ++ show e
Left e -> putStrLn $ "file upload failed due to " ++ (show e)
Right () -> putStrLn "file upload succeeded."
```
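The hunks above elide most of the example's body. The following compact sketch of the same flow uses only names that appear in the newer version of the example (`defaultPutObjectOptions` and the direct `BucketAlreadyOwnedByYou` match); the bucket, object and file names are placeholders.

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import Network.Minio
import UnliftIO (throwIO, try)
import Prelude

main :: IO ()
main = do
  res <- runMinio minioPlayCI $ do
    -- Create the bucket, tolerating the "already owned by you" error.
    bErr <- try $ makeBucket "my-bucket" Nothing
    case bErr of
      Left BucketAlreadyOwnedByYou -> return ()
      Left e -> throwIO e
      Right _ -> return ()
    -- Upload a local file under an explicit object name.
    fPutObject "my-bucket" "my-object" "/etc/hosts" defaultPutObjectOptions
  case res of
    Left e -> putStrLn $ "file upload failed due to " ++ show e
    Right () -> putStrLn "file upload succeeded."
```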
@ -144,55 +129,3 @@ main = do
## Contribute
[Contributors Guide](https://github.com/minio/minio-hs/blob/master/CONTRIBUTING.md)
### Development
#### Download the source
```sh
$ git clone https://github.com/minio/minio-hs.git
$ cd minio-hs/
```
#### Build the package:
With `cabal`:
```sh
$ # Configure cabal for development enabling all optional flags defined by the package.
$ cabal configure --enable-tests --test-show-details=direct -fexamples -fdev -flive-test
$ cabal build
```
With `stack`:
``` sh
$ stack build --test --no-run-tests --flag minio-hs:live-test --flag minio-hs:dev --flag minio-hs:examples
```
#### Running tests:
Some of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server may be slow. To run the tests against a locally running MinIO live server at `http://localhost:9000` with the credentials `access_key=minio` and `secret_key=minio123`, just set the environment variable `MINIO_LOCAL` to any value (and unset it to switch back to Play).
With `cabal`:
```sh
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
$ cabal test
```
With `stack`:
``` sh
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
stack test --flag minio-hs:live-test --flag minio-hs:dev
```
This will run all the test suites.
#### Building documentation:
```sh
$ cabal haddock
$ # OR
$ stack haddock
```

Setup.hs Normal file

@ -0,0 +1,18 @@
--
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
import Distribution.Simple
main = defaultMain


@ -20,15 +20,15 @@ awsCI { connectAccesskey = "your-access-key"
```
| Bucket operations | Object Operations | Presigned Operations |
|:--------------------------------------------------|:----------------------------------------------|:--------------------------------------------------|
| [`listBuckets`](#listBuckets) | [`getObject`](#getObject) | [`presignedGetObjectUrl`](#presignedGetObjectUrl) |
| [`makeBucket`](#makeBucket) | [`putObject`](#putObject) | [`presignedPutObjectUrl`](#presignedPutObjectUrl) |
| [`removeBucket`](#removeBucket) | [`fGetObject`](#fGetObject) | [`presignedPostPolicy`](#presignedPostPolicy) |
| [`listObjects`](#listObjects) | [`fPutObject`](#fPutObject) | |
| [`listObjectsV1`](#listObjectsV1) | [`copyObject`](#copyObject) | |
| [`listIncompleteUploads`](#listIncompleteUploads) | [`removeObject`](#removeObject) | |
| [`bucketExists`](#bucketExists) | [`selectObjectContent`](#selectObjectContent) | |
|Bucket operations|Object Operations|Presigned Operations|
|:---|:---|:---|
|[`listBuckets`](#listBuckets) |[`getObject`](#getObject)|[`presignedGetObjectUrl`](#presignedGetObjectUrl)|
|[`makeBucket`](#makeBucket)|[`putObject`](#putObject)|[`presignedPutObjectUrl`](#presignedPutObjectUrl)|
|[`removeBucket`](#removeBucket)|[`fGetObject`](#fGetObject)|[`presignedPostPolicy`](#presignedPostPolicy)|
|[`listObjects`](#listObjects)|[`fPutObject`](#fPutObject)||
|[`listObjectsV1`](#listObjectsV1)|[`copyObject`](#copyObject)||
|[`listIncompleteUploads`](#listIncompleteUploads)|[`removeObject`](#removeObject)||
|[`bucketExists`](#bucketExists)|[`selectObjectContent`](#selectObjectContent)||
## 1. Connecting and running operations on the storage service
@ -69,16 +69,16 @@ enable/disable the automatic region discovery behaviour.
The parameters in the expression `awsWithRegion region autoDiscover` are:
| Parameter | Type | Description |
|:---------------|:----------------------------|:---------------------------------------------------------------------------------------------------------------------------|
| `region` | _Region_ (alias for `Text`) | The region to connect to by default for all requests. |
| `autoDiscover` | _Bool_ | If `True`, region discovery will be enabled. If `False`, discovery is disabled, and all requests go the given region only. |
|Parameter|Type|Description|
|:---|:---|:---|
| `region` | _Region_ (alias for `Text`) | The region to connect to by default for all requests. |
| `autoDiscover` | _Bool_ | If `True`, region discovery will be enabled. If `False`, discovery is disabled, and all requests go the given region only.|
#### minioPlayCI :: ConnectInfo
This constructor provides connection and authentication information to
connect to the public MinIO Play server at
`https://play.min.io/`.
`https://play.min.io:9000/`.
#### minioCI :: Text -> Int -> Bool -> ConnectInfo
@ -86,25 +86,25 @@ Use to connect to a MinIO server.
The parameters in the expression `minioCI host port isSecure` are:
| Parameter | Type | Description |
|:-----------|:-------|:--------------------------------------------------------|
| `host` | _Text_ | Hostname of the MinIO or other S3-API compatible server |
| `port` | _Int_ | Port number to connect to |
| `isSecure` | _Bool_ | Does the server use HTTPS? |
|Parameter|Type|Description|
|:---|:---|:---|
| `host` | _Text_ | Hostname of the MinIO or other S3-API compatible server |
| `port` | _Int_ | Port number to connect to|
| `isSecure` | _Bool_ | Does the server use HTTPS? |
#### The ConnectInfo fields and Default instance
The following table shows the fields in the `ConnectInfo` record-type:
| Field | Type | Description |
|:----------------------------|:----------------------------|:--------------------------------------------------------------------------------------------------|
| `connectHost` | _Text_ | Host name of the server. Defaults to `localhost`. |
| `connectPort` | _Int_ | Port number on which the server listens. Defaults to `9000`. |
| `connectAccessKey` | _Text_ | Access key to use in authentication. Defaults to `minio`. |
| `connectSecretkey` | _Text_ | Secret key to use in authentication. Defaults to `minio123`. |
| `connectIsSecure` | _Bool_ | Specifies if the server used TLS. Defaults to `False` |
| `connectRegion` | _Region_ (alias for `Text`) | Specifies the region to use. Defaults to 'us-east-1' |
| `connectAutoDiscoverRegion` | _Bool_ | Specifies if the library should automatically discover the region of a bucket. Defaults to `True` |
| Field | Type | Description |
|:---|:---|:---|
| `connectHost` | _Text_ | Host name of the server. Defaults to `localhost`. |
| `connectPort` | _Int_ | Port number on which the server listens. Defaults to `9000`. |
| `connectAccessKey` | _Text_ | Access key to use in authentication. Defaults to `minio`. |
| `connectSecretkey` | _Text_ | Secret key to use in authentication. Defaults to `minio123`. |
| `connectIsSecure` | _Bool_ | Specifies if the server used TLS. Defaults to `False` |
| `connectRegion` | _Region_ (alias for `Text`) | Specifies the region to use. Defaults to 'us-east-1' |
| `connectAutoDiscoverRegion` | _Bool_ | Specifies if the library should automatically discover the region of a bucket. Defaults to `True`|
The `def` value of type `ConnectInfo` has all the above default
values.
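A hedged sketch of overriding a few of these fields on the `def` value and running a request with the result; the field names are copied from the table above, the key strings are placeholders, and the exact exports may differ between releases.

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import Data.Default (def)
import Network.Minio
import Prelude

main :: IO ()
main = do
  -- A plain HTTP connection to a local server with explicit credentials.
  let conn =
        def
          { connectHost      = "localhost",
            connectPort      = 9000,
            connectAccessKey = "your-access-key",
            connectSecretkey = "your-secret-key",
            connectIsSecure  = False
          }
  res <- runMinio conn listBuckets
  print res
```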
@ -148,17 +148,17 @@ Lists buckets.
__Return Value__
| Return type | Description |
|:---------------------|:----------------|
| _Minio [BucketInfo]_ | List of buckets |
|Return type |Description |
|:---|:---|
| _Minio [BucketInfo]_| List of buckets |
__BucketInfo record type__
| Field | Type | Description |
|:-----------------|:---------------------------|:----------------------------|
| `biName` | _Bucket_ (alias of `Text`) | Name of the bucket |
| `biCreationDate` | _UTCTime_ | Creation time of the bucket |
|Field |Type |Description |
|:---|:---| :---|
| `biName` | _Bucket_ (alias of `Text`) | Name of the bucket |
| `biCreationDate` | _UTCTime_ | Creation time of the bucket |
<a name="makeBucket"></a>
@ -171,10 +171,10 @@ __Parameters__
In the expression `makeBucket bucketName region` the arguments are:
| Param | Type | Description |
|--------------|-----------------------------|-----------------------------------------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `region` | _Maybe Region_ | Region where the bucket is to be created. If not specified, default to the region in `ConnectInfo`. |
| Param | Type | Description |
|---|---|---|
|`bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `region` | _Maybe Region_ | Region where the bucket is to be created. If not specified, default to the region in `ConnectInfo`.|
__Example__
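The example body is elided by the diff; a minimal hedged sketch of creating a bucket in an explicit region (bucket name and region are placeholders):

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import Network.Minio
import Prelude

main :: IO ()
main = do
  res <- runMinio minioPlayCI $ makeBucket "my-new-bucket" (Just "us-east-1")
  either print (const $ putStrLn "bucket created.") res
```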
@ -200,9 +200,9 @@ __Parameters__
In the expression `removeBucket bucketName` the arguments are:
| Param | Type | Description |
|--------------|-----------------------------|--------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| Param | Type | Description |
|---|---|---|
|`bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
__Example__
@ -233,27 +233,26 @@ __Parameters__
In the expression `listObjects bucketName prefix recursive` the
arguments are:
| Param | Type | Description |
|:-------------|:----------------------------|:---------------------------------------------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `prefix` | _Maybe Text_ | Optional prefix that listed objects should have |
| `recursive` | _Bool_ | `True` indicates recursive style listing and `False` indicates directory style listing delimited by '/'. |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `prefix` | _Maybe Text_ | Optional prefix that listed objects should have |
| `recursive` | _Bool_ |`True` indicates recursive style listing and `False` indicates directory style listing delimited by '/'. |
__Return Value__
| Return type | Description |
|:------------------------------------|:------------------------------------------------------------------------|
| _C.ConduitM () ObjectInfo Minio ()_ | A Conduit Producer of `ObjectInfo` values corresponding to each object. |
|Return type |Description |
|:---|:---|
| _C.ConduitM () ObjectInfo Minio ()_ | A Conduit Producer of `ObjectInfo` values corresponding to each object. |
__ObjectInfo record type__
| Field | Type | Description |
|:-------------|:----------------------------|:-------------------------------------|
| `oiObject` | _Object_ (alias for `Text`) | Name of object |
| `oiModTime` | _UTCTime_ | Last modified time of the object |
| `oiETag` | _ETag_ (alias for `Text`) | ETag of the object |
| `oiSize` | _Int64_ | Size of the object in bytes |
| `oiMetadata` | _HashMap Text Text_ | Map of key-value user-metadata pairs |
|Field |Type |Description |
|:---|:---| :---|
|`oiObject` | _Object_ (alias for `Text`) | Name of object |
|`oiModTime` | _UTCTime_ | Last modified time of the object |
|`oiETag` | _ETag_ (alias for `Text`) | ETag of the object |
|`oiSize` | _Int64_ | Size of the object in bytes |
__Example__
@ -266,7 +265,7 @@ import Prelude
-- | The following example uses MinIO play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
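The rest of the example is elided by the hunk; a hedged sketch that drains the `listObjects` conduit with `runConduit`/`sinkList` (bucket name and prefix are placeholders):

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import Conduit (runConduit, sinkList, (.|))
import Network.Minio
import Prelude

main :: IO ()
main = do
  res <- runMinio minioPlayCI $
    -- Recursive listing of every object under the "photos/" prefix.
    runConduit $ listObjects "my-bucket" (Just "photos/") True .| sinkList
  either print (mapM_ (print . oiObject)) res
```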
@ -295,26 +294,26 @@ __Parameters__
In the expression `listObjectsV1 bucketName prefix recursive` the
arguments are:
| Param | Type | Description |
|:-------------|:----------------------------|:---------------------------------------------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `prefix` | _Maybe Text_ | Optional prefix that listed objects should have |
| `recursive` | _Bool_ | `True` indicates recursive style listing and `False` indicates directory style listing delimited by '/'. |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `prefix` | _Maybe Text_ | Optional prefix that listed objects should have |
| `recursive` | _Bool_ |`True` indicates recursive style listing and `False` indicates directory style listing delimited by '/'. |
__Return Value__
| Return type | Description |
|:------------------------------------|:------------------------------------------------------------------------|
| _C.ConduitM () ObjectInfo Minio ()_ | A Conduit Producer of `ObjectInfo` values corresponding to each object. |
|Return type |Description |
|:---|:---|
| _C.ConduitM () ObjectInfo Minio ()_ | A Conduit Producer of `ObjectInfo` values corresponding to each object. |
__ObjectInfo record type__
| Field | Type | Description |
|:------------|:----------------------------|:---------------------------------|
| `oiObject` | _Object_ (alias for `Text`) | Name of object |
| `oiModTime` | _UTCTime_ | Last modified time of the object |
| `oiETag` | _ETag_ (alias for `Text`) | ETag of the object |
| `oiSize` | _Int64_ | Size of the object in bytes |
|Field |Type |Description |
|:---|:---| :---|
|`oiObject` | _Object_ (alias for `Text`) | Name of object |
|`oiModTime` | _UTCTime_ | Last modified time of the object |
|`oiETag` | _ETag_ (alias for `Text`) | ETag of the object |
|`oiSize` | _Int64_ | Size of the object in bytes |
__Example__
@ -327,7 +326,7 @@ import Prelude
-- | The following example uses MinIO play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
@ -355,11 +354,11 @@ __Parameters__
In the expression `listIncompleteUploads bucketName prefix recursive`
the parameters are:
| Param | Type | Description |
|:-------------|:----------------------------|:---------------------------------------------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `prefix` | _Maybe Text_ | Optional prefix that listed objects should have. |
| `recursive`  | _Bool_                      | `True` indicates recursive style listing and `False` indicates directory style listing delimited by '/'.  |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `prefix` | _Maybe Text_ | Optional prefix that listed objects should have. |
| `recursive` | _Bool_ |`True` indicates recursive style listing and `False` indicates directory style listing delimited by '/'.  |
__Return Value__
@ -369,11 +368,11 @@ __Return Value__
__UploadInfo record type__
| Field | Type | Description |
|:-------------|:---------|:------------------------------------------|
| `uiKey` | _Object_ | Name of incompletely uploaded object |
| `uiUploadId` | _String_ | Upload ID of incompletely uploaded object |
| `uiSize` | _Int64_ | Size of incompletely uploaded object |
|Field |Type |Description |
|:---|:---| :---|
|`uiKey` | _Object_ |Name of incompletely uploaded object |
|`uiUploadId` | _String_ |Upload ID of incompletely uploaded object |
|`uiSize` | _Int64_ |Size of incompletely uploaded object |
__Example__
@ -385,7 +384,7 @@ import Conduit
import Prelude
-- | The following example uses MinIO play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
@ -416,29 +415,29 @@ __Parameters__
In the expression `getObject bucketName objectName opts` the parameters
are:
| Param | Type | Description |
|:-------------|:----------------------------|:----------------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `opts` | _GetObjectOptions_ | Options for GET requests specifying additional options like If-Match, Range |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `opts` | _GetObjectOptions_ | Options for GET requests specifying additional options like If-Match, Range |
__GetObjectOptions record type__
| Field | Type | Description |
|:-----------------------|:--------------------------------|:------------------------------------------------------------------------------------------------------|
| `gooRange` | `Maybe ByteRanges` | Represents the byte range of object. E.g ByteRangeFromTo 0 9 represents first ten bytes of the object |
| `gooIfMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object should match |
| `gooIfNoneMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object shouldn't match |
| `gooIfUnmodifiedSince` | `Maybe UTCTime` | (Optional) Time since object wasn't modified |
| `gooIfModifiedSince` | `Maybe UTCTime` | (Optional) Time since object was modified |
|Field |Type |Description |
|:---|:---| :---|
| `gooRange` | `Maybe ByteRanges` | Represents the byte range of object. E.g ByteRangeFromTo 0 9 represents first ten bytes of the object|
| `gooIfMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object should match |
| `gooIfNoneMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object shouldn't match |
| `gooIfUnmodifiedSince` | `Maybe UTCTime` | (Optional) Time since object wasn't modified |
| `gooIfModifiedSince` | `Maybe UTCTime` | (Optional) Time since object was modified |
__Return Value__
The return value can be incrementally read to process the contents of
the object.
| Return type | Description |
|:--------------------------------------------|:-----------------------------------------|
| _Minio (C.ConduitM () ByteString Minio ())_ | A Conduit source of `ByteString` values. |
|Return type |Description |
|:---|:---|
| _Minio (C.ConduitM () ByteString Minio ())_ | A Conduit source of `ByteString` values. |
__Example__
@ -452,7 +451,7 @@ import qualified Data.Conduit.Binary as CB
import Prelude
-- | The following example uses MinIO play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
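The example body is elided; below is a hedged sketch following the signature documented above, with the options built from the data-default `def` and the result consumed as a `ByteString` conduit. Note that later releases wrap the result in a `GetObjectResponse` (see the changelog earlier in this diff), so the exact shape depends on the version.

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import           Conduit              (runConduit, (.|))
import qualified Data.ByteString.Lazy as LB
import           Data.Conduit.Binary  (sinkLbs)
import           Data.Default         (def)
import           Network.Minio
import           Prelude

main :: IO ()
main = do
  res <- runMinio minioPlayCI $ do
    -- 'getObject' yields a ByteString source; drain it into memory here.
    src <- getObject "my-bucket" "my-object" def
    lbs <- runConduit $ src .| sinkLbs
    return (LB.length lbs)
  either print (\n -> putStrLn $ "downloaded " ++ show n ++ " bytes") res
```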
@ -483,13 +482,13 @@ __Parameters__
In the expression `putObject bucketName objectName inputSrc` the parameters
are:
| Param | Type | Description |
|:-------------|:------------------------------------|:------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `inputSrc` | _C.ConduitM () ByteString Minio ()_ | A Conduit producer of `ByteString` values |
| `size` | _Int64_ | Provide stream size (optional) |
| `opts` | _PutObjectOptions_ | Optional parameters to provide additional metadata for the object |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `inputSrc` | _C.ConduitM () ByteString Minio ()_ | A Conduit producer of `ByteString` values |
| `size` | _Int64_ | Provide stream size (optional) |
| `opts` | _PutObjectOptions_ | Optional parameters to provide additional metadata for the object |
__Example__
@ -502,7 +501,7 @@ import qualified Data.Conduit.Combinators as CC
import Prelude
-- | The following example uses MinIO play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
@ -534,23 +533,23 @@ __Parameters__
In the expression `fGetObject bucketName objectName inputFile` the parameters
are:
| Param | Type | Description |
|:-------------|:----------------------------|:----------------------------------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `inputFile` | _FilePath_ | Path to the file to be uploaded |
| `opts` | _GetObjectOptions_ | Options for GET requests specifying additional options like If-Match, Range |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `inputFile` | _FilePath_ | Path to the file to be uploaded |
| `opts` | _GetObjectOptions_ | Options for GET requests specifying additional options like If-Match, Range |
__GetObjectOptions record type__
| Field | Type | Description |
|:-----------------------|:--------------------------------|:------------------------------------------------------------------------------------------------------|
| `gooRange` | `Maybe ByteRanges` | Represents the byte range of object. E.g ByteRangeFromTo 0 9 represents first ten bytes of the object |
| `gooIfMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object should match |
| `gooIfNoneMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object shouldn't match |
| `gooIfUnmodifiedSince` | `Maybe UTCTime` | (Optional) Time since object wasn't modified |
| `gooIfModifiedSince` | `Maybe UTCTime` | (Optional) Time since object was modified |
|Field |Type |Description |
|:---|:---| :---|
| `gooRange` | `Maybe ByteRanges` | Represents the byte range of object. E.g ByteRangeFromTo 0 9 represents first ten bytes of the object|
| `gooIfMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object should match |
| `gooIfNoneMatch` | `Maybe ETag` (alias for `Text`) | (Optional) ETag of object shouldn't match |
| `gooIfUnmodifiedSince` | `Maybe UTCTime` | (Optional) Time since object wasn't modified |
| `gooIfModifiedSince` | `Maybe UTCTime` | (Optional) Time since object was modified |
``` haskell
@ -562,7 +561,7 @@ import Data.Conduit.Binary (sinkLbs)
import Prelude
-- | The following example uses MinIO play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
@ -593,11 +592,11 @@ __Parameters__
In the expression `fPutObject bucketName objectName inputFile` the parameters
are:
| Param | Type | Description |
|:-------------|:----------------------------|:--------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `inputFile` | _FilePath_ | Path to the file to be uploaded |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `inputFile` | _FilePath_ | Path to the file to be uploaded |
__Example__
@ -630,28 +629,28 @@ __Parameters__
In the expression `copyObject dstInfo srcInfo` the parameters
are:
| Param | Type | Description |
|:----------|:------------------|:----------------------------------------------------------|
|Param |Type |Description |
|:---|:---| :---|
| `dstInfo` | _DestinationInfo_ | A value representing properties of the destination object |
| `srcInfo` | _SourceInfo_ | A value representing properties of the source object |
| `srcInfo` | _SourceInfo_ | A value representing properties of the source object |
__SourceInfo record type__
| Field | Type | Description |
|:-----------------------|:-----------------------|:----------------------------------------------------------------------------------------------------------|
| `srcBucket` | `Bucket` | Name of source bucket |
| `srcObject` | `Object` | Name of source object |
| `srcRange` | `Maybe (Int64, Int64)` | (Optional) Represents the byte range of source object. (0, 9) represents first ten bytes of source object |
| `srcIfMatch` | `Maybe Text` | (Optional) ETag source object should match |
| `srcIfNoneMatch` | `Maybe Text` | (Optional) ETag source object shouldn't match |
| `srcIfUnmodifiedSince` | `Maybe UTCTime` | (Optional) Time since source object wasn't modified |
| `srcIfModifiedSince` | `Maybe UTCTime` | (Optional) Time since source object was modified |
|Field |Type |Description |
|:---|:---| :---|
| `srcBucket` | `Bucket` | Name of source bucket |
| `srcObject` | `Object` | Name of source object |
| `srcRange` | `Maybe (Int64, Int64)` | (Optional) Represents the byte range of source object. (0, 9) represents first ten bytes of source object|
| `srcIfMatch` | `Maybe Text` | (Optional) ETag source object should match |
| `srcIfNoneMatch` | `Maybe Text` | (Optional) ETag source object shouldn't match |
| `srcIfUnmodifiedSince` | `Maybe UTCTime` | (Optional) Time since source object wasn't modified |
| `srcIfModifiedSince` | `Maybe UTCTime` | (Optional) Time since source object was modified |
__Destination record type__
| Field | Type | Description |
|:------------|:---------|:-----------------------------------------------------|
|Field |Type |Description |
|:---|:---| :---|
| `dstBucket` | `Bucket` | Name of destination bucket in server-side copyObject |
| `dstObject` | `Object` | Name of destination object in server-side copyObject |
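A hedged sketch of a server-side copy built from the record defaults used in the `CopyObject` example later in this diff; bucket and object names are placeholders.

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import Network.Minio
import Prelude

main :: IO ()
main = do
  res <- runMinio minioPlayCI $
    -- Copy my-bucket/obj to my-bucket/obj-copy on the server side.
    copyObject
      defaultDestinationInfo {dstBucket = "my-bucket", dstObject = "obj-copy"}
      defaultSourceInfo {srcBucket = "my-bucket", srcObject = "obj"}
  either print (const $ putStrLn "copyObject succeeded.") res
```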
@ -685,10 +684,10 @@ __Parameters__
In the expression `removeObject bucketName objectName` the parameters
are:
| Param | Type | Description |
|:-------------|:----------------------------|:-------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
__Example__
@ -719,10 +718,10 @@ __Parameters__
In the expression `removeIncompleteUpload bucketName objectName` the parameters
are:
| Param | Type | Description |
|:-------------|:----------------------------|:-------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
__Example__
@ -753,11 +752,11 @@ __Parameters__
In the expression `selectObjectContent bucketName objectName selReq`
the parameters are:
| Param | Type | Description |
|:-------------|:----------------------------|:--------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `selReq` | _SelectRequest_ | Select request parameters |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `selReq` | _SelectRequest_ | Select request parameters |
__SelectRequest record__
@ -805,9 +804,9 @@ __Parameters__
In the expression `bucketExists bucketName` the parameters are:
| Param | Type | Description |
|:-------------|:----------------------------|:-------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
## 4. Presigned operations
@ -829,22 +828,22 @@ __Parameters__
In the expression `presignedGetObjectUrl bucketName objectName expiry queryParams headers`
the parameters are:
| Param | Type | Description |
|:--------------|:---------------------------------------------------------------|:------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `expiry` | _UrlExpiry_ (alias for `Int`) | Url expiry time in seconds |
| `queryParams` | _Query_ (from package `http-types:Network.HTTP.Types`) | Query parameters to add to the URL |
| `headers` | _RequestHeaders_ (from package `http-types:Network.HTTP.Types` | Request headers that would be used with the URL |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `expiry` | _UrlExpiry_ (alias for `Int`) | Url expiry time in seconds |
| `queryParams` | _Query_ (from package `http-types:Network.HTTP.Types`) | Query parameters to add to the URL |
| `headers` | _RequestHeaders_ (from package `http-types:Network.HTTP.Types` | Request headers that would be used with the URL |
__Return Value__
Returns the generated URL - it will include authentication
information.
| Return type | Description |
|:-------------|:------------------------|
| _ByteString_ | Generated presigned URL |
|Return type |Description |
|:---|:---|
| _ByteString_ | Generated presigned URL |
__Example__
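The example is elided by the diff; a hedged sketch generating a download URL valid for one hour, with no extra query parameters or headers (bucket and object names are placeholders):

``` haskell
{-# LANGUAGE OverloadedStrings #-}

import qualified Data.ByteString.Char8 as Char8
import Network.Minio
import Prelude

main :: IO ()
main = do
  res <- runMinio minioPlayCI $
    presignedGetObjectUrl "my-bucket" "my-object" (60 * 60) [] []
  either print Char8.putStrLn res
```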
@ -887,21 +886,21 @@ __Parameters__
In the expression `presignedPutObjectUrl bucketName objectName expiry headers`
the parameters are:
| Param | Type | Description |
|:-------------|:---------------------------------------------------------------|:------------------------------------------------|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `expiry` | _UrlExpiry_ (alias for `Int`) | Url expiry time in seconds |
| `headers` | _RequestHeaders_ (from package `http-types:Network.HTTP.Types` | Request headers that would be used with the URL |
|Param |Type |Description |
|:---|:---| :---|
| `bucketName` | _Bucket_ (alias for `Text`) | Name of the bucket |
| `objectName` | _Object_ (alias for `Text`) | Name of the object |
| `expiry` | _UrlExpiry_ (alias for `Int`) | Url expiry time in seconds |
| `headers` | _RequestHeaders_ (from package `http-types:Network.HTTP.Types` | Request headers that would be used with the URL |
__Return Value__
Returns the generated URL - it will include authentication
information.
| Return type | Description |
|:-------------|:------------------------|
| _ByteString_ | Generated presigned URL |
|Return type |Description |
|:---|:---|
| _ByteString_ | Generated presigned URL |
__Example__
@ -929,7 +928,7 @@ main = do
```
<a name="presignedPostPolicy"></a>
### presignedPostPolicy :: PostPolicy -> Minio (ByteString, HashMap Text ByteString)
### presignedPostPolicy :: PostPolicy -> Minio (ByteString, Map.Map Text ByteString)
Generate a presigned URL and POST policy to upload files via a POST
request. This is intended for browser uploads and generates form data
@ -941,10 +940,10 @@ The `PostPolicy` argument is created using the `newPostPolicy` function:
In the expression `newPostPolicy expirationTime conditions` the parameters are:
| Param | Type | Description |
|:-----------------|:--------------------------------------------------|:---------------------------------------------|
| `expirationTime` | _UTCTime_ (from package `time:Data.Time.UTCTime`) | The expiration time for the policy |
| `conditions` | _[PostPolicyConditions]_ | List of conditions to be added to the policy |
|Param | Type| Description |
|:---|:---|:---|
| `expirationTime` | _UTCTime_ (from package `time:Data.Time.UTCTime`) | The expiration time for the policy |
| `conditions` | _[PostPolicyConditions]_ | List of conditions to be added to the policy |
The policy conditions are created using various helper functions -
please refer to the Haddocks for details.
@ -966,7 +965,7 @@ import Network.Minio
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as Char8
import qualified Data.HashMap.Strict as H
import qualified Data.Map.Strict as Map
import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time
@ -1006,7 +1005,7 @@ main = do
let
formFn (k, v) = B.concat ["-F ", Enc.encodeUtf8 k, "=",
"'", v, "'"]
formOptions = B.intercalate " " $ map formFn $ H.toList formData
formOptions = B.intercalate " " $ map formFn $ Map.toList formData
return $ B.intercalate " " $


@ -1,47 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
main :: IO ()
main = do
-- Use play credentials for example.
let assumeRole =
STSAssumeRole
( CredentialValue
"Q3AM3UQ867SPQQA43P2F"
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
Nothing
)
$ defaultSTSAssumeRoleOptions
{ saroLocation = Just "us-east-1",
saroEndpoint = Just "https://play.min.io:9000"
}
-- Retrieve temporary credentials and print them.
cv <- requestSTSCredential assumeRole
print $ "Temporary credentials" ++ show (credentialValueText $ fst cv)
print $ "Expiry" ++ show (snd cv)
-- Configure 'ConnectInfo' to request temporary credentials on demand.
ci <- setSTSCredential assumeRole "https://play.min.io"
res <- runMinio ci $ do
buckets <- listBuckets
liftIO $ print $ "Top 5 buckets: " ++ show (take 5 buckets)
print res


@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,17 +16,20 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
{-# Language OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "missingbucket"

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,40 +16,41 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import UnliftIO.Exception (catch, throwIO)
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.Catch (catchIf)
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
ignoreMinioErr :: ServiceErr -> Minio ()
ignoreMinioErr = return . const ()
main :: IO ()
main = do
let bucket = "test"
let
bucket = "test"
object = "obj"
objectCopy = "obj-copy"
localFile = "/etc/lsb-release"
res1 <- runMinio minioPlayCI $ do
-- 1. Make a bucket; Catch BucketAlreadyOwnedByYou exception.
catch
(makeBucket bucket Nothing)
( \e -> case e of
BucketAlreadyOwnedByYou -> return ()
_ -> throwIO e
)
catchIf (== BucketAlreadyOwnedByYou) (makeBucket bucket Nothing) ignoreMinioErr
-- 2. Upload a file to bucket/object.
fPutObject bucket object localFile defaultPutObjectOptions
fPutObject bucket object localFile
-- 3. Copy bucket/object to bucket/objectCopy.
copyObject
defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy}
defaultSourceInfo {srcBucket = bucket, srcObject = object}
copyObject def {dstBucket = bucket, dstObject = objectCopy} def { srcBucket = bucket , srcObject = object }
case res1 of
Left e -> putStrLn $ "copyObject failed." ++ show e
Left e -> putStrLn $ "copyObject failed." ++ show e
Right () -> putStrLn "copyObject succeeded."

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs --package optparse-applicative --package filepath
-- stack --resolver lts-11.1 runghc --package minio-hs --package optparse-applicative --package filepath
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,39 +16,39 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
import Data.Text (pack)
import Network.Minio
import Options.Applicative
import System.FilePath.Posix
import UnliftIO (throwIO, try)
import Prelude
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
import Network.Minio
import Data.Monoid ((<>))
import Data.Text (pack)
import Options.Applicative
import System.FilePath.Posix
import UnliftIO (throwIO, try)
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
-- optparse-applicative package based command-line parsing.
fileNameArgs :: Parser FilePath
fileNameArgs =
strArgument
( metavar "FILENAME"
<> help "Name of file to upload to AWS S3 or a MinIO server"
)
fileNameArgs = strArgument
(metavar "FILENAME"
<> help "Name of file to upload to AWS S3 or a MinIO server")
cmdParser :: ParserInfo FilePath
cmdParser =
info
(helper <*> fileNameArgs)
( fullDesc
<> progDesc "FileUploader"
<> header
"FileUploader - a simple file-uploader program using minio-hs"
)
cmdParser = info
(helper <*> fileNameArgs)
(fullDesc
<> progDesc "FileUploader"
<> header
"FileUploader - a simple file-uploader program using minio-hs")
main :: IO ()
main = do
@ -62,13 +62,13 @@ main = do
-- Make a bucket; catch bucket already exists exception if thrown.
bErr <- try $ makeBucket bucket Nothing
case bErr of
Left BucketAlreadyOwnedByYou -> return ()
Left e -> throwIO e
Right _ -> return ()
Left (MErrService BucketAlreadyOwnedByYou) -> return ()
Left e -> throwIO e
Right _ -> return ()
-- Upload filepath to bucket; object is derived from filepath.
fPutObject bucket object filepath defaultPutObjectOptions
fPutObject bucket object filepath def
case res of
Left e -> putStrLn $ "file upload failed due to " ++ show e
Left e -> putStrLn $ "file upload failed due to " ++ (show e)
Right () -> putStrLn "file upload succeeded."

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -17,14 +17,14 @@
-- limitations under the License.
--
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <-
runMinio
minioPlayCI
res <- runMinio def $
getConfig
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,26 +16,31 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Conduit as C
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import Network.Minio
import Prelude
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "my-bucket"
let
bucket = "my-bucket"
object = "my-object"
res <- runMinio minioPlayCI $ do
src <- getObject bucket object defaultGetObjectOptions
C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object"
src <- getObject bucket object def
C.connect src $ CB.sinkFileCautious "/tmp/my-object"
case res of
Left e -> putStrLn $ "getObject failed." ++ show e
Left e -> putStrLn $ "getObject failed." ++ (show e)
Right _ -> putStrLn "getObject succeeded."

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,25 +16,28 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.S3API
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.S3API
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "test"
let
bucket = "test"
object = "passwd"
res <-
runMinio minioPlayCI $
headObject bucket object []
res <- runMinio minioPlayCI $
headObject bucket object
case res of
Left e -> putStrLn $ "headObject failed." ++ show e
Left e -> putStrLn $ "headObject failed." ++ show e
Right objInfo -> putStrLn $ "headObject succeeded." ++ show objInfo

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -17,21 +17,18 @@
-- limitations under the License.
--
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
res <- runMinio def $
do
hsr <-
startHeal
Nothing
Nothing
HealOpts
{ hoRecursive = True,
hoDryRun = False
}
hsr <- startHeal Nothing Nothing HealOpts { hoRecursive = True
, hoDryRun = False
}
getHealStatus Nothing Nothing (hsrClientToken hsr)
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,17 +16,19 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO)
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
-- This example lists buckets that belong to the user and returns the
-- region of the first bucket returned.

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,36 +16,38 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Conduit
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Conduit
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "test"
let
bucket = "test"
-- Performs a recursive listing of incomplete uploads under bucket "test"
-- on a local minio server.
res <-
runMinio minioPlayCI $
runConduit $
listIncompleteUploads bucket Nothing True .| mapM_C (liftIO . print)
res <- runMinio minioPlayCI $
runConduit $ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
print res
{-
Following is the output of the above program on a local MinIO server.
{-
Following is the output of the above program on a local MinIO server.
Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz"
, uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2"
, uiInitTime = 2017-03-01 10:16:25.698 UTC
, uiSize = 17731794
}
]
-}
Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz"
, uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2"
, uiInitTime = 2017-03-01 10:16:25.698 UTC
, uiSize = 17731794
}
]
-}

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,31 +16,33 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Conduit
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Conduit
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "test"
let
bucket = "test"
-- Performs a recursive listing of all objects under bucket "test"
-- on play.min.io.
res <-
runMinio minioPlayCI $
runConduit $
listObjects bucket Nothing True .| mapM_C (liftIO . print)
res <- runMinio minioPlayCI $
runConduit $ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
print res
{-
Following is the output of the above program on a local MinIO server.
{-
Following is the output of the above program on a local MinIO server.
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
-}
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
-}

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,21 +16,24 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "my-bucket"
res <-
runMinio minioPlayCI $
-- N B the region provided for makeBucket is optional.
makeBucket bucket (Just "us-east-1")
res <- runMinio minioPlayCI $
-- N B the region provided for makeBucket is optional.
makeBucket bucket (Just "us-east-1")
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,40 +16,42 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Control.Monad.IO.Class (liftIO)
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
import qualified Data.Conduit.Combinators as CC
import qualified Data.Text.Encoding as E
import Network.Minio
import qualified Data.Text.Encoding as E
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "my-bucket"
object = "my-object"
kb15 = 15 * 1024
-- Set query parameter to modify content disposition response
-- header
queryParam =
[ ( "response-content-disposition",
Just "attachment; filename=\"your-filename.txt\""
)
]
let
bucket = "my-bucket"
object = "my-object"
kb15 = 15*1024
-- Set query parameter to modify content disposition response
-- header
queryParam = [("response-content-disposition",
Just "attachment; filename=\"your-filename.txt\"")]
res <- runMinio minioPlayCI $ do
liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..."
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
liftIO $ putStrLn "Done. Object created at: my-bucket/my-object"
putObject bucket object (CC.repeat "a") (Just kb15) def
liftIO $ putStrLn $ "Done. Object created at: my-bucket/my-object"
-- Extract Etag of uploaded object
oi <- statObject bucket object defaultGetObjectOptions
oi <- statObject bucket object
let etag = oiETag oi
-- Set header to add an if-match constraint - this makes sure
@ -59,29 +61,23 @@ main = do
-- Generate a URL with 7 days expiry time - note that the headers
-- used above must be added to the request with the signed URL
-- generated.
url <-
presignedGetObjectUrl
"my-bucket"
"my-object"
(7 * 24 * 3600)
queryParam
headers
url <- presignedGetObjectUrl "my-bucket" "my-object" (7*24*3600)
queryParam headers
return (headers, etag, url)
case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right (headers, _, url) -> do
Right (headers, etag, url) -> do
-- We generate a curl command to demonstrate usage of the signed
-- URL.
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd =
B.intercalate " " $
["curl --fail"]
++ map hdrOpt headers
++ ["-o /tmp/myfile", B.concat ["'", url, "'"]]
let
hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd = B.intercalate " " $
["curl --fail"] ++ map hdrOpt headers ++
["-o /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $
"The following curl command would use the presigned "
++ "URL to fetch the object and write it to \"/tmp/myfile\":"
putStrLn $ "The following curl command would use the presigned " ++
"URL to fetch the object and write it to \"/tmp/myfile\":"
B.putStrLn curlCmd

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,72 +16,69 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.ByteString as B
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as Char8
import qualified Data.HashMap.Strict as H
import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time
import Network.Minio
import qualified Data.Map.Strict as Map
import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
now <- Time.getCurrentTime
let bucket = "my-bucket"
object = "photos/my-object"
-- set an expiration time of 10 days
expireTime = Time.addUTCTime (3600 * 24 * 10) now
-- create a policy with expiration time and conditions - since the
-- conditions are validated, newPostPolicy returns an Either value
policyE =
newPostPolicy
expireTime
[ -- set the object name condition
ppCondKey object,
-- set the bucket name condition
ppCondBucket bucket,
-- set the size range of object as 1B to 10MiB
ppCondContentLengthRange 1 (10 * 1024 * 1024),
-- set content type as jpg image
ppCondContentType "image/jpeg",
-- on success set the server response code to 200
ppCondSuccessActionStatus 200
]
let
bucket = "my-bucket"
object = "my-object"
-- set an expiration time of 10 days
expireTime = Time.addUTCTime (3600 * 24 * 10) now
-- create a policy with expiration time and conditions - since the
-- conditions are validated, newPostPolicy returns an Either value
policyE = newPostPolicy expireTime
[ -- set the object name condition
ppCondKey "photos/my-object"
-- set the bucket name condition
, ppCondBucket "my-bucket"
-- set the size range of object as 1B to 10MiB
, ppCondContentLengthRange 1 (10*1024*1024)
-- set content type as jpg image
, ppCondContentType "image/jpeg"
-- on success set the server response code to 200
, ppCondSuccessActionStatus 200
]
case policyE of
Left err -> print err
Left err -> putStrLn $ show err
Right policy -> do
res <- runMinio minioPlayCI $ do
(url, formData) <- presignedPostPolicy policy
-- a curl command is output to demonstrate using the generated
-- URL and form-data
let formFn (k, v) =
B.concat
[ "-F ",
Enc.encodeUtf8 k,
"=",
"'",
v,
"'"
]
formOptions = B.intercalate " " $ map formFn $ H.toList formData
let
formFn (k, v) = B.concat ["-F ", Enc.encodeUtf8 k, "=",
"'", v, "'"]
formOptions = B.intercalate " " $ map formFn $ Map.toList formData
return $
B.intercalate
" "
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
return $ B.intercalate " " $
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
case res of
Left e -> putStrLn $ "post-policy error: " ++ show e
Left e -> putStrLn $ "post-policy error: " ++ (show e)
Right cmd -> do
putStrLn "Put a photo at /tmp/photo.jpg and run command:\n"
putStrLn $ "Put a photo at /tmp/photo.jpg and run command:\n"
-- print the generated curl command
Char8.putStrLn cmd

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,43 +16,44 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
import Network.Minio
import Data.CaseInsensitive (original)
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let -- Use headers to set user-metadata - note that this header will
-- need to be set when the URL is used to make an upload.
headers =
[ ( "x-amz-meta-url-creator",
"minio-hs-presigned-put-example"
)
]
let
-- Use headers to set user-metadata - note that this header will
-- need to be set when the URL is used to make an upload.
headers = [("x-amz-meta-url-creator",
"minio-hs-presigned-put-example")]
res <- runMinio minioPlayCI $ do
-- generate a URL with 7 days expiry time
presignedPutObjectUrl "my-bucket" "my-object" (7 * 24 * 3600) headers
presignedPutObjectUrl "my-bucket" "my-object" (7*24*3600) headers
case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right url -> do
-- We generate a curl command to demonstrate usage of the signed
-- URL.
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd =
B.intercalate " " $
["curl "]
++ map hdrOpt headers
++ ["-T /tmp/myfile", B.concat ["'", url, "'"]]
let
hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd = B.intercalate " " $
["curl "] ++ map hdrOpt headers ++
["-T /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $
"The following curl command would use the presigned "
++ "URL to upload the file at \"/tmp/myfile\":"
putStrLn $ "The following curl command would use the presigned " ++
"URL to upload the file at \"/tmp/myfile\":"
B.putStrLn curlCmd

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,36 +16,39 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit.Combinators as CC
import Network.Minio
import Prelude
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the library constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "test"
let
bucket = "test"
object = "obj"
localFile = "/etc/lsb-release"
kb15 = 15 * 1024
-- Eg 1. Upload a stream of repeating "a" using putObject with default options.
res1 <-
runMinio minioPlayCI $
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
res1 <- runMinio minioPlayCI $
putObject bucket object (CC.repeat "a") (Just kb15) def
case res1 of
Left e -> putStrLn $ "putObject failed." ++ show e
Left e -> putStrLn $ "putObject failed." ++ show e
Right () -> putStrLn "putObject succeeded."
-- Eg 2. Upload a file using fPutObject with default options.
res2 <-
runMinio minioPlayCI $
fPutObject bucket object localFile defaultPutObjectOptions
res2 <- runMinio minioPlayCI $
fPutObject bucket object localFile def
case res2 of
Left e -> putStrLn $ "fPutObject failed." ++ show e
Left e -> putStrLn $ "fPutObject failed." ++ show e
Right () -> putStrLn "fPutObject succeeded."

View File

@ -1,17 +0,0 @@
# Examples
The examples in this directory illustrate usage of various APIs provided by this library. Each file is self-contained and can be run directly as a script.
To build the examples, the build flag `examples` needs to be turned on:
```sh
stack build --flag minio-hs:examples
```
Now, to run an example script such as [BucketExists.hs](https://github.com/minio/minio-hs/blob/master/examples/BucketExists.hs):
```sh
stack exec BucketExists
```
The CI system is configured to build these examples with every change, so they should be current.

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,18 +16,23 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "my-bucket"
let
bucket = "my-bucket"
res <- runMinio minioPlayCI $ removeBucket bucket
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,24 +16,27 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
-- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated
-- https://play.min.io:9000. The endpoint and associated
-- credentials are provided via the libary constant,
--
-- > minioPlayCI :: ConnectInfo
--
main :: IO ()
main = do
let bucket = "mybucket"
object = "myobject"
let
bucket = "mybucket"
object = "myobject"
res <-
runMinio minioPlayCI $
removeIncompleteUpload bucket object
res <- runMinio minioPlayCI $
removeIncompleteUpload bucket object
case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,19 +16,20 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
import Network.Minio
import Prelude
main :: IO ()
main = do
let bucket = "mybucket"
object = "myobject"
let
bucket = "mybucket"
object = "myobject"
res <-
runMinio minioPlayCI $
removeObject bucket object
res <- runMinio minioPlayCI $
removeObject bucket object
case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-13.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2019 MinIO, Inc.
@ -16,32 +16,35 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import qualified Conduit as C
import Control.Monad (unless)
import Network.Minio
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Conduit as C
import Control.Monad (when)
import qualified Data.ByteString.Lazy as LB
import Prelude
main :: IO ()
main = do
let bucket = "selectbucket"
object = "1.csv"
content =
"Name,Place,Temperature\n"
<> "James,San Jose,76\n"
<> "Alicia,San Leandro,88\n"
<> "Mark,San Carlos,90\n"
let bucket = "selectbucket"
object = "1.csv"
content = "Name,Place,Temperature\n"
<> "James,San Jose,76\n"
<> "Alicia,San Leandro,88\n"
<> "Mark,San Carlos,90\n"
res <- runMinio minioPlayCI $ do
exists <- bucketExists bucket
unless exists $
makeBucket bucket Nothing
res <- runMinio minioPlayCI $ do
C.liftIO $ putStrLn "Uploading csv object"
putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions
exists <- bucketExists bucket
when (not exists) $
makeBucket bucket Nothing
let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput
res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res
C.liftIO $ putStrLn "Uploading csv object"
putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions
let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput
res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -17,14 +17,14 @@
-- limitations under the License.
--
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <-
runMinio
minioPlayCI
getServerInfo
res <- runMinio def $
getServerInfo
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -17,13 +17,14 @@
-- limitations under the License.
--
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <-
runMinio minioPlayCI $
serviceSendAction ServiceActionRestart
res <- runMinio def $
serviceSendAction ServiceActionRestart
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -17,13 +17,14 @@
-- limitations under the License.
--
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <-
runMinio minioPlayCI $
serviceSendAction ServiceActionStop
res <- runMinio def $
serviceSendAction ServiceActionStop
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -17,14 +17,14 @@
-- limitations under the License.
--
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <-
runMinio
minioPlayCI
serviceStatus
res <- runMinio def $
serviceStatus
print res

View File

@ -1,5 +1,5 @@
#!/usr/bin/env stack
-- stack --resolver lts-14.11 runghc --package minio-hs
-- stack --resolver lts-11.1 runghc --package minio-hs
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
@ -16,15 +16,16 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Network.Minio.AdminAPI
import Prelude
main :: IO ()
main = do
res <- runMinio minioPlayCI $
res <- runMinio def $
do
let config = "{\"version\":\"25\",\"credential\":{\"accessKey\":\"minio\",\"secretKey\":\"minio123\"},\"region\":\"\",\"browser\":\"on\",\"worm\":\"off\",\"domain\":\"\",\"storageclass\":{\"standard\":\"\",\"rrs\":\"\"},\"cache\":{\"drives\":[],\"expiry\":90,\"exclude\":[]},\"notify\":{\"amqp\":{\"2\":{\"enable\":false,\"url\":\"amqp://guest:guest@localhost:5672/\",\"exchange\":\"minio\",\"routingKey\":\"minio\",\"exchangeType\":\"direct\",\"deliveryMode\":0,\"mandatory\":false,\"immediate\":false,\"durable\":false,\"internal\":false,\"noWait\":false,\"autoDeleted\":false}},\"elasticsearch\":{\"1\":{\"enable\":false,\"format\":\"namespace\",\"url\":\"http://localhost:9200\",\"index\":\"minio_events\"}},\"kafka\":{\"1\":{\"enable\":false,\"brokers\":null,\"topic\":\"\"}},\"mqtt\":{\"1\":{\"enable\":false,\"broker\":\"\",\"topic\":\"\",\"qos\":0,\"clientId\":\"\",\"username\":\"\",\"password\":\"\",\"reconnectInterval\":0,\"keepAliveInterval\":0}},\"mysql\":{\"1\":{\"enable\":false,\"format\":\"namespace\",\"dsnString\":\"\",\"table\":\"\",\"host\":\"\",\"port\":\"\",\"user\":\"\",\"password\":\"\",\"database\":\"\"}},\"nats\":{\"1\":{\"enable\":false,\"address\":\"\",\"subject\":\"\",\"username\":\"\",\"password\":\"\",\"token\":\"\",\"secure\":false,\"pingInterval\":0,\"streaming\":{\"enable\":false,\"clusterID\":\"\",\"clientID\":\"\",\"async\":false,\"maxPubAcksInflight\":0}}},\"postgresql\":{\"1\":{\"enable\":false,\"format\":\"namespace\",\"connectionString\":\"\",\"table\":\"\",\"host\":\"\",\"port\":\"\",\"user\":\"\",\"password\":\"\",\"database\":\"\"}},\"redis\":{\"test1\":{\"enable\":true,\"format\":\"namespace\",\"address\":\"127.0.0.1:6379\",\"password\":\"\",\"key\":\"bucketevents_ns\"},\"test2\":{\"enable\":true,\"format\":\"access\",\"address\":\"127.0.0.1:6379\",\"password\":\"\",\"key\":\"bucketevents_log\"}},\"webhook\":{\"1\":{\"enable\":true,\"endpoint\":\"http://localhost:3000\"},\"2\":{\"enable\":true,\"endpoint\":\"http://localhost:3001\"}}}}"
setConfig config

View File

@ -1,6 +1,5 @@
cabal-version: 2.4
name: minio-hs
version: 1.7.0
version: 1.3.0
synopsis: A MinIO Haskell Library for Amazon S3 compatible cloud
storage.
description: The MinIO Haskell client library provides simple APIs to
@ -14,70 +13,22 @@ maintainer: dev@min.io
category: Network, AWS, Object Storage
build-type: Simple
stability: Experimental
extra-doc-files:
extra-source-files:
CHANGELOG.md
CONTRIBUTING.md
docs/API.md
README.md
extra-source-files:
examples/*.hs
README.md
stack.yaml
tested-with: GHC == 8.10.7
, GHC == 9.0.2
, GHC == 9.2.8
, GHC == 9.4.8
, GHC == 9.6.5
, GHC == 9.8.2
source-repository head
type: git
location: https://github.com/minio/minio-hs.git
cabal-version: >=1.10
Flag dev
Description: Build package in development mode
Default: False
Manual: True
common base-settings
library
hs-source-dirs: src
ghc-options: -Wall
-Wcompat
-Widentities
-Wincomplete-uni-patterns
-Wincomplete-record-updates
-haddock
if impl(ghc >= 8.0)
ghc-options: -Wredundant-constraints
if impl(ghc >= 8.2)
ghc-options: -fhide-source-paths
if impl(ghc >= 8.4)
ghc-options: -Wpartial-fields
-- -Wmissing-export-lists
if impl(ghc >= 8.8)
ghc-options: -Wmissing-deriving-strategies
-Werror=missing-deriving-strategies
-- if impl(ghc >= 8.10)
-- ghc-options: -Wunused-packages -- disabled due to bug related to mixin config
if impl(ghc >= 9.0)
ghc-options: -Winvalid-haddock
if impl(ghc >= 9.2)
ghc-options: -Wredundant-bang-patterns
if flag(dev)
ghc-options: -Werror
default-language: Haskell2010
default-extensions: BangPatterns
, DerivingStrategies
, FlexibleContexts
, FlexibleInstances
, LambdaCase
, MultiParamTypeClasses
, MultiWayIf
, OverloadedStrings
, RankNTypes
, ScopedTypeVariables
, TupleSections
exposed-modules: Network.Minio
, Network.Minio.AdminAPI
, Network.Minio.S3API
other-modules: Lib.Prelude
, Network.Minio.API
, Network.Minio.APICommon
@ -95,258 +46,223 @@ common base-settings
, Network.Minio.Utils
, Network.Minio.XmlGenerator
, Network.Minio.XmlParser
, Network.Minio.XmlCommon
, Network.Minio.JsonParser
, Network.Minio.Credentials.Types
, Network.Minio.Credentials.AssumeRole
, Network.Minio.Credentials
mixins: base hiding (Prelude)
, relude (Relude as Prelude)
, relude
build-depends: base >= 4.7 && < 5
, relude >= 0.7 && < 2
, aeson >= 1.2 && < 3
, protolude >= 0.2 && < 0.3
, aeson >= 1.2
, base64-bytestring >= 1.0
, binary >= 0.8.5.0
, bytestring >= 0.10
, case-insensitive >= 1.2
, conduit >= 1.3
, conduit-extra >= 1.3
, crypton-connection
, containers >= 0.5
, cryptonite >= 0.25
, cryptonite-conduit >= 0.2
, digest >= 0.0.1
, directory
, exceptions
, filepath >= 1.4
, http-client >= 0.5
, http-client-tls
, http-conduit >= 2.3
, http-types >= 0.12
, ini
, memory >= 0.14
, network-uri
, raw-strings-qq >= 1
, resourcet >= 1.2
, retry
, text >= 1.2
, time >= 1.9
, time-units ^>= 1.0.0
, time >= 1.8
, transformers >= 0.5
, unliftio >= 0.2 && < 0.3
, unliftio-core >= 0.2 && < 0.3
, unliftio >= 0.2
, unliftio-core >= 0.1
, unordered-containers >= 0.2
, xml-conduit >= 1.8
library
import: base-settings
hs-source-dirs: src
exposed-modules: Network.Minio
, Network.Minio.AdminAPI
, Network.Minio.S3API
default-language: Haskell2010
default-extensions: BangPatterns
, FlexibleContexts
, FlexibleInstances
, MultiParamTypeClasses
, MultiWayIf
, NoImplicitPrelude
, OverloadedStrings
, RankNTypes
, ScopedTypeVariables
, TypeFamilies
, TupleSections
Flag live-test
Description: Build the test suite that runs against a live MinIO server
Default: False
Default: True
Manual: True
test-suite minio-hs-live-server-test
import: base-settings
type: exitcode-stdio-1.0
hs-source-dirs: test, src
main-is: LiveServer.hs
ghc-options: -Wall -threaded -rtsopts -with-rtsopts=-N
other-modules: Network.Minio
, Network.Minio.S3API
, Network.Minio.AdminAPI
, Network.Minio.API.Test
, Network.Minio.JsonParser.Test
, Network.Minio.TestHelpers
, Network.Minio.Utils.Test
, Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser.Test
, Network.Minio.Credentials
build-depends: minio-hs
, raw-strings-qq
, tasty
, tasty-hunit
, tasty-quickcheck
, tasty-smallcheck
, QuickCheck
if !flag(live-test)
buildable: False
test-suite minio-hs-test
import: base-settings
type: exitcode-stdio-1.0
hs-source-dirs: test, src
main-is: Spec.hs
build-depends: minio-hs
, raw-strings-qq
, QuickCheck
, tasty
, tasty-hunit
, tasty-quickcheck
, tasty-smallcheck
ghc-options: -Wall -threaded -rtsopts -with-rtsopts=-N
default-language: Haskell2010
default-extensions: BangPatterns
, FlexibleContexts
, FlexibleInstances
, MultiParamTypeClasses
, MultiWayIf
, NoImplicitPrelude
, OverloadedStrings
, RankNTypes
, ScopedTypeVariables
, TupleSections
, TypeFamilies
other-modules: Lib.Prelude
, Network.Minio
, Network.Minio.S3API
, Network.Minio.AdminAPI
, Network.Minio.API
, Network.Minio.APICommon
, Network.Minio.CopyObject
, Network.Minio.Data
, Network.Minio.Data.ByteString
, Network.Minio.Data.Crypto
, Network.Minio.Data.Time
, Network.Minio.Errors
, Network.Minio.ListOps
, Network.Minio.PresignedOperations
, Network.Minio.PutObject
, Network.Minio.S3API
, Network.Minio.SelectAPI
, Network.Minio.Sign.V4
, Network.Minio.TestHelpers
, Network.Minio.API.Test
, Network.Minio.JsonParser.Test
, Network.Minio.Utils
, Network.Minio.Utils.Test
, Network.Minio.API.Test
, Network.Minio.XmlGenerator
, Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser
, Network.Minio.XmlParser.Test
, Network.Minio.Credentials
Flag examples
Description: Build the examples
Default: False
Manual: True
common examples-settings
ghc-options: -Wall -threaded -rtsopts -with-rtsopts=-N
default-language: Haskell2010
default-extensions: OverloadedStrings
, Network.Minio.JsonParser
, Network.Minio.JsonParser.Test
build-depends: base >= 4.7 && < 5
, minio-hs
, protolude >= 0.1.6
, aeson
, base64-bytestring
, binary
, bytestring
, case-insensitive
, conduit
, conduit-extra
, containers
, cryptonite
, cryptonite-conduit
, digest
, directory
, exceptions
, filepath
, minio-hs
, optparse-applicative
, http-client
, http-conduit
, http-types
, ini
, memory
, QuickCheck
, raw-strings-qq >= 1
, resourcet
, retry
, tasty
, tasty-hunit
, tasty-quickcheck
, tasty-smallcheck
, temporary
, text
, time
, transformers
, unliftio
, unliftio-core
, unordered-containers
hs-source-dirs: examples
if !flag(examples)
, xml-conduit
if !flag(live-test)
buildable: False
executable BucketExists
import: examples-settings
scope: private
main-is: BucketExists.hs
test-suite minio-hs-test
type: exitcode-stdio-1.0
hs-source-dirs: test, src
main-is: Spec.hs
build-depends: base >= 4.7 && < 5
, minio-hs
, protolude >= 0.1.6
, aeson
, base64-bytestring
, binary
, bytestring
, case-insensitive
, conduit
, conduit-extra
, containers
, cryptonite
, cryptonite-conduit
, filepath
, digest
, directory
, exceptions
, http-client
, http-conduit
, http-types
, ini
, memory
, QuickCheck
, raw-strings-qq >= 1
, resourcet
, retry
, tasty
, tasty-hunit
, tasty-quickcheck
, tasty-smallcheck
, temporary
, text
, time
, transformers
, unliftio
, unliftio-core
, unordered-containers
, xml-conduit
ghc-options: -Wall -threaded -rtsopts -with-rtsopts=-N
default-language: Haskell2010
default-extensions: BangPatterns
, FlexibleContexts
, FlexibleInstances
, MultiParamTypeClasses
, MultiWayIf
, NoImplicitPrelude
, OverloadedStrings
, RankNTypes
, ScopedTypeVariables
, TupleSections
, TypeFamilies
other-modules: Lib.Prelude
, Network.Minio
, Network.Minio.AdminAPI
, Network.Minio.API
, Network.Minio.APICommon
, Network.Minio.Data
, Network.Minio.Data.ByteString
, Network.Minio.Data.Crypto
, Network.Minio.Data.Time
, Network.Minio.CopyObject
, Network.Minio.Errors
, Network.Minio.ListOps
, Network.Minio.PresignedOperations
, Network.Minio.PutObject
, Network.Minio.S3API
, Network.Minio.SelectAPI
, Network.Minio.Sign.V4
, Network.Minio.TestHelpers
, Network.Minio.Utils
, Network.Minio.Utils.Test
, Network.Minio.API.Test
, Network.Minio.XmlGenerator
, Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser
, Network.Minio.XmlParser.Test
, Network.Minio.JsonParser
, Network.Minio.JsonParser.Test
executable CopyObject
import: examples-settings
scope: private
main-is: CopyObject.hs
executable FileUploader
import: examples-settings
scope: private
main-is: FileUploader.hs
executable GetConfig
import: examples-settings
scope: private
main-is: GetConfig.hs
executable GetObject
import: examples-settings
scope: private
main-is: GetObject.hs
executable HeadObject
import: examples-settings
scope: private
main-is: HeadObject.hs
executable Heal
import: examples-settings
scope: private
main-is: Heal.hs
executable ListBuckets
import: examples-settings
scope: private
main-is: ListBuckets.hs
executable ListIncompleteUploads
import: examples-settings
scope: private
main-is: ListIncompleteUploads.hs
executable ListObjects
import: examples-settings
scope: private
main-is: ListObjects.hs
executable MakeBucket
import: examples-settings
scope: private
main-is: MakeBucket.hs
executable PresignedGetObject
import: examples-settings
scope: private
main-is: PresignedGetObject.hs
executable PresignedPostPolicy
import: examples-settings
scope: private
main-is: PresignedPostPolicy.hs
executable PresignedPutObject
import: examples-settings
scope: private
main-is: PresignedPutObject.hs
executable PutObject
import: examples-settings
scope: private
main-is: PutObject.hs
executable RemoveBucket
import: examples-settings
scope: private
main-is: RemoveBucket.hs
executable RemoveIncompleteUpload
import: examples-settings
scope: private
main-is: RemoveIncompleteUpload.hs
executable RemoveObject
import: examples-settings
scope: private
main-is: RemoveObject.hs
executable SelectObject
import: examples-settings
scope: private
main-is: SelectObject.hs
executable ServerInfo
import: examples-settings
scope: private
main-is: ServerInfo.hs
executable ServiceSendRestart
import: examples-settings
scope: private
main-is: ServiceSendRestart.hs
executable ServiceSendStop
import: examples-settings
scope: private
main-is: ServiceSendStop.hs
executable ServiceStatus
import: examples-settings
scope: private
main-is: ServiceStatus.hs
executable SetConfig
import: examples-settings
scope: private
main-is: SetConfig.hs
executable AssumeRole
import: examples-settings
scope: private
main-is: AssumeRole.hs
source-repository head
type: git
location: https://github.com/minio/minio-hs

View File

@ -15,41 +15,19 @@
--
module Lib.Prelude
( module Exports,
both,
showBS,
toStrictBS,
fromStrictBS,
lastMay,
)
where
( module Exports
, both
) where
import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT)
import qualified Data.ByteString.Lazy as LB
import Data.Time as Exports
( UTCTime (..),
diffUTCTime,
)
import UnliftIO as Exports
( Handler,
catch,
catches,
throwIO,
try,
)
import Protolude as Exports hiding (catch, catches,
throwIO, try)
import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT)
import Data.Time as Exports (UTCTime (..),
diffUTCTime)
import UnliftIO as Exports (catch, catches, throwIO,
try)
-- | Apply a function on both elements of a pair
both :: (a -> b) -> (a, a) -> (b, b)
both f (a, b) = (f a, f b)
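-- For example, 'both' simply maps the function over each component of
-- the pair:
--
-- >>> both show (1 :: Int, 2 :: Int)
-- ("1","2")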
showBS :: (Show a) => a -> ByteString
showBS a = encodeUtf8 (show a :: Text)
toStrictBS :: LByteString -> ByteString
toStrictBS = LB.toStrict
fromStrictBS :: ByteString -> LByteString
fromStrictBS = LB.fromStrict
lastMay :: [a] -> Maybe a
lastMay a = last <$> nonEmpty a

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -16,237 +16,216 @@
-- |
-- Module: Network.Minio
-- Copyright: (c) 2017-2023 MinIO Dev Team
-- Copyright: (c) 2017-2019 MinIO Dev Team
-- License: Apache 2.0
-- Maintainer: MinIO Dev Team <dev@min.io>
--
-- Types and functions to conveniently access S3 compatible object
-- storage servers like MinIO.
-- Types and functions to access S3 compatible object storage servers
-- like MinIO.
module Network.Minio
( -- * Credentials
CredentialValue (..),
credentialValueText,
AccessKey (..),
SecretKey (..),
SessionToken (..),
(
-- * Credentials
Credentials (..)
, Provider
, fromAWSConfigFile
, fromAWSEnv
, fromMinioEnv
, findFirst
-- ** Credential Loaders
-- * Connecting to object storage
---------------------------------
, ConnectInfo
, setRegion
, setCreds
, setCredsFrom
, MinioConn
, mkMinioConn
-- | Run actions that retrieve 'CredentialValue's from the environment or
-- files or other custom sources.
CredentialLoader,
fromAWSConfigFile,
fromAWSEnv,
fromMinioEnv,
findFirst,
-- ** Connection helpers
------------------------
, minioPlayCI
, awsCI
, gcsCI
-- * Connecting to object storage
ConnectInfo,
setRegion,
setCreds,
setCredsFrom,
isConnectInfoSecure,
disableTLSCertValidation,
MinioConn,
mkMinioConn,
-- ** Connection helpers
-- * Minio Monad
----------------
-- | The Minio Monad provides connection-reuse, bucket-location
-- caching, resource management and simpler error handling
-- functionality. All actions on object storage are performed within
-- this Monad.
-- | These are helpers to construct 'ConnectInfo' values for common
-- cases.
minioPlayCI,
awsCI,
gcsCI,
, Minio
, runMinioWith
, runMinio
-- ** STS Credential types
STSAssumeRole (..),
STSAssumeRoleOptions (..),
defaultSTSAssumeRoleOptions,
requestSTSCredential,
setSTSCredential,
ExpiryTime (..),
STSCredentialProvider,
-- * Minio Monad
-- * Bucket Operations
----------------------
----------------
-- ** Creation, removal and querying
, Bucket
, makeBucket
, removeBucket
, bucketExists
, Region
, getLocation
-- | The Minio Monad provides connection-reuse, bucket-location
-- caching, resource management and simpler error handling
-- functionality. All actions on object storage are performed within
-- this Monad.
Minio,
runMinioWith,
runMinio,
runMinioResWith,
runMinioRes,
-- ** Listing
, BucketInfo(..)
, listBuckets
-- * Bucket Operations
-- ** Object info type represents object metadata information.
, ObjectInfo
, oiObject
, oiModTime
, oiETag
, oiSize
, oiMetadata
-- ** Creation, removal and querying
Bucket,
makeBucket,
removeBucket,
bucketExists,
Region,
getLocation,
, listObjects
, listObjectsV1
-- ** Listing buckets
BucketInfo (..),
listBuckets,
, UploadId
, UploadInfo(..)
, listIncompleteUploads
, ObjectPartInfo(..)
, listIncompleteParts
-- ** Listing objects
listObjects,
listObjectsV1,
ListItem (..),
ObjectInfo,
oiObject,
oiModTime,
oiETag,
oiSize,
oiUserMetadata,
oiMetadata,
-- ** Bucket Notifications
, Notification(..)
, defaultNotification
, NotificationConfig(..)
, Arn
, Event(..)
, Filter(..)
, defaultFilter
, FilterKey(..)
, defaultFilterKey
, FilterRules(..)
, defaultFilterRules
, FilterRule(..)
, getBucketNotification
, putBucketNotification
, removeAllBucketNotification
-- ** Listing incomplete uploads
listIncompleteUploads,
UploadId,
UploadInfo (..),
listIncompleteParts,
ObjectPartInfo (..),
-- * Object Operations
----------------------
, Object
-- ** Bucket Notifications
getBucketNotification,
putBucketNotification,
removeAllBucketNotification,
Notification (..),
defaultNotification,
NotificationConfig (..),
Arn,
Event (..),
Filter (..),
defaultFilter,
FilterKey (..),
defaultFilterKey,
FilterRules (..),
defaultFilterRules,
FilterRule (..),
-- ** File operations
, fGetObject
, fPutObject
-- * Object Operations
Object,
-- ** Conduit-based streaming operations
, putObject
, PutObjectOptions
, defaultPutObjectOptions
, pooContentType
, pooContentEncoding
, pooContentDisposition
, pooContentLanguage
, pooCacheControl
, pooStorageClass
, pooUserMetadata
, pooNumThreads
, pooSSE
-- ** File-based operations
fGetObject,
fPutObject,
, getObject
, GetObjectOptions
, defaultGetObjectOptions
, gooRange
, gooIfMatch
, gooIfNoneMatch
, gooIfModifiedSince
, gooIfUnmodifiedSince
, gooSSECKey
-- ** Conduit-based streaming operations
putObject,
PutObjectOptions,
defaultPutObjectOptions,
pooContentType,
pooContentEncoding,
pooContentDisposition,
pooContentLanguage,
pooCacheControl,
pooStorageClass,
pooUserMetadata,
pooNumThreads,
pooSSE,
getObject,
GetObjectOptions,
defaultGetObjectOptions,
gooRange,
gooIfMatch,
gooIfNoneMatch,
gooIfModifiedSince,
gooIfUnmodifiedSince,
gooSSECKey,
GetObjectResponse,
gorObjectInfo,
gorObjectStream,
-- ** Server-side copying
, copyObject
, SourceInfo
, defaultSourceInfo
, srcBucket
, srcObject
, srcRange
, srcIfMatch
, srcIfNoneMatch
, srcIfModifiedSince
, srcIfUnmodifiedSince
, DestinationInfo
, defaultDestinationInfo
, dstBucket
, dstObject
-- ** Server-side object copying
copyObject,
SourceInfo,
defaultSourceInfo,
srcBucket,
srcObject,
srcRange,
srcIfMatch,
srcIfNoneMatch,
srcIfModifiedSince,
srcIfUnmodifiedSince,
DestinationInfo,
defaultDestinationInfo,
dstBucket,
dstObject,
-- ** Querying
, statObject
-- ** Querying object info
statObject,
-- ** Object removal functions
, removeObject
, removeIncompleteUpload
-- ** Object removal operations
removeObject,
removeIncompleteUpload,
-- ** Select Object Content with SQL
, module Network.Minio.SelectAPI
-- ** Select Object Content with SQL
module Network.Minio.SelectAPI,
-- * Server-Size Encryption Helpers
-----------------------------------
, SSECKey
, mkSSECKey
, SSE(..)
-- * Server-Side Encryption Helpers
mkSSECKey,
SSECKey,
SSE (..),
-- * Presigned Operations
presignedPutObjectUrl,
presignedGetObjectUrl,
presignedHeadObjectUrl,
UrlExpiry,
-- * Presigned Operations
-------------------------
, UrlExpiry
, presignedPutObjectUrl
, presignedGetObjectUrl
, presignedHeadObjectUrl
-- ** POST (browser) upload helpers
-- ** Utilities for POST (browser) uploads
, PostPolicy
, PostPolicyError(..)
, newPostPolicy
, presignedPostPolicy
, showPostPolicy
-- | Please see
-- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-- for detailed information.
newPostPolicy,
presignedPostPolicy,
showPostPolicy,
PostPolicy,
PostPolicyError (..),
-- *** Utilities to specify Post Policy conditions
, PostPolicyCondition
, ppCondBucket
, ppCondContentLengthRange
, ppCondContentType
, ppCondKey
, ppCondKeyStartsWith
, ppCondSuccessActionStatus
-- *** Post Policy condition helpers
PostPolicyCondition,
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
-- * Error handling
-----------------------
-- | Data types representing various errors that may occur while working
-- with an object storage service.
, MinioErr(..)
, MErrV(..)
, ServiceErr(..)
-- * Error handling
-- | Data types representing various errors that may occur while
-- working with an object storage service.
MinioErr (..),
MErrV (..),
ServiceErr (..),
)
where
) where
{-
This module exports the high-level MinIO API for object storage.
-}
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC
import Network.Minio.API
import Network.Minio.CopyObject
import Network.Minio.Credentials
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.ListOps
import Network.Minio.PutObject
import Network.Minio.S3API
import Network.Minio.SelectAPI
import Lib.Prelude
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.ListOps
import Network.Minio.PutObject
import Network.Minio.S3API
import Network.Minio.SelectAPI
import Network.Minio.Utils
-- | Lists buckets.
listBuckets :: Minio [BucketInfo]
@ -258,15 +237,11 @@ listBuckets = getService
fGetObject :: Bucket -> Object -> FilePath -> GetObjectOptions -> Minio ()
fGetObject bucket object fp opts = do
src <- getObject bucket object opts
C.connect (gorObjectStream src) $ CB.sinkFileCautious fp
C.connect src $ CB.sinkFileCautious fp
-- | Upload the given file to the given object.
fPutObject ::
Bucket ->
Object ->
FilePath ->
PutObjectOptions ->
Minio ()
fPutObject :: Bucket -> Object -> FilePath
-> PutObjectOptions -> Minio ()
fPutObject bucket object f opts =
void $ putObjectInternal bucket object opts $ ODFile f Nothing
@ -274,13 +249,8 @@ fPutObject bucket object f opts =
-- known; this helps the library select optimal part sizes to perform
-- a multipart upload. If not specified, it is assumed that the object
-- can be potentially 5TiB and selects multipart sizes appropriately.
putObject ::
Bucket ->
Object ->
C.ConduitM () ByteString Minio () ->
Maybe Int64 ->
PutObjectOptions ->
Minio ()
putObject :: Bucket -> Object -> C.ConduitM () ByteString Minio ()
-> Maybe Int64 -> PutObjectOptions -> Minio ()
putObject bucket object src sizeMay opts =
void $ putObjectInternal bucket object opts $ ODStream src sizeMay
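-- For illustration, an upload of a stream whose size is known up front;
-- this assumes a 'bucket', an 'object' and a conduit source 'src' are in
-- scope, and passing the size lets the library pick multipart part sizes:
--
-- > putObject bucket object src (Just (100 * 1024 * 1024)) defaultPutObjectOptions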
@ -290,25 +260,18 @@ putObject bucket object src sizeMay opts =
-- copy operation if the new object is to be greater than 5GiB in
-- size.
copyObject :: DestinationInfo -> SourceInfo -> Minio ()
copyObject dstInfo srcInfo =
void $
copyObjectInternal
(dstBucket dstInfo)
(dstObject dstInfo)
srcInfo
copyObject dstInfo srcInfo = void $ copyObjectInternal (dstBucket dstInfo)
(dstObject dstInfo) srcInfo
-- | Remove an object from the object store.
removeObject :: Bucket -> Object -> Minio ()
removeObject = deleteObject
-- | Get an object from the object store.
getObject ::
Bucket ->
Object ->
GetObjectOptions ->
Minio GetObjectResponse
getObject bucket object opts =
getObject' bucket object [] $ gooToHeaders opts
-- | Get an object from the object store as a resumable source (conduit).
getObject :: Bucket -> Object -> GetObjectOptions
-> Minio (C.ConduitM () ByteString Minio ())
getObject bucket object opts = snd <$> getObject' bucket object []
(gooToHeaders opts)
-- | Get an object's metadata from the object store. It accepts the
-- same options as GetObject.
@ -338,8 +301,6 @@ bucketExists = headBucket
-- | Removes an ongoing multipart upload of an object.
removeIncompleteUpload :: Bucket -> Object -> Minio ()
removeIncompleteUpload bucket object = do
uploads <-
C.runConduit $
listIncompleteUploads bucket (Just object) False
C..| CC.sinkList
uploads <- C.runConduit $ listIncompleteUploads bucket (Just object) False
C..| CC.sinkList
mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads)

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -15,151 +15,66 @@
--
module Network.Minio.API
( connect,
S3ReqInfo (..),
runMinio,
executeRequest,
buildRequest,
mkStreamRequest,
getLocation,
isValidBucketName,
checkBucketNameValidity,
isValidObjectName,
checkObjectNameValidity,
requestSTSCredential,
)
where
( connect
, S3ReqInfo(..)
, runMinio
, executeRequest
, mkStreamRequest
, getLocation
import Control.Retry
( fullJitterBackoff,
limitRetriesByCumulativeDelay,
retrying,
)
import qualified Data.ByteString as B
import qualified Data.Char as C
import qualified Data.Conduit as C
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time.Clock as Time
import Lib.Prelude
import Network.HTTP.Client (defaultManagerSettings)
import qualified Network.HTTP.Client as NClient
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (simpleQueryToQuery)
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Network.Minio.APICommon
import Network.Minio.Credentials
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
import Network.Minio.XmlParser
, isValidBucketName
, checkBucketNameValidity
, isValidObjectName
, checkObjectNameValidity
) where
import Control.Retry (fullJitterBackoff,
limitRetriesByCumulativeDelay,
retrying)
import qualified Data.ByteString as B
import qualified Data.Char as C
import qualified Data.Conduit as C
import qualified Data.Map as Map
import qualified Data.Text as T
import qualified Data.Time.Clock as Time
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.Minio.Utils
import Network.Minio.XmlParser
-- | Fetch bucket location (region)
getLocation :: Bucket -> Minio Region
getLocation bucket = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riBucket = Just bucket,
riQueryParams = [("location", Nothing)],
riNeedsLocation = False
}
resp <- executeRequest $ defaultS3ReqInfo {
riBucket = Just bucket
, riQueryParams = [("location", Nothing)]
, riNeedsLocation = False
}
parseLocation $ NC.responseBody resp
-- | Looks for region in RegionMap and updates it using getLocation if
-- absent.
discoverRegion :: S3ReqInfo -> Minio (Maybe Region)
discoverRegion ri = runMaybeT $ do
bucket <- MaybeT $ return $ riBucket ri
regionMay <- lift $ lookupRegionCache bucket
maybe
( do
l <- lift $ getLocation bucket
lift $ addToRegionCache bucket l
return l
)
return
regionMay
maybe (do
l <- lift $ getLocation bucket
lift $ addToRegionCache bucket l
return l
) return regionMay
-- | Returns the region to be used for the request.
getRegion :: S3ReqInfo -> Minio (Maybe Region)
getRegion ri = do
ci <- asks mcConnInfo
-- getService/makeBucket/getLocation -- don't need location
if
| not $ riNeedsLocation ri ->
return $ Just $ connectRegion ci
-- if autodiscovery of location is disabled by user
| not $ connectAutoDiscoverRegion ci ->
return $ Just $ connectRegion ci
-- discover the region for the request
| otherwise -> discoverRegion ri
getRegionHost :: Region -> Minio Text
getRegionHost r = do
ci <- asks mcConnInfo
if "amazonaws.com" `T.isSuffixOf` connectHost ci
then
maybe
(throwIO $ MErrVRegionNotSupported r)
return
(H.lookup r awsRegionMap)
else return $ connectHost ci
-- | Computes the appropriate host, path and region for the request.
--
-- For AWS, always use virtual-hosted bucket style, unless the bucket name
-- contains periods. For MinIO and other non-AWS endpoints, default to path
-- style.
getHostPathRegion :: S3ReqInfo -> Minio (Text, ByteString, Maybe Region)
getHostPathRegion ri = do
ci <- asks mcConnInfo
regionMay <- getRegion ri
case riBucket ri of
Nothing ->
-- Implies a ListBuckets request.
return (connectHost ci, "/", regionMay)
Just bucket -> do
regionHost <- case regionMay of
Nothing -> return $ connectHost ci
Just "" -> return $ connectHost ci
Just r -> getRegionHost r
let pathStyle =
( regionHost,
getS3Path (riBucket ri) (riObject ri),
regionMay
)
virtualStyle =
( bucket <> "." <> regionHost,
encodeUtf8 $ "/" <> fromMaybe "" (riObject ri),
regionMay
)
( if isAWSConnectInfo ci
then
return $
if bucketHasPeriods bucket
then pathStyle
else virtualStyle
else return pathStyle
)
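-- The style decision above, summarised as a tiny predicate. This helper is
-- purely illustrative and is not part of the library API (literal assumes
-- OverloadedStrings).
useVirtualHostedStyle :: Bool -> Bucket -> Bool
useVirtualHostedStyle isAWS bucket = isAWS && not ("." `T.isInfixOf` bucket)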
-- | requestSTSCredential requests temporary credentials using the Security Token
-- Service API. The returned credential will include a populated 'SessionToken'
-- and an 'ExpiryTime'.
requestSTSCredential :: (STSCredentialProvider p) => p -> IO (CredentialValue, ExpiryTime)
requestSTSCredential p = do
endpoint <- maybe (throwIO $ MErrValidation MErrVSTSEndpointNotFound) return $ getSTSEndpoint p
let endPt = NC.parseRequest_ $ toString endpoint
settings
| NC.secure endPt = NC.tlsManagerSettings
| otherwise = defaultManagerSettings
mgr <- NC.newManager settings
liftIO $ retrieveSTSCredentials p ("", 0, False) mgr
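-- A hedged sketch of requesting temporary credentials through the AssumeRole
-- provider defined in this changeset; the endpoint and region values are
-- placeholders.
stsSketch :: CredentialValue -> IO (CredentialValue, ExpiryTime)
stsSketch baseCreds =
  requestSTSCredential $
    STSAssumeRole
      baseCreds
      defaultSTSAssumeRoleOptions
        { saroEndpoint = Just "https://sts.amazonaws.com",
          saroLocation = Just "us-east-1"
        }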
buildRequest :: S3ReqInfo -> Minio NC.Request
buildRequest ri = do
@ -168,107 +83,78 @@ buildRequest ri = do
ci <- asks mcConnInfo
(host, path, regionMay) <- getHostPathRegion ri
-- getService/makeBucket/getLocation -- don't need
-- location
region <- if | not $ riNeedsLocation ri ->
return $ Just $ connectRegion ci
let ci' = ci {connectHost = host}
hostHeader = (hHost, getHostAddr ci')
ri' =
ri
{ riHeaders = hostHeader : riHeaders ri,
riRegion = regionMay
}
-- Does not contain body and auth info.
baseRequest =
NC.defaultRequest
{ NC.method = riMethod ri',
NC.secure = connectIsSecure ci',
NC.host = encodeUtf8 $ connectHost ci',
NC.port = connectPort ci',
NC.path = path,
NC.requestHeaders = riHeaders ri',
NC.queryString = HT.renderQuery False $ riQueryParams ri'
}
-- if autodiscovery of location is disabled by user
| not $ connectAutoDiscoverRegion ci ->
return $ Just $ connectRegion ci
-- discover the region for the request
| otherwise -> discoverRegion ri
regionHost <- case region of
Nothing -> return $ connectHost ci
Just r -> if "amazonaws.com" `T.isSuffixOf` connectHost ci
then maybe
(throwIO $ MErrVRegionNotSupported r)
return
(Map.lookup r awsRegionMap)
else return $ connectHost ci
sha256Hash <- if | connectIsSecure ci ->
-- if secure connection
return "UNSIGNED-PAYLOAD"
-- otherwise compute sha256
| otherwise -> getPayloadSHA256Hash (riPayload ri)
timeStamp <- liftIO Time.getCurrentTime
mgr <- asks mcConnManager
cv <- liftIO $ getCredential (connectCreds ci') (getEndpoint ci') mgr
let hostHeader = (hHost, getHostAddr ci)
newRi = ri { riPayloadHash = Just sha256Hash
, riHeaders = hostHeader
: sha256Header sha256Hash
: riHeaders ri
, riRegion = region
}
newCi = ci { connectHost = regionHost }
signReq = toRequest newCi newRi
sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
timeStamp (riRegion newRi) Nothing (riPayloadHash newRi)
let signHeaders = signV4 sp signReq
let sp =
SignParams
(coerce $ cvAccessKey cv)
(coerce $ cvSecretKey cv)
(coerce $ cvSessionToken cv)
ServiceS3
timeStamp
(riRegion ri')
(riPresignExpirySecs ri')
Nothing
-- Update signReq with Authorization header containing v4 signature
return signReq {
NC.requestHeaders = riHeaders newRi ++ mkHeaderFromPairs signHeaders
}
where
toRequest :: ConnectInfo -> S3ReqInfo -> NC.Request
toRequest ci s3Req = NC.defaultRequest {
NC.method = riMethod s3Req
, NC.secure = connectIsSecure ci
, NC.host = encodeUtf8 $ connectHost ci
, NC.port = connectPort ci
, NC.path = getS3Path (riBucket s3Req) (riObject s3Req)
, NC.requestHeaders = riHeaders s3Req
, NC.queryString = HT.renderQuery False $ riQueryParams s3Req
, NC.requestBody = getRequestBody (riPayload s3Req)
}
-- Cases to handle:
--
-- 0. Handle presign URL case.
--
-- 1. Connection is secure: use unsigned payload
--
-- 2. Insecure connection, streaming signature is enabled via use of
-- conduit payload: use streaming signature for request.
--
-- 3. Insecure connection, non-conduit payload: compute payload
-- sha256hash, buffer request in memory and perform request.
if
| isJust (riPresignExpirySecs ri') ->
-- case 0 from above.
do
let signPairs = signV4QueryParams sp baseRequest
qpToAdd = simpleQueryToQuery signPairs
existingQueryParams = HT.parseQuery (NC.queryString baseRequest)
updatedQueryParams = existingQueryParams ++ qpToAdd
return $ NClient.setQueryString updatedQueryParams baseRequest
| isStreamingPayload (riPayload ri') && not (connectIsSecure ci') ->
-- case 2 from above.
do
(pLen, pSrc) <- case riPayload ri of
PayloadC l src -> return (l, src)
_ -> throwIO MErrVUnexpectedPayload
let reqFn = signV4Stream pLen sp baseRequest
return $ reqFn pSrc
| otherwise ->
do
sp' <-
( if connectIsSecure ci'
then -- case 1 described above.
return sp
else
( -- case 3 described above.
do
pHash <- getPayloadSHA256Hash $ riPayload ri'
return $ sp {spPayloadHash = Just pHash}
)
)
let signHeaders = signV4 sp' baseRequest
return $
baseRequest
{ NC.requestHeaders =
NC.requestHeaders baseRequest ++ signHeaders,
NC.requestBody = getRequestBody (riPayload ri')
}
retryAPIRequest :: Minio a -> Minio a
retryAPIRequest apiCall = do
resE <-
retrying retryPolicy (const shouldRetry) $
const $
try apiCall
resE <- retrying retryPolicy (const shouldRetry) $
const $ try apiCall
either throwIO return resE
where
-- Retry using the full-jitter backoff method for up to 10 mins
-- total
retryPolicy =
limitRetriesByCumulativeDelay tenMins $
fullJitterBackoff oneMilliSecond
retryPolicy = limitRetriesByCumulativeDelay tenMins
$ fullJitterBackoff oneMilliSecond
oneMilliSecond = 1000 -- in microseconds
tenMins = 10 * 60 * 1000000 -- in microseconds
-- retry on connection related failure
@ -280,23 +166,23 @@ retryAPIRequest apiCall = do
-- API request failed with a retryable exception
Left httpExn@(NC.HttpExceptionRequest _ exn) ->
case (exn :: NC.HttpExceptionContent) of
NC.ResponseTimeout -> return True
NC.ConnectionTimeout -> return True
NC.ResponseTimeout -> return True
NC.ConnectionTimeout -> return True
NC.ConnectionFailure _ -> return True
-- We received an unexpected exception
_ -> throwIO httpExn
_ -> throwIO httpExn
-- We received an unexpected exception
Left someOtherExn -> throwIO someOtherExn
executeRequest :: S3ReqInfo -> Minio (Response LByteString)
executeRequest ri = do
req <- buildRequest ri
mgr <- asks mcConnManager
retryAPIRequest $ httpLbs req mgr
mkStreamRequest ::
S3ReqInfo ->
Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest :: S3ReqInfo
-> Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest ri = do
req <- buildRequest ri
mgr <- asks mcConnManager
@ -305,50 +191,41 @@ mkStreamRequest ri = do
-- Bucket name validity check according to AWS rules.
isValidBucketName :: Bucket -> Bool
isValidBucketName bucket =
not
( or
[ len < 3 || len > 63,
any labelCheck labels,
any labelCharsCheck labels,
isIPCheck
]
)
not (or [ len < 3 || len > 63
, or (map labelCheck labels)
, or (map labelCharsCheck labels)
, isIPCheck
])
where
len = T.length bucket
labels = T.splitOn "." bucket
-- does label `l` fail basic checks of length and start/end?
labelCheck l = T.length l == 0 || T.head l == '-' || T.last l == '-'
-- does label `l` have non-allowed characters?
labelCharsCheck l =
isJust $
T.find
( \x ->
not
( C.isAsciiLower x
|| x == '-'
|| C.isDigit x
)
)
l
labelCharsCheck l = isJust $ T.find (\x -> not (C.isAsciiLower x ||
x == '-' ||
C.isDigit x)) l
-- does label `l` have non-digit characters?
labelNonDigits l = isJust $ T.find (not . C.isDigit) l
labelAsNums = map (not . labelNonDigits) labels
-- check if bucket name looks like an IP
isIPCheck = and labelAsNums && length labelAsNums == 4
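-- A few illustrative checks of the rules encoded above (expected results in
-- comments; string literals assume OverloadedStrings).
bucketNameChecks :: [Bool]
bucketNameChecks =
  [ isValidBucketName "my-bucket", -- True
    isValidBucketName "ab", -- False: shorter than 3 characters
    isValidBucketName "My_Bucket", -- False: disallowed characters
    isValidBucketName "192.168.1.1", -- False: looks like an IP address
    isValidBucketName "-leading-dash" -- False: label starts with '-'
  ]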
-- Throws an exception iff the bucket name is invalid according to AWS rules.
checkBucketNameValidity :: (MonadIO m) => Bucket -> m ()
checkBucketNameValidity :: MonadIO m => Bucket -> m ()
checkBucketNameValidity bucket =
unless (isValidBucketName bucket) $
throwIO $
MErrVInvalidBucketName bucket
when (not $ isValidBucketName bucket) $
throwIO $ MErrVInvalidBucketName bucket
isValidObjectName :: Object -> Bool
isValidObjectName object =
T.length object > 0 && B.length (encodeUtf8 object) <= 1024
checkObjectNameValidity :: (MonadIO m) => Object -> m ()
checkObjectNameValidity :: MonadIO m => Object -> m ()
checkObjectNameValidity object =
unless (isValidObjectName object) $
throwIO $
MErrVInvalidObjectName object
when (not $ isValidObjectName object) $
throwIO $ MErrVInvalidObjectName object

View File

@ -16,65 +16,29 @@
module Network.Minio.APICommon where
import qualified Conduit as C
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LB
import Data.Conduit.Binary (sourceHandleRange)
import qualified Data.Text as T
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.Minio.Data
import Network.Minio.Data.Crypto
import Network.Minio.Errors
import Data.Conduit.Binary (sourceHandleRange)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Crypto
sha256Header :: ByteString -> HT.Header
sha256Header = ("x-amz-content-sha256",)
sha256Header = ("x-amz-content-sha256", )
-- | This function throws an error if the payload is a conduit (as it
-- will not be possible to re-read the conduit after it is consumed).
getPayloadSHA256Hash :: Payload -> Minio ByteString
getPayloadSHA256Hash :: (MonadIO m) => Payload -> m ByteString
getPayloadSHA256Hash (PayloadBS bs) = return $ hashSHA256 bs
getPayloadSHA256Hash (PayloadH h off size) =
hashSHA256FromSource $
sourceHandleRange
h
(return . fromIntegral $ off)
(return . fromIntegral $ size)
getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload
getPayloadSHA256Hash (PayloadH h off size) = hashSHA256FromSource $
sourceHandleRange h
(return . fromIntegral $ off)
(return . fromIntegral $ size)
getRequestBody :: Payload -> NC.RequestBody
getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs
getRequestBody (PayloadH h off size) =
NC.requestBodySource size $
sourceHandleRange
h
NC.requestBodySource (fromIntegral size) $
sourceHandleRange h
(return . fromIntegral $ off)
(return . fromIntegral $ size)
getRequestBody (PayloadC n src) = NC.requestBodySource n src
mkStreamingPayload :: Payload -> Payload
mkStreamingPayload payload =
case payload of
PayloadBS bs ->
PayloadC
(fromIntegral $ BS.length bs)
(C.sourceLazy $ LB.fromStrict bs)
PayloadH h off len ->
PayloadC len $
sourceHandleRange
h
(return . fromIntegral $ off)
(return . fromIntegral $ len)
_ -> payload
isStreamingPayload :: Payload -> Bool
isStreamingPayload (PayloadC _ _) = True
isStreamingPayload _ = False
-- | Checks if the connect info is for Amazon S3.
isAWSConnectInfo :: ConnectInfo -> Bool
isAWSConnectInfo ci = ".amazonaws.com" `T.isSuffixOf` connectHost ci
bucketHasPeriods :: Bucket -> Bool
bucketHasPeriods b = isJust $ T.find (== '.') b

File diff suppressed because it is too large

View File

@ -16,19 +16,19 @@
module Network.Minio.CopyObject where
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | Copy an object using single or multipart copy strategy.
copyObjectInternal ::
Bucket ->
Object ->
SourceInfo ->
Minio ETag
copyObjectInternal :: Bucket -> Object -> SourceInfo
-> Minio ETag
copyObjectInternal b' o srcInfo = do
let sBucket = srcBucket srcInfo
sObject = srcObject srcInfo
@ -43,33 +43,27 @@ copyObjectInternal b' o srcInfo = do
startOffset = fst range
endOffset = snd range
when
( isJust rangeMay
&& ( (startOffset < 0)
|| (endOffset < startOffset)
|| (endOffset >= srcSize)
)
)
$ throwIO
$ MErrVInvalidSrcObjByteRange range
when (isJust rangeMay &&
or [startOffset < 0, endOffset < startOffset,
endOffset >= fromIntegral srcSize]) $
throwIO $ MErrVInvalidSrcObjByteRange range
-- 1. If sz > 64MiB (minPartSize) use multipart copy, OR
-- 2. If startOffset /= 0 use multipart copy
let destSize =
(\(a, b) -> b - a + 1) $
maybe (0, srcSize - 1) identity rangeMay
let destSize = (\(a, b) -> b - a + 1 ) $
maybe (0, srcSize - 1) identity rangeMay
if destSize > minPartSize || (endOffset - startOffset + 1 /= srcSize)
then multiPartCopyObject b' o srcInfo srcSize
else fst <$> copyObjectSingle b' o srcInfo {srcRange = Nothing} []
else fst <$> copyObjectSingle b' o srcInfo{srcRange = Nothing} []
-- | Given the input byte range of the source object, compute the
-- splits for a multipart copy object procedure. The minimum part size
-- used is minPartSize.
selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
selectCopyRanges (st, end) =
zip pns $
zipWith (\x y -> (st + x, st + x + y - 1)) startOffsets partSizes
selectCopyRanges (st, end) = zip pns $
map (\(x, y) -> (st + x, st + x + y - 1)) $ zip startOffsets partSizes
where
size = end - st + 1
(pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
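-- For example, a 150 MiB range starting at offset zero is split into parts of
-- at least minPartSize; the exact boundaries come from selectPartSizes.
exampleCopyRanges :: [(PartNumber, (Int64, Int64))]
exampleCopyRanges = selectCopyRanges (0, 150 * 1024 * 1024 - 1)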
@ -77,30 +71,22 @@ selectCopyRanges (st, end) =
-- | Perform a multipart copy object action. Since we cannot verify
-- existing parts based on the source object, there is no resuming
-- copy action support.
multiPartCopyObject ::
Bucket ->
Object ->
SourceInfo ->
Int64 ->
Minio ETag
multiPartCopyObject :: Bucket -> Object -> SourceInfo -> Int64
-> Minio ETag
multiPartCopyObject b o cps srcSize = do
uid <- newMultipartUpload b o []
let byteRange = maybe (0, srcSize - 1) identity $ srcRange cps
let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps
partRanges = selectCopyRanges byteRange
partSources =
map
(\(x, (start, end)) -> (x, cps {srcRange = Just (start, end)}))
partRanges
dstInfo = defaultDestinationInfo {dstBucket = b, dstObject = o}
partSources = map (\(x, (start, end)) -> (x, cps {srcRange = Just (start, end) }))
partRanges
dstInfo = defaultDestinationInfo { dstBucket = b, dstObject = o}
copiedParts <-
limitedMapConcurrently
10
( \(pn, cps') -> do
(etag, _) <- copyObjectPart dstInfo cps' uid pn []
return (pn, etag)
)
partSources
copiedParts <- limitedMapConcurrently 10
(\(pn, cps') -> do
(etag, _) <- copyObjectPart dstInfo cps' uid pn []
return (pn, etag)
)
partSources
completeMultipartUpload b o uid copiedParts

View File

@ -1,77 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.Credentials
( CredentialValue (..),
credentialValueText,
STSCredentialProvider (..),
AccessKey (..),
SecretKey (..),
SessionToken (..),
ExpiryTime (..),
STSCredentialStore,
initSTSCredential,
getSTSCredential,
Creds (..),
getCredential,
Endpoint,
-- * STS Assume Role
defaultSTSAssumeRoleOptions,
STSAssumeRole (..),
STSAssumeRoleOptions (..),
)
where
import Data.Time (diffUTCTime, getCurrentTime)
import qualified Network.HTTP.Client as NC
import Network.Minio.Credentials.AssumeRole
import Network.Minio.Credentials.Types
import qualified UnliftIO.MVar as M
data STSCredentialStore = STSCredentialStore
{ cachedCredentials :: M.MVar (CredentialValue, ExpiryTime),
refreshAction :: Endpoint -> NC.Manager -> IO (CredentialValue, ExpiryTime)
}
initSTSCredential :: (STSCredentialProvider p) => p -> IO STSCredentialStore
initSTSCredential p = do
let action = retrieveSTSCredentials p
-- start with a dummy credential, so that a refresh happens on the first request.
now <- getCurrentTime
mvar <- M.newMVar (CredentialValue mempty mempty mempty, coerce now)
return $
STSCredentialStore
{ cachedCredentials = mvar,
refreshAction = action
}
getSTSCredential :: STSCredentialStore -> Endpoint -> NC.Manager -> IO (CredentialValue, Bool)
getSTSCredential store ep mgr = M.modifyMVar (cachedCredentials store) $ \cc@(v, expiry) -> do
now <- getCurrentTime
if diffUTCTime now (coerce expiry) > 0
then do
res <- refreshAction store ep mgr
return (res, (fst res, True))
else return (cc, (v, False))
data Creds
= CredsStatic CredentialValue
| CredsSTS STSCredentialStore
getCredential :: Creds -> Endpoint -> NC.Manager -> IO CredentialValue
getCredential (CredsStatic v) _ _ = return v
getCredential (CredsSTS s) ep mgr = fst <$> getSTSCredential s ep mgr

View File

@ -1,266 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.Credentials.AssumeRole where
import qualified Data.ByteArray as BA
import qualified Data.ByteString.Lazy as LB
import qualified Data.Text as T
import qualified Data.Time as Time
import Data.Time.Units (Second)
import Lib.Prelude (UTCTime, throwIO)
import Network.HTTP.Client (RequestBody (RequestBodyBS))
import qualified Network.HTTP.Client as NC
import Network.HTTP.Types (hContentType, methodPost, renderSimpleQuery)
import Network.HTTP.Types.Header (hHost)
import Network.Minio.Credentials.Types
import Network.Minio.Data.Crypto (hashSHA256)
import Network.Minio.Errors (MErrV (..))
import Network.Minio.Sign.V4
import Network.Minio.Utils (getHostHeader, httpLbs)
import Network.Minio.XmlCommon
import Text.XML.Cursor hiding (bool)
stsVersion :: ByteString
stsVersion = "2011-06-15"
defaultDurationSeconds :: Second
defaultDurationSeconds = 3600
-- | Assume Role API argument.
--
-- @since 1.7.0
data STSAssumeRole = STSAssumeRole
{ -- | Credentials to use in the AssumeRole STS API.
sarCredentials :: CredentialValue,
-- | Optional settings.
sarOptions :: STSAssumeRoleOptions
}
-- | Options for STS Assume Role API.
data STSAssumeRoleOptions = STSAssumeRoleOptions
{ -- | STS endpoint to which the request will be made. For MinIO, this is the
-- same as the server endpoint. For AWS, this has to be the Security Token
-- Service endpoint. If using with 'setSTSCredential', this option can be
-- left as 'Nothing' and the endpoint in 'ConnectInfo' will be used.
saroEndpoint :: Maybe Text,
-- | Desired validity for the generated credentials.
saroDurationSeconds :: Maybe Second,
-- | IAM policy to apply for the generated credentials.
saroPolicyJSON :: Maybe ByteString,
-- | Location is usually required for AWS.
saroLocation :: Maybe Text,
saroRoleARN :: Maybe Text,
saroRoleSessionName :: Maybe Text
}
-- | Default STS Assume Role options - all options are Nothing, except for
-- duration which is set to 1 hour.
defaultSTSAssumeRoleOptions :: STSAssumeRoleOptions
defaultSTSAssumeRoleOptions =
STSAssumeRoleOptions
{ saroEndpoint = Nothing,
saroDurationSeconds = Just 3600,
saroPolicyJSON = Nothing,
saroLocation = Nothing,
saroRoleARN = Nothing,
saroRoleSessionName = Nothing
}
data AssumeRoleCredentials = AssumeRoleCredentials
{ arcCredentials :: CredentialValue,
arcExpiration :: UTCTime
}
deriving stock (Show, Eq)
data AssumeRoleResult = AssumeRoleResult
{ arrSourceIdentity :: Text,
arrAssumedRoleArn :: Text,
arrAssumedRoleId :: Text,
arrRoleCredentials :: AssumeRoleCredentials
}
deriving stock (Show, Eq)
-- | parseSTSAssumeRoleResult parses an XML response of the following form:
--
-- <AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
-- <AssumeRoleResult>
-- <SourceIdentity>Alice</SourceIdentity>
-- <AssumedRoleUser>
-- <Arn>arn:aws:sts::123456789012:assumed-role/demo/TestAR</Arn>
-- <AssumedRoleId>ARO123EXAMPLE123:TestAR</AssumedRoleId>
-- </AssumedRoleUser>
-- <Credentials>
-- <AccessKeyId>ASIAIOSFODNN7EXAMPLE</AccessKeyId>
-- <SecretAccessKey>wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY</SecretAccessKey>
-- <SessionToken>
-- AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQW
-- LWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGd
-- QrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU
-- 9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz
-- +scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==
-- </SessionToken>
-- <Expiration>2019-11-09T13:34:41Z</Expiration>
-- </Credentials>
-- <PackedPolicySize>6</PackedPolicySize>
-- </AssumeRoleResult>
-- <ResponseMetadata>
-- <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>
-- </ResponseMetadata>
-- </AssumeRoleResponse>
parseSTSAssumeRoleResult :: (MonadIO m) => ByteString -> Text -> m AssumeRoleResult
parseSTSAssumeRoleResult xmldata namespace = do
r <- parseRoot $ LB.fromStrict xmldata
let s3Elem' = s3Elem namespace
sourceIdentity =
T.concat $
r
$/ s3Elem' "AssumeRoleResult"
&/ s3Elem' "SourceIdentity"
&/ content
roleArn =
T.concat $
r
$/ s3Elem' "AssumeRoleResult"
&/ s3Elem' "AssumedRoleUser"
&/ s3Elem' "Arn"
&/ content
roleId =
T.concat $
r
$/ s3Elem' "AssumeRoleResult"
&/ s3Elem' "AssumedRoleUser"
&/ s3Elem' "AssumedRoleId"
&/ content
convSB :: Text -> BA.ScrubbedBytes
convSB = BA.convert . (encodeUtf8 :: Text -> ByteString)
credsInfo = do
cr <-
maybe (Left $ MErrVXmlParse "No Credentials Element found") Right $
listToMaybe $
r $/ s3Elem' "AssumeRoleResult" &/ s3Elem' "Credentials"
let cur = fromNode $ node cr
return
( CredentialValue
{ cvAccessKey =
coerce $
T.concat $
cur $/ s3Elem' "AccessKeyId" &/ content,
cvSecretKey =
coerce $
convSB $
T.concat $
cur
$/ s3Elem' "SecretAccessKey"
&/ content,
cvSessionToken =
Just $
coerce $
convSB $
T.concat $
cur
$/ s3Elem' "SessionToken"
&/ content
},
T.concat $ cur $/ s3Elem' "Expiration" &/ content
)
creds <- either throwIO pure credsInfo
expiry <- parseS3XMLTime $ snd creds
let roleCredentials =
AssumeRoleCredentials
{ arcCredentials = fst creds,
arcExpiration = expiry
}
return
AssumeRoleResult
{ arrSourceIdentity = sourceIdentity,
arrAssumedRoleArn = roleArn,
arrAssumedRoleId = roleId,
arrRoleCredentials = roleCredentials
}
instance STSCredentialProvider STSAssumeRole where
getSTSEndpoint = saroEndpoint . sarOptions
retrieveSTSCredentials sar (host', port', isSecure') mgr = do
-- Assemble STS request
let requiredParams =
[ ("Action", "AssumeRole"),
("Version", stsVersion)
]
opts = sarOptions sar
durSecs :: Int =
fromIntegral $
fromMaybe defaultDurationSeconds $
saroDurationSeconds opts
otherParams =
[ ("RoleArn",) . encodeUtf8 <$> saroRoleARN opts,
("RoleSessionName",) . encodeUtf8 <$> saroRoleSessionName opts,
Just ("DurationSeconds", show durSecs),
("Policy",) <$> saroPolicyJSON opts
]
parameters = requiredParams ++ catMaybes otherParams
(host, port, isSecure) =
case getSTSEndpoint sar of
Just ep ->
let endPt = NC.parseRequest_ $ toString ep
in (NC.host endPt, NC.port endPt, NC.secure endPt)
Nothing -> (host', port', isSecure')
reqBody = renderSimpleQuery False parameters
req =
NC.defaultRequest
{ NC.host = host,
NC.port = port,
NC.secure = isSecure,
NC.method = methodPost,
NC.requestHeaders =
[ (hHost, getHostHeader (host, port)),
(hContentType, "application/x-www-form-urlencoded")
],
NC.requestBody = RequestBodyBS reqBody
}
-- Sign the STS request.
timeStamp <- liftIO Time.getCurrentTime
let sp =
SignParams
{ spAccessKey = coerce $ cvAccessKey $ sarCredentials sar,
spSecretKey = coerce $ cvSecretKey $ sarCredentials sar,
spSessionToken = coerce $ cvSessionToken $ sarCredentials sar,
spService = ServiceSTS,
spTimeStamp = timeStamp,
spRegion = saroLocation opts,
spExpirySecs = Nothing,
spPayloadHash = Just $ hashSHA256 reqBody
}
signHeaders = signV4 sp req
signedReq =
req
{ NC.requestHeaders = NC.requestHeaders req ++ signHeaders
}
-- Make the STS request
resp <- httpLbs signedReq mgr
result <-
parseSTSAssumeRoleResult
(toStrict $ NC.responseBody resp)
"https://sts.amazonaws.com/doc/2011-06-15/"
return
( arcCredentials $ arrRoleCredentials result,
coerce $ arcExpiration $ arrRoleCredentials result
)

View File

@ -1,90 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE StrictData #-}
module Network.Minio.Credentials.Types where
import qualified Data.ByteArray as BA
import Lib.Prelude (UTCTime)
import qualified Network.HTTP.Client as NC
-- | Access Key type.
newtype AccessKey = AccessKey {unAccessKey :: Text}
deriving stock (Show)
deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Secret Key type - has a show instance that does not print the value.
newtype SecretKey = SecretKey {unSecretKey :: BA.ScrubbedBytes}
deriving stock (Show)
deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Session Token type - has a show instance that does not print the value.
newtype SessionToken = SessionToken {unSessionToken :: BA.ScrubbedBytes}
deriving stock (Show)
deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Object storage credential data type. It has support for the optional
-- [SessionToken](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
-- for using temporary credentials requested via STS.
--
-- The show instance for this type does not print the value of secrets for
-- security.
--
-- @since 1.7.0
data CredentialValue = CredentialValue
{ cvAccessKey :: AccessKey,
cvSecretKey :: SecretKey,
cvSessionToken :: Maybe SessionToken
}
deriving stock (Eq, Show)
scrubbedToText :: BA.ScrubbedBytes -> Text
scrubbedToText =
let b2t :: ByteString -> Text
b2t = decodeUtf8
s2b :: BA.ScrubbedBytes -> ByteString
s2b = BA.convert
in b2t . s2b
-- | Convert a 'CredentialValue' to a text tuple. Use this to output the
-- credential to files or other programs.
credentialValueText :: CredentialValue -> (Text, Text, Maybe Text)
credentialValueText cv =
( coerce $ cvAccessKey cv,
(scrubbedToText . coerce) $ cvSecretKey cv,
scrubbedToText . coerce <$> cvSessionToken cv
)
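-- A small sketch of building a static credential and rendering it for export.
-- The key values are placeholders and the literals assume OverloadedStrings.
exampleStaticCreds :: (Text, Text, Maybe Text)
exampleStaticCreds =
  credentialValueText
    CredentialValue
      { cvAccessKey = "minio",
        cvSecretKey = "minio123",
        cvSessionToken = Nothing
      }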
-- | Endpoint represented by host, port and TLS enabled flag.
type Endpoint = (ByteString, Int, Bool)
-- | Typeclass for STS credential providers.
--
-- @since 1.7.0
class STSCredentialProvider p where
retrieveSTSCredentials ::
p ->
-- | STS Endpoint (host, port, isSecure)
Endpoint ->
NC.Manager ->
IO (CredentialValue, ExpiryTime)
getSTSEndpoint :: p -> Maybe Text
-- | 'ExpiryTime' represents a time at which a credential expires.
newtype ExpiryTime = ExpiryTime {unExpiryTime :: UTCTime}
deriving stock (Show)
deriving newtype (Eq)

File diff suppressed because it is too large

View File

@ -13,21 +13,23 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FlexibleInstances #-}
module Network.Minio.Data.ByteString
( stripBS,
UriEncodable (..),
)
where
(
stripBS
, UriEncodable(..)
) where
import qualified Data.ByteString as B
import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Char8 as BC8
import qualified Data.ByteString.Lazy as LB
import Data.Char (isAsciiLower, isAsciiUpper, isDigit, isSpace, toUpper)
import Data.Char (isSpace, toUpper, isAsciiUpper, isAsciiLower, isDigit)
import qualified Data.Text as T
import Numeric (showHex)
import Numeric (showHex)
import Lib.Prelude
stripBS :: ByteString -> ByteString
stripBS = BC8.dropWhile isSpace . fst . BC8.spanEnd isSpace
@ -37,10 +39,8 @@ class UriEncodable s where
instance UriEncodable [Char] where
uriEncode encodeSlash payload =
LB.toStrict $
BB.toLazyByteString $
mconcat $
map (`uriEncodeChar` encodeSlash) payload
LB.toStrict $ BB.toLazyByteString $ mconcat $
map (`uriEncodeChar` encodeSlash) payload
instance UriEncodable ByteString where
-- assumes that uriEncode is passed ASCII encoded strings.
@ -59,17 +59,16 @@ uriEncodeChar '/' True = BB.byteString "%2F"
uriEncodeChar '/' False = BB.char7 '/'
uriEncodeChar ch _
| isAsciiUpper ch
|| isAsciiLower ch
|| isDigit ch
|| (ch == '_')
|| (ch == '-')
|| (ch == '.')
|| (ch == '~') =
BB.char7 ch
|| isAsciiLower ch
|| isDigit ch
|| (ch == '_')
|| (ch == '-')
|| (ch == '.')
|| (ch == '~') = BB.char7 ch
| otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch
where
f :: Word8 -> BB.Builder
f n = BB.char7 '%' <> BB.string7 hexStr
where
hexStr = map toUpper $ showHex q $ showHex r ""
(q, r) = divMod n (16 :: Word8)
(q, r) = divMod (fromIntegral n) (16::Word8)
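-- Expected behaviour, sketched for an ASCII input (the ByteString literals
-- assume OverloadedStrings).
uriEncodeExamples :: [ByteString]
uriEncodeExamples =
  [ uriEncode True ("a b/c" :: ByteString), -- "a%20b%2Fc"
    uriEncode False ("a b/c" :: ByteString) -- "a%20b/c"
  ]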

View File

@ -15,54 +15,55 @@
--
module Network.Minio.Data.Crypto
( hashSHA256,
hashSHA256FromSource,
hashMD5,
hashMD5ToBase64,
hashMD5FromSource,
hmacSHA256,
hmacSHA256RawBS,
digestToBS,
digestToBase16,
encodeToBase64,
)
where
(
hashSHA256
, hashSHA256FromSource
import Crypto.Hash
( Digest,
MD5 (..),
SHA256 (..),
hashWith,
)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C
, hashMD5
, hashMD5ToBase64
, hashMD5FromSource
, hmacSHA256
, hmacSHA256RawBS
, digestToBS
, digestToBase16
, encodeToBase64
) where
import Crypto.Hash (Digest, MD5 (..), SHA256 (..),
hashWith)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C
import Lib.Prelude
hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256
hashSHA256FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource src = do
digest <- C.connect src sinkSHA256Hash
return $ digestToBase16 digest
where
-- To help with type inference
sinkSHA256Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest SHA256)
sinkSHA256Hash :: Monad m => C.ConduitM ByteString Void m (Digest SHA256)
sinkSHA256Hash = sinkHash
-- Returns MD5 hash hex encoded.
hashMD5 :: ByteString -> ByteString
hashMD5 = digestToBase16 . hashWith MD5
hashMD5FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString
hashMD5FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashMD5FromSource src = do
digest <- C.connect src sinkMD5Hash
return $ digestToBase16 digest
where
-- To help with type inference
sinkMD5Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest MD5)
sinkMD5Hash :: Monad m => C.ConduitM ByteString Void m (Digest MD5)
sinkMD5Hash = sinkHash
hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256
@ -71,15 +72,15 @@ hmacSHA256 message key = hmac key message
hmacSHA256RawBS :: ByteString -> ByteString -> ByteString
hmacSHA256RawBS message key = convert $ hmacSHA256 message key
digestToBS :: (ByteArrayAccess a) => a -> ByteString
digestToBS :: ByteArrayAccess a => a -> ByteString
digestToBS = convert
digestToBase16 :: (ByteArrayAccess a) => a -> ByteString
digestToBase16 :: ByteArrayAccess a => a -> ByteString
digestToBase16 = convertToBase Base16
-- Returns MD5 hash base 64 encoded.
hashMD5ToBase64 :: (ByteArrayAccess a) => a -> ByteString
hashMD5ToBase64 :: ByteArrayAccess a => a -> ByteString
hashMD5ToBase64 = convertToBase Base64 . hashWith MD5
encodeToBase64 :: (ByteArrayAccess a) => a -> ByteString
encodeToBase64 :: ByteArrayAccess a => a -> ByteString
encodeToBase64 = convertToBase Base64
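-- As a quick sanity check, hashing an empty payload yields the well-known
-- SHA-256 constant (literal assumes OverloadedStrings).
emptyPayloadSHA256 :: ByteString
emptyPayloadSHA256 = hashSHA256 ""
-- "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"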

View File

@ -15,24 +15,20 @@
--
module Network.Minio.Data.Time
( awsTimeFormat,
awsTimeFormatBS,
awsDateFormat,
awsDateFormatBS,
awsParseTime,
iso8601TimeFormat,
UrlExpiry,
)
where
(
awsTimeFormat
, awsTimeFormatBS
, awsDateFormat
, awsDateFormatBS
, awsParseTime
, iso8601TimeFormat
) where
import Data.ByteString.Char8 (pack)
import Data.ByteString.Char8 (pack)
import qualified Data.Time as Time
import Data.Time.Format.ISO8601 (iso8601Show)
import Lib.Prelude
-- | Time to expire for a presigned URL. It is interpreted as a number of
-- seconds. The maximum duration that can be specified is 7 days.
type UrlExpiry = Int
import Lib.Prelude
awsTimeFormat :: UTCTime -> [Char]
awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
@ -50,4 +46,4 @@ awsParseTime :: [Char] -> Maybe UTCTime
awsParseTime = Time.parseTimeM False Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
iso8601TimeFormat :: UTCTime -> [Char]
iso8601TimeFormat = iso8601Show
iso8601TimeFormat = Time.formatTime Time.defaultTimeLocale (Time.iso8601DateFormat $ Just "%T%QZ")

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -14,83 +14,74 @@
-- limitations under the License.
--
module Network.Minio.Errors
( MErrV (..),
ServiceErr (..),
MinioErr (..),
toServiceErr,
)
where
module Network.Minio.Errors where
import Control.Exception (IOException)
import Control.Exception
import qualified Network.HTTP.Conduit as NC
import Lib.Prelude
---------------------------------
-- Errors
---------------------------------
-- | Various validation errors
data MErrV
= MErrVSinglePUTSizeExceeded Int64
| MErrVPutSizeExceeded Int64
| MErrVETagHeaderNotFound
| MErrVInvalidObjectInfoResponse
| MErrVInvalidSrcObjSpec Text
| MErrVInvalidSrcObjByteRange (Int64, Int64)
| MErrVCopyObjSingleNoRangeAccepted
| MErrVRegionNotSupported Text
| MErrVXmlParse Text
| MErrVInvalidBucketName Text
| MErrVInvalidObjectName Text
| MErrVInvalidUrlExpiry Int
| MErrVJsonParse Text
| MErrVInvalidHealPath
| MErrVMissingCredentials
| MErrVInvalidEncryptionKeyLength
| MErrVStreamingBodyUnexpectedEOF
| MErrVUnexpectedPayload
| MErrVSTSEndpointNotFound
deriving stock (Show, Eq)
data MErrV = MErrVSinglePUTSizeExceeded Int64
| MErrVPutSizeExceeded Int64
| MErrVETagHeaderNotFound
| MErrVInvalidObjectInfoResponse
| MErrVInvalidSrcObjSpec Text
| MErrVInvalidSrcObjByteRange (Int64, Int64)
| MErrVCopyObjSingleNoRangeAccepted
| MErrVRegionNotSupported Text
| MErrVXmlParse Text
| MErrVInvalidBucketName Text
| MErrVInvalidObjectName Text
| MErrVInvalidUrlExpiry Int
| MErrVJsonParse Text
| MErrVInvalidHealPath
| MErrVMissingCredentials
| MErrVInvalidEncryptionKeyLength
deriving (Show, Eq)
instance Exception MErrV
-- | Errors returned by S3 compatible service
data ServiceErr
= BucketAlreadyExists
| BucketAlreadyOwnedByYou
| NoSuchBucket
| InvalidBucketName
| NoSuchKey
| SelectErr Text Text
| ServiceErr Text Text
deriving stock (Show, Eq)
data ServiceErr = BucketAlreadyExists
| BucketAlreadyOwnedByYou
| NoSuchBucket
| InvalidBucketName
| NoSuchKey
| SelectErr Text Text
| ServiceErr Text Text
deriving (Show, Eq)
instance Exception ServiceErr
toServiceErr :: Text -> Text -> ServiceErr
toServiceErr "NoSuchKey" _ = NoSuchKey
toServiceErr "NoSuchBucket" _ = NoSuchBucket
toServiceErr "InvalidBucketName" _ = InvalidBucketName
toServiceErr "NoSuchKey" _ = NoSuchKey
toServiceErr "NoSuchBucket" _ = NoSuchBucket
toServiceErr "InvalidBucketName" _ = InvalidBucketName
toServiceErr "BucketAlreadyOwnedByYou" _ = BucketAlreadyOwnedByYou
toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists
toServiceErr code message = ServiceErr code message
toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists
toServiceErr code message = ServiceErr code message
-- | Errors thrown by the library
data MinioErr
= MErrHTTP NC.HttpException
| MErrIO IOException
| MErrService ServiceErr
| MErrValidation MErrV
deriving stock (Show)
data MinioErr = MErrHTTP NC.HttpException
| MErrIO IOException
| MErrService ServiceErr
| MErrValidation MErrV
deriving (Show)
instance Eq MinioErr where
MErrHTTP _ == MErrHTTP _ = True
MErrHTTP _ == _ = False
MErrIO _ == MErrIO _ = True
MErrIO _ == _ = False
MErrService a == MErrService b = a == b
MErrService _ == _ = False
MErrValidation a == MErrValidation b = a == b
MErrValidation _ == _ = False
MErrHTTP _ == MErrHTTP _ = True
MErrHTTP _ == _ = False
MErrIO _ == MErrIO _ = True
MErrIO _ == _ = False
MErrService a == MErrService b = a == b
MErrService _ == _ = False
MErrValidation a == MErrValidation b = a == b
MErrValidation _ == _ = False
instance Exception MinioErr

View File

@ -15,35 +15,28 @@
--
module Network.Minio.JsonParser
( parseErrResponseJSON,
)
where
(
parseErrResponseJSON
) where
import Data.Aeson
( FromJSON,
eitherDecode,
parseJSON,
withObject,
(.:),
)
import qualified Data.Text as T
import Lib.Prelude
import Network.Minio.Errors
import Data.Aeson (FromJSON, eitherDecode, parseJSON,
withObject, (.:))
import qualified Data.Text as T
data AdminErrJSON = AdminErrJSON
{ aeCode :: Text,
aeMessage :: Text
}
deriving stock (Eq, Show)
import Lib.Prelude
import Network.Minio.Errors
data AdminErrJSON = AdminErrJSON { aeCode :: Text
, aeMessage :: Text
} deriving (Eq, Show)
instance FromJSON AdminErrJSON where
parseJSON = withObject "AdminErrJSON" $ \v ->
AdminErrJSON
<$> v .: "Code"
<*> v .: "Message"
parseJSON = withObject "AdminErrJSON" $ \v -> AdminErrJSON
<$> v .: "Code"
<*> v .: "Message"
parseErrResponseJSON :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponseJSON jsondata =
case eitherDecode jsondata of
Right aErr -> return $ toServiceErr (aeCode aErr) (aeMessage aErr)
Left err -> throwIO $ MErrVJsonParse $ T.pack err
Left err -> throwIO $ MErrVJsonParse $ T.pack err

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -16,165 +16,85 @@
module Network.Minio.ListOps where
import qualified Data.Conduit as C
import qualified Data.Conduit as C
import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL
import Network.Minio.Data
( Bucket,
ListObjectsResult
( lorCPrefixes,
lorHasMore,
lorNextToken,
lorObjects
),
ListObjectsV1Result
( lorCPrefixes',
lorHasMore',
lorNextMarker,
lorObjects'
),
ListPartsResult (lprHasMore, lprNextPart, lprParts),
ListUploadsResult
( lurHasMore,
lurNextKey,
lurNextUpload,
lurUploads
),
Minio,
Object,
ObjectInfo,
ObjectPartInfo (opiSize),
UploadId,
UploadInfo (UploadInfo),
)
import Network.Minio.S3API
( listIncompleteParts',
listIncompleteUploads',
listObjects',
listObjectsV1',
)
import qualified Data.Conduit.List as CL
-- | Represents a list output item - either an object or an object
-- prefix (i.e. a directory).
data ListItem
= ListItemObject ObjectInfo
| ListItemPrefix Text
deriving stock (Show, Eq)
import Lib.Prelude
-- | @'listObjects' bucket prefix recurse@ lists objects in a bucket
-- similar to a file system tree traversal.
--
-- If @prefix@ is not 'Nothing', only items with the given prefix are
-- listed, otherwise items under the bucket are returned.
--
-- If @recurse@ is set to @True@ all directories under the prefix are
-- recursively traversed and only objects are returned.
--
-- If @recurse@ is set to @False@, objects and directories immediately
-- under the given prefix are returned (no recursive traversal is
-- performed).
listObjects :: Bucket -> Maybe Text -> Bool -> C.ConduitM () ListItem Minio ()
import Network.Minio.Data
import Network.Minio.S3API
-- | List objects in a bucket matching the given prefix. If recurse is
-- set to True objects matching prefix are recursively listed.
listObjects :: Bucket -> Maybe Text -> Bool -> C.ConduitM () ObjectInfo Minio ()
listObjects bucket prefix recurse = loop Nothing
where
loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop :: Maybe Text -> C.ConduitM () ObjectInfo Minio ()
loop nextToken = do
let delimiter = bool (Just "/") Nothing recurse
let
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects res
unless recurse $
CL.sourceList $
map ListItemPrefix $
lorCPrefixes res
CL.sourceList $ lorObjects res
when (lorHasMore res) $
loop (lorNextToken res)
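-- A consumption sketch that collects all object names under a prefix. It
-- assumes oiObject is the name field of ObjectInfo and reuses the conduit
-- imports already present in this module.
listKeys :: Bucket -> Maybe Text -> Minio [Object]
listKeys bucket prefixMay =
  C.runConduit $
    listObjects bucket prefixMay True
      C..| CC.map
        ( \item -> case item of
            ListItemObject info -> oiObject info
            ListItemPrefix p -> p
        )
      C..| CC.sinkList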
-- | Lists objects - similar to @listObjects@, however uses the older
-- V1 AWS S3 API. Prefer @listObjects@ to this.
listObjectsV1 ::
Bucket ->
Maybe Text ->
Bool ->
C.ConduitM () ListItem Minio ()
-- | List objects in a bucket matching the given prefix. If recurse is
-- set to True objects matching prefix are recursively listed.
listObjectsV1 :: Bucket -> Maybe Text -> Bool
-> C.ConduitM () ObjectInfo Minio ()
listObjectsV1 bucket prefix recurse = loop Nothing
where
loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop :: Maybe Text -> C.ConduitM () ObjectInfo Minio ()
loop nextMarker = do
let delimiter = bool (Just "/") Nothing recurse
let
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects' res
unless recurse $
CL.sourceList $
map ListItemPrefix $
lorCPrefixes' res
CL.sourceList $ lorObjects' res
when (lorHasMore' res) $
loop (lorNextMarker res)
-- | List incomplete uploads in a bucket matching the given prefix. If
-- recurse is set to True, incomplete uploads for the given prefix are
-- recursively listed.
listIncompleteUploads ::
Bucket ->
Maybe Text ->
Bool ->
C.ConduitM () UploadInfo Minio ()
listIncompleteUploads :: Bucket -> Maybe Text -> Bool
-> C.ConduitM () UploadInfo Minio ()
listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
where
loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio ()
loop nextKeyMarker nextUploadIdMarker = do
let delimiter = bool (Just "/") Nothing recurse
let
delimiter = bool (Just "/") Nothing recurse
res <-
lift $
listIncompleteUploads'
bucket
prefix
delimiter
nextKeyMarker
nextUploadIdMarker
Nothing
res <- lift $ listIncompleteUploads' bucket prefix delimiter
nextKeyMarker nextUploadIdMarker Nothing
aggrSizes <- lift $
forM (lurUploads res) $ \(uKey, uId, _) -> do
partInfos <-
C.runConduit $
listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl' (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
partInfos <- C.runConduit $ listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
CL.sourceList $
zipWith
( curry
( \((uKey, uId, uInitTime), size) ->
UploadInfo uKey uId uInitTime size
)
)
(lurUploads res)
aggrSizes
map (\((uKey, uId, uInitTime), size) ->
UploadInfo uKey uId uInitTime size
) $ zip (lurUploads res) aggrSizes
when (lurHasMore res) $
loop (lurNextKey res) (lurNextUpload res)
-- | List object parts of an ongoing multipart upload for given
-- bucket, object and uploadId.
listIncompleteParts ::
Bucket ->
Object ->
UploadId ->
C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts :: Bucket -> Object -> UploadId
-> C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts bucket object uploadId = loop Nothing
where
loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio ()
loop nextPartMarker = do
res <-
lift $
listIncompleteParts'
bucket
object
uploadId
Nothing
nextPartMarker
res <- lift $ listIncompleteParts' bucket object uploadId Nothing
nextPartMarker
CL.sourceList $ lprParts res
when (lprHasMore res) $
loop (show <$> lprNextPart res)

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -13,51 +13,45 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE CPP #-}
module Network.Minio.PresignedOperations
( UrlExpiry,
makePresignedUrl,
presignedPutObjectUrl,
presignedGetObjectUrl,
presignedHeadObjectUrl,
PostPolicyCondition (..),
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
PostPolicy (..),
PostPolicyError (..),
newPostPolicy,
showPostPolicy,
presignedPostPolicy,
)
where
( UrlExpiry
, makePresignedUrl
, presignedPutObjectUrl
, presignedGetObjectUrl
, presignedHeadObjectUrl
import Data.Aeson ((.=))
import qualified Data.Aeson as Json
import Data.ByteString.Builder (byteString, toLazyByteString)
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time as Time
import Lib.Prelude
import qualified Network.HTTP.Client as NClient
import qualified Network.HTTP.Types as HT
import Network.Minio.API (buildRequest)
import Network.Minio.Credentials
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.URI (uriToString)
, PostPolicyCondition(..)
, ppCondBucket
, ppCondContentLengthRange
, ppCondContentType
, ppCondKey
, ppCondKeyStartsWith
, ppCondSuccessActionStatus
{- ORMOLU_DISABLE -}
#if MIN_VERSION_aeson(2,0,0)
import qualified Data.Aeson.Key as A
#endif
{- ORMOLU_ENABLE -}
, PostPolicy(..)
, PostPolicyError(..)
, newPostPolicy
, showPostPolicy
, presignedPostPolicy
) where
import Data.Aeson ((.=))
import qualified Data.Aeson as Json
import Data.ByteString.Builder (byteString, toLazyByteString)
import qualified Data.Map.Strict as Map
import qualified Data.Text as T
import qualified Data.Time as Time
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
-- | Generate a presigned URL. This function allows for advanced usage
-- - for simple cases prefer the `presigned*Url` functions.
@ -67,36 +61,42 @@ import qualified Data.Aeson.Key as A
--
-- All extra query parameters or headers are signed, and therefore are
-- required to be sent when the generated URL is actually used.
makePresignedUrl ::
UrlExpiry ->
HT.Method ->
Maybe Bucket ->
Maybe Object ->
Maybe Region ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
makePresignedUrl :: UrlExpiry -> HT.Method -> Maybe Bucket -> Maybe Object
-> Maybe Region -> HT.Query -> HT.RequestHeaders
-> Minio ByteString
makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
when (expiry > 7 * 24 * 3600 || expiry < 0) $
throwIO $
MErrVInvalidUrlExpiry expiry
when (expiry > 7*24*3600 || expiry < 0) $
throwIO $ MErrVInvalidUrlExpiry expiry
let s3ri =
defaultS3ReqInfo
{ riPresignExpirySecs = Just expiry,
riMethod = method,
riBucket = bucket,
riObject = object,
riRegion = region,
riQueryParams = extraQuery,
riHeaders = extraHeaders
}
ci <- asks mcConnInfo
req <- buildRequest s3ri
let uri = NClient.getUri req
uriString = uriToString identity uri ""
let
hostHeader = (hHost, getHostAddr ci)
req = NC.defaultRequest {
NC.method = method
, NC.secure = connectIsSecure ci
, NC.host = encodeUtf8 $ connectHost ci
, NC.port = connectPort ci
, NC.path = getS3Path bucket object
, NC.requestHeaders = hostHeader : extraHeaders
, NC.queryString = HT.renderQuery True extraQuery
}
ts <- liftIO Time.getCurrentTime
return $ encodeUtf8 uriString
let sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
ts region (Just expiry) Nothing
signPairs = signV4 sp req
qpToAdd = (fmap . fmap) Just signPairs
queryStr = HT.renderQueryBuilder True
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
return $ toS $ toLazyByteString $ scheme
<> byteString (getHostAddr ci)
<> byteString (getS3Path bucket object)
<> queryStr
-- | Generate a URL with authentication signature to PUT (upload) an
-- object. Any extra headers, if passed, are signed, and so they are
@ -105,22 +105,12 @@ makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
--
-- For a list of possible headers to pass, please refer to the PUT
-- object REST API AWS S3 documentation.
presignedPutObjectUrl ::
Bucket ->
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedPutObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.RequestHeaders
-> Minio ByteString
presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl
expirySeconds
HT.methodPut
(Just bucket)
(Just object)
region
[]
extraHeaders
makePresignedUrl expirySeconds HT.methodPut
(Just bucket) (Just object) region [] extraHeaders
-- | Generate a URL with authentication signature to GET (download) an
-- object. All extra query parameters and headers passed here will be
@ -131,23 +121,12 @@ presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
--
-- For a list of possible request parameters and headers, please refer
-- to the GET object REST API AWS S3 documentation.
presignedGetObjectUrl ::
Bucket ->
Object ->
UrlExpiry ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
presignedGetObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.Query
-> HT.RequestHeaders -> Minio ByteString
presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl
expirySeconds
HT.methodGet
(Just bucket)
(Just object)
region
extraQuery
extraHeaders
makePresignedUrl expirySeconds HT.methodGet
(Just bucket) (Just object) region extraQuery extraHeaders
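-- For instance, a download URL valid for one hour with no extra query
-- parameters or headers (Text literals assume OverloadedStrings):
downloadUrlSketch :: Minio ByteString
downloadUrlSketch = presignedGetObjectUrl "my-bucket" "reports/2023.pdf" 3600 [] []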
-- | Generate a URL with authentication signature to make a HEAD
-- request on an object. This is used to fetch metadata about an
@ -156,74 +135,50 @@ presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
--
-- For a list of possible headers to pass, please refer to the HEAD
-- object REST API AWS S3 documentation.
presignedHeadObjectUrl ::
Bucket ->
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedHeadObjectUrl :: Bucket -> Object -> UrlExpiry
-> HT.RequestHeaders -> Minio ByteString
presignedHeadObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl
expirySeconds
HT.methodHead
(Just bucket)
(Just object)
region
[]
extraHeaders
makePresignedUrl expirySeconds HT.methodHead
(Just bucket) (Just object) region [] extraHeaders
-- | Represents individual conditions in a Post Policy document.
data PostPolicyCondition
= PPCStartsWith Text Text
| PPCEquals Text Text
| PPCRange Text Int64 Int64
deriving stock (Show, Eq)
data PostPolicyCondition = PPCStartsWith Text Text
| PPCEquals Text Text
| PPCRange Text Int64 Int64
deriving (Show, Eq)
{- ORMOLU_DISABLE -}
instance Json.ToJSON PostPolicyCondition where
toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v]
#if MIN_VERSION_aeson(2,0,0)
toJSON (PPCEquals k v) = Json.object [(A.fromText k) .= v]
#else
toJSON (PPCEquals k v) = Json.object [k .= v]
#endif
toJSON (PPCRange k minVal maxVal) =
Json.toJSON [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
toEncoding (PPCStartsWith k v) = Json.foldable ["starts-with", k, v]
#if MIN_VERSION_aeson(2,0,0)
toEncoding (PPCEquals k v) = Json.pairs ((A.fromText k) .= v)
#else
toEncoding (PPCEquals k v) = Json.pairs (k .= v)
#endif
toEncoding (PPCRange k minVal maxVal) =
Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
{- ORMOLU_ENABLE -}
-- | A PostPolicy is required to perform uploads via browser forms.
data PostPolicy = PostPolicy
{ expiration :: UTCTime,
conditions :: [PostPolicyCondition]
}
deriving stock (Show, Eq)
data PostPolicy = PostPolicy {
expiration :: UTCTime
, conditions :: [PostPolicyCondition]
} deriving (Show, Eq)
instance Json.ToJSON PostPolicy where
toJSON (PostPolicy e c) =
Json.object
[ "expiration" .= iso8601TimeFormat e,
"conditions" .= c
]
Json.object $ [ "expiration" .= iso8601TimeFormat e
, "conditions" .= c
]
toEncoding (PostPolicy e c) =
Json.pairs ("expiration" .= iso8601TimeFormat e <> "conditions" .= c)
-- | Possible validation errors when creating a PostPolicy.
data PostPolicyError
= PPEKeyNotSpecified
| PPEBucketNotSpecified
| PPEConditionKeyEmpty
| PPERangeInvalid
deriving stock (Show, Eq)
data PostPolicyError = PPEKeyNotSpecified
| PPEBucketNotSpecified
| PPEConditionKeyEmpty
| PPERangeInvalid
deriving (Eq, Show)
-- | Set the bucket name that the upload should use.
ppCondBucket :: Bucket -> PostPolicyCondition
@ -231,10 +186,8 @@ ppCondBucket = PPCEquals "bucket"
-- | Set the content length range constraint with minimum and maximum
-- byte count values.
ppCondContentLengthRange ::
Int64 ->
Int64 ->
PostPolicyCondition
ppCondContentLengthRange :: Int64 -> Int64
-> PostPolicyCondition
ppCondContentLengthRange = PPCRange "content-length-range"
-- | Set the content-type header for the upload.
@ -257,99 +210,83 @@ ppCondSuccessActionStatus n =
-- | This function creates a PostPolicy after validating its
-- arguments.
newPostPolicy ::
UTCTime ->
[PostPolicyCondition] ->
Either PostPolicyError PostPolicy
newPostPolicy :: UTCTime -> [PostPolicyCondition]
-> Either PostPolicyError PostPolicy
newPostPolicy expirationTime conds
-- object name condition must be present
| not $ any (keyEquals "key") conds =
Left PPEKeyNotSpecified
-- bucket name condition must be present
| not $ any (keyEquals "bucket") conds =
Left PPEBucketNotSpecified
-- a condition with an empty key is invalid
| any (keyEquals "") conds || any isEmptyRangeKey conds =
Left PPEConditionKeyEmpty
-- invalid range check
| any isInvalidRange conds =
Left PPERangeInvalid
-- all good!
| otherwise =
return $ PostPolicy expirationTime conds
where
keyEquals k' (PPCStartsWith k _) = k == k'
keyEquals k' (PPCEquals k _) = k == k'
keyEquals _ _ = False
keyEquals k' (PPCEquals k _) = k == k'
keyEquals _ _ = False
isEmptyRangeKey (PPCRange k _ _) = k == ""
isEmptyRangeKey _ = False
isEmptyRangeKey _ = False
isInvalidRange (PPCRange _ mi ma) = mi < 0 || mi > ma
isInvalidRange _ = False
isInvalidRange _ = False
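-- A sketch of assembling a valid policy (values are placeholders; assumes
-- Data.Time's getCurrentTime and addUTCTime are in scope). The "key" and
-- "bucket" conditions are mandatory, as enforced above.
--
-- > expiry <- addUTCTime 3600 <$> getCurrentTime
-- > let policyE = newPostPolicy expiry
-- >       [ ppCondBucket "my-bucket"
-- >       , PPCStartsWith "key" "uploads/"
-- >       , ppCondContentLengthRange 1 (10 * 1024 * 1024)
-- >       ]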
-- | Convert Post Policy to a string (e.g. for printing).
showPostPolicy :: PostPolicy -> ByteString
showPostPolicy = toStrictBS . Json.encode
showPostPolicy = toS . Json.encode
-- | Generate a presigned URL and POST policy to upload files via a
-- browser. On success, this function returns a URL and POST
-- form-data.
presignedPostPolicy ::
PostPolicy ->
Minio (ByteString, H.HashMap Text ByteString)
presignedPostPolicy :: PostPolicy
-> Minio (ByteString, Map.Map Text ByteString)
presignedPostPolicy p = do
ci <- asks mcConnInfo
signTime <- liftIO Time.getCurrentTime
mgr <- asks mcConnManager
cv <- liftIO $ getCredential (connectCreds ci) (getEndpoint ci) mgr
signTime <- liftIO $ Time.getCurrentTime
let extraConditions signParams =
[ PPCEquals "x-amz-date" (toText $ awsTimeFormat signTime),
PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256",
PPCEquals
"x-amz-credential"
( T.intercalate
"/"
[ coerce $ cvAccessKey cv,
decodeUtf8 $ credentialScope signParams
]
)
]
ppWithCreds signParams =
p
{ conditions = conditions p ++ extraConditions signParams
}
sp =
SignParams
(coerce $ cvAccessKey cv)
(coerce $ cvSecretKey cv)
(coerce $ cvSessionToken cv)
ServiceS3
signTime
(Just $ connectRegion ci)
Nothing
Nothing
signData = signV4PostPolicy (showPostPolicy $ ppWithCreds sp) sp
-- compute form-data
mkPair (PPCStartsWith k v) = Just (k, v)
mkPair (PPCEquals k v) = Just (k, v)
mkPair _ = Nothing
formFromPolicy =
H.map encodeUtf8 $
H.fromList $
mapMaybe
mkPair
(conditions $ ppWithCreds sp)
formData = formFromPolicy `H.union` signData
-- compute POST upload URL
bucket = H.lookupDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
url =
toStrictBS $
toLazyByteString $
scheme
<> byteString (getHostAddr ci)
<> byteString "/"
<> byteString bucket
<> byteString "/"
let
extraConditions =
[ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime)
, PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256"
, PPCEquals "x-amz-credential"
(T.intercalate "/" [connectAccessKey ci,
decodeUtf8 $ mkScope signTime region])
]
ppWithCreds = p {
conditions = conditions p ++ extraConditions
}
sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
signTime (Just $ connectRegion ci) Nothing Nothing
signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp
-- compute form-data
mkPair (PPCStartsWith k v) = Just (k, v)
mkPair (PPCEquals k v) = Just (k, v)
mkPair _ = Nothing
formFromPolicy = Map.map toS $ Map.fromList $ catMaybes $
mkPair <$> conditions ppWithCreds
formData = formFromPolicy `Map.union` signData
-- compute POST upload URL
bucket = Map.findWithDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
region = connectRegion ci
url = toS $ toLazyByteString $ scheme <> byteString (getHostAddr ci) <>
byteString "/" <> byteString (toS bucket) <> byteString "/"
return (url, formData)
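-- A usage sketch continuing the hypothetical policy above: on success the
-- returned URL becomes the form action and the form-data map becomes the
-- hidden fields of a browser upload form (with the file itself in a field
-- named "file").
--
-- > case policyE of
-- >   Left err -> liftIO $ print err
-- >   Right policy -> do
-- >     (url, formData) <- presignedPostPolicy policy
-- >     liftIO $ print (url, formData)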

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -15,24 +15,28 @@
--
module Network.Minio.PutObject
( putObjectInternal,
ObjectData (..),
selectPartSizes,
)
where
(
putObjectInternal
, ObjectData(..)
, selectPartSizes
) where
import Conduit (takeC)
import qualified Conduit as C
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Conduit.Binary as CB
import Conduit (takeC)
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
import qualified Data.Conduit.List as CL
import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | A data-type to represent the source data for an object. A
-- file-path or a producer-conduit may be provided.
@ -45,71 +49,57 @@ import Network.Minio.Utils
-- the input - if it is not provided, upload will continue until the
-- stream ends or the object reaches `maxObjectSize` size.
data ObjectData m
= -- | Takes filepath and optional
-- size.
ODFile FilePath (Maybe Int64)
| -- | Pass
-- size
-- (bytes)
-- if
-- known.
ODStream (C.ConduitM () ByteString m ()) (Maybe Int64)
= ODFile FilePath (Maybe Int64) -- ^ Takes filepath and optional
-- size.
| ODStream (C.ConduitM () ByteString m ()) (Maybe Int64) -- ^ Pass
-- size
-- (bytes)
-- if
-- known.
-- | Put an object from ObjectData. This high-level API handles
-- objects of all sizes, even when the object size is unknown.
putObjectInternal ::
Bucket ->
Object ->
PutObjectOptions ->
ObjectData Minio ->
Minio ETag
putObjectInternal :: Bucket -> Object -> PutObjectOptions
-> ObjectData Minio -> Minio ETag
putObjectInternal b o opts (ODStream src sizeMay) = do
case sizeMay of
-- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing src
-- unable to get size, so assume non-seekable file and max-object size
Nothing -> sequentialMultipartUpload b o opts (Just maxObjectSize) src
-- got file size, so check for single/multipart upload
Just size ->
if
| size <= 64 * oneMiB -> do
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
if | size <= 64 * oneMiB -> do
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
putObjectInternal b o opts (ODFile fp sizeMay) = do
hResE <- withNewHandle fp $ \h ->
liftA2 (,) (isHandleSeekable h) (getFileSize h)
liftM2 (,) (isHandleSeekable h) (getFileSize h)
(isSeekable, handleSizeMay) <-
either
(const $ return (False, Nothing))
return
hResE
(isSeekable, handleSizeMay) <- either (const $ return (False, Nothing)) return
hResE
-- prefer given size to queried size.
let finalSizeMay = listToMaybe $ catMaybes [sizeMay, handleSizeMay]
case finalSizeMay of
-- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing $ CB.sourceFile fp
-- unable to get size, so assume non-seekable file and max-object size
Nothing -> sequentialMultipartUpload b o opts (Just maxObjectSize) $
CB.sourceFile fp
-- got file size, so check for single/multipart upload
Just size ->
if
| size <= 64 * oneMiB ->
either throwIO return
=<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| isSeekable -> parallelMultipartUpload b o opts fp size
| otherwise ->
sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp
if | size <= 64 * oneMiB -> either throwIO return =<<
withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| isSeekable -> parallelMultipartUpload b o opts fp size
| otherwise -> sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp
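-- A sketch of the two ObjectData variants handled above (names are
-- placeholders; assumes defaultPutObjectOptions and a conduit source
-- called someSource are available):
--
-- > -- file on disk; the size is queried from the handle when not given
-- > putObjectInternal "my-bucket" "from-file" defaultPutObjectOptions
-- >   (ODFile "/tmp/payload.bin" Nothing)
-- > -- arbitrary stream with a known size of 1 MiB
-- > putObjectInternal "my-bucket" "from-stream" defaultPutObjectOptions
-- >   (ODStream someSource (Just (1024 * 1024)))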
parallelMultipartUpload ::
Bucket ->
Object ->
PutObjectOptions ->
FilePath ->
Int64 ->
Minio ETag
parallelMultipartUpload :: Bucket -> Object -> PutObjectOptions
-> FilePath -> Int64 -> Minio ETag
parallelMultipartUpload b o opts filePath size = do
-- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -119,17 +109,15 @@ parallelMultipartUpload b o opts filePath size = do
let threads = fromMaybe 10 $ pooNumThreads opts
-- perform upload with 'threads' threads
uploadedPartsE <-
limitedMapConcurrently
(fromIntegral threads)
(uploadPart uploadId)
partSizeInfo
uploadedPartsE <- limitedMapConcurrently (fromIntegral threads)
(uploadPart uploadId) partSizeInfo
-- if there were any errors, rethrow exception.
mapM_ throwIO $ lefts uploadedPartsE
-- if we get here, all parts were successfully uploaded.
completeMultipartUpload b o uploadId $ rights uploadedPartsE
where
uploadPart uploadId (partNum, offset, sz) =
withNewHandle filePath $ \h -> do
@ -137,13 +125,10 @@ parallelMultipartUpload b o opts filePath size = do
putObjectPart b o uploadId partNum [] payload
-- | Upload multipart object from conduit source sequentially
sequentialMultipartUpload ::
Bucket ->
Object ->
PutObjectOptions ->
Maybe Int64 ->
C.ConduitM () ByteString Minio () ->
Minio ETag
sequentialMultipartUpload :: Bucket -> Object -> PutObjectOptions
-> Maybe Int64
-> C.ConduitM () ByteString Minio ()
-> Minio ETag
sequentialMultipartUpload b o opts sizeMay src = do
-- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -151,23 +136,22 @@ sequentialMultipartUpload b o opts sizeMay src = do
-- upload parts in loop
let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay
(pnums, _, sizes) = List.unzip3 partSizes
uploadedParts <-
C.runConduit $
src
C..| chunkBSConduit (map fromIntegral sizes)
C..| CL.map PayloadBS
C..| uploadPart' uploadId pnums
C..| CC.sinkList
uploadedParts <- C.runConduit
$ src
C..| chunkBSConduit sizes
C..| CL.map PayloadBS
C..| uploadPart' uploadId pnums
C..| CC.sinkList
-- complete multipart upload
completeMultipartUpload b o uploadId uploadedParts
where
uploadPart' _ [] = return ()
uploadPart' uid (pn : pns) = do
uploadPart' uid (pn:pns) = do
payloadMay <- C.await
case payloadMay of
Nothing -> return ()
Just payload -> do
pinfo <- lift $ putObjectPart b o uid pn [] payload
C.yield pinfo
uploadPart' uid pns
Just payload -> do pinfo <- lift $ putObjectPart b o uid pn [] payload
C.yield pinfo
uploadPart' uid pns

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -14,185 +14,131 @@
-- limitations under the License.
--
-- |
-- Module: Network.Minio.S3API
-- Copyright: (c) 2017-2023 MinIO Dev Team
-- License: Apache 2.0
-- Maintainer: MinIO Dev Team <dev@min.io>
--
-- Lower-level API for S3 compatible object stores. Start with @Network.Minio@
-- and use this only if needed.
module Network.Minio.S3API
( Region,
getLocation,
(
Region
, getLocation
-- * Listing buckets
-- * Listing buckets
--------------------
, getService
--------------------
getService,
-- * Listing objects
--------------------
, ListObjectsResult(..)
, ListObjectsV1Result(..)
, listObjects'
, listObjectsV1'
-- * Listing objects
-- * Retrieving buckets
, headBucket
--------------------
ListObjectsResult (..),
ListObjectsV1Result (..),
listObjects',
listObjectsV1',
-- * Retrieving objects
-----------------------
, getObject'
, headObject
-- * Retrieving buckets
headBucket,
-- * Creating buckets and objects
---------------------------------
, putBucket
, ETag
, putObjectSingle'
, putObjectSingle
, copyObjectSingle
-- * Retrieving objects
-- * Multipart Upload APIs
--------------------------
, UploadId
, PartTuple
, Payload(..)
, PartNumber
, newMultipartUpload
, putObjectPart
, copyObjectPart
, completeMultipartUpload
, abortMultipartUpload
, ListUploadsResult(..)
, listIncompleteUploads'
, ListPartsResult(..)
, listIncompleteParts'
-----------------------
getObject',
headObject,
-- * Deletion APIs
--------------------------
, deleteBucket
, deleteObject
-- * Creating buckets and objects
-- * Presigned Operations
-----------------------------
, module Network.Minio.PresignedOperations
---------------------------------
putBucket,
ETag,
maxSinglePutObjectSizeBytes,
putObjectSingle',
putObjectSingle,
copyObjectSingle,
-- ** Bucket Policies
, getBucketPolicy
, setBucketPolicy
-- * Multipart Upload APIs
-- * Bucket Notifications
-------------------------
, Notification(..)
, NotificationConfig(..)
, Arn
, Event(..)
, Filter(..)
, FilterKey(..)
, FilterRules(..)
, FilterRule(..)
, getBucketNotification
, putBucketNotification
, removeAllBucketNotification
) where
--------------------------
UploadId,
PartTuple,
Payload (..),
PartNumber,
newMultipartUpload,
putObjectPart,
copyObjectPart,
completeMultipartUpload,
abortMultipartUpload,
ListUploadsResult (..),
listIncompleteUploads',
ListPartsResult (..),
listIncompleteParts',
import qualified Data.ByteString as BS
import qualified Data.Conduit as C
import qualified Data.Text as T
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Status (status404)
import UnliftIO (Handler (Handler))
-- * Deletion APIs
import Lib.Prelude
--------------------------
deleteBucket,
deleteObject,
-- * Presigned Operations
-----------------------------
module Network.Minio.PresignedOperations,
-- ** Bucket Policies
getBucketPolicy,
setBucketPolicy,
-- * Bucket Notifications
-------------------------
Notification (..),
NotificationConfig (..),
Arn,
Event (..),
Filter (..),
FilterKey (..),
FilterRules (..),
FilterRule (..),
getBucketNotification,
putBucketNotification,
removeAllBucketNotification,
)
where
import qualified Data.ByteString as BS
import qualified Data.Text as T
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Status (status404)
import Network.Minio.API
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.PresignedOperations
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (Handler (Handler))
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.PresignedOperations
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
-- | Fetch all buckets from the service.
getService :: Minio [BucketInfo]
getService = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riNeedsLocation = False
}
resp <- executeRequest $ defaultS3ReqInfo {
riNeedsLocation = False
}
parseListBuckets $ NC.responseBody resp
-- Parse headers from getObject and headObject calls.
parseGetObjectHeaders :: Object -> [HT.Header] -> Maybe ObjectInfo
parseGetObjectHeaders object headers =
let metadataPairs = getMetadata headers
userMetadata = getUserMetadataMap metadataPairs
metadata = getNonUserMetadataMap metadataPairs
in ObjectInfo
<$> Just object
<*> getLastModifiedHeader headers
<*> getETagHeader headers
<*> getContentLength headers
<*> Just userMetadata
<*> Just metadata
-- | GET an object from the service and return parsed ObjectInfo and a
-- conduit source for the object content
getObject' ::
Bucket ->
Object ->
HT.Query ->
[HT.Header] ->
Minio GetObjectResponse
-- | GET an object from the service and return the response headers
-- and a conduit source for the object content
getObject' :: Bucket -> Object -> HT.Query -> [HT.Header]
-> Minio ([HT.Header], C.ConduitM () ByteString Minio ())
getObject' bucket object queryParams headers = do
resp <- mkStreamRequest reqInfo
let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp
objInfo <-
maybe
(throwIO MErrVInvalidObjectInfoResponse)
return
objInfoMaybe
return $
GetObjectResponse
{ gorObjectInfo = objInfo,
gorObjectStream = NC.responseBody resp
}
return (NC.responseHeaders resp, NC.responseBody resp)
where
reqInfo =
defaultS3ReqInfo
{ riBucket = Just bucket,
riObject = Just object,
riQueryParams = queryParams,
riHeaders =
headers
-- This header is required for safety as otherwise http-client,
-- sends Accept-Encoding: gzip, and the server may actually gzip
-- body. In that case Content-Length header will be missing.
<> [("Accept-Encoding", "identity")]
}
reqInfo = defaultS3ReqInfo { riBucket = Just bucket
, riObject = Just object
, riQueryParams = queryParams
, riHeaders = headers
}
-- | Creates a bucket via a PUT bucket call.
putBucket :: Bucket -> Region -> Minio ()
putBucket bucket location = do
ns <- asks getSvcNamespace
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riPayload = PayloadBS $ mkCreateBucketConfig ns location,
riNeedsLocation = False
void $ executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riPayload = PayloadBS $ mkCreateBucketConfig ns location
, riNeedsLocation = False
}
-- | Single PUT object size.
@ -208,432 +154,318 @@ putObjectSingle' bucket object headers bs = do
let size = fromIntegral (BS.length bs)
-- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $
throwIO $
MErrVSinglePUTSizeExceeded size
throwIO $ MErrVSinglePUTSizeExceeded size
let payload = mkStreamingPayload $ PayloadBS bs
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riHeaders = headers,
riPayload = payload
}
-- content-length header is automatically set by library.
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riHeaders = headers
, riPayload = PayloadBS bs
}
let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders
maybe
(throwIO MErrVETagHeaderNotFound)
return
etag
return etag
-- | PUT an object into the service. This function performs a single
-- PUT object call, and so can only transfer objects up to 5GiB.
putObjectSingle ::
Bucket ->
Object ->
[HT.Header] ->
Handle ->
Int64 ->
Int64 ->
Minio ETag
putObjectSingle :: Bucket -> Object -> [HT.Header] -> Handle -> Int64
-> Int64 -> Minio ETag
putObjectSingle bucket object headers h offset size = do
-- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $
throwIO $
MErrVSinglePUTSizeExceeded size
throwIO $ MErrVSinglePUTSizeExceeded size
-- content-length header is automatically set by library.
let payload = mkStreamingPayload $ PayloadH h offset size
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riHeaders = headers,
riPayload = payload
}
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riHeaders = headers
, riPayload = PayloadH h offset size
}
let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders
maybe
(throwIO MErrVETagHeaderNotFound)
return
etag
return etag
-- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextMarker.
listObjectsV1' ::
Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsV1Result
listObjectsV1' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int
-> Minio ListObjectsV1Result
listObjectsV1' bucket prefix nextMarker delimiter maxKeys = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = mkOptionalParams params
}
parseListObjectsV1Response $ NC.responseBody resp
where
params =
[ ("marker", nextMarker),
("prefix", prefix),
("delimiter", delimiter),
("max-keys", show <$> maxKeys)
params = [
("marker", nextMarker)
, ("prefix", prefix)
, ("delimiter", delimiter)
, ("max-keys", show <$> maxKeys)
]
-- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextToken.
listObjects' ::
Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsResult
listObjects' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int
-> Minio ListObjectsResult
listObjects' bucket prefix nextToken delimiter maxKeys = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = mkOptionalParams params
}
parseListObjectsResponse $ NC.responseBody resp
where
params =
[ ("list-type", Just "2"),
("continuation_token", nextToken),
("prefix", prefix),
("delimiter", delimiter),
("max-keys", show <$> maxKeys)
params = [
("list-type", Just "2")
, ("continuation_token", nextToken)
, ("prefix", prefix)
, ("delimiter", delimiter)
, ("max-keys", show <$> maxKeys)
]
-- | DELETE a bucket from the service.
deleteBucket :: Bucket -> Minio ()
deleteBucket bucket =
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket
deleteBucket bucket = void $
executeRequest $
defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
}
-- | DELETE an object from the service.
deleteObject :: Bucket -> Object -> Minio ()
deleteObject bucket object =
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket,
riObject = Just object
deleteObject bucket object = void $
executeRequest $
defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
, riObject = Just object
}
-- | Create a new multipart upload.
newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
newMultipartUpload bucket object headers = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPost,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = [("uploads", Nothing)],
riHeaders = headers
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPost
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = [("uploads", Nothing)]
, riHeaders = headers
}
parseNewMultipartUpload $ NC.responseBody resp
-- | PUT a part of an object as part of a multipart upload.
putObjectPart ::
Bucket ->
Object ->
UploadId ->
PartNumber ->
[HT.Header] ->
Payload ->
Minio PartTuple
putObjectPart :: Bucket -> Object -> UploadId -> PartNumber -> [HT.Header]
-> Payload -> Minio PartTuple
putObjectPart bucket object uploadId partNumber headers payload = do
-- transform payload to conduit to enable streaming signature
let payload' = mkStreamingPayload payload
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params,
riHeaders = headers,
riPayload = payload'
}
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
, riHeaders = headers
, riPayload = payload
}
let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders
maybe
(throwIO MErrVETagHeaderNotFound)
(return . (partNumber,))
etag
(return . (partNumber, )) etag
where
params =
[ ("uploadId", Just uploadId),
("partNumber", Just $ show partNumber)
params = [
("uploadId", Just uploadId)
, ("partNumber", Just $ show partNumber)
]
srcInfoToHeaders :: SourceInfo -> [HT.Header]
srcInfoToHeaders srcInfo =
( "x-amz-copy-source",
encodeUtf8 $
T.concat
[ "/",
srcBucket srcInfo,
"/",
srcObject srcInfo
]
)
: rangeHdr
++ zip names values
srcInfoToHeaders srcInfo = ("x-amz-copy-source",
toS $ T.concat ["/", srcBucket srcInfo,
"/", srcObject srcInfo]
) : rangeHdr ++ zip names values
where
names =
[ "x-amz-copy-source-if-match",
"x-amz-copy-source-if-none-match",
"x-amz-copy-source-if-unmodified-since",
"x-amz-copy-source-if-modified-since"
]
values =
mapMaybe
(fmap encodeUtf8 . (srcInfo &))
[ srcIfMatch,
srcIfNoneMatch,
fmap formatRFC1123 . srcIfUnmodifiedSince,
fmap formatRFC1123 . srcIfModifiedSince
]
rangeHdr =
maybe [] ((\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) . toByteRange) (srcRange srcInfo)
names = ["x-amz-copy-source-if-match", "x-amz-copy-source-if-none-match",
"x-amz-copy-source-if-unmodified-since",
"x-amz-copy-source-if-modified-since"]
values = mapMaybe (fmap encodeUtf8 . (srcInfo &))
[srcIfMatch, srcIfNoneMatch,
fmap formatRFC1123 . srcIfUnmodifiedSince,
fmap formatRFC1123 . srcIfModifiedSince]
rangeHdr = maybe [] (\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])])
$ toByteRange <$> srcRange srcInfo
toByteRange :: (Int64, Int64) -> HT.ByteRange
toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y)
-- | Performs server-side copy of an object or part of an object as an
-- upload part of an ongoing multi-part upload.
copyObjectPart ::
DestinationInfo ->
SourceInfo ->
UploadId ->
PartNumber ->
[HT.Header] ->
Minio (ETag, UTCTime)
copyObjectPart :: DestinationInfo -> SourceInfo -> UploadId
-> PartNumber -> [HT.Header] -> Minio (ETag, UTCTime)
copyObjectPart dstInfo srcInfo uploadId partNumber headers = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just $ dstBucket dstInfo,
riObject = Just $ dstObject dstInfo,
riQueryParams = mkOptionalParams params,
riHeaders = headers ++ srcInfoToHeaders srcInfo
}
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just $ dstBucket dstInfo
, riObject = Just $ dstObject dstInfo
, riQueryParams = mkOptionalParams params
, riHeaders = headers ++ srcInfoToHeaders srcInfo
}
parseCopyObjectResponse $ NC.responseBody resp
where
params =
[ ("uploadId", Just uploadId),
("partNumber", Just $ show partNumber)
params = [
("uploadId", Just uploadId)
, ("partNumber", Just $ show partNumber)
]
-- | Performs server-side copy of an object that is up to 5GiB in
-- size. If the object is greater than 5GiB, this function throws the
-- error returned by the server.
copyObjectSingle ::
Bucket ->
Object ->
SourceInfo ->
[HT.Header] ->
Minio (ETag, UTCTime)
copyObjectSingle :: Bucket -> Object -> SourceInfo -> [HT.Header]
-> Minio (ETag, UTCTime)
copyObjectSingle bucket object srcInfo headers = do
-- validate that srcRange is Nothing for this API.
when (isJust $ srcRange srcInfo) $
throwIO MErrVCopyObjSingleNoRangeAccepted
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riObject = Just object,
riHeaders = headers ++ srcInfoToHeaders srcInfo
}
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riObject = Just object
, riHeaders = headers ++ srcInfoToHeaders srcInfo
}
parseCopyObjectResponse $ NC.responseBody resp
-- | Complete a multipart upload.
completeMultipartUpload ::
Bucket ->
Object ->
UploadId ->
[PartTuple] ->
Minio ETag
completeMultipartUpload :: Bucket -> Object -> UploadId -> [PartTuple]
-> Minio ETag
completeMultipartUpload bucket object uploadId partTuple = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPost,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params,
riPayload =
PayloadBS $
mkCompleteMultipartUploadRequest partTuple
}
resp <- executeRequest $
defaultS3ReqInfo { riMethod = HT.methodPost
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
, riPayload = PayloadBS $
mkCompleteMultipartUploadRequest partTuple
}
parseCompleteMultipartUploadResponse $ NC.responseBody resp
where
params = [("uploadId", Just uploadId)]
-- | Abort a multipart upload.
abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
abortMultipartUpload bucket object uploadId =
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params
}
abortMultipartUpload bucket object uploadId = void $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
}
where
params = [("uploadId", Just uploadId)]
-- | List incomplete multipart uploads.
listIncompleteUploads' ::
Bucket ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListUploadsResult
listIncompleteUploads' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text
-> Maybe Text -> Maybe Int -> Minio ListUploadsResult
listIncompleteUploads' bucket prefix delimiter keyMarker uploadIdMarker maxKeys = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = params
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = params
}
parseListUploadsResponse $ NC.responseBody resp
where
-- build query params
params =
("uploads", Nothing)
: mkOptionalParams
[ ("prefix", prefix),
("delimiter", delimiter),
("key-marker", keyMarker),
("upload-id-marker", uploadIdMarker),
("max-uploads", show <$> maxKeys)
]
params = ("uploads", Nothing) : mkOptionalParams
[ ("prefix", prefix)
, ("delimiter", delimiter)
, ("key-marker", keyMarker)
, ("upload-id-marker", uploadIdMarker)
, ("max-uploads", show <$> maxKeys)
]
-- | List parts of an ongoing multipart upload.
listIncompleteParts' ::
Bucket ->
Object ->
UploadId ->
Maybe Text ->
Maybe Text ->
Minio ListPartsResult
listIncompleteParts' :: Bucket -> Object -> UploadId -> Maybe Text
-> Maybe Text -> Minio ListPartsResult
listIncompleteParts' bucket object uploadId maxParts partNumMarker = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riObject = Just object,
riQueryParams = mkOptionalParams params
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riObject = Just object
, riQueryParams = mkOptionalParams params
}
parseListPartsResponse $ NC.responseBody resp
where
-- build optional query params
params =
[ ("uploadId", Just uploadId),
("part-number-marker", partNumMarker),
("max-parts", maxParts)
params = [
("uploadId", Just uploadId)
, ("part-number-marker", partNumMarker)
, ("max-parts", maxParts)
]
-- | Get metadata of an object.
headObject :: Bucket -> Object -> [HT.Header] -> Minio ObjectInfo
headObject bucket object reqHeaders = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodHead,
riBucket = Just bucket,
riObject = Just object,
riHeaders =
reqHeaders
-- This header is required for safety as otherwise http-client,
-- sends Accept-Encoding: gzip, and the server may actually gzip
-- body. In that case Content-Length header will be missing.
<> [("Accept-Encoding", "identity")]
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodHead
, riBucket = Just bucket
, riObject = Just object
, riHeaders = reqHeaders
}
let
headers = NC.responseHeaders resp
modTime = getLastModifiedHeader headers
etag = getETagHeader headers
size = getContentLength headers
metadata = getMetadataMap headers
maybe (throwIO MErrVInvalidObjectInfoResponse) return $
parseGetObjectHeaders object $
NC.responseHeaders resp
ObjectInfo <$> Just object <*> modTime <*> etag <*> size <*> Just metadata
-- | Query the object store if a given bucket exists.
headBucket :: Bucket -> Minio Bool
headBucket bucket =
headBucketEx
`catches` [ Handler handleNoSuchBucket,
Handler handleStatus404
]
headBucket bucket = headBucketEx `catches`
[ Handler handleNoSuchBucket
, Handler handleStatus404
]
where
handleNoSuchBucket :: ServiceErr -> Minio Bool
handleNoSuchBucket e
| e == NoSuchBucket = return False
| otherwise = throwIO e
handleNoSuchBucket e | e == NoSuchBucket = return False
| otherwise = throwIO e
handleStatus404 :: NC.HttpException -> Minio Bool
handleStatus404 e@(NC.HttpExceptionRequest _ (NC.StatusCodeException res _)) =
if NC.responseStatus res == status404
then return False
else throwIO e
then return False
else throwIO e
handleStatus404 e = throwIO e
headBucketEx = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodHead,
riBucket = Just bucket
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodHead
, riBucket = Just bucket
}
return $ NC.responseStatus resp == HT.ok200
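-- A usage sketch (placeholder bucket and region):
--
-- > exists <- headBucket "my-bucket"
-- > unless exists $ putBucket "my-bucket" "us-east-1"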
-- | Set the notification configuration on a bucket.
putBucketNotification :: Bucket -> Notification -> Minio ()
putBucketNotification bucket ncfg = do
ns <- asks getSvcNamespace
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riQueryParams = [("notification", Nothing)],
riPayload =
PayloadBS $
mkPutNotificationRequest ns ncfg
}
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riQueryParams = [("notification", Nothing)]
, riPayload = PayloadBS $
mkPutNotificationRequest ns ncfg
}
-- | Retrieve the notification configuration on a bucket.
getBucketNotification :: Bucket -> Minio Notification
getBucketNotification bucket = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = [("notification", Nothing)]
}
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = [("notification", Nothing)]
}
parseNotification $ NC.responseBody resp
-- | Remove all notifications configured on a bucket.
@ -643,14 +475,11 @@ removeAllBucketNotification = flip putBucketNotification defaultNotification
-- | Fetch the policy if any on a bucket.
getBucketPolicy :: Bucket -> Minio Text
getBucketPolicy bucket = do
resp <-
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodGet,
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)]
}
return $ decodeUtf8Lenient $ toStrictBS $ NC.responseBody resp
resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
, riBucket = Just bucket
, riQueryParams = [("policy", Nothing)]
}
return $ toS $ NC.responseBody resp
-- | Set a new policy on a bucket.
-- As a special condition, if the policy is empty
@ -662,24 +491,18 @@ setBucketPolicy bucket policy = do
else putBucketPolicy bucket policy
-- | Save a new policy on a bucket.
putBucketPolicy :: Bucket -> Text -> Minio ()
putBucketPolicy :: Bucket -> Text -> Minio()
putBucketPolicy bucket policy = do
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodPut,
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)],
riPayload = PayloadBS $ encodeUtf8 policy
}
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
, riBucket = Just bucket
, riQueryParams = [("policy", Nothing)]
, riPayload = PayloadBS $ encodeUtf8 policy
}
-- | Delete any policy set on a bucket.
deleteBucketPolicy :: Bucket -> Minio ()
deleteBucketPolicy :: Bucket -> Minio()
deleteBucketPolicy bucket = do
void $
executeRequest $
defaultS3ReqInfo
{ riMethod = HT.methodDelete,
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)]
}
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
, riBucket = Just bucket
, riQueryParams = [("policy", Nothing)]
}

View File

@ -15,103 +15,116 @@
--
module Network.Minio.SelectAPI
( -- | The `selectObjectContent` function allows querying CSV, JSON or Parquet
-- format objects in AWS S3 and in MinIO using SQL Select
-- statements. This allows significant reduction of data transfer
-- from object storage for computation-intensive tasks, as relevant
-- data is filtered close to the storage.
selectObjectContent,
SelectRequest,
selectRequest,
(
-- *** Input Serialization
InputSerialization,
defaultCsvInput,
linesJsonInput,
documentJsonInput,
defaultParquetInput,
setInputCSVProps,
CompressionType (..),
setInputCompressionType,
-- | The `selectObjectContent` function allows querying CSV, JSON or Parquet
-- format objects in AWS S3 and in MinIO using SQL Select
-- statements. This allows significant reduction of data transfer
-- from object storage for computation-intensive tasks, as relevant
-- data is filtered close to the storage.
-- *** CSV Format details
selectObjectContent
-- | CSV format options such as delimiters and quote characters are
-- specified using the functions below. Options are combined
-- monoidally.
CSVProp,
recordDelimiter,
fieldDelimiter,
quoteCharacter,
quoteEscapeCharacter,
commentCharacter,
allowQuotedRecordDelimiter,
FileHeaderInfo (..),
fileHeaderInfo,
QuoteFields (..),
quoteFields,
, SelectRequest
, selectRequest
-- *** Output Serialization
OutputSerialization,
defaultCsvOutput,
defaultJsonOutput,
outputCSVFromProps,
outputJSONFromRecordDelimiter,
-- *** Input Serialization
-------------------------
-- *** Progress messages
setRequestProgressEnabled,
, InputSerialization
, defaultCsvInput
, linesJsonInput
, documentJsonInput
, defaultParquetInput
, setInputCSVProps
-- *** Interpreting Select output
, CompressionType(..)
, setInputCompressionType
-- | The conduit returned by `selectObjectContent` returns values of
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
getPayloadBytes,
EventMessage (..),
Progress (..),
Stats,
)
where
-- *** CSV Format details
------------------------
-- | CSV format options such as delimiters and quote characters are
-- specified using the functions below. Options are combined
-- monoidally.
import Conduit ((.|))
import qualified Conduit as C
import qualified Data.Binary as Bin
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (MonadUnliftIO)
, CSVProp
, recordDelimiter
, fieldDelimiter
, quoteCharacter
, quoteEscapeCharacter
, commentCharacter
, allowQuotedRecordDelimiter
, FileHeaderInfo(..)
, fileHeaderInfo
, QuoteFields(..)
, quoteFields
data EventStreamException
= ESEPreludeCRCFailed
| ESEMessageCRCFailed
| ESEUnexpectedEndOfStream
| ESEDecodeFail [Char]
| ESEInvalidHeaderType
| ESEInvalidHeaderValueType
| ESEInvalidMessageType
deriving stock (Eq, Show)
-- *** Output Serialization
-------------------------
, OutputSerialization
, defaultCsvOutput
, defaultJsonOutput
, outputCSVFromProps
, outputJSONFromRecordDelimiter
-- *** Progress messages
------------------------
, setRequestProgressEnabled
-- *** Interpreting Select output
--------------------------------------------
-- | The conduit returned by `selectObjectContent` returns values of
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
, getPayloadBytes
, EventMessage(..)
, Progress(..)
, Stats
) where
import Conduit ((.|))
import qualified Conduit as C
import qualified Data.Binary as Bin
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
data EventStreamException = ESEPreludeCRCFailed
| ESEMessageCRCFailed
| ESEUnexpectedEndOfStream
| ESEDecodeFail [Char]
| ESEInvalidHeaderType
| ESEInvalidHeaderValueType
| ESEInvalidMessageType
deriving (Eq, Show)
instance Exception EventStreamException
@ -119,176 +132,171 @@ instance Exception EventStreamException
chunkSize :: Int
chunkSize = 32 * 1024
parseBinary :: (Bin.Binary a) => ByteString -> IO a
parseBinary :: Bin.Binary a => ByteString -> IO a
parseBinary b = do
case Bin.decodeOrFail $ LB.fromStrict b of
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
Right (_, _, r) -> return r
case Bin.decodeOrFail $ LB.fromStrict b of
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
Right (_, _, r) -> return r
bytesToHeaderName :: Text -> IO MsgHeaderName
bytesToHeaderName t = case t of
":message-type" -> return MessageType
":event-type" -> return EventType
":content-type" -> return ContentType
":error-code" -> return ErrorCode
":message-type" -> return MessageType
":event-type" -> return EventType
":content-type" -> return ContentType
":error-code" -> return ErrorCode
":error-message" -> return ErrorMessage
_ -> throwIO ESEInvalidHeaderType
_ -> throwIO ESEInvalidHeaderType
parseHeaders ::
(MonadUnliftIO m) =>
Word32 ->
C.ConduitM ByteString a m [MessageHeader]
parseHeaders :: MonadUnliftIO m
=> Word32 -> C.ConduitM ByteString a m [MessageHeader]
parseHeaders 0 = return []
parseHeaders hdrLen = do
bs1 <- readNBytes 1
n :: Word8 <- liftIO $ parseBinary bs1
bs1 <- readNBytes 1
n :: Word8 <- liftIO $ parseBinary bs1
headerKeyBytes <- readNBytes $ fromIntegral n
let headerKey = decodeUtf8Lenient headerKeyBytes
headerName <- liftIO $ bytesToHeaderName headerKey
headerKeyBytes <- readNBytes $ fromIntegral n
let headerKey = decodeUtf8Lenient headerKeyBytes
headerName <- liftIO $ bytesToHeaderName headerKey
bs2 <- readNBytes 1
headerValueType :: Word8 <- liftIO $ parseBinary bs2
when (headerValueType /= 7) $ throwIO ESEInvalidHeaderValueType
bs2 <- readNBytes 1
headerValueType :: Word8 <- liftIO $ parseBinary bs2
when (headerValueType /= 7) $ throwIO ESEInvalidHeaderValueType
bs3 <- readNBytes 2
vLen :: Word16 <- liftIO $ parseBinary bs3
headerValueBytes <- readNBytes $ fromIntegral vLen
let headerValue = decodeUtf8Lenient headerValueBytes
m = (headerName, headerValue)
k = 1 + fromIntegral n + 1 + 2 + fromIntegral vLen
bs3 <- readNBytes 2
vLen :: Word16 <- liftIO $ parseBinary bs3
headerValueBytes <- readNBytes $ fromIntegral vLen
let headerValue = decodeUtf8Lenient headerValueBytes
m = (headerName, headerValue)
k = 1 + fromIntegral n + 1 + 2 + fromIntegral vLen
ms <- parseHeaders (hdrLen - k)
return (m : ms)
ms <- parseHeaders (hdrLen - k)
return (m:ms)
-- readNBytes returns N bytes read from the stream and throws an
-- exception if N bytes are not present on the stream.
readNBytes :: (MonadUnliftIO m) => Int -> C.ConduitM ByteString a m ByteString
readNBytes :: MonadUnliftIO m => Int -> C.ConduitM ByteString a m ByteString
readNBytes n = do
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
if B.length b /= n
then throwIO ESEUnexpectedEndOfStream
else return b
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
if B.length b /= n
then throwIO ESEUnexpectedEndOfStream
else return b
crcCheck ::
(MonadUnliftIO m) =>
C.ConduitM ByteString ByteString m ()
crcCheck :: MonadUnliftIO m
=> C.ConduitM ByteString ByteString m ()
crcCheck = do
b <- readNBytes 12
n :: Word32 <- liftIO $ parseBinary $ B.take 4 b
preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 b
when (crc32 (B.take 8 b) /= preludeCRC) $
throwIO ESEPreludeCRCFailed
b <- readNBytes 12
n :: Word32 <- liftIO $ parseBinary $ B.take 4 b
preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 b
when (crc32 (B.take 8 b) /= preludeCRC) $
throwIO ESEPreludeCRCFailed
-- we do not yield the checksum
C.yield $ B.take 8 b
-- we do not yield the checksum
C.yield $ B.take 8 b
-- 12 bytes have been read off the current message. Now read the
-- next (n-12)-4 bytes and accumulate the checksum, and yield it.
let startCrc = crc32 b
finalCrc <- accumulateYield (fromIntegral n - 16) startCrc
-- 12 bytes have been read off the current message. Now read the
-- next (n-12)-4 bytes and accumulate the checksum, and yield it.
let startCrc = crc32 b
finalCrc <- accumulateYield (fromIntegral n-16) startCrc
bs <- readNBytes 4
expectedCrc :: Word32 <- liftIO $ parseBinary bs
bs <- readNBytes 4
expectedCrc :: Word32 <- liftIO $ parseBinary bs
when (finalCrc /= expectedCrc) $
throwIO ESEMessageCRCFailed
when (finalCrc /= expectedCrc) $
throwIO ESEMessageCRCFailed
-- we unconditionally recurse - downstream figures out when to
-- quit reading the stream
crcCheck
-- we unconditionally recurse - downstream figures out when to
-- quit reading the stream
crcCheck
where
accumulateYield n checkSum = do
let toRead = min n chunkSize
b <- readNBytes toRead
let c' = crc32Update checkSum b
n' = n - B.length b
C.yield b
if n' > 0
then accumulateYield n' c'
else return c'
let toRead = min n chunkSize
b <- readNBytes toRead
let c' = crc32Update checkSum b
n' = n - B.length b
C.yield b
if n' > 0
then accumulateYield n' c'
else return c'
handleMessage :: (MonadUnliftIO m) => C.ConduitT ByteString EventMessage m ()
handleMessage :: MonadUnliftIO m => C.ConduitT ByteString EventMessage m ()
handleMessage = do
b1 <- readNBytes 4
msgLen :: Word32 <- liftIO $ parseBinary b1
b1 <- readNBytes 4
msgLen :: Word32 <- liftIO $ parseBinary b1
b2 <- readNBytes 4
hdrLen :: Word32 <- liftIO $ parseBinary b2
b2 <- readNBytes 4
hdrLen :: Word32 <- liftIO $ parseBinary b2
hs <- parseHeaders hdrLen
hs <- parseHeaders hdrLen
let payloadLen = msgLen - hdrLen - 16
getHdrVal h = fmap snd . find ((h ==) . fst)
eventHdrValue = getHdrVal EventType hs
msgHdrValue = getHdrVal MessageType hs
errCode = getHdrVal ErrorCode hs
errMsg = getHdrVal ErrorMessage hs
let payloadLen = msgLen - hdrLen - 16
getHdrVal h = fmap snd . headMay . filter ((h ==) . fst)
eventHdrValue = getHdrVal EventType hs
msgHdrValue = getHdrVal MessageType hs
errCode = getHdrVal ErrorCode hs
errMsg = getHdrVal ErrorMessage hs
case msgHdrValue of
Just "event" -> do
case eventHdrValue of
Just "Records" -> passThrough $ fromIntegral payloadLen
Just "Cont" -> return ()
Just "Progress" -> do
bs <- readNBytes $ fromIntegral payloadLen
progress <- parseSelectProgress bs
C.yield $ ProgressEventMessage progress
Just "Stats" -> do
bs <- readNBytes $ fromIntegral payloadLen
stats <- parseSelectProgress bs
C.yield $ StatsEventMessage stats
Just "End" -> return ()
_ -> throwIO ESEInvalidMessageType
when (eventHdrValue /= Just "End") handleMessage
Just "error" -> do
let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
_ -> throwIO ESEInvalidMessageType
case msgHdrValue of
Just "event" -> do
case eventHdrValue of
Just "Records" -> passThrough $ fromIntegral payloadLen
Just "Cont" -> return ()
Just "Progress" -> do
bs <- readNBytes $ fromIntegral payloadLen
progress <- parseSelectProgress bs
C.yield $ ProgressEventMessage progress
Just "Stats" -> do
bs <- readNBytes $ fromIntegral payloadLen
stats <- parseSelectProgress bs
C.yield $ StatsEventMessage stats
Just "End" -> return ()
_ -> throwIO ESEInvalidMessageType
when (eventHdrValue /= Just "End") handleMessage
Just "error" -> do
let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
_ -> throwIO ESEInvalidMessageType
where
passThrough 0 = return ()
passThrough n = do
let c = min n chunkSize
b <- readNBytes c
C.yield $ RecordPayloadEventMessage b
passThrough $ n - B.length b
let c = min n chunkSize
b <- readNBytes c
C.yield $ RecordPayloadEventMessage b
passThrough $ n - B.length b
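-- A worked example of the framing arithmetic used above (numbers are
-- hypothetical): for a message with msgLen = 100 and hdrLen = 20 the
-- payload length is 100 - 20 - 16 = 64 bytes, where the 16 bytes are the
-- 12-byte prelude (4-byte total length, 4-byte header length, 4-byte
-- prelude CRC) consumed in crcCheck plus the 4-byte trailing message CRC
-- that crcCheck strips before this conduit sees the stream.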
selectProtoConduit ::
(MonadUnliftIO m) =>
C.ConduitT ByteString EventMessage m ()
selectProtoConduit :: MonadUnliftIO m
=> C.ConduitT ByteString EventMessage m ()
selectProtoConduit = crcCheck .| handleMessage
-- | selectObjectContent calls the SelectRequest on the given
-- object. It returns a Conduit of event messages that can be consumed
-- by the client.
selectObjectContent ::
Bucket ->
Object ->
SelectRequest ->
Minio (C.ConduitT () EventMessage Minio ())
selectObjectContent :: Bucket -> Object -> SelectRequest
-> Minio (C.ConduitT () EventMessage Minio ())
selectObjectContent b o r = do
let reqInfo =
defaultS3ReqInfo
{ riMethod = HT.methodPost,
riBucket = Just b,
riObject = Just o,
riPayload = PayloadBS $ mkSelectRequest r,
riNeedsLocation = False,
riQueryParams = [("select", Nothing), ("select-type", Just "2")]
}
-- print $ mkSelectRequest r
resp <- mkStreamRequest reqInfo
return $ NC.responseBody resp .| selectProtoConduit
let reqInfo = defaultS3ReqInfo { riMethod = HT.methodPost
, riBucket = Just b
, riObject = Just o
, riPayload = PayloadBS $ mkSelectRequest r
, riNeedsLocation = False
, riQueryParams = [("select", Nothing), ("select-type", Just "2")]
}
--print $ mkSelectRequest r
resp <- mkStreamRequest reqInfo
return $ NC.responseBody resp .| selectProtoConduit
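-- A fuller sketch of the example in the module header (bucket and object
-- names are placeholders; the selectRequest helper is assumed to combine
-- the SQL expression with the input and output serializations):
--
-- > let sr = selectRequest "select * from s3object" defaultCsvInput defaultCsvOutput
-- > src <- selectObjectContent "my-bucket" "data.csv" sr
-- > C.runConduit $ src .| getPayloadBytes .| C.stdoutC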
-- | A helper conduit that returns only the record payload bytes.
getPayloadBytes :: (MonadIO m) => C.ConduitT EventMessage ByteString m ()
getPayloadBytes :: MonadIO m => C.ConduitT EventMessage ByteString m ()
getPayloadBytes = do
evM <- C.await
case evM of
Just v -> do
case v of
RecordPayloadEventMessage b -> C.yield b
RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m
_ -> return ()
getPayloadBytes
Nothing -> return ()
evM <- C.await
case evM of
Just v -> do
case v of
RecordPayloadEventMessage b -> C.yield b
RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m
_ -> return ()
getPayloadBytes
Nothing -> return ()

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -13,94 +13,85 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE BangPatterns #-}
module Network.Minio.Sign.V4
( SignParams (..),
signV4QueryParams,
signV4,
signV4PostPolicy,
signV4Stream,
Service (..),
credentialScope,
)
where
(
signV4
, signV4PostPolicy
, mkScope
, getHeadersToSign
, mkCanonicalRequest
, mkStringToSign
, mkSigningKey
, computeSignature
, SignV4Data(..)
, SignParams(..)
, debugPrintSignV4Data
) where
import qualified Conduit as C
import qualified Data.ByteArray as BA
import qualified Data.ByteString as B
import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Char8 as B8
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk)
import qualified Data.CaseInsensitive as CI
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import Data.List (partition)
import qualified Data.List.NonEmpty as NE
import qualified Data.Time as Time
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (Header, SimpleQuery, hContentEncoding, parseQuery)
import qualified Network.HTTP.Types as H
import Network.HTTP.Types.Header (RequestHeaders)
import Network.Minio.Data.ByteString
import Network.Minio.Data.Crypto
import Network.Minio.Data.Time
import Network.Minio.Errors
import Text.Printf (printf)
import qualified Data.ByteString as B
import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Char8 as B8
import Data.CaseInsensitive (mk)
import qualified Data.CaseInsensitive as CI
import qualified Data.Map.Strict as Map
import qualified Data.Set as Set
import qualified Data.Time as Time
import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (Header, parseQuery)
import qualified Network.HTTP.Types.Header as H
import Lib.Prelude
import Network.Minio.Data.ByteString
import Network.Minio.Data.Crypto
import Network.Minio.Data.Time
-- these headers are not included in the string to sign when signing a
-- request
ignoredHeaders :: Set.HashSet ByteString
ignoredHeaders =
Set.fromList $
map
CI.foldedCase
[ H.hAuthorization,
H.hContentType,
H.hUserAgent
]
ignoredHeaders :: Set ByteString
ignoredHeaders = Set.fromList $ map CI.foldedCase
[ H.hAuthorization
, H.hContentType
, H.hContentLength
, H.hUserAgent
]
data Service = ServiceS3 | ServiceSTS
deriving stock (Eq, Show)
data SignV4Data = SignV4Data {
sv4SignTime :: UTCTime
, sv4Scope :: ByteString
, sv4CanonicalRequest :: ByteString
, sv4HeadersToSign :: [(ByteString, ByteString)]
, sv4Output :: [(ByteString, ByteString)]
, sv4StringToSign :: ByteString
, sv4SigningKey :: ByteString
} deriving (Show)
toByteString :: Service -> ByteString
toByteString ServiceS3 = "s3"
toByteString ServiceSTS = "sts"
data SignParams = SignParams {
spAccessKey :: Text
, spSecretKey :: Text
, spTimeStamp :: UTCTime
, spRegion :: Maybe Text
, spExpirySecs :: Maybe Int
, spPayloadHash :: Maybe ByteString
} deriving (Show)
data SignParams = SignParams
{ spAccessKey :: Text,
spSecretKey :: BA.ScrubbedBytes,
spSessionToken :: Maybe BA.ScrubbedBytes,
spService :: Service,
spTimeStamp :: UTCTime,
spRegion :: Maybe Text,
spExpirySecs :: Maybe UrlExpiry,
spPayloadHash :: Maybe ByteString
}
deriving stock (Show)
mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header
mkAuthHeader accessKey scope signedHeaderKeys sign =
let authValue =
B.concat
[ "AWS4-HMAC-SHA256 Credential=",
encodeUtf8 accessKey,
"/",
scope,
", SignedHeaders=",
signedHeaderKeys,
", Signature=",
sign
]
in (H.hAuthorization, authValue)
data IsStreaming = IsStreamingLength Int64 | NotStreaming
deriving stock (Eq, Show)
amzSecurityToken :: ByteString
amzSecurityToken = "X-Amz-Security-Token"
debugPrintSignV4Data :: SignV4Data -> IO ()
debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
B8.putStrLn "SignV4Data:"
B8.putStr "Timestamp: " >> print t
B8.putStr "Scope: " >> B8.putStrLn s
B8.putStrLn "Canonical Request:"
B8.putStrLn cr
B8.putStr "Headers to Sign: " >> print h2s
B8.putStr "Output: " >> print o
B8.putStr "StringToSign: " >> B8.putStrLn sts
B8.putStr "SigningKey: " >> printBytes sk
B8.putStrLn "END of SignV4Data ========="
where
printBytes b = do
mapM_ (\x -> B.putStr $ B.concat [show x, " "]) $ B.unpack b
B8.putStrLn ""
-- | Given SignParams and request details, including request method,
-- request path, headers, query params and payload hash, generates an
@ -114,212 +105,128 @@ amzSecurityToken = "X-Amz-Security-Token"
-- is being created. The expiry is interpreted as an integer number of
-- seconds. The output will be the list of query-parameters to add to
-- the request.
signV4QueryParams :: SignParams -> NC.Request -> SimpleQuery
signV4QueryParams !sp !req =
let scope = credentialScope sp
expiry = spExpirySecs sp
headersToSign = getHeadersToSign $ NC.requestHeaders req
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
-- query-parameters to be added before signing for presigned URLs
-- (i.e. when `isJust expiry`)
authQP =
[ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
("X-Amz-Credential", B.concat [encodeUtf8 $ spAccessKey sp, "/", scope]),
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
("X-Amz-Expires", maybe "" showBS expiry),
("X-Amz-SignedHeaders", signedHeaderKeys)
]
++ maybeToList ((amzSecurityToken,) . BA.convert <$> spSessionToken sp)
finalQP =
parseQuery (NC.queryString req)
++ if isJust expiry
then (fmap . fmap) Just authQP
else []
-- 1. compute canonical request
canonicalRequest =
mkCanonicalRequest
False
sp
(NC.setQueryString finalQP req)
headersToSign
-- 2. compute string to sign
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
-- 3.1 compute signing key
signingKey = getSigningKey sp
-- 3.2 compute signature
signature = computeSignature stringToSign signingKey
in ("X-Amz-Signature", signature) : authQP
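-- A minimal sketch (not part of this patch) of driving the new
-- signV4QueryParams to presign a GET URL. The endpoint, credentials and the
-- one-hour expiry are invented; OverloadedStrings is assumed for the Text and
-- ScrubbedBytes literals, and UrlExpiry is taken to be numeric (Int in the
-- library).
presignSketch :: IO SimpleQuery
presignSketch = do
  now <- Time.getCurrentTime
  req <- NC.parseRequest "https://play.min.io/my-bucket/my-object"
  let sp =
        SignParams
          { spAccessKey = "my-access-key",
            spSecretKey = "my-secret-key",
            spSessionToken = Nothing,
            spService = ServiceS3,
            spTimeStamp = now,
            spRegion = Just "us-east-1",
            spExpirySecs = Just 3600, -- presigned URL valid for one hour
            spPayloadHash = Nothing
          }
  return $ signV4QueryParams sp req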
-- | Given SignParams and request details, including request method, request
-- path, headers, query params and payload hash, generates an updated set of
-- headers, including the x-amz-date header and the Authorization header, which
-- includes the signature.
--
-- The output is the list of headers to be added to authenticate the request.
signV4 :: SignParams -> NC.Request -> [Header]
signV4 :: SignParams -> NC.Request -> [(ByteString, ByteString)]
signV4 !sp !req =
let scope = credentialScope sp
let
region = fromMaybe "" $ spRegion sp
ts = spTimeStamp sp
scope = mkScope ts region
accessKey = toS $ spAccessKey sp
secretKey = toS $ spSecretKey sp
expiry = spExpirySecs sp
-- extra headers to be added for signing purposes.
extraHeaders =
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp)
: ( -- payload hash is only used for S3 (not STS)
[ ( "x-amz-content-sha256",
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
)
| spService sp == ServiceS3
]
)
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
-- headers to be added to the request
datePair = ("X-Amz-Date", awsTimeFormatBS ts)
computedHeaders = NC.requestHeaders req ++
if isJust $ expiry
then []
else [(\(x, y) -> (mk x, y)) datePair]
headersToSign = getHeadersToSign computedHeaders
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
-- 1. compute canonical request
reqHeaders = NC.requestHeaders req ++ extraHeaders
(canonicalRequest, signedHeaderKeys) =
getCanonicalRequestAndSignedHeaders
NotStreaming
sp
req
reqHeaders
-- query-parameters to be added before signing for presigned URLs
-- (i.e. when `isJust expiry`)
authQP = [ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
, ("X-Amz-Credential", B.concat [accessKey, "/", scope])
, datePair
, ("X-Amz-Expires", maybe "" show expiry)
, ("X-Amz-SignedHeaders", signedHeaderKeys)
]
finalQP = parseQuery (NC.queryString req) ++
if isJust expiry
then (fmap . fmap) Just authQP
else []
-- 2. compute string to sign
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
-- 3.1 compute signing key
signingKey = getSigningKey sp
-- 3.2 compute signature
signature = computeSignature stringToSign signingKey
-- 4. compute auth header
authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
in authHeader : extraHeaders
-- 1. compute canonical request
canonicalRequest = mkCanonicalRequest sp (NC.setQueryString finalQP req)
headersToSign
credentialScope :: SignParams -> ByteString
credentialScope sp =
let region = fromMaybe "" $ spRegion sp
in B.intercalate
"/"
[ encodeUtf8 $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" $ spTimeStamp sp,
encodeUtf8 region,
toByteString $ spService sp,
"aws4_request"
]
-- 2. compute string to sign
stringToSign = mkStringToSign ts scope canonicalRequest
-- 3.1 compute signing key
signingKey = mkSigningKey ts region secretKey
-- 3.2 compute signature
signature = computeSignature stringToSign signingKey
-- 4. compute auth header
authValue = B.concat
[ "AWS4-HMAC-SHA256 Credential="
, accessKey
, "/"
, scope
, ", SignedHeaders="
, signedHeaderKeys
, ", Signature="
, signature
]
authHeader = (H.hAuthorization, authValue)
-- finally compute output pairs
output = if isJust expiry
then ("X-Amz-Signature", signature) : authQP
else [(\(x, y) -> (CI.foldedCase x, y)) authHeader,
datePair]
in output
mkScope :: UTCTime -> Text -> ByteString
mkScope ts region = B.intercalate "/"
[ toS $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" ts
, toS region
, "s3"
, "aws4_request"
]
-- Folds header name, trims whitespace in header values, skips ignored headers
-- and sorts headers.
getHeadersToSign :: [Header] -> [(ByteString, ByteString)]
getHeadersToSign !h =
filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
map (bimap CI.foldedCase stripBS) h
filter (flip Set.notMember ignoredHeaders . fst) $
map (\(x, y) -> (CI.foldedCase x, stripBS y)) h
-- | Given the list of headers in the request, computes the canonical headers
-- and the signed headers strings.
getCanonicalHeaders :: NonEmpty Header -> (ByteString, ByteString)
getCanonicalHeaders h =
let -- Folds header name, trims spaces in header values, skips ignored
-- headers and sorts headers by name (we must not re-order multi-valued
-- headers).
headersToSign =
NE.toList $
NE.sortBy (\a b -> compare (fst a) (fst b)) $
NE.fromList $
NE.filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
NE.map (bimap CI.foldedCase stripBS) h
mkCanonicalRequest :: SignParams -> NC.Request -> [(ByteString, ByteString)]
-> ByteString
mkCanonicalRequest !sp !req !headersForSign =
let
canonicalQueryString = B.intercalate "&" $
map (\(x, y) -> B.concat [x, "=", y]) $
sort $ map (\(x, y) ->
(uriEncode True x, maybe "" (uriEncode True) y)) $
(parseQuery $ NC.queryString req)
canonicalHeaders = mconcat $ map (\(a, b) -> a <> ":" <> b <> "\n") headersToSign
signedHeaderKeys = B.intercalate ";" $ map fst headersToSign
in (canonicalHeaders, signedHeaderKeys)
sortedHeaders = sort headersForSign
getCanonicalRequestAndSignedHeaders ::
IsStreaming ->
SignParams ->
NC.Request ->
[Header] ->
(ByteString, ByteString)
getCanonicalRequestAndSignedHeaders isStreaming sp req requestHeaders =
let httpMethod = NC.method req
canonicalHeaders = B.concat $
map (\(x, y) -> B.concat [x, ":", y, "\n"]) sortedHeaders
canonicalUri = uriEncode False $ NC.path req
signedHeaders = B.intercalate ";" $ map fst sortedHeaders
canonicalQueryString =
B.intercalate "&" $
map (\(x, y) -> B.concat [x, "=", y]) $
sort $
map
( bimap (uriEncode True) (maybe "" (uriEncode True))
)
(parseQuery $ NC.queryString req)
(canonicalHeaders, signedHeaderKeys) = getCanonicalHeaders $ NE.fromList requestHeaders
payloadHashStr =
case isStreaming of
IsStreamingLength _ -> "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
NotStreaming -> fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
canonicalRequest =
B.intercalate
"\n"
[ httpMethod,
canonicalUri,
canonicalQueryString,
canonicalHeaders,
signedHeaderKeys,
payloadHashStr
]
in (canonicalRequest, signedHeaderKeys)
mkCanonicalRequest ::
Bool ->
SignParams ->
NC.Request ->
[(ByteString, ByteString)] ->
ByteString
mkCanonicalRequest !isStreaming !sp !req !headersForSign =
let httpMethod = NC.method req
canonicalUri = uriEncode False $ NC.path req
canonicalQueryString =
B.intercalate "&" $
map (\(x, y) -> B.concat [x, "=", y]) $
sortBy (\a b -> compare (fst a) (fst b)) $
map
( bimap (uriEncode True) (maybe "" (uriEncode True))
)
(parseQuery $ NC.queryString req)
sortedHeaders = sort headersForSign
canonicalHeaders =
B.concat $
map (\(x, y) -> B.concat [x, ":", y, "\n"]) sortedHeaders
signedHeaders = B.intercalate ";" $ map fst sortedHeaders
payloadHashStr =
if isStreaming
then "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
in B.intercalate
"\n"
[ httpMethod,
canonicalUri,
canonicalQueryString,
canonicalHeaders,
signedHeaders,
payloadHashStr
]
mkStringToSign :: UTCTime -> ByteString -> ByteString -> ByteString
mkStringToSign ts !scope !canonicalRequest =
B.intercalate
"\n"
[ "AWS4-HMAC-SHA256",
awsTimeFormatBS ts,
scope,
hashSHA256 canonicalRequest
in
B.intercalate "\n"
[ NC.method req
, uriEncode False $ NC.path req
, canonicalQueryString
, canonicalHeaders
, signedHeaders
, maybe "UNSIGNED-PAYLOAD" identity $ spPayloadHash sp
]
getSigningKey :: SignParams -> ByteString
getSigningKey sp =
hmacSHA256RawBS "aws4_request"
. hmacSHA256RawBS (toByteString $ spService sp)
. hmacSHA256RawBS (encodeUtf8 $ fromMaybe "" $ spRegion sp)
. hmacSHA256RawBS (awsDateFormatBS $ spTimeStamp sp)
$ B.concat ["AWS4", BA.convert $ spSecretKey sp]
mkStringToSign :: UTCTime -> ByteString -> ByteString -> ByteString
mkStringToSign ts !scope !canonicalRequest = B.intercalate "\n"
[ "AWS4-HMAC-SHA256"
, awsTimeFormatBS ts
, scope
, hashSHA256 canonicalRequest
]
mkSigningKey :: UTCTime -> Text -> ByteString -> ByteString
mkSigningKey ts region !secretKey = hmacSHA256RawBS "aws4_request"
. hmacSHA256RawBS "s3"
. hmacSHA256RawBS (toS region)
. hmacSHA256RawBS (awsDateFormatBS ts)
$ B.concat ["AWS4", secretKey]
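-- For orientation (added note, not in the patch): assuming hmacSHA256RawBS
-- takes the message first and the key as its second (piped) argument, as the
-- compositions above suggest, both versions implement the standard SigV4 key
-- derivation chain, where HMAC(key, msg) denotes HMAC-SHA256:
--
--   kDate    = HMAC("AWS4" <> secretKey, yyyymmdd)
--   kRegion  = HMAC(kDate, region)
--   kService = HMAC(kRegion, "s3")           -- or "sts" in the new version
--   kSigning = HMAC(kService, "aws4_request")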
computeSignature :: ByteString -> ByteString -> ByteString
computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
@ -327,168 +234,15 @@ computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
-- | Takes a validated Post Policy JSON bytestring, the signing time,
-- and ConnInfo and returns form-data for the POST upload containing
-- just the signature and the encoded post-policy.
signV4PostPolicy ::
ByteString ->
SignParams ->
Map.HashMap Text ByteString
signV4PostPolicy :: ByteString -> SignParams
-> Map.Map Text ByteString
signV4PostPolicy !postPolicyJSON !sp =
let stringToSign = Base64.encode postPolicyJSON
signingKey = getSigningKey sp
signature = computeSignature stringToSign signingKey
in Map.fromList $
[ ("x-amz-signature", signature),
("policy", stringToSign)
]
++ maybeToList ((decodeUtf8 amzSecurityToken,) . BA.convert <$> spSessionToken sp)
chunkSizeConstant :: Int
chunkSizeConstant = 64 * 1024
-- base16Len computes the number of hexadecimal digits (one byte each in the
-- ASCII encoding) required to represent @n (> 0)@.
base16Len :: (Integral a) => a -> Int
base16Len n
| n == 0 = 0
| otherwise = 1 + base16Len (n `div` 16)
signedStreamLength :: Int64 -> Int64
signedStreamLength dataLen =
let chunkSzInt = fromIntegral chunkSizeConstant
(numChunks, lastChunkLen) = quotRem dataLen chunkSzInt
-- Structure of a chunk:
-- string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
encodedChunkLen csz = fromIntegral (base16Len csz) + 17 + 64 + 2 + csz + 2
fullChunkSize = encodedChunkLen chunkSzInt
lastChunkSize = bool 0 (encodedChunkLen lastChunkLen) $ lastChunkLen > 0
finalChunkSize = 1 + 17 + 64 + 2 + 2
in numChunks * fullChunkSize + lastChunkSize + finalChunkSize
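-- A worked example (illustrative, not in the patch): for a 100000-byte payload
-- and the 64 KiB chunk size above, quotRem gives one full chunk and a
-- 34464-byte tail. base16Len 65536 is 5 and base16Len 34464 is 4, so the
-- framed sizes are 5+17+64+2+65536+2 = 65626, 4+17+64+2+34464+2 = 34553, plus
-- 1+17+64+2+2 = 86 for the empty final chunk:
--
-- >>> signedStreamLength 100000
-- 100265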
-- For streaming S3, we need to update the content-encoding header.
addContentEncoding :: [Header] -> [Header]
addContentEncoding hs =
-- assume there is at most one content-encoding header.
let (ceHdrs, others) = partition ((== hContentEncoding) . fst) hs
in maybe
(hContentEncoding, "aws-chunked")
(\(k, v) -> (k, v <> ",aws-chunked"))
(listToMaybe ceHdrs)
: others
signV4Stream ::
Int64 ->
SignParams ->
NC.Request ->
(C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request)
signV4Stream !payloadLength !sp !req =
let ts = spTimeStamp sp
-- compute the updated list of headers to be added for signing purposes.
signedContentLength = signedStreamLength payloadLength
extraHeaders =
[ ("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
("x-amz-decoded-content-length", showBS payloadLength),
("content-length", showBS signedContentLength),
("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
]
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
requestHeaders =
addContentEncoding $
foldr setHeader (NC.requestHeaders req) extraHeaders
-- 1. Compute Seed Signature
-- 1.1 Canonical Request
(canonicalReq, signedHeaderKeys) =
getCanonicalRequestAndSignedHeaders
(IsStreamingLength payloadLength)
sp
req
requestHeaders
scope = credentialScope sp
accessKey = spAccessKey sp
-- 1.2 String toSign
stringToSign = mkStringToSign ts scope canonicalReq
-- 1.3 Compute signature
-- 1.3.1 compute signing key
signingKey = getSigningKey sp
-- 1.3.2 Compute signature
seedSignature = computeSignature stringToSign signingKey
-- 1.3.3 Compute Auth Header
authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
-- 1.4 Updated headers for the request
finalReqHeaders = authHeader : requestHeaders
-- headersToAdd = authHeader : datePair : streamingHeaders
toHexStr n = B8.pack $ printf "%x" n
(numParts, lastPSize) = payloadLength `quotRem` fromIntegral chunkSizeConstant
-- Function to compute string to sign for each chunk.
chunkStrToSign prevSign currChunkHash =
B.intercalate
"\n"
[ "AWS4-HMAC-SHA256-PAYLOAD",
awsTimeFormatBS ts,
scope,
prevSign,
hashSHA256 "",
currChunkHash
]
-- Read n bytes from upstream and return a strict bytestring.
mustTakeN n = do
bs <- LB.toStrict <$> (C.takeCE n C..| C.sinkLazy)
when (B.length bs /= n) $
throwIO MErrVStreamingBodyUnexpectedEOF
return bs
signerConduit n lps prevSign =
-- First case encodes a full chunk of length
-- 'chunkSizeConstant'.
if
| n > 0 -> do
bs <- mustTakeN chunkSizeConstant
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
nextSign = computeSignature strToSign signingKey
chunkBS =
toHexStr chunkSizeConstant
<> ";chunk-signature="
<> nextSign
<> "\r\n"
<> bs
<> "\r\n"
C.yield chunkBS
signerConduit (n - 1) lps nextSign
-- Second case encodes the last chunk which is smaller than
-- 'chunkSizeConstant'
| lps > 0 -> do
bs <- mustTakeN $ fromIntegral lps
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
nextSign = computeSignature strToSign signingKey
chunkBS =
toHexStr lps
<> ";chunk-signature="
<> nextSign
<> "\r\n"
<> bs
<> "\r\n"
C.yield chunkBS
signerConduit 0 0 nextSign
-- Last case encodes the final signature chunk that has no
-- data.
| otherwise -> do
let strToSign = chunkStrToSign prevSign (hashSHA256 "")
nextSign = computeSignature strToSign signingKey
lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n"
C.yield lastChunkBS
in \src ->
req
{ NC.requestHeaders = finalReqHeaders,
NC.requestBody =
NC.requestBodySource signedContentLength $
src C..| signerConduit numParts lastPSize seedSignature
}
-- "setHeader hdr r" adds hdr to r, replacing any existing header in r with the same name.
setHeader :: Header -> RequestHeaders -> RequestHeaders
setHeader hdr r =
let r' = filter (\(name, _) -> name /= fst hdr) r
in hdr : r'
let
stringToSign = Base64.encode postPolicyJSON
region = fromMaybe "" $ spRegion sp
signingKey = mkSigningKey (spTimeStamp sp) region $ toS $ spSecretKey sp
signature = computeSignature stringToSign signingKey
in
Map.fromList [ ("x-amz-signature", signature)
, ("policy", stringToSign)
]
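-- A usage note (assumption, not from the patch): the returned pairs are meant
-- to be included verbatim as form fields of a browser POST upload, e.g.
--
--   formFields = signV4PostPolicy policyJSON signParams
--
-- where policyJSON is the validated, JSON-encoded post policy and signParams
-- carries the credentials and signing time; both names here are hypothetical.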
View File
@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -16,77 +16,71 @@
module Network.Minio.Utils where
import qualified Conduit as C
import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit.Binary as CB
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time
( defaultTimeLocale,
parseTimeM,
rfc822DateFormat,
)
import Lib.Prelude
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr
import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlCommon (parseErrResponse)
import qualified System.IO as IO
import qualified UnliftIO as U
import qualified UnliftIO.Async as A
import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB
import qualified Data.List as List
import qualified Data.Map as Map
import qualified Data.Text as T
import Data.Text.Encoding.Error (lenientDecode)
import Data.Text.Read (decimal)
import Data.Time (defaultTimeLocale, parseTimeM,
rfc822DateFormat)
import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr
import qualified System.IO as IO
import qualified UnliftIO as U
import qualified UnliftIO.Async as A
import qualified UnliftIO.MVar as UM
allocateReadFile ::
(MonadUnliftIO m, R.MonadResource m) =>
FilePath ->
m (R.ReleaseKey, Handle)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse)
allocateReadFile :: (MonadUnliftIO m, R.MonadResource m)
=> FilePath -> m (R.ReleaseKey, Handle)
allocateReadFile fp = do
(rk, hdlE) <- R.allocate (openReadFile fp) cleanup
either (\(e :: U.IOException) -> throwIO e) (return . (rk,)) hdlE
either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE
where
openReadFile f = U.try $ IO.openBinaryFile f IO.ReadMode
cleanup = either (const $ return ()) IO.hClose
-- | Queries the file size from the handle. Catches any file operation
-- exceptions and returns Nothing instead.
getFileSize ::
(MonadUnliftIO m) =>
Handle ->
m (Maybe Int64)
getFileSize :: (MonadUnliftIO m, R.MonadResource m)
=> Handle -> m (Maybe Int64)
getFileSize h = do
resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
case resE of
Left (_ :: U.IOException) -> return Nothing
Right s -> return $ Just s
Left (_ :: IOException) -> return Nothing
Right s -> return $ Just s
-- | Queries if handle is seekable. Catches any file operation
-- exceptions and returns False instead.
isHandleSeekable ::
(R.MonadResource m) =>
Handle ->
m Bool
isHandleSeekable :: (R.MonadResource m, MonadUnliftIO m)
=> Handle -> m Bool
isHandleSeekable h = do
resE <- liftIO $ try $ IO.hIsSeekable h
case resE of
Left (_ :: U.IOException) -> return False
Right v -> return v
Left (_ :: IOException) -> return False
Right v -> return v
-- | Helper function that opens a handle to the filepath and performs
-- the given action on it. Exceptions of type IOException are caught and
-- returned - both during file handle allocation and when the action
-- is run.
withNewHandle ::
(MonadUnliftIO m, R.MonadResource m) =>
FilePath ->
(Handle -> m a) ->
m (Either U.IOException a)
withNewHandle :: (MonadUnliftIO m, R.MonadResource m)
=> FilePath -> (Handle -> m a) -> m (Either IOException a)
withNewHandle fp fileAction = do
-- opening a handle can throw an IOException.
handleE <- try $ allocateReadFile fp
@ -100,61 +94,19 @@ withNewHandle fp fileAction = do
return resE
mkHeaderFromPairs :: [(ByteString, ByteString)] -> [HT.Header]
mkHeaderFromPairs = map (first mk)
mkHeaderFromPairs = map ((\(x, y) -> (mk x, y)))
lookupHeader :: HT.HeaderName -> [HT.Header] -> Maybe ByteString
lookupHeader hdr = listToMaybe . map snd . filter (\(h, _) -> h == hdr)
lookupHeader hdr = headMay . map snd . filter (\(h, _) -> h == hdr)
getETagHeader :: [HT.Header] -> Maybe Text
getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs
getMetadata :: [HT.Header] -> [(Text, Text)]
getMetadata =
map (\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y))
getMetadata = map ((\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y)))
-- | If the given header name has the @X-Amz-Meta-@ prefix, it is
-- stripped and a Just is returned.
userMetadataHeaderNameMaybe :: Text -> Maybe Text
userMetadataHeaderNameMaybe k =
let prefix = T.toCaseFold "X-Amz-Meta-"
n = T.length prefix
in if T.toCaseFold (T.take n k) == prefix
then Just (T.drop n k)
else Nothing
toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text)
toMaybeMetadataHeader (k, v) =
(,v) <$> userMetadataHeaderNameMaybe k
getNonUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getNonUserMetadataMap =
H.fromList
. filter
( isNothing
. userMetadataHeaderNameMaybe
. fst
)
addXAmzMetaPrefix :: Text -> Text
addXAmzMetaPrefix s
| isJust (userMetadataHeaderNameMaybe s) = s
| otherwise = "X-Amz-Meta-" <> s
mkHeaderFromMetadata :: [(Text, Text)] -> [HT.Header]
mkHeaderFromMetadata = map (\(x, y) -> (mk $ encodeUtf8 $ addXAmzMetaPrefix x, encodeUtf8 y))
-- | This function collects all headers starting with `x-amz-meta-`
-- and strips off this prefix, and returns a map.
getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getUserMetadataMap =
H.fromList
. mapMaybe toMaybeMetadataHeader
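-- A small doctest-style illustration (not part of the patch; OverloadedStrings
-- assumed): only headers carrying the X-Amz-Meta- prefix survive, with the
-- prefix stripped.
--
-- >>> getUserMetadataMap [("X-Amz-Meta-Author", "alice"), ("Content-Type", "text/plain")]
-- fromList [("Author","alice")]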
getHostHeader :: (ByteString, Int) -> ByteString
getHostHeader (host_, port_) =
if port_ == 80 || port_ == 443
then host_
else host_ <> ":" <> show port_
getMetadataMap :: [HT.Header] -> Map Text Text
getMetadataMap hs = Map.fromList (getMetadata hs)
getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime
getLastModifiedHeader hs = do
@ -164,21 +116,19 @@ getLastModifiedHeader hs = do
getContentLength :: [HT.Header] -> Maybe Int64
getContentLength hs = do
nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs
fst <$> either (const Nothing) Just (decimal nbs)
fst <$> hush (decimal nbs)
decodeUtf8Lenient :: ByteString -> Text
decodeUtf8Lenient = decodeUtf8With lenientDecode
isSuccessStatus :: HT.Status -> Bool
isSuccessStatus sts =
let s = HT.statusCode sts
in (s >= 200 && s < 300)
isSuccessStatus sts = let s = HT.statusCode sts
in (s >= 200 && s < 300)
httpLbs ::
(MonadIO m) =>
NC.Request ->
NC.Manager ->
m (NC.Response LByteString)
httpLbs :: MonadIO m
=> NC.Request -> NC.Manager
-> m (NC.Response LByteString)
httpLbs req mgr = do
respE <- liftIO $ tryHttpEx $ NC.httpLbs req mgr
resp <- either throwIO return respE
@ -190,26 +140,21 @@ httpLbs req mgr = do
Just "application/json" -> do
sErr <- parseErrResponseJSON $ NC.responseBody resp
throwIO sErr
_ ->
throwIO $
NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) (showBS resp)
_ -> throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) (show resp)
return resp
where
tryHttpEx ::
IO (NC.Response LByteString) ->
IO (Either NC.HttpException (NC.Response LByteString))
tryHttpEx :: IO (NC.Response LByteString)
-> IO (Either NC.HttpException (NC.Response LByteString))
tryHttpEx = try
contentTypeMay resp =
lookupHeader Hdr.hContentType $
NC.responseHeaders resp
contentTypeMay resp = lookupHeader Hdr.hContentType $
NC.responseHeaders resp
http ::
(MonadUnliftIO m, R.MonadResource m) =>
NC.Request ->
NC.Manager ->
m (Response (C.ConduitT () ByteString m ()))
http :: (MonadUnliftIO m, R.MonadResource m)
=> NC.Request -> NC.Manager
-> m (Response (C.ConduitT () ByteString m ()))
http req mgr = do
respE <- tryHttpEx $ NC.http req mgr
resp <- either throwIO return respE
@ -219,31 +164,25 @@ http req mgr = do
respBody <- C.connect (NC.responseBody resp) CB.sinkLbs
sErr <- parseErrResponse respBody
throwIO sErr
_ -> do
content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
throwIO $
NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) content
throwIO $ NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) content
return resp
where
tryHttpEx ::
(MonadUnliftIO m) =>
m a ->
m (Either NC.HttpException a)
tryHttpEx :: (MonadUnliftIO m) => m a
-> m (Either NC.HttpException a)
tryHttpEx = try
contentTypeMay resp =
lookupHeader Hdr.hContentType $
NC.responseHeaders resp
contentTypeMay resp = lookupHeader Hdr.hContentType $
NC.responseHeaders resp
-- Similar to mapConcurrently but limits the number of threads that
-- can run using a quantity semaphore.
limitedMapConcurrently ::
(MonadUnliftIO m) =>
Int ->
(t -> m a) ->
[t] ->
m [a]
limitedMapConcurrently :: MonadUnliftIO m
=> Int -> (t -> m a) -> [t] -> m [a]
limitedMapConcurrently 0 _ _ = return []
limitedMapConcurrently count act args = do
t' <- U.newTVarIO count
@ -252,15 +191,17 @@ limitedMapConcurrently count act args = do
where
wThread t arg =
U.bracket_ (waitSem t) (signalSem t) $ act arg
-- quantity semaphore implementation using TVar
waitSem t = U.atomically $ do
v <- U.readTVar t
if v > 0
then U.writeTVar t (v - 1)
else U.retrySTM
then U.writeTVar t (v-1)
else U.retrySTM
signalSem t = U.atomically $ do
v <- U.readTVar t
U.writeTVar t (v + 1)
U.writeTVar t (v+1)
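-- A usage sketch (the names below are invented, not from the patch): fetch at
-- most four objects concurrently.
--
--   results <- limitedMapConcurrently 4 fetchObject objectKeys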
-- Helper function to 'drop' an empty optional parameter.
mkQuery :: Text -> Maybe Text -> Maybe (Text, Text)
@ -269,17 +210,54 @@ mkQuery k mv = (k,) <$> mv
-- helper function to build query parameters that are optional.
-- Don't use it with mandatory query params that have an empty value.
mkOptionalParams :: [(Text, Maybe Text)] -> HT.Query
mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params
mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params
-- | Conduit that rechunks bytestrings into the given chunk
-- lengths. Stops after given chunk lengths are yielded. Stops if
-- there are no more chunks to yield or if a shorter chunk is
-- received. Does not throw any errors.
chunkBSConduit :: (Monad m) => [Int] -> C.ConduitM ByteString ByteString m ()
chunkBSConduit [] = return ()
chunkBSConduit (s : ss) = do
bs <- fmap LB.toStrict $ C.takeCE s C..| C.sinkLazy
if
| B.length bs == s -> C.yield bs >> chunkBSConduit ss
| B.length bs > 0 -> C.yield bs
| otherwise -> return ()
chunkBSConduit :: (Monad m, Integral a)
=> [a] -> C.ConduitM ByteString ByteString m ()
chunkBSConduit s = loop 0 [] s
where
loop _ _ [] = return ()
loop n readChunks (size:sizes) = do
bsMay <- C.await
case bsMay of
Nothing -> when (n > 0) $ C.yield $ B.concat readChunks
Just bs -> if n + fromIntegral (B.length bs) >= size
then do let (a, b) = B.splitAt (fromIntegral $ size - n) bs
chunkBS = B.concat $ readChunks ++ [a]
C.yield chunkBS
loop (fromIntegral $ B.length b) [b] sizes
else loop (n + fromIntegral (B.length bs))
(readChunks ++ [bs]) (size:sizes)
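-- A pure sanity check (not in the patch; OverloadedStrings assumed, using the
-- Conduit re-exports imported by the new version): with chunk sizes [4, 4] the
-- input is regrouped into two 4-byte chunks and the conduit stops once the
-- size list is exhausted. Both versions of chunkBSConduit agree on this input.
chunkDemo :: [ByteString]
chunkDemo =
  C.runConduitPure $
    C.yieldMany ["aaaabb", "bbcc"] C..| chunkBSConduit [4, 4] C..| C.sinkList
-- chunkDemo == ["aaaa", "bbbb"]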
-- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size = uncurry (List.zip3 [1..]) $
List.unzip $ loop 0 size
where
ceil :: Double -> Int64
ceil = ceiling
partSize = max minPartSize (ceil $ fromIntegral size /
fromIntegral maxMultipartParts)
m = fromIntegral partSize
loop st sz
| st > sz = []
| st + m >= sz = [(st, sz - st)]
| otherwise = (st, m) : loop (st + m) sz
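-- A worked example (assuming the library defaults of a 64 MiB minPartSize and
-- 10000 maxMultipartParts): a 200 MiB object becomes three full 64 MiB parts
-- plus an 8 MiB tail, with part numbers starting at 1.
--
-- >>> selectPartSizes (200 * 1024 * 1024)
-- [(1,0,67108864),(2,67108864,67108864),(3,134217728,67108864),(4,201326592,8388608)]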
lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do
rMVar <- asks mcRegionMap
rMap <- UM.readMVar rMVar
return $ Map.lookup b rMap
addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . Map.insert b region
deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . Map.delete b
View File
@ -1,65 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.XmlCommon where
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time (UTCTime)
import Data.Time.Format.ISO8601 (iso8601ParseM)
import Lib.Prelude (throwIO)
import Network.Minio.Errors
import Text.XML (Name (Name), def, parseLBS)
import Text.XML.Cursor (Axis, Cursor, content, element, fromDocument, laxElement, ($/), (&/))
s3Name :: Text -> Text -> Name
s3Name ns s = Name s (Just ns) Nothing
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
uncurry4 f (a, b, c, d) = f a b c d
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML
parseS3XMLTime :: (MonadIO m) => Text -> m UTCTime
parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
iso8601ParseM $
toString t
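-- A doctest-style illustration (not part of the patch; OverloadedStrings
-- assumed): timestamps in the form S3 returns parse to the corresponding
-- UTCTime, and anything else raises MErrVXmlParse.
--
-- >>> parseS3XMLTime "2024-01-02T03:04:05.000Z"
-- 2024-01-02 03:04:05 UTC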
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr =
either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot =
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
let code = T.concat $ r $/ laxElement "Code" &/ content
message = T.concat $ r $/ laxElement "Message" &/ content
return $ toServiceErr code message
View File
@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -15,112 +15,89 @@
--
module Network.Minio.XmlGenerator
( mkCreateBucketConfig,
mkCompleteMultipartUploadRequest,
mkPutNotificationRequest,
mkSelectRequest,
)
where
( mkCreateBucketConfig
, mkCompleteMultipartUploadRequest
, mkPutNotificationRequest
, mkSelectRequest
) where
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Text as T
import Network.Minio.Data
import Network.Minio.XmlCommon
import Text.XML
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import Text.XML
import Lib.Prelude
import Network.Minio.Data
-- | Create a bucketConfig request body XML
mkCreateBucketConfig :: Text -> Region -> ByteString
mkCreateBucketConfig ns location = LBS.toStrict $ renderLBS def bucketConfig
where
s3Element n = Element (s3Name ns n) mempty
root =
s3Element
"CreateBucketConfiguration"
[ NodeElement $
s3Element
"LocationConstraint"
[NodeContent location]
s3Element n = Element (s3Name ns n) mempty
root = s3Element "CreateBucketConfiguration"
[ NodeElement $ s3Element "LocationConstraint"
[ NodeContent location]
]
bucketConfig = Document (Prologue [] Nothing []) root []
bucketConfig = Document (Prologue [] Nothing []) root []
-- | Create a completeMultipartUpload request body XML
mkCompleteMultipartUploadRequest :: [PartTuple] -> ByteString
mkCompleteMultipartUploadRequest partInfo =
LBS.toStrict $ renderLBS def cmur
where
root =
Element "CompleteMultipartUpload" mempty $
map (NodeElement . mkPart) partInfo
mkPart (n, etag) =
Element
"Part"
mempty
[ NodeElement $
Element
"PartNumber"
mempty
[NodeContent $ T.pack $ show n],
NodeElement $
Element
"ETag"
mempty
[NodeContent etag]
]
root = Element "CompleteMultipartUpload" mempty $
map (NodeElement . mkPart) partInfo
mkPart (n, etag) = Element "Part" mempty
[ NodeElement $ Element "PartNumber" mempty
[NodeContent $ T.pack $ show n]
, NodeElement $ Element "ETag" mempty
[NodeContent etag]
]
cmur = Document (Prologue [] Nothing []) root []
-- Simplified XML representation without element attributes.
data XNode
= XNode Text [XNode]
| XLeaf Text Text
deriving stock (Eq, Show)
data XNode = XNode Text [XNode]
| XLeaf Text Text
deriving (Eq, Show)
toXML :: Text -> XNode -> ByteString
toXML ns node =
LBS.toStrict $
renderLBS def $
Document (Prologue [] Nothing []) (xmlNode node) []
toXML ns node = LBS.toStrict $ renderLBS def $
Document (Prologue [] Nothing []) (xmlNode node) []
where
xmlNode :: XNode -> Element
xmlNode (XNode name nodes) =
Element (s3Name ns name) mempty $
map (NodeElement . xmlNode) nodes
xmlNode (XLeaf name content) =
Element
(s3Name ns name)
mempty
[NodeContent content]
xmlNode (XNode name nodes) = Element (s3Name ns name) mempty $
map (NodeElement . xmlNode) nodes
xmlNode (XLeaf name content) = Element (s3Name ns name) mempty
[NodeContent content]
class ToXNode a where
toXNode :: a -> XNode
instance ToXNode Event where
toXNode = XLeaf "Event" . toText
toXNode = XLeaf "Event" . show
instance ToXNode Notification where
toXNode (Notification qc tc lc) =
XNode "NotificationConfiguration" $
map (toXNodesWithArnName "QueueConfiguration" "Queue") qc
++ map (toXNodesWithArnName "TopicConfiguration" "Topic") tc
++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
toXNode (Notification qc tc lc) = XNode "NotificationConfiguration" $
map (toXNodesWithArnName "QueueConfiguration" "Queue") qc ++
map (toXNodesWithArnName "TopicConfiguration" "Topic") tc ++
map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode
toXNodesWithArnName eltName arnName (NotificationConfig itemId arn events fRule) =
XNode eltName $
[XLeaf "Id" itemId, XLeaf arnName arn]
++ map toXNode events
++ [toXNode fRule]
toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) =
XNode eltName $ [XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events ++
[toXNode fRule]
instance ToXNode Filter where
toXNode (Filter (FilterKey (FilterRules rules))) =
XNode "Filter" [XNode "S3Key" (map getFRXNode rules)]
getFRXNode :: FilterRule -> XNode
getFRXNode (FilterRule n v) =
XNode
"FilterRule"
[ XLeaf "Name" n,
XLeaf "Value" v
]
getFRXNode (FilterRule n v) = XNode "FilterRule" [ XLeaf "Name" n
, XLeaf "Value" v
]
mkPutNotificationRequest :: Text -> Notification -> ByteString
mkPutNotificationRequest ns = toXML ns . toXNode
@ -129,103 +106,60 @@ mkSelectRequest :: SelectRequest -> ByteString
mkSelectRequest r = LBS.toStrict $ renderLBS def sr
where
sr = Document (Prologue [] Nothing []) root []
root =
Element "SelectRequest" mempty $
[ NodeElement
( Element
"Expression"
mempty
[NodeContent $ srExpression r]
),
NodeElement
( Element
"ExpressionType"
mempty
[NodeContent $ show $ srExpressionType r]
),
NodeElement
( Element "InputSerialization" mempty $
inputSerializationNodes $
srInputSerialization r
),
NodeElement
( Element "OutputSerialization" mempty $
outputSerializationNodes $
srOutputSerialization r
)
]
++ maybe [] reqProgElem (srRequestProgressEnabled r)
reqProgElem enabled =
[ NodeElement
( Element
"RequestProgress"
mempty
[ NodeElement
( Element
"Enabled"
mempty
[ NodeContent
(if enabled then "TRUE" else "FALSE")
]
)
]
)
]
inputSerializationNodes is =
comprTypeNode (isCompressionType is)
++ [NodeElement $ formatNode (isFormatInfo is)]
comprTypeNode (Just c) =
[ NodeElement $
Element
"CompressionType"
mempty
[ NodeContent $ case c of
CompressionTypeNone -> "NONE"
CompressionTypeGzip -> "GZIP"
CompressionTypeBzip2 -> "BZIP2"
]
]
root = Element "SelectRequest" mempty $
[ NodeElement (Element "Expression" mempty
[NodeContent $ srExpression r])
, NodeElement (Element "ExpressionType" mempty
[NodeContent $ show $ srExpressionType r])
, NodeElement (Element "InputSerialization" mempty $
inputSerializationNodes $ srInputSerialization r)
, NodeElement (Element "OutputSerialization" mempty $
outputSerializationNodes $ srOutputSerialization r)
] ++ maybe [] reqProgElem (srRequestProgressEnabled r)
reqProgElem enabled = [NodeElement
(Element "RequestProgress" mempty
[NodeElement
(Element "Enabled" mempty
[NodeContent
(if enabled then "TRUE" else "FALSE")]
)
]
)
]
inputSerializationNodes is = comprTypeNode (isCompressionType is) ++
[NodeElement $ formatNode (isFormatInfo is)]
comprTypeNode (Just c) = [NodeElement $ Element "CompressionType" mempty
[NodeContent $ case c of
CompressionTypeNone -> "NONE"
CompressionTypeGzip -> "GZIP"
CompressionTypeBzip2 -> "BZIP2"
]
]
comprTypeNode Nothing = []
kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v]
formatNode (InputFormatCSV c) =
Element
"CSV"
mempty
(map (NodeElement . kvElement) (csvPropsList c))
formatNode (InputFormatCSV (CSVProp h)) =
Element "CSV" mempty
(map NodeElement $ map kvElement $ H.toList h)
formatNode (InputFormatJSON p) =
Element
"JSON"
mempty
[ NodeElement
( Element
"Type"
mempty
[ NodeContent $ case jsonipType p of
JSONTypeDocument -> "DOCUMENT"
JSONTypeLines -> "LINES"
]
)
]
Element "JSON" mempty
[NodeElement
(Element "Type" mempty
[NodeContent $ case jsonipType p of
JSONTypeDocument -> "DOCUMENT"
JSONTypeLines -> "LINES"
]
)
]
formatNode InputFormatParquet = Element "Parquet" mempty []
outputSerializationNodes (OutputSerializationJSON j) =
[ NodeElement
( Element "JSON" mempty $
rdElem $
jsonopRecordDelimiter j
)
]
outputSerializationNodes (OutputSerializationCSV c) =
[ NodeElement $
Element
"CSV"
mempty
(map (NodeElement . kvElement) (csvPropsList c))
]
[NodeElement (Element "JSON" mempty $
rdElem $ jsonopRecordDelimiter j)]
outputSerializationNodes (OutputSerializationCSV (CSVProp h)) =
[NodeElement $ Element "CSV" mempty
(map NodeElement $ map kvElement $ H.toList h)]
rdElem Nothing = []
rdElem (Just t) =
[ NodeElement $
Element
"RecordDelimiter"
mempty
[NodeContent t]
]
rdElem (Just t) = [NodeElement $ Element "RecordDelimiter" mempty
[NodeContent t]]
View File
@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -15,38 +15,75 @@
--
module Network.Minio.XmlParser
( parseListBuckets,
parseLocation,
parseNewMultipartUpload,
parseCompleteMultipartUploadResponse,
parseCopyObjectResponse,
parseListObjectsResponse,
parseListObjectsV1Response,
parseListUploadsResponse,
parseListPartsResponse,
parseErrResponse,
parseNotification,
parseSelectProgress,
)
where
( parseListBuckets
, parseLocation
, parseNewMultipartUpload
, parseCompleteMultipartUploadResponse
, parseCopyObjectResponse
, parseListObjectsResponse
, parseListObjectsV1Response
, parseListUploadsResponse
, parseListPartsResponse
, parseErrResponse
, parseNotification
, parseSelectProgress
) where
import qualified Data.ByteString.Lazy as LB
import qualified Data.HashMap.Strict as H
import Data.List (zip4, zip6)
import qualified Data.Text as T
import Data.Time
import Network.Minio.Data
import Network.Minio.XmlCommon
import Text.XML.Cursor hiding (bool)
import Data.List (zip3, zip4, zip5)
import qualified Data.Map as Map
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time
import Text.XML
import Text.XML.Cursor hiding (bool)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
-- | Represents the time format of timestamp strings returned by S3 API calls.
s3TimeFormat :: [Char]
s3TimeFormat = iso8601DateFormat $ Just "%T%QZ"
-- | Helper functions.
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
uncurry4 f (a, b, c, d) = f a b c d
uncurry5 :: (a -> b -> c -> d -> e -> f) -> (a, b, c, d, e) -> f
uncurry5 f (a, b, c, d, e) = f a b c d e
-- | Parse time strings from XML
parseS3XMLTime :: (MonadIO m) => Text -> m UTCTime
parseS3XMLTime = either (throwIO . MErrVXmlParse) return
. parseTimeM True defaultTimeLocale s3TimeFormat
. T.unpack
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr = either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot = either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
-- | Parse the response XML of a list buckets call.
parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo]
parseListBuckets xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns
names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
let
s3Elem' = s3Elem ns
names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
times <- mapM parseS3XMLTime timeStrings
return $ zipWith BucketInfo names times
@ -79,38 +116,40 @@ parseCopyObjectResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =
parseCopyObjectResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
let
s3Elem' = s3Elem ns
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
mtime <- parseS3XMLTime mtimeStr
return (T.concat $ r $// s3Elem' "ETag" &/ content, mtime)
-- | Parse the response XML of a list objects v1 call.
parseListObjectsV1Response ::
(MonadReader env m, HasSvcNamespace env, MonadIO m) =>
LByteString ->
m ListObjectsV1Result
parseListObjectsV1Response :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
=> LByteString -> m ListObjectsV1Result
parseListObjectsV1Response xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextMarker = listToMaybe $ r $/ s3Elem' "NextMarker" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- If the response XML contains empty ETag elements, pad with as many
-- empty Text values as needed for the zip6 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- If the response XML contains empty ETag elements, pad with as many
-- empty Text values as needed for the zip5 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr
let objects =
map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
let
objects = map (uncurry5 ObjectInfo) $ zip5 keys modTimes etags sizes (repeat Map.empty)
return $ ListObjectsV1Result hasMore nextMarker objects prefixes
@ -119,24 +158,27 @@ parseListObjectsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListObjectsResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextToken = listToMaybe $ r $/ s3Elem' "NextContinuationToken" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- If the response XML contains empty ETag elements, pad with as many
-- empty Text values as needed for the zip6 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- If the response XML contains empty ETag elements, pad with as many
-- empty Text values as needed for the zip5 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr
let objects =
map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
let
objects = map (uncurry5 ObjectInfo) $ zip5 keys modTimes etags sizes (repeat Map.empty)
return $ ListObjectsResult hasMore nextToken objects prefixes
@ -145,18 +187,20 @@ parseListUploadsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListUploadsResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextKey = listToMaybe $ r $/ s3Elem' "NextKeyMarker" &/ content
nextUpload = listToMaybe $ r $/ s3Elem' "NextUploadIdMarker" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content
nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
uploadInitTimes <- mapM parseS3XMLTime uploadInitTimeStr
let uploads = zip3 uploadKeys uploadIds uploadInitTimes
let
uploads = zip3 uploadKeys uploadIds uploadInitTimes
return $ ListUploadsResult hasMore nextKey nextUpload uploads prefixes
@ -164,25 +208,34 @@ parseListPartsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =>
parseListPartsResponse xmldata = do
r <- parseRoot xmldata
ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextPartNumStr = listToMaybe $ r $/ s3Elem' "NextPartNumberMarker" &/ content
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
let
s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
partModTimes <- mapM parseS3XMLTime partModTimeStr
partSizes <- parseDecimals partSizeStr
partNumbers <- parseDecimals partNumberStr
nextPartNum <- parseDecimals $ maybeToList nextPartNumStr
let partInfos =
map (uncurry4 ObjectPartInfo) $
zip4 partNumbers partETags partSizes partModTimes
let
partInfos = map (uncurry4 ObjectPartInfo) $
zip4 partNumbers partETags partSizes partModTimes
return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
let code = T.concat $ r $/ element "Code" &/ content
message = T.concat $ r $/ element "Message" &/ content
return $ toServiceErr code message
parseNotification :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m Notification
parseNotification xmldata = do
r <- parseRoot xmldata
@ -191,40 +244,32 @@ parseNotification xmldata = do
qcfg = map node $ r $/ s3Elem' "QueueConfiguration"
tcfg = map node $ r $/ s3Elem' "TopicConfiguration"
lcfg = map node $ r $/ s3Elem' "CloudFunctionConfiguration"
Notification
<$> mapM (parseNode ns "Queue") qcfg
<*> mapM (parseNode ns "Topic") tcfg
<*> mapM (parseNode ns "CloudFunction") lcfg
Notification <$> (mapM (parseNode ns "Queue") qcfg)
<*> (mapM (parseNode ns "Topic") tcfg)
<*> (mapM (parseNode ns "CloudFunction") lcfg)
where
getFilterRule ns c =
let name = T.concat $ c $/ s3Elem ns "Name" &/ content
value = T.concat $ c $/ s3Elem ns "Value" &/ content
in FilterRule name value
in FilterRule name value
parseNode ns arnName nodeData = do
let c = fromNode nodeData
itemId = T.concat $ c $/ s3Elem ns "Id" &/ content
id = T.concat $ c $/ s3Elem ns "Id" &/ content
arn = T.concat $ c $/ s3Elem ns arnName &/ content
events = mapMaybe textToEvent (c $/ s3Elem ns "Event" &/ content)
rules =
c
$/ s3Elem ns "Filter"
&/ s3Elem ns "S3Key"
&/ s3Elem ns "FilterRule"
&| getFilterRule ns
return $
NotificationConfig
itemId
arn
events
(Filter $ FilterKey $ FilterRules rules)
events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content
rules = c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key" &/
s3Elem ns "FilterRule" &| getFilterRule ns
return $ NotificationConfig id arn events
(Filter $ FilterKey $ FilterRules rules)
parseSelectProgress :: (MonadIO m) => ByteString -> m Progress
parseSelectProgress :: MonadIO m => ByteString -> m Progress
parseSelectProgress xmldata = do
r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content
bReturned = T.concat $ r $/ element "BytesReturned" &/ content
Progress
<$> parseDecimal bScanned
<*> parseDecimal bProcessed
<*> parseDecimal bReturned
r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/element "BytesProcessed" &/ content
bReturned = T.concat $ r $/element "BytesReturned" &/ content
Progress <$> parseDecimal bScanned
<*> parseDecimal bProcessed
<*> parseDecimal bReturned
View File
@ -15,7 +15,7 @@
# resolver:
# name: custom-snapshot
# location: "./custom-snapshot.yaml"
resolver: lts-22.19
resolver: lts-13.1
# User packages to be built.
# Various formats can be used as shown in the example below.
@ -36,17 +36,17 @@ resolver: lts-22.19
# non-dependency (i.e. a user package), and its test suites and benchmarks
# will not be run. This is useful for tweaking upstream packages.
packages:
- "."
- '.'
# Dependency packages to be pulled from upstream that are not in the resolver
# (e.g., acme-missiles-0.3)
extra-deps:
- crypton-connection-0.3.2
extra-deps: []
# Override default flag values for local packages and extra-deps
flags: {}
# Extra package databases containing global packages
extra-package-dbs: []
# Control whether we use the GHC we find on the path
# system-ghc: true
#
View File
@ -1,19 +0,0 @@
# This file was autogenerated by Stack.
# You should not edit this file by hand.
# For more information, please see the documentation at:
# https://docs.haskellstack.org/en/stable/lock_files
packages:
- completed:
hackage: crypton-connection-0.3.2@sha256:c7937edc25ab022bcf167703f2ec5ab73b62908e545bb587d2aa42b33cd6f6cc,1581
pantry-tree:
sha256: f986ad29b008cbe5732606e9cde1897191c486a2f1f169a4cb75fd915bce397c
size: 394
original:
hackage: crypton-connection-0.3.2
snapshots:
- completed:
sha256: e5cac927cf7ccbd52aa41476baa68b88c564ee6ddc3bc573dbf4210069287fe7
size: 713340
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/22/19.yaml
original: lts-22.19
File diff suppressed because it is too large
View File
@ -15,102 +15,88 @@
--
module Network.Minio.API.Test
( bucketNameValidityTests,
objectNameValidityTests,
parseServerInfoJSONTest,
parseHealStatusTest,
parseHealStartRespTest,
)
where
( bucketNameValidityTests
, objectNameValidityTests
, parseServerInfoJSONTest
, parseHealStatusTest
, parseHealStartRespTest
) where
import Data.Aeson (eitherDecode)
import Network.Minio.API
import Network.Minio.AdminAPI
import Test.Tasty
import Test.Tasty.HUnit
import Data.Aeson (eitherDecode)
import Test.Tasty
import Test.Tasty.HUnit
import Lib.Prelude
import Network.Minio.AdminAPI
import Network.Minio.API
assertBool' :: Bool -> Assertion
assertBool' = assertBool "Test failed!"
bucketNameValidityTests :: TestTree
bucketNameValidityTests =
testGroup
"Bucket Name Validity Tests"
[ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName "",
testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab",
testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD",
testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2",
testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-",
testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg",
testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1",
testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea",
testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d",
testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
]
bucketNameValidityTests = testGroup "Bucket Name Validity Tests"
[ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName ""
, testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab"
, testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
, testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD"
, testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2"
, testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-"
, testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg"
, testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1"
, testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea"
, testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d"
, testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
]
objectNameValidityTests :: TestTree
objectNameValidityTests =
testGroup
"Object Name Validity Tests"
[ testCase "Empty name" $ assertBool' $ not $ isValidObjectName "",
testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
]
objectNameValidityTests = testGroup "Object Name Validity Tests"
[ testCase "Empty name" $ assertBool' $ not $ isValidObjectName ""
, testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
]
parseServerInfoJSONTest :: TestTree
parseServerInfoJSONTest =
testGroup "Parse MinIO Admin API ServerInfo JSON test" $
map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $
assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
)
testCases
parseServerInfoJSONTest = testGroup "Parse MinIO Admin API ServerInfo JSON test" $
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])) testCases
where
testCases =
[ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON),
("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON),
("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON)
]
testCases = [ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON)
, ("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON)
, ("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON)
]
fsJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":20530,\"Backend\":{\"Type\":1,\"OnlineDisks\":0,\"OfflineDisks\":0,\"StandardSCData\":0,\"StandardSCParity\":0,\"RRSCData\":0,\"RRSCParity\":0,\"Sets\":null}},\"network\":{\"transferred\":808,\"received\":1160},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":5992503019270,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
erasureJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":2,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
invalidJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":42,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
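The three fixtures above differ mainly in the numeric "Type" tag under "Backend": 1 is the FS backend, 2 the erasure-coded backend, and 42 matches no known backend, which is why only the last fixture is expected to yield a Left. A hedged sketch of a decoder keyed on such a tag (type and field names here are illustrative, not the SDK's actual admin types):

{-# LANGUAGE OverloadedStrings #-}
import Data.Aeson

-- Illustrative tagged decoder: unknown "Type" values are rejected, which
-- mirrors why the Type=42 fixture should fail to parse.
data BackendSketch
  = FSBackendSketch
  | ErasureBackendSketch Int Int -- online / offline disks
  deriving (Show, Eq)

instance FromJSON BackendSketch where
  parseJSON = withObject "Backend" $ \o -> do
    ty <- o .: "Type"
    case ty :: Int of
      1 -> pure FSBackendSketch
      2 -> ErasureBackendSketch <$> o .: "OnlineDisks" <*> o .: "OfflineDisks"
      _ -> fail ("unknown backend type: " ++ show ty)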
parseHealStatusTest :: TestTree
parseHealStatusTest =
testGroup "Parse MinIO Admin API HealStatus JSON test" $
map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $
assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)
)
testCases
parseHealStatusTest = testGroup "Parse MinIO Admin API HealStatus JSON test" $
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)) testCases
where
testCases =
[ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON'),
("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON'),
("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType)
]
testCases = [ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON')
, ("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON')
, ("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType)
]
erasureJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
invalidJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]"
invalidItemType = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"hello\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
parseHealStartRespTest :: TestTree
parseHealStartRespTest =
testGroup "Parse MinIO Admin API HealStartResp JSON test" $
map
( \(tName, tDesc, tfn, tVal) ->
testCase tName $
assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
)
testCases
parseHealStartRespTest = testGroup "Parse MinIO Admin API HealStartResp JSON test" $
map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)) testCases
where
testCases =
[ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON),
("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON)
]
testCases = [ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON)
, ("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON)
]
hsrJSON = "{\"clientToken\":\"3a3aca49-77dd-4b78-bba7-0978f119b23e\",\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"
missingTokenJSON = "{\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"
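The missing-token case fails to decode because clientToken is a required field; with aeson this falls out of (.:) failing on an absent key. A minimal record with the same behaviour (illustrative names only, not the SDK's actual HealStartResp definition):

{-# LANGUAGE OverloadedStrings #-}
import Data.Aeson
import Data.Text (Text)

-- All three fields are required, so a JSON object without "clientToken"
-- makes eitherDecode return a Left, which is what the test above expects.
data StartRespSketch = StartRespSketch
  { srToken     :: Text
  , srAddress   :: Text
  , srStartTime :: Text
  } deriving (Show, Eq)

instance FromJSON StartRespSketch where
  parseJSON = withObject "StartResp" $ \o ->
    StartRespSketch
      <$> o .: "clientToken"
      <*> o .: "clientAddress"
      <*> o .: "startTime"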

View File

@ -15,26 +15,26 @@
--
module Network.Minio.JsonParser.Test
( jsonParserTests,
)
where
(
jsonParserTests
) where
import Lib.Prelude
import Network.Minio.Errors
import Network.Minio.JsonParser
import Test.Tasty
import Test.Tasty.HUnit
import UnliftIO (MonadUnliftIO)
import Test.Tasty
import Test.Tasty.HUnit
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.Errors
import Network.Minio.JsonParser
jsonParserTests :: TestTree
jsonParserTests =
testGroup
"JSON Parser Tests"
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
]
jsonParserTests = testGroup "JSON Parser Tests"
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr = try
tryValidationErr act = try act
assertValidationErr :: MErrV -> Assertion
assertValidationErr e = assertFailure $ "Failed due to validation error => " ++ show e
@ -44,20 +44,21 @@ testParseErrResponseJSON = do
-- 1. Test parsing of an invalid error json.
parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
when (isRight parseResE) $
assertFailure $
"Parsing should have failed => " ++ show parseResE
assertFailure $ "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(jsondata, sErr) -> do
parseErr <- tryValidationErr $ parseErrResponseJSON jsondata
either assertValidationErr (@?= sErr) parseErr
where
cases =
[ -- 2. Test parsing of a valid error json.
( "{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records."
),
-- 3. Test parsing of a valid, empty Resource.
( "{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method."
)
cases = [
-- 2. Test parsing of a valid error json.
("{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records."
)
,
-- 3. Test parsing of a valid, empty Resource.
("{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method."
)
]

View File

@ -15,18 +15,18 @@
--
module Network.Minio.TestHelpers
( runTestNS,
)
where
( runTestNS
) where
import Network.Minio.Data
import Network.Minio.Data
newtype TestNS = TestNS {testNamespace :: Text}
import Lib.Prelude
newtype TestNS = TestNS { testNamespace :: Text }
instance HasSvcNamespace TestNS where
getSvcNamespace = testNamespace
runTestNS :: ReaderT TestNS m a -> m a
runTestNS =
flip runReaderT $
TestNS "http://s3.amazonaws.com/doc/2006-03-01/"
runTestNS = flip runReaderT $
TestNS "http://s3.amazonaws.com/doc/2006-03-01/"
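HasSvcNamespace is the small class the XML parsers consult for the expected namespace, so any ReaderT environment that can hand back that Text will do. If a test ever needed a different namespace, the same pattern would apply; a sketch reusing the imports of the module above (not part of the SDK):

newtype OtherNS = OtherNS Text

instance HasSvcNamespace OtherNS where
  getSvcNamespace (OtherNS ns) = ns

-- Runs a namespace-aware action against a hypothetical alternative namespace.
runOtherNS :: ReaderT OtherNS m a -> m a
runOtherNS = flip runReaderT (OtherNS "urn:example:alternative-namespace")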

View File

@ -15,31 +15,33 @@
--
module Network.Minio.Utils.Test
( limitedMapConcurrentlyTests,
)
where
(
limitedMapConcurrentlyTests
) where
import Network.Minio.Utils
import Test.Tasty
import Test.Tasty.HUnit
import Test.Tasty
import Test.Tasty.HUnit
import Lib.Prelude
import Network.Minio.Utils
limitedMapConcurrentlyTests :: TestTree
limitedMapConcurrentlyTests =
testGroup
"limitedMapConcurrently Tests"
[ testCase "Test with various thread counts" testLMC
]
limitedMapConcurrentlyTests = testGroup "limitedMapConcurrently Tests"
[ testCase "Test with various thread counts" testLMC
]
testLMC :: Assertion
testLMC = do
let maxNum = 50
-- test with thread count of 1 to 2*maxNum
forM_ [1 .. (2 * maxNum)] $ \threads -> do
res <- limitedMapConcurrently threads compute [1 .. maxNum]
forM_ [1..(2*maxNum)] $ \threads -> do
res <- limitedMapConcurrently threads compute [1..maxNum]
sum res @?= overallResultCheck maxNum
where
-- simple function to run in each thread
compute :: Int -> IO Int
compute n = return $ sum [1 .. n]
compute n = return $ sum [1..n]
-- function to check overall result
overallResultCheck n = sum $ map (\t -> (t * (t + 1)) `div` 2) [1 .. n]
overallResultCheck n = sum $ map (\t -> (t * (t+1)) `div` 2) [1..n]
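The expected total follows from the triangular-number identity: compute n returns sum [1..n], which equals n*(n+1) `div` 2, so summing over [1..maxNum] gives the value overallResultCheck computes regardless of the thread count used. A quick standalone check of that identity:

-- Sanity check of the identity behind overallResultCheck: for any m,
-- summing (sum [1..n]) over n in [1..m] equals summing n*(n+1)/2.
identityHolds :: Int -> Bool
identityHolds m =
  sum (map (\n -> sum [1 .. n]) [1 .. m])
    == sum (map (\t -> (t * (t + 1)) `div` 2) [1 .. m])

-- e.g. identityHolds 50 == True, matching the maxNum used in testLMC above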

View File

@ -13,33 +13,30 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlGenerator.Test
( xmlGeneratorTests,
)
where
( xmlGeneratorTests
) where
import qualified Data.ByteString.Lazy as LBS
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.TestHelpers
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser (parseNotification)
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import Text.XML (def, parseLBS)
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.TestHelpers
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser (parseNotification)
xmlGeneratorTests :: TestTree
xmlGeneratorTests =
testGroup
"XML Generator Tests"
[ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig,
testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest,
testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest,
testCase "Test mkSelectRequest" testMkSelectRequest
]
xmlGeneratorTests = testGroup "XML Generator Tests"
[ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig
, testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest
, testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest
, testCase "Test mkSelectRequest" testMkSelectRequest
]
testMkCreateBucketConfig :: Assertion
testMkCreateBucketConfig = do
@ -47,136 +44,100 @@ testMkCreateBucketConfig = do
assertEqual "CreateBucketConfiguration xml should match: " expected $
mkCreateBucketConfig ns "EU"
where
expected =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LocationConstraint>EU</LocationConstraint>\
\</CreateBucketConfiguration>"
expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LocationConstraint>EU</LocationConstraint>\
\</CreateBucketConfiguration>"
testMkCompleteMultipartUploadRequest :: Assertion
testMkCompleteMultipartUploadRequest =
assertEqual "completeMultipartUpload xml should match: " expected $
mkCompleteMultipartUploadRequest [(1, "abc")]
mkCompleteMultipartUploadRequest [(1, "abc")]
where
expected =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUpload>\
\<Part>\
\<PartNumber>1</PartNumber><ETag>abc</ETag>\
\</Part>\
\</CompleteMultipartUpload>"
expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUpload>\
\<Part>\
\<PartNumber>1</PartNumber><ETag>abc</ETag>\
\</Part>\
\</CompleteMultipartUpload>"
testMkPutNotificationRequest :: Assertion
testMkPutNotificationRequest =
forM_ cases $ \val -> do
let ns = "http://s3.amazonaws.com/doc/2006-03-01/"
result = fromStrictBS $ mkPutNotificationRequest ns val
result = toS $ mkPutNotificationRequest ns val
ntf <- runExceptT $ runTestNS $ parseNotification result
either
(\_ -> assertFailure "XML Parse Error!")
(@?= val)
ntf
either (\_ -> assertFailure "XML Parse Error!")
(@?= val) ntf
where
cases =
[ Notification
[]
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated]
defaultFilter
]
[],
Notification
[ NotificationConfig
"1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
( Filter $
FilterKey $
FilterRules
[ FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"
]
),
NotificationConfig
""
"arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated]
defaultFilter
]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
]
cases = [ Notification []
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated] defaultFilter
]
[]
, Notification
[ NotificationConfig
"1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
(Filter $ FilterKey $ FilterRules
[ FilterRule "prefix" "images/"
, FilterRule "suffix" ".jpg"])
, NotificationConfig
"" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated] defaultFilter
]
[ NotificationConfig
"" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject] defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated] defaultFilter
]
]
testMkSelectRequest :: Assertion
testMkSelectRequest = mapM_ assertFn cases
where
assertFn (a, b) =
let generatedReqDoc = parseLBS def $ LBS.fromStrict $ mkSelectRequest a
expectedReqDoc = parseLBS def $ LBS.fromStrict b
in case (generatedReqDoc, expectedReqDoc) of
(Right genDoc, Right expDoc) -> assertEqual "selectRequest XML should match: " expDoc genDoc
(Left err, _) -> assertFailure $ "Generated selectRequest failed to parse as XML" ++ show err
(_, Left err) -> assertFailure $ "Expected selectRequest failed to parse as XML" ++ show err
cases =
[ ( SelectRequest
"Select * from S3Object"
SQL
( InputSerialization
(Just CompressionTypeGzip)
( InputFormatCSV $
fileHeaderInfo FileHeaderIgnore
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a
cases = [ ( SelectRequest "Select * from S3Object" SQL
(InputSerialization (Just CompressionTypeGzip)
(InputFormatCSV $ fileHeaderInfo FileHeaderIgnore
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
))
(OutputSerializationCSV $ quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
)
)
( OutputSerializationCSV $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
)
(Just False),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><FieldDelimiter>,</FieldDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><RecordDelimiter>
</RecordDelimiter></CSV></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
),
( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeGzip $
selectRequest
"Select * from S3Object"
documentJsonInput
(outputJSONFromRecordDelimiter "\n"),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
(Just False)
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter>
</RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
, ( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeGzip $
selectRequest "Select * from S3Object" documentJsonInput
(outputJSONFromRecordDelimiter "\n")
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
),
( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeNone $
selectRequest
"Select * from S3Object"
defaultParquetInput
( outputCSVFromProps $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
]
)
, ( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeNone $
selectRequest "Select * from S3Object" defaultParquetInput
(outputCSVFromProps $ quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\"")
, [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
]

View File

@ -13,49 +13,48 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlParser.Test
( xmlParserTests,
)
where
( xmlParserTests
) where
import qualified Data.HashMap.Strict as H
import Data.Time (fromGregorian)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.TestHelpers
import Network.Minio.XmlParser
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import UnliftIO (MonadUnliftIO)
import qualified Data.Map as Map
import Data.Time (fromGregorian)
import Test.Tasty
import Test.Tasty.HUnit
import Text.RawString.QQ (r)
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.TestHelpers
import Network.Minio.XmlParser
xmlParserTests :: TestTree
xmlParserTests =
testGroup
"XML Parser Tests"
[ testCase "Test parseLocation" testParseLocation,
testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload,
testCase "Test parseListObjectsResponse" testParseListObjectsResult,
testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result,
testCase "Test parseListUploadsresponse" testParseListIncompleteUploads,
testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse,
testCase "Test parseListPartsResponse" testParseListPartsResponse,
testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse,
testCase "Test parseNotification" testParseNotification,
testCase "Test parseSelectProgress" testParseSelectProgress
]
xmlParserTests = testGroup "XML Parser Tests"
[ testCase "Test parseLocation" testParseLocation
, testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload
, testCase "Test parseListObjectsResponse" testParseListObjectsResult
, testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result
, testCase "Test parseListUploadsresponse" testParseListIncompleteUploads
, testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse
, testCase "Test parseListPartsResponse" testParseListPartsResponse
, testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse
, testCase "Test parseNotification" testParseNotification
, testCase "Test parseSelectProgress" testParseSelectProgress
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr = try
tryValidationErr act = try act
assertValidtionErr :: MErrV -> Assertion
assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e
eitherValidationErr :: Either MErrV a -> (a -> Assertion) -> Assertion
eitherValidationErr (Left e) _ = assertValidtionErr e
eitherValidationErr (Left e) _ = assertValidtionErr e
eitherValidationErr (Right a) f = f a
testParseLocation :: Assertion
@ -63,224 +62,224 @@ testParseLocation = do
-- 1. Test parsing of an invalid location constraint xml.
parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
when (isRight parseResE) $
assertFailure $
"Parsing should have failed => " ++ show parseResE
assertFailure $ "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(xmldata, expectedLocation) -> do
parseLocE <- tryValidationErr $ parseLocation xmldata
either assertValidtionErr (@?= expectedLocation) parseLocE
where
cases =
[ -- 2. Test parsing of a valid location xml.
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>",
"EU"
),
-- 3. Test parsing of a valid, empty location xml.
( "<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>",
"us-east-1"
)
cases = [
-- 2. Test parsing of a valid location xml.
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>",
"EU"
)
,
-- 3. Test parsing of a valid, empty location xml.
("<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>",
"us-east-1"
)
]
testParseNewMultipartUpload :: Assertion
testParseNewMultipartUpload = do
forM_ cases $ \(xmldata, expectedUploadId) -> do
parsedUploadIdE <- tryValidationErr $ runTestNS $ parseNewMultipartUpload xmldata
eitherValidationErr parsedUploadIdE (@?= expectedUploadId)
where
cases =
[ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\
\</InitiateMultipartUploadResult>",
"VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
),
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\
\</InitiateMultipartUploadResult>",
"EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-"
)
cases = [
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\
\</InitiateMultipartUploadResult>",
"VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
),
("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\
\ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\
\</InitiateMultipartUploadResult>",
"EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-"
)
]
testParseListObjectsResult :: Assertion
testParseListObjectsResult = do
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextContinuationToken>opaque</NextContinuationToken>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextContinuationToken>opaque</NextContinuationToken>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 Map.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsResult <- tryValidationErr $ runTestNS $ parseListObjectsResponse xmldata
eitherValidationErr parsedListObjectsResult (@?= expectedListResult)
testParseListObjectsV1Result :: Assertion
testParseListObjectsV1Result = do
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextMarker>my-image1.jpg</NextMarker>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\
\<Prefix/>\
\<NextMarker>my-image1.jpg</NextMarker>\
\<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\
\<Contents>\
\<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\
\</Contents>\
\</ListBucketResult>"
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 Map.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsV1Result <- tryValidationErr $ runTestNS $ parseListObjectsV1Response xmldata
eitherValidationErr parsedListObjectsV1Result (@?= expectedListResult)
testParseListIncompleteUploads :: Assertion
testParseListIncompleteUploads = do
let xmldata =
"<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<KeyMarker/>\
\<UploadIdMarker/>\
\<NextKeyMarker>sample.jpg</NextKeyMarker>\
\<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\
\<Delimiter>/</Delimiter>\
\<Prefix/>\
\<MaxUploads>1000</MaxUploads>\
\<IsTruncated>false</IsTruncated>\
\<Upload>\
\<Key>sample.jpg</Key>\
\<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\
\<Initiator>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<Initiated>2010-11-26T19:24:17.000Z</Initiated>\
\</Upload>\
\<CommonPrefixes>\
\<Prefix>photos/</Prefix>\
\</CommonPrefixes>\
\<CommonPrefixes>\
\<Prefix>videos/</Prefix>\
\</CommonPrefixes>\
\</ListMultipartUploadsResult>"
expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes
uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)]
initTime = UTCTime (fromGregorian 2010 11 26) 69857
prefixes = ["photos/", "videos/"]
let
xmldata = "<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<KeyMarker/>\
\<UploadIdMarker/>\
\<NextKeyMarker>sample.jpg</NextKeyMarker>\
\<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\
\<Delimiter>/</Delimiter>\
\<Prefix/>\
\<MaxUploads>1000</MaxUploads>\
\<IsTruncated>false</IsTruncated>\
\<Upload>\
\<Key>sample.jpg</Key>\
\<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\
\<Initiator>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<Initiated>2010-11-26T19:24:17.000Z</Initiated>\
\</Upload>\
\<CommonPrefixes>\
\<Prefix>photos/</Prefix>\
\</CommonPrefixes>\
\<CommonPrefixes>\
\<Prefix>videos/</Prefix>\
\</CommonPrefixes>\
\</ListMultipartUploadsResult>"
expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes
uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)]
initTime = UTCTime (fromGregorian 2010 11 26) 69857
prefixes = ["photos/", "videos/"]
parsedListUploadsResult <- tryValidationErr $ runTestNS $ parseListUploadsResponse xmldata
eitherValidationErr parsedListUploadsResult (@?= expectedListResult)
testParseCompleteMultipartUploadResponse :: Assertion
testParseCompleteMultipartUploadResponse = do
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\
\<Bucket>Example-Bucket</Bucket>\
\<Key>Example-Object</Key>\
\<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\
\</CompleteMultipartUploadResult>"
expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\""
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\
\<Bucket>Example-Bucket</Bucket>\
\<Key>Example-Object</Key>\
\<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\
\</CompleteMultipartUploadResult>"
expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\""
parsedETagE <- runExceptT $ runTestNS $ parseCompleteMultipartUploadResponse xmldata
eitherValidationErr parsedETagE (@?= expectedETag)
testParseListPartsResponse :: Assertion
testParseListPartsResponse = do
let xmldata =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<Key>example-object</Key>\
\<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\
\<Initiator>\
\<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\
\<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\
\<DisplayName>someName</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<PartNumberMarker>1</PartNumberMarker>\
\<NextPartNumberMarker>3</NextPartNumberMarker>\
\<MaxParts>2</MaxParts>\
\<IsTruncated>true</IsTruncated>\
\<Part>\
\<PartNumber>2</PartNumber>\
\<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
\<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\<Part>\
\<PartNumber>3</PartNumber>\
\<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
\<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\</ListPartsResult>"
expectedListResult = ListPartsResult True (Just 3) [part1, part2]
part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1
modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10
part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
let
xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\
\<Key>example-object</Key>\
\<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\
\<Initiator>\
\<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\
\<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
\</Initiator>\
\<Owner>\
\<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\
\<DisplayName>someName</DisplayName>\
\</Owner>\
\<StorageClass>STANDARD</StorageClass>\
\<PartNumberMarker>1</PartNumberMarker>\
\<NextPartNumberMarker>3</NextPartNumberMarker>\
\<MaxParts>2</MaxParts>\
\<IsTruncated>true</IsTruncated>\
\<Part>\
\<PartNumber>2</PartNumber>\
\<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
\<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\<Part>\
\<PartNumber>3</PartNumber>\
\<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
\<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\
\<Size>10485760</Size>\
\</Part>\
\</ListPartsResult>"
expectedListResult = ListPartsResult True (Just 3) [part1, part2]
part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1
modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10
part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
parsedListPartsResult <- runExceptT $ runTestNS $ parseListPartsResponse xmldata
eitherValidationErr parsedListPartsResult (@?= expectedListResult)
testParseCopyObjectResponse :: Assertion
testParseCopyObjectResponse = do
let cases =
[ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyObjectResult>",
( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120
)
),
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyPartResult>",
( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120
)
)
]
let
cases = [ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyObjectResult>",
("\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120))
, ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyPartResult>",
("\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120))]
forM_ cases $ \(xmldata, (etag, modTime)) -> do
parseResult <- runExceptT $ runTestNS $ parseCopyObjectResponse xmldata
@ -288,89 +287,73 @@ testParseCopyObjectResponse = do
testParseNotification :: Assertion
testParseNotification = do
let cases =
[ ( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <TopicConfiguration>\
\ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\
\ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </TopicConfiguration>\
\</NotificationConfiguration>",
Notification
[]
[ NotificationConfig
let
cases = [ ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <TopicConfiguration>\
\ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\
\ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </TopicConfiguration>\
\</NotificationConfiguration>",
Notification []
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated]
defaultFilter
]
[]
),
( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <CloudFunctionConfiguration>\
\ <Id>ObjectCreatedEvents</Id>\
\ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </CloudFunctionConfiguration>\
\ <QueueConfiguration>\
\ <Id>1</Id>\
\ <Filter>\
\ <S3Key>\
\ <FilterRule>\
\ <Name>prefix</Name>\
\ <Value>images/</Value>\
\ </FilterRule>\
\ <FilterRule>\
\ <Name>suffix</Name>\
\ <Value>.jpg</Value>\
\ </FilterRule>\
\ </S3Key>\
\ </Filter>\
\ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:Put</Event>\
\ </QueueConfiguration>\
\ <TopicConfiguration>\
\ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ </TopicConfiguration>\
\ <QueueConfiguration>\
\ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </QueueConfiguration>)\
\</NotificationConfiguration>",
Notification
[ NotificationConfig
"1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
( Filter $
FilterKey $
FilterRules
[ FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"
]
),
NotificationConfig
""
"arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated]
defaultFilter
]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
)
]
[ReducedRedundancyLostObject, ObjectCreated] defaultFilter
]
[])
, ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <CloudFunctionConfiguration>\
\ <Id>ObjectCreatedEvents</Id>\
\ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </CloudFunctionConfiguration>\
\ <QueueConfiguration>\
\ <Id>1</Id>\
\ <Filter>\
\ <S3Key>\
\ <FilterRule>\
\ <Name>prefix</Name>\
\ <Value>images/</Value>\
\ </FilterRule>\
\ <FilterRule>\
\ <Name>suffix</Name>\
\ <Value>.jpg</Value>\
\ </FilterRule>\
\ </S3Key>\
\ </Filter>\
\ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:Put</Event>\
\ </QueueConfiguration>\
\ <TopicConfiguration>\
\ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\
\ </TopicConfiguration>\
\ <QueueConfiguration>\
\ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\
\ <Event>s3:ObjectCreated:*</Event>\
\ </QueueConfiguration>)\
\</NotificationConfiguration>",
Notification [ NotificationConfig
"1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
(Filter $ FilterKey $ FilterRules
[FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"])
, NotificationConfig
"" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
[ObjectCreated] defaultFilter
]
[ NotificationConfig
"" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject] defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated] defaultFilter
])
]
forM_ cases $ \(xmldata, val) -> do
result <- runExceptT $ runTestNS $ parseNotification xmldata
@ -379,25 +362,20 @@ testParseNotification = do
-- | Tests parsing of both progress and stats
testParseSelectProgress :: Assertion
testParseSelectProgress = do
let cases =
[ ( [r|<?xml version="1.0" encoding="UTF-8"?>
let cases = [ ([r|<?xml version="1.0" encoding="UTF-8"?>
<Progress>
<BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned>
</Progress>|],
Progress 512 1024 1024
),
( [r|<?xml version="1.0" encoding="UTF-8"?>
</Progress>|] , Progress 512 1024 1024)
, ([r|<?xml version="1.0" encoding="UTF-8"?>
<Stats>
<BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned>
</Stats>|],
Progress 512 1024 1024
)
]
</Stats>|], Progress 512 1024 1024)
]
forM_ cases $ \(xmldata, progress) -> do
result <- runExceptT $ parseSelectProgress xmldata
eitherValidationErr result (@?= progress)
forM_ cases $ \(xmldata, progress) -> do
result <- runExceptT $ parseSelectProgress xmldata
eitherValidationErr result (@?= progress)

View File

@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -14,17 +14,21 @@
-- limitations under the License.
--
import qualified Data.ByteString as B
import qualified Data.List as L
import Lib.Prelude
import Network.Minio.API.Test
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
import Test.Tasty
import Test.Tasty.QuickCheck as QC
import Test.Tasty
import Test.Tasty.QuickCheck as QC
import qualified Data.ByteString as B
import qualified Data.List as L
import Lib.Prelude
import Network.Minio.API.Test
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.PutObject
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
main :: IO ()
main = defaultMain tests
@ -47,87 +51,82 @@ properties = testGroup "Properties" [qcProps] -- [scProps]
-- ]
qcProps :: TestTree
qcProps =
testGroup
"(checked by QuickCheck)"
[ QC.testProperty "selectPartSizes:" $
\n ->
let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
qcProps = testGroup "(checked by QuickCheck)"
[ QC.testProperty "selectPartSizes:" $
\n -> let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
-- check that pns increments from 1.
isPNumsAscendingFrom1 = all (uncurry (==)) $ zip pns [1 ..]
consPairs [] = []
consPairs [_] = []
consPairs (a : (b : c)) = (a, b) : consPairs (b : c)
isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1..]
consPairs [] = []
consPairs [_] = []
consPairs (a:(b:c)) = (a, b):(consPairs (b:c))
-- check `offs` is monotonically increasing.
isOffsetsAsc = all (uncurry (<)) $ consPairs offs
isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs
-- check sizes sums to n.
isSumSizeOk = sum sizes == n
-- check sizes are constant except last
isSizesConstantExceptLast =
all (uncurry (==)) (consPairs $ L.init sizes)
all (\(a, b) -> a == b) (consPairs $ L.init sizes)
-- check each part except last is at least minPartSize;
-- last part may be 0 only if it is the only part.
nparts = length sizes
isMinPartSizeOk =
if
| nparts > 1 -> -- last part can be smaller but > 0
all (>= minPartSize) (take (nparts - 1) sizes)
&& all (> 0) (drop (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here.
maybe True (\x -> x >= 0 && x <= minPartSize) $
listToMaybe sizes
| otherwise -> False
in n < 0
|| ( isPNumsAscendingFrom1
&& isOffsetsAsc
&& isSumSizeOk
&& isSizesConstantExceptLast
&& isMinPartSizeOk
),
QC.testProperty "selectCopyRanges:" $
\(start, end) ->
let (_, pairs) = L.unzip (selectCopyRanges (start, end))
-- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ listToMaybe pairs
-- each pair is >=64MiB except last, and all those parts
-- have same size.
initSizes = maybe [] (map (\(a, b) -> b - a + 1) . init) (nonEmpty pairs)
isPartSizesOk =
all (>= minPartSize) initSizes
&& maybe
True
(\k -> all (== k) initSizes)
(listToMaybe initSizes)
-- returned offsets are contiguous.
fsts = drop 1 $ map fst pairs
snds = take (length pairs - 1) $ map snd pairs
isContParts =
length fsts == length snds
&& all (\(a, b) -> a == b + 1) (zip fsts snds)
in start < 0
|| start > end
|| (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts),
QC.testProperty "mkSSECKey:" $
\w8s ->
let bs = B.pack w8s
r = mkSSECKey bs
in case r of
Just _ -> B.length bs == 32
if | nparts > 1 -> -- last part can be smaller but > 0
all (>= minPartSize) (take (nparts - 1) sizes) &&
all (\s -> s > 0) (drop (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here.
maybe True (\x -> x >= 0 && x <= minPartSize) $
headMay sizes
| otherwise -> False
in n < 0 ||
(isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk &&
isSizesConstantExceptLast && isMinPartSizeOk)
, QC.testProperty "selectCopyRanges:" $
\(start, end) ->
let (_, pairs) = L.unzip (selectCopyRanges (start, end))
-- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs
-- each pair is >=64MiB except last, and all those parts
-- have same size.
initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs
isPartSizesOk = all (>= minPartSize) initSizes &&
maybe True (\k -> all (== k) initSizes)
(headMay initSizes)
-- returned offsets are contiguous.
fsts = drop 1 $ map fst pairs
snds = take (length pairs - 1) $ map snd pairs
isContParts = length fsts == length snds &&
and (map (\(a, b) -> a == b + 1) $ zip fsts snds)
in start < 0 || start > end ||
(isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts)
, QC.testProperty "mkSSECKey:" $
\w8s -> let bs = B.pack w8s
r = mkSSECKey bs
in case r of
Just _ -> B.length bs == 32
Nothing -> B.length bs /= 32
]
]
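Concretely, with the 64 MiB minimum part size the comments above refer to, a 150 MiB object would be split into parts of 64, 64 and 22 MiB at offsets 0, 64 and 128 MiB: part numbers ascend from 1, offsets are contiguous, only the last part falls below the minimum, and the sizes add back up to the object length, which is exactly what the selectPartSizes property asserts. A tiny splitter with the same shape (a sketch only; the real selectPartSizes also handles the zero-size single-part case):

import Data.Int (Int64)

-- Sketch of a fixed-size splitter with the invariants checked above:
-- part numbers start at 1, offsets are contiguous, and every part except
-- the last has size partSize. Assumes partSize > 0; not the SDK function.
splitPartsSketch :: Int64 -> Int64 -> [(Int, Int64, Int64)] -- (partNum, offset, size)
splitPartsSketch partSize n = go 1 0
  where
    go pn off
      | off >= n = []
      | otherwise =
          let sz = min partSize (n - off)
           in (pn, off, sz) : go (pn + 1) (off + sz)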
unitTests :: TestTree
unitTests =
testGroup
"Unit tests"
[ xmlGeneratorTests,
xmlParserTests,
bucketNameValidityTests,
objectNameValidityTests,
parseServerInfoJSONTest,
parseHealStatusTest,
parseHealStartRespTest,
limitedMapConcurrentlyTests
]
unitTests = testGroup "Unit tests" [ xmlGeneratorTests, xmlParserTests
, bucketNameValidityTests
, objectNameValidityTests
, parseServerInfoJSONTest
, parseHealStatusTest
, parseHealStartRespTest
, limitedMapConcurrentlyTests
]

View File

@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC3G9IiC+adjf0p
i/2KYc+4dizeuzUFN7wraSdhiOMdQgCnu9Dc3t2YEsQhNdrARjOTyXd36KeM3TwI
rPJ61dRGQSuN12l+mzngFJQjE0sysZHUJOLQC3rVvIrHSQ57utPg8ifxt/SunlPY
fhcUcq03onMGq44yOfE6mIhoe0Y9wcPQ3RjjNNS44bgmXiXwa+Do0h2hEn6/essq
5KjHL8WW2vGg7G9edpYdxINA/A2fdLtr8BwPNrZhOx84eee2XcUNdBuTtUUxE+0L
9yRqItqddriRxJFwOXb5OPW8xx2WGaV2a0wbE4gB2PTwwDvfo72mo9HXHZUHM1A8
4TD/RXMbAgMBAAECggEBAJ7r1oUWLyGvinn0tijUm6RNbMQjVvEgXoCO008jr3pF
PqxVpgEMrOa/4tmwFBus0jcCNF4t3r2zhddBw3I5A/O1vEdvHnBz6NdDBQ8sP6fP
1fF50iEe1Y2MBibQkXFxxVMG2QRB1Gt5nuvXA9ELdqtCovK3EsMk5ukkWb/UvjH5
8hcmQsaSqvzFEF4wJSY2mkeGSGIJTphPhhuA22xbhaBMInQyhZu8EHsn0h6s/Wgy
C4Cp2+4qZTKaaf6x3/ZjJ8CuKiSX+ZsJKjOEv8sqx7j/Y7QFOmJPewInKDhwazr/
xIK+N0KXPbUzeSEz6ZvExNDTxtR5ZlQP2UrRDg28yQECgYEA4Is1O2BvKVzNFOkj
bTVz25a/bb0Xrcfgi0Y9rdfLzlNdItFjAkxLTVRSW2Hv9ICl0RDDAG+wTlktXRdh
rfvDjwG2CvLQo1VEdMWTTkKVg03SwMEy2hFiWV69lENFGSaY8Y6unZDbia5HQinA
EgSS4sCojS+a2jtzG5FVVHJDKlkCgYEA0MKhMhD4SUhr2y1idPBrmLxuW5mVozuW
8bYaBeSzmfS0BRsN4fP9JGODPBPDdNbfGfGC9ezWLgD/lmCgjIEyBOq8EmqWSsiS
Kihds1+Z7hXtbzGsFGAFJJTIh7blBCsK5QFuyuih2UG0fL9z6K/dy+UUJkzrYqph
vSfKixyM8pMCgYEAmUPLsNyw4325aeV8TeWnUCJERaZFDFQa21W1cfyS2yEhuEtN
llr3JzBACqn9vFk3VU1onNqfb8sE4L696KCpKeqUFEMK0AG6eS4Gzus53Gb5TKJS
kHA/PhshsZp9Bp7G1FJ8s4YVo5N2hh2zQVkn3Wh9Y+kzfHQJrK51nO9lEvkCgYBi
BuKWle1gzAcJdnhDHRoJMIJJtQbVDYhFnBMALXJAmu1lcFzGe0GlMq1PKqCfXr6I
eiXawQmZtJJP1LPPBmOsd2U06KQGHcS00xucvQmVCOrjSdnZ/3SqxsqbH8DOgj+t
ZUzXLwHA+N99rJEK9Hob4kfh7ECjpgobPnIXfKKazQKBgQChAuiXHtf/Qq18hY3u
x48zFWjGgfd6GpOBZYkXOwGdCJgnYjZbE26LZEnYbwPh8ZUA2vp7mgHRJkD5e3Fj
ERuJLCw86WqyYZmLEuBciYGjCZqR5nbavfwsziWD00jeNruds2ZwKxRfFm4V7o2S
WLd/RUatd2Uu9f3B2J78OUdnxg==
-----END PRIVATE KEY-----

View File

@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDCzCCAfOgAwIBAgIUaIUOMI78LCu+r1zl0mmFHK8n5/AwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE5MTAyNDE5NTMxOVoYDzIxMTkw
OTMwMTk1MzE5WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC3G9IiC+adjf0pi/2KYc+4dizeuzUFN7wraSdhiOMd
QgCnu9Dc3t2YEsQhNdrARjOTyXd36KeM3TwIrPJ61dRGQSuN12l+mzngFJQjE0sy
sZHUJOLQC3rVvIrHSQ57utPg8ifxt/SunlPYfhcUcq03onMGq44yOfE6mIhoe0Y9
wcPQ3RjjNNS44bgmXiXwa+Do0h2hEn6/essq5KjHL8WW2vGg7G9edpYdxINA/A2f
dLtr8BwPNrZhOx84eee2XcUNdBuTtUUxE+0L9yRqItqddriRxJFwOXb5OPW8xx2W
GaV2a0wbE4gB2PTwwDvfo72mo9HXHZUHM1A84TD/RXMbAgMBAAGjUzBRMB0GA1Ud
DgQWBBSEWXQ2JRD+OK7/KTmlD+OW16pGmzAfBgNVHSMEGDAWgBSEWXQ2JRD+OK7/
KTmlD+OW16pGmzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCF
0zYRaabB3X0jzGI9/Lr3Phrb90GvoL1DFLRuiOuTlDkz0vrm/HrZskwHCgMNrkCj
OTD9Vpas4D1QZBbQbRzfnf3OOoG4bgmcCwLFZl3dy27yIDAhrmbUP++g9l1Jmy4v
vBR/M4lt2scQ8LcZYEPqhEaE5EzFQEjtaxDcKdWDNKY9W1NUzSIABhF9eHiAUNdH
AFNJlYeBlCHxcWIeqgon184Dqp/CsvKtz3z3Ni+rlwPM/zuJCFHh1VF+z++0LJjG
roBCV0Tro4XyiEz9yp7Cb5kQYMaj1KL9TqBG0tZx0pmv7y+lXc4TT6DEllXz6USy
rbIba9/uUet3BqeIMTqj
-----END CERTIFICATE-----