Compare commits

..

No commits in common. "master" and "v1.5.2" have entirely different histories.

70 changed files with 4640 additions and 6542 deletions

View File

@ -1,266 +0,0 @@
name: CI

# Trigger the workflow on push or pull request, but only for the master branch
on:
  pull_request:
    branches: [master]
  push:
    branches: [master]

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

# Env vars for tests
env:
  MINIO_ACCESS_KEY: minio
  MINIO_SECRET_KEY: minio123
  MINIO_LOCAL: 1

jobs:
  # Formatting check with ormolu.
  ormolu:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: haskell-actions/run-ormolu@v15
        with:
          version: "0.5.0.1"

  # Lint check with hlint.
  hlint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: "Set up HLint"
        uses: haskell-actions/hlint-setup@v2
        with:
          version: "3.5"
      - name: "Run HLint"
        uses: haskell-actions/hlint-run@v2
        with:
          path: '["src/", "test/", "examples"]'
          fail-on: warning

  # Build and test with cabal across OS / GHC / cabal versions.
  cabal:
    name: ${{ matrix.os }} / ghc-${{ matrix.ghc }} / cabal-${{ matrix.cabal }}
    runs-on: ${{ matrix.os }}
    needs: ormolu
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        cabal: ["3.8", "latest"]
        ghc:
          - "9.8"
          - "9.6"
          - "9.4"
          - "9.2"
          - "9.0"
          - "8.10"
        exclude:
          # macos llvm issue for versions less than 9.2
          - os: macos-latest
            ghc: "8.10"
          - os: macos-latest
            ghc: "9.0"
          # Cabal 3.8 supports GHC < 9.6
          - cabal: "3.8"
            ghc: "9.6"
          - cabal: "3.8"
            ghc: "9.8"
    steps:
      - uses: actions/checkout@v4
        if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
      - uses: haskell-actions/setup@v2
        id: setup
        name: Setup Haskell
        with:
          ghc-version: ${{ matrix.ghc }}
          cabal-version: ${{ matrix.cabal }}
          cabal-update: true
      - name: Configure
        run: |
          cabal configure --enable-tests --enable-benchmarks --test-show-details=direct -fexamples -fdev -flive-test
          cabal build all --dry-run
      # The last step generates dist-newstyle/cache/plan.json for the cache key.
      - name: Restore cached dependencies
        uses: actions/cache/restore@v4
        id: cache
        env:
          key: ${{ runner.os }}-ghc-${{ steps.setup.outputs.ghc-version }}-cabal-${{ steps.setup.outputs.cabal-version }}
        with:
          path: ${{ steps.setup.outputs.cabal-store }}
          key: ${{ env.key }}-plan-${{ hashFiles('**/plan.json') }}
          restore-keys: ${{ env.key }}-
      - name: Install dependencies
        # If we had an exact cache hit, the dependencies will be up to date.
        if: steps.cache.outputs.cache-hit != 'true'
        run: cabal build all --only-dependencies
      # Cache dependencies already here, so that we do not have to rebuild them should the subsequent steps fail.
      - name: Save cached dependencies
        uses: actions/cache/save@v4
        # If we had an exact cache hit, trying to save the cache would error because of key clash.
        if: steps.cache.outputs.cache-hit != 'true'
        with:
          path: ${{ steps.setup.outputs.cabal-store }}
          key: ${{ steps.cache.outputs.cache-primary-key }}
      - name: Build
        run: |
          cabal build all
      - name: Setup TLS certs for MinIO for testing (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
          sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
          sudo update-ca-certificates
      ## Currently disable TLS setup for MacOS due to issues in trusting it on MacOS.
      - name: Setup TLS certs for MinIO for testing (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
          # sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
      - name: Setup MinIO for testing (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
          Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
          Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
          Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
      - name: Test (Linux)
        if: matrix.os == 'ubuntu-latest'
        env:
          MINIO_SECURE: 1
        run: |
          # Redirect stdout first, then dup stderr onto it, so BOTH streams land in minio.log.
          /tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 > minio.log 2>&1 &
          ghc --version
          cabal --version
          cabal test all
      - name: Test (MacOS)
        if: matrix.os == 'macos-latest'
        # # Leave MINIO_SECURE unset to disable TLS in tests.
        # env:
        #   MINIO_SECURE: 1
        run: |
          # Redirect stdout first, then dup stderr onto it, so BOTH streams land in minio.log.
          /tmp/minio/minio server --quiet data1 data2 data3 data4 > minio.log 2>&1 &
          ghc --version
          cabal --version
          cabal test all
      - name: Test (Windows)
        if: matrix.os == 'windows-latest'
        env:
          MINIO_SECURE: 1
        run: |
          Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
          ghc --version
          cabal --version
          cabal test all

  # Build and test with stack; matrix currently runs on Linux only, but the
  # macOS/Windows setup and test steps are kept for when the matrix expands.
  stack:
    name: stack / ghc ${{ matrix.ghc }}
    runs-on: ${{ matrix.os }}
    env:
      MINIO_SECURE: 1
    strategy:
      matrix:
        ghc:
          - "8.10.7"
          - "9.0.2"
          - "9.2.8"
          - "9.4.8"
          - "9.6.5"
          - "9.8.2"
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
        if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
      - uses: haskell-actions/setup@v2
        with:
          ghc-version: ${{ matrix.ghc }}
          enable-stack: true
          stack-version: "latest"
      - uses: actions/cache@v4
        name: Cache ~/.stack
        with:
          path: ~/.stack
          key: ${{ runner.os }}-stack-global-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}
          restore-keys: |
            ${{ runner.os }}-stack-global-
      - uses: actions/cache@v4
        name: Cache .stack-work
        with:
          path: .stack-work
          key: ${{ runner.os }}-stack-work-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}-${{ hashFiles('**/*.hs') }}
          restore-keys: |
            ${{ runner.os }}-stack-work-
      - name: Install dependencies
        run: |
          stack --version
          stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --only-dependencies
      - name: Build
        run: |
          stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples --flag minio-hs:live-test --flag minio-hs:dev
      - name: Setup MinIO for testing (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
          sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
          sudo update-ca-certificates
      - name: Setup MinIO for testing (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
          sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
      - name: Setup MinIO for testing (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
          Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
          Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
          Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
      - name: Test (Non-Windows)
        if: matrix.os != 'windows-latest'
        run: |
          # Redirect stdout first, then dup stderr onto it, so BOTH streams land in minio.log.
          /tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 > minio.log 2>&1 &
          ghc --version
          stack --version
          stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev
      - name: Test (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
          ghc --version
          # Use stack's version here for consistency with the non-Windows step
          # (this is the stack job; cabal is not the tool under test).
          stack --version
          stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev

233
.stylish-haskell.yaml Normal file
View File

@ -0,0 +1,233 @@
# stylish-haskell configuration file
# ==================================

# The stylish-haskell tool is mainly configured by specifying steps. These steps
# are a list, so they have an order, and one specific step may appear more than
# once (if needed). Each file is processed by these steps in the given order.
steps:
  # Convert some ASCII sequences to their Unicode equivalents. This is disabled
  # by default.
  # - unicode_syntax:
  #     # In order to make this work, we also need to insert the UnicodeSyntax
  #     # language pragma. If this flag is set to true, we insert it when it's
  #     # not already present. You may want to disable it if you configure
  #     # language extensions using some other method than pragmas. Default:
  #     # true.
  #     add_language_pragma: true

  # Align the right hand side of some elements. This is quite conservative
  # and only applies to statements where each element occupies a single
  # line.
  - simple_align:
      cases: true
      top_level_patterns: true
      records: true

  # Import cleanup
  - imports:
      # There are different ways we can align names and lists.
      #
      # - global: Align the import names and import list throughout the entire
      #   file.
      #
      # - file: Like global, but don't add padding when there are no qualified
      #   imports in the file.
      #
      # - group: Only align the imports per group (a group is formed by adjacent
      #   import lines).
      #
      # - none: Do not perform any alignment.
      #
      # Default: global.
      align: global

      # The following options affect only import list alignment.
      #
      # List align has following options:
      #
      # - after_alias: Import list is aligned with end of import including
      #   'as' and 'hiding' keywords.
      #
      #   > import qualified Data.List as List (concat, foldl, foldr, head,
      #   >                                     init, last, length)
      #
      # - with_alias: Import list is aligned with start of alias or hiding.
      #
      #   > import qualified Data.List as List (concat, foldl, foldr, head,
      #   >                 init, last, length)
      #
      # - new_line: Import list starts always on new line.
      #
      #   > import qualified Data.List as List
      #   >     (concat, foldl, foldr, head, init, last, length)
      #
      # Default: after_alias
      list_align: after_alias

      # Right-pad the module names to align imports in a group:
      #
      # - true: a little more readable
      #
      #   > import qualified Data.List       as List (concat, foldl, foldr,
      #   >                                           init, last, length)
      #   > import qualified Data.List.Extra as List (concat, foldl, foldr,
      #   >                                           init, last, length)
      #
      # - false: diff-safe
      #
      #   > import qualified Data.List as List (concat, foldl, foldr, init,
      #   >                                     last, length)
      #   > import qualified Data.List.Extra as List (concat, foldl, foldr,
      #   >                                           init, last, length)
      #
      # Default: true
      pad_module_names: true

      # Long list align style takes effect when import is too long. This is
      # determined by 'columns' setting.
      #
      # - inline: This option will put as much specs on same line as possible.
      #
      # - new_line: Import list will start on new line.
      #
      # - new_line_multiline: Import list will start on new line when it's
      #   short enough to fit to single line. Otherwise it'll be multiline.
      #
      # - multiline: One line per import list entry.
      #   Type with constructor list acts like single import.
      #
      #   > import qualified Data.Map as M
      #   >     ( empty
      #   >     , singleton
      #   >     , ...
      #   >     , delete
      #   >     )
      #
      # Default: inline
      long_list_align: inline

      # Align empty list (importing instances)
      #
      # Empty list align has following options
      #
      # - inherit: inherit list_align setting
      #
      # - right_after: () is right after the module name:
      #
      #   > import Vector.Instances ()
      #
      # Default: inherit
      empty_list_align: inherit

      # List padding determines indentation of import list on lines after import.
      # This option affects 'long_list_align'.
      #
      # - <integer>: constant value
      #
      # - module_name: align under start of module name.
      #   Useful for 'file' and 'group' align settings.
      list_padding: 4

      # Separate lists option affects formatting of import list for type
      # or class. The only difference is single space between type and list
      # of constructors, selectors and class functions.
      #
      # - true: There is single space between Foldable type and list of it's
      #   functions.
      #
      #   > import Data.Foldable (Foldable (fold, foldl, foldMap))
      #
      # - false: There is no space between Foldable type and list of it's
      #   functions.
      #
      #   > import Data.Foldable (Foldable(fold, foldl, foldMap))
      #
      # Default: true
      separate_lists: true

      # Space surround option affects formatting of import lists on a single
      # line. The only difference is single space after the initial
      # parenthesis and a single space before the terminal parenthesis.
      #
      # - true: There is single space associated with the enclosing
      #   parenthesis.
      #
      #   > import Data.Foo ( foo )
      #
      # - false: There is no space associated with the enclosing parenthesis
      #
      #   > import Data.Foo (foo)
      #
      # Default: false
      space_surround: false

  # Language pragmas
  - language_pragmas:
      # We can generate different styles of language pragma lists.
      #
      # - vertical: Vertical-spaced language pragmas, one per line.
      #
      # - compact: A more compact style.
      #
      # - compact_line: Similar to compact, but wrap each line with
      #   `{-#LANGUAGE #-}'.
      #
      # Default: vertical.
      style: vertical

      # Align affects alignment of closing pragma brackets.
      #
      # - true: Brackets are aligned in same column.
      #
      # - false: Brackets are not aligned together. There is only one space
      #   between actual import and closing bracket.
      #
      # Default: true
      align: true

      # stylish-haskell can detect redundancy of some language pragmas. If this
      # is set to true, it will remove those redundant pragmas. Default: true.
      remove_redundant: true

  # Replace tabs by spaces. This is disabled by default.
  # - tabs:
  #     # Number of spaces to use for each tab. Default: 8, as specified by the
  #     # Haskell report.
  #     spaces: 8

  # Remove trailing whitespace
  - trailing_whitespace: {}

# A common setting is the number of columns (parts of) code will be wrapped
# to. Different steps take this into account. Default: 80.
columns: 80

# By default, line endings are converted according to the OS. You can override
# preferred format here.
#
# - native: Native newline format. CRLF on Windows, LF on other OSes.
#
# - lf: Convert to LF ("\n").
#
# - crlf: Convert to CRLF ("\r\n").
#
# Default: native.
newline: native

# Sometimes, language extensions are specified in a cabal file or from the
# command line instead of using language pragmas in the file. stylish-haskell
# needs to be aware of these, so it can parse the file correctly.
#
# No language extensions are enabled by default.
language_extensions:
  - BangPatterns
  - FlexibleContexts
  - FlexibleInstances
  - MultiParamTypeClasses
  - MultiWayIf
  - NoImplicitPrelude
  - OverloadedStrings
  - RankNTypes
  - ScopedTypeVariables
  - TupleSections
  - TypeFamilies

61
.travis.yml Normal file
View File

@ -0,0 +1,61 @@
# Legacy Travis CI configuration (superseded by GitHub Actions for newer
# releases; kept as shipped with this tag).
sudo: true
language: haskell
git:
  depth: 5
cabal: "3.0"
cache:
  directories:
    - "$HOME/.cabal/store"
    - "$HOME/.stack"
    - "$TRAVIS_BUILD_DIR/.stack-work"
matrix:
  include:
    # Cabal
    - ghc: "8.4.4"
    - ghc: "8.6.5"
    - ghc: "8.8.1"
    # Stack
    - ghc: "8.6.5"
      env: STACK_YAML="$TRAVIS_BUILD_DIR/stack.yaml"
before_install:
  - sudo apt-get install devscripts
  - mkdir /tmp/minio /tmp/certs
  - (cd /tmp/minio; wget https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
  - (cd /tmp/certs; openssl req -newkey rsa:2048 -nodes -keyout private.key -x509 -days 36500 -out public.crt -subj "/C=US/ST=NRW/L=Earth/O=CompanyName/OU=IT/CN=localhost/emailAddress=email@example.com")
  - sudo cp /tmp/certs/public.crt /usr/local/share/ca-certificates/
  - sudo update-ca-certificates
  # Redirect stdout first, then dup stderr onto it, so BOTH streams land in minio.log.
  - MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 /tmp/minio/minio server --quiet --certs-dir /tmp/certs data > minio.log 2>&1 &
install:
  - |
    if [ -z "$STACK_YAML" ]; then
      ghc --version
      cabal --version
      cabal new-update
      cabal new-build --enable-tests --enable-benchmarks -fexamples
    else
      # install stack
      curl -sSL https://get.haskellstack.org/ | sh
      # build project with stack
      stack --version
      stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
    fi
script:
  - |
    if [ -z "$STACK_YAML" ]; then
      MINIO_LOCAL=1 MINIO_SECURE=1 cabal new-test --enable-tests -flive-test
    else
      MINIO_LOCAL=1 MINIO_SECURE=1 stack test --system-ghc --flag minio-hs:live-test
    fi
notifications:
  email: false

View File

@ -1,37 +1,6 @@
Changelog Changelog
========== ==========
## Version 1.7.0 -- Unreleased
* Fix data type `EventMessage` to not export partial fields (#179)
* Bump up min bound on time dep and fix deprecation warnings (#181)
* Add `dev` flag to cabal for building with warnings as errors (#182)
* Fix AWS region map (#185)
* Fix XML generator tests (#187)
* Add support for STS Assume Role API (#188)
### Breaking changes in 1.7.0
* `Credentials` type has been removed. Use `CredentialValue` instead.
* `Provider` type has been replaced with `CredentialLoader`.
* `EventMessage` data type is updated.
## Version 1.6.0
* HLint fixes - some types were changed to newtype (#173)
* Fix XML generation test for S3 SELECT (#161)
* Use region specific endpoints for AWS S3 in presigned Urls (#164)
* Replace protolude with relude and build with GHC 9.0.2 (#168)
* Support aeson 2 (#169)
* CI updates and code formatting changes with ormolu 0.5.0.0
## Version 1.5.3
* Fix windows build
* Fix support for Yandex Storage (#147)
* Fix for HEAD requests to S3/Minio (#155)
* Bump up some dependencies, new code formatting, Github CI, example fixes and other minor improvements.
## Version 1.5.2 ## Version 1.5.2
* Fix region `us-west-2` for AWS S3 (#139) * Fix region `us-west-2` for AWS S3 (#139)

1
CNAME
View File

@ -1 +0,0 @@
minio-hs.min.io

View File

@ -1,8 +1,10 @@
# MinIO Haskell Client SDK for Amazon S3 Compatible Cloud Storage [![CI](https://github.com/minio/minio-hs/actions/workflows/ci.yml/badge.svg)](https://github.com/minio/minio-hs/actions/workflows/ci.yml)[![Hackage](https://img.shields.io/hackage/v/minio-hs.svg)](https://hackage.haskell.org/package/minio-hs)[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) # MinIO Client SDK for Haskell [![Build Status](https://travis-ci.org/minio/minio-hs.svg?branch=master)](https://travis-ci.org/minio/minio-hs)[![Hackage](https://img.shields.io/hackage/v/minio-hs.svg)](https://hackage.haskell.org/package/minio-hs)[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and any Amazon S3 compatible object storage. The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and Amazon S3 compatible object storage server.
This guide assumes that you have a working [Haskell development environment](https://www.haskell.org/downloads/). ## Minimum Requirements
- The Haskell [stack](https://docs.haskellstack.org/en/stable/README/)
## Installation ## Installation
@ -10,35 +12,20 @@ This guide assumes that you have a working [Haskell development environment](htt
Simply add `minio-hs` to your project's `.cabal` dependencies section or if you are using hpack, to your `package.yaml` file as usual. Simply add `minio-hs` to your project's `.cabal` dependencies section or if you are using hpack, to your `package.yaml` file as usual.
### Try it out in a [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop) ### Try it out directly with `ghci`
#### For a cabal based environment
Download the library source and change to the extracted directory:
``` sh
$ cabal get minio-hs
$ cd minio-hs-1.6.0/ # directory name could be different
```
Then load the `ghci` REPL environment with the library and browse the available APIs:
``` sh
$ cabal repl
ghci> :browse Network.Minio
```
#### For a stack based environment
From your home folder or any non-haskell project directory, just run: From your home folder or any non-haskell project directory, just run:
```sh ```sh
stack install minio-hs stack install minio-hs
``` ```
Then start an interpreter session and browse the available APIs with: Then start an interpreter session and browse the available APIs with:
```sh ```sh
$ stack ghci $ stack ghci
> :browse Network.Minio > :browse Network.Minio
``` ```
@ -147,52 +134,44 @@ main = do
### Development ### Development
#### Download the source To setup:
```sh ```sh
$ git clone https://github.com/minio/minio-hs.git git clone https://github.com/minio/minio-hs.git
$ cd minio-hs/
```
#### Build the package: cd minio-hs/
With `cabal`: stack install
```
Tests can be run with:
```sh ```sh
$ # Configure cabal for development enabling all optional flags defined by the package.
$ cabal configure --enable-tests --test-show-details=direct -fexamples -fdev -flive-test stack test
$ cabal build
``` ```
With `stack`: A section of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server maybe slow. To run the tests against a locally running MinIO live server at `http://localhost:9000`, just set the environment `MINIO_LOCAL` to any value (and unset it to switch back to Play).
``` sh To run the live server tests, set a build flag as shown below:
$ stack build --test --no-run-tests --flag minio-hs:live-test --flag minio-hs:dev --flag minio-hs:examples
```
#### Running tests:
A section of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server maybe slow. To run the tests against a locally running MinIO live server at `http://localhost:9000` with the credentials `access_key=minio` and `secret_key=minio123`, just set the environment `MINIO_LOCAL` to any value (and unset it to switch back to Play).
With `cabal`:
```sh ```sh
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
$ cabal test stack test --flag minio-hs:live-test
# OR against a local MinIO server with:
MINIO_LOCAL=1 stack test --flag minio-hs:live-test
``` ```
With `stack`: The configured CI system always runs both test-suites for every change.
``` sh Documentation can be locally built with:
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
stack test --flag minio-hs:live-test --flag minio-hs:dev
```
This will run all the test suites.
#### Building documentation:
```sh ```sh
$ cabal haddock
$ # OR stack haddock
$ stack haddock
``` ```

18
Setup.hs Normal file
View File

@ -0,0 +1,18 @@
--
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
import Distribution.Simple
main = defaultMain

View File

@ -1,47 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
main :: IO ()
main = do
-- Use play credentials for example.
let assumeRole =
STSAssumeRole
( CredentialValue
"Q3AM3UQ867SPQQA43P2F"
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
Nothing
)
$ defaultSTSAssumeRoleOptions
{ saroLocation = Just "us-east-1",
saroEndpoint = Just "https://play.min.io:9000"
}
-- Retrieve temporary credentials and print them.
cv <- requestSTSCredential assumeRole
print $ "Temporary credentials" ++ show (credentialValueText $ fst cv)
print $ "Expiry" ++ show (snd cv)
-- Configure 'ConnectInfo' to request temporary credentials on demand.
ci <- setSTSCredential assumeRole "https://play.min.io"
res <- runMinio ci $ do
buckets <- listBuckets
liftIO $ print $ "Top 5 buckets: " ++ show (take 5 buckets)
print res

View File

@ -16,17 +16,20 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO) {-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Prelude
import Control.Monad.IO.Class (liftIO)
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "missingbucket" let bucket = "missingbucket"
@ -36,5 +39,5 @@ main = do
liftIO $ putStrLn $ "Does " ++ show bucket ++ " exist? - " ++ show foundBucket liftIO $ putStrLn $ "Does " ++ show bucket ++ " exist? - " ++ show foundBucket
case res1 of case res1 of
Left e -> putStrLn $ "bucketExists failed." ++ show e Left e -> putStrLn $ "bucketExists failed." ++ show e
Right () -> return () Right () -> return ()

View File

@ -16,40 +16,42 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import UnliftIO.Exception (catch, throwIO) import Network.Minio
import UnliftIO.Exception (catch, throwIO)
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "test" let
bucket = "test"
object = "obj" object = "obj"
objectCopy = "obj-copy" objectCopy = "obj-copy"
localFile = "/etc/lsb-release" localFile = "/etc/lsb-release"
res1 <- runMinio minioPlayCI $ do res1 <- runMinio minioPlayCI $ do
-- 1. Make a bucket; Catch BucketAlreadyOwnedByYou exception. -- 1. Make a bucket; Catch BucketAlreadyOwnedByYou exception.
catch catch (makeBucket bucket Nothing) (
(makeBucket bucket Nothing) \e -> case e of
( \e -> case e of BucketAlreadyOwnedByYou -> return ()
BucketAlreadyOwnedByYou -> return () _ -> throwIO e
_ -> throwIO e
) )
-- 2. Upload a file to bucket/object. -- 2. Upload a file to bucket/object.
fPutObject bucket object localFile defaultPutObjectOptions fPutObject bucket object localFile defaultPutObjectOptions
-- 3. Copy bucket/object to bucket/objectCopy. -- 3. Copy bucket/object to bucket/objectCopy.
copyObject copyObject defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy}
defaultDestinationInfo {dstBucket = bucket, dstObject = objectCopy} defaultSourceInfo { srcBucket = bucket , srcObject = object }
defaultSourceInfo {srcBucket = bucket, srcObject = object}
case res1 of case res1 of
Left e -> putStrLn $ "copyObject failed." ++ show e Left e -> putStrLn $ "copyObject failed." ++ show e
Right () -> putStrLn "copyObject succeeded." Right () -> putStrLn "copyObject succeeded."

View File

@ -16,39 +16,40 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
import Data.Text (pack)
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Options.Applicative {-# LANGUAGE ScopedTypeVariables #-}
import System.FilePath.Posix import Network.Minio
import UnliftIO (throwIO, try)
import Prelude import Data.Monoid ((<>))
import Data.Text (pack)
import Options.Applicative
import System.FilePath.Posix
import UnliftIO (throwIO, try)
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
-- optparse-applicative package based command-line parsing. -- optparse-applicative package based command-line parsing.
fileNameArgs :: Parser FilePath fileNameArgs :: Parser FilePath
fileNameArgs = fileNameArgs = strArgument
strArgument (metavar "FILENAME"
( metavar "FILENAME" <> help "Name of file to upload to AWS S3 or a MinIO server")
<> help "Name of file to upload to AWS S3 or a MinIO server"
)
cmdParser :: ParserInfo FilePath cmdParser :: ParserInfo FilePath
cmdParser = cmdParser = info
info (helper <*> fileNameArgs)
(helper <*> fileNameArgs) (fullDesc
( fullDesc <> progDesc "FileUploader"
<> progDesc "FileUploader" <> header
<> header "FileUploader - a simple file-uploader program using minio-hs")
"FileUploader - a simple file-uploader program using minio-hs"
)
main :: IO () main :: IO ()
main = do main = do
@ -63,12 +64,12 @@ main = do
bErr <- try $ makeBucket bucket Nothing bErr <- try $ makeBucket bucket Nothing
case bErr of case bErr of
Left BucketAlreadyOwnedByYou -> return () Left BucketAlreadyOwnedByYou -> return ()
Left e -> throwIO e Left e -> throwIO e
Right _ -> return () Right _ -> return ()
-- Upload filepath to bucket; object is derived from filepath. -- Upload filepath to bucket; object is derived from filepath.
fPutObject bucket object filepath defaultPutObjectOptions fPutObject bucket object filepath defaultPutObjectOptions
case res of case res of
Left e -> putStrLn $ "file upload failed due to " ++ show e Left e -> putStrLn $ "file upload failed due to " ++ (show e)
Right () -> putStrLn "file upload succeeded." Right () -> putStrLn "file upload succeeded."

View File

@ -17,14 +17,14 @@
-- limitations under the License. -- limitations under the License.
-- --
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <- runMinio minioPlayCI $
runMinio
minioPlayCI
getConfig getConfig
print res print res

View File

@ -16,26 +16,31 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Conduit as C {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import Network.Minio
import Prelude import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "my-bucket" let
bucket = "my-bucket"
object = "my-object" object = "my-object"
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
src <- getObject bucket object defaultGetObjectOptions src <- getObject bucket object defaultGetObjectOptions
C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object" C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object"
case res of case res of
Left e -> putStrLn $ "getObject failed." ++ show e Left e -> putStrLn $ "getObject failed." ++ (show e)
Right _ -> putStrLn "getObject succeeded." Right _ -> putStrLn "getObject succeeded."

View File

@ -16,25 +16,28 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.S3API import Network.Minio
import Prelude import Network.Minio.S3API
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "test" let
bucket = "test"
object = "passwd" object = "passwd"
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ headObject bucket object []
headObject bucket object []
case res of case res of
Left e -> putStrLn $ "headObject failed." ++ show e Left e -> putStrLn $ "headObject failed." ++ show e
Right objInfo -> putStrLn $ "headObject succeeded." ++ show objInfo Right objInfo -> putStrLn $ "headObject succeeded." ++ show objInfo

View File

@ -17,21 +17,18 @@
-- limitations under the License. -- limitations under the License.
-- --
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- runMinio minioPlayCI $ res <- runMinio minioPlayCI $
do do
hsr <- hsr <- startHeal Nothing Nothing HealOpts { hoRecursive = True
startHeal , hoDryRun = False
Nothing }
Nothing
HealOpts
{ hoRecursive = True,
hoDryRun = False
}
getHealStatus Nothing Nothing (hsrClientToken hsr) getHealStatus Nothing Nothing (hsrClientToken hsr)
print res print res

View File

@ -16,17 +16,19 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO) {-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Prelude
import Control.Monad.IO.Class (liftIO)
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
-- This example list buckets that belongs to the user and returns -- This example list buckets that belongs to the user and returns
-- region of the first bucket returned. -- region of the first bucket returned.

View File

@ -16,36 +16,38 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Conduit {-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Prelude
import Conduit
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "test" let
bucket = "test"
-- Performs a recursive listing of incomplete uploads under bucket "test" -- Performs a recursive listing of incomplete uploads under bucket "test"
-- on a local minio server. -- on a local minio server.
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ runConduit $ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
runConduit $
listIncompleteUploads bucket Nothing True .| mapM_C (liftIO . print)
print res print res
{- {-
Following is the output of the above program on a local MinIO server. Following is the output of the above program on a local MinIO server.
Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz" Right [UploadInfo { uiKey = "go1.6.2.linux-amd64.tar.gz"
, uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2" , uiUploadId = "063eb592-bdd7-4a0c-be48-34fb3ceb63e2"
, uiInitTime = 2017-03-01 10:16:25.698 UTC , uiInitTime = 2017-03-01 10:16:25.698 UTC
, uiSize = 17731794 , uiSize = 17731794
} }
] ]
-} -}

View File

@ -16,31 +16,33 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Conduit {-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Prelude
import Conduit
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "test" let
bucket = "test"
-- Performs a recursive listing of all objects under bucket "test" -- Performs a recursive listing of all objects under bucket "test"
-- on play.min.io. -- on play.min.io.
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ runConduit $ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
runConduit $
listObjects bucket Nothing True .| mapM_C (liftIO . print)
print res print res
{-
Following is the output of the above program on a local MinIO server.
{- Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
Following is the output of the above program on a local MinIO server. -}
Right [ObjectInfo {oiObject = "ADVANCED.png", oiModTime = 2017-02-10 05:33:24.816 UTC, oiETag = "\"a69f3af6bbb06fe1d42ac910ec30482f\"", oiSize = 94026},ObjectInfo {oiObject = "obj", oiModTime = 2017-02-10 08:49:26.777 UTC, oiETag = "\"715a872a253a3596652c1490081b4b6a-1\"", oiSize = 15728640}]
-}

View File

@ -16,21 +16,24 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "my-bucket" let bucket = "my-bucket"
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ -- N B the region provided for makeBucket is optional.
-- N B the region provided for makeBucket is optional. makeBucket bucket (Just "us-east-1")
makeBucket bucket (Just "us-east-1")
print res print res

View File

@ -16,37 +16,39 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO) {-# LANGUAGE OverloadedStrings #-}
import qualified Data.ByteString.Char8 as B import Network.Minio
import Data.CaseInsensitive (original)
import Control.Monad.IO.Class (liftIO)
import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original)
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Text.Encoding as E import qualified Data.Text.Encoding as E
import Network.Minio
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "my-bucket" let
object = "my-object" bucket = "my-bucket"
kb15 = 15 * 1024 object = "my-object"
-- Set query parameter to modify content disposition response kb15 = 15*1024
-- header
queryParam = -- Set query parameter to modify content disposition response
[ ( "response-content-disposition", -- header
Just "attachment; filename=\"your-filename.txt\"" queryParam = [("response-content-disposition",
) Just "attachment; filename=\"your-filename.txt\"")]
]
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..." liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..."
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
liftIO $ putStrLn "Done. Object created at: my-bucket/my-object" liftIO $ putStrLn $ "Done. Object created at: my-bucket/my-object"
-- Extract Etag of uploaded object -- Extract Etag of uploaded object
oi <- statObject bucket object defaultGetObjectOptions oi <- statObject bucket object defaultGetObjectOptions
@ -59,29 +61,23 @@ main = do
-- Generate a URL with 7 days expiry time - note that the headers -- Generate a URL with 7 days expiry time - note that the headers
-- used above must be added to the request with the signed URL -- used above must be added to the request with the signed URL
-- generated. -- generated.
url <- url <- presignedGetObjectUrl "my-bucket" "my-object" (7*24*3600)
presignedGetObjectUrl queryParam headers
"my-bucket"
"my-object"
(7 * 24 * 3600)
queryParam
headers
return (headers, etag, url) return (headers, etag, url)
case res of case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right (headers, _, url) -> do Right (headers, _, url) -> do
-- We generate a curl command to demonstrate usage of the signed -- We generate a curl command to demonstrate usage of the signed
-- URL. -- URL.
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"] let
curlCmd = hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
B.intercalate " " $ curlCmd = B.intercalate " " $
["curl --fail"] ["curl --fail"] ++ map hdrOpt headers ++
++ map hdrOpt headers ["-o /tmp/myfile", B.concat ["'", url, "'"]]
++ ["-o /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ putStrLn $ "The following curl command would use the presigned " ++
"The following curl command would use the presigned " "URL to fetch the object and write it to \"/tmp/myfile\":"
++ "URL to fetch the object and write it to \"/tmp/myfile\":"
B.putStrLn curlCmd B.putStrLn curlCmd

View File

@ -16,72 +16,69 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.ByteString as B {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as Char8 import qualified Data.ByteString.Char8 as Char8
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.Text.Encoding as Enc import qualified Data.Text.Encoding as Enc
import qualified Data.Time as Time import qualified Data.Time as Time
import Network.Minio
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
now <- Time.getCurrentTime now <- Time.getCurrentTime
let bucket = "my-bucket" let
object = "photos/my-object" bucket = "my-bucket"
-- set an expiration time of 10 days object = "photos/my-object"
expireTime = Time.addUTCTime (3600 * 24 * 10) now
-- create a policy with expiration time and conditions - since the -- set an expiration time of 10 days
-- conditions are validated, newPostPolicy returns an Either value expireTime = Time.addUTCTime (3600 * 24 * 10) now
policyE =
newPostPolicy -- create a policy with expiration time and conditions - since the
expireTime -- conditions are validated, newPostPolicy returns an Either value
[ -- set the object name condition policyE = newPostPolicy expireTime
ppCondKey object, [ -- set the object name condition
-- set the bucket name condition ppCondKey object
ppCondBucket bucket, -- set the bucket name condition
-- set the size range of object as 1B to 10MiB , ppCondBucket bucket
ppCondContentLengthRange 1 (10 * 1024 * 1024), -- set the size range of object as 1B to 10MiB
-- set content type as jpg image , ppCondContentLengthRange 1 (10*1024*1024)
ppCondContentType "image/jpeg", -- set content type as jpg image
-- on success set the server response code to 200 , ppCondContentType "image/jpeg"
ppCondSuccessActionStatus 200 -- on success set the server response code to 200
] , ppCondSuccessActionStatus 200
]
case policyE of case policyE of
Left err -> print err Left err -> putStrLn $ show err
Right policy -> do Right policy -> do
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
(url, formData) <- presignedPostPolicy policy (url, formData) <- presignedPostPolicy policy
-- a curl command is output to demonstrate using the generated -- a curl command is output to demonstrate using the generated
-- URL and form-data -- URL and form-data
let formFn (k, v) = let
B.concat formFn (k, v) = B.concat ["-F ", Enc.encodeUtf8 k, "=",
[ "-F ", "'", v, "'"]
Enc.encodeUtf8 k, formOptions = B.intercalate " " $ map formFn $ H.toList formData
"=",
"'",
v,
"'"
]
formOptions = B.intercalate " " $ map formFn $ H.toList formData
return $
B.intercalate return $ B.intercalate " " $
" " ["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
case res of case res of
Left e -> putStrLn $ "post-policy error: " ++ show e Left e -> putStrLn $ "post-policy error: " ++ (show e)
Right cmd -> do Right cmd -> do
putStrLn "Put a photo at /tmp/photo.jpg and run command:\n" putStrLn $ "Put a photo at /tmp/photo.jpg and run command:\n"
-- print the generated curl command -- print the generated curl command
Char8.putStrLn cmd Char8.putStrLn cmd

View File

@ -16,43 +16,44 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Char8 as B
import Data.CaseInsensitive (original) import Data.CaseInsensitive (original)
import Network.Minio
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let -- Use headers to set user-metadata - note that this header will let
-- need to be set when the URL is used to make an upload. -- Use headers to set user-metadata - note that this header will
headers = -- need to be set when the URL is used to make an upload.
[ ( "x-amz-meta-url-creator", headers = [("x-amz-meta-url-creator",
"minio-hs-presigned-put-example" "minio-hs-presigned-put-example")]
)
]
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
-- generate a URL with 7 days expiry time -- generate a URL with 7 days expiry time
presignedPutObjectUrl "my-bucket" "my-object" (7 * 24 * 3600) headers presignedPutObjectUrl "my-bucket" "my-object" (7*24*3600) headers
case res of case res of
Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e Left e -> putStrLn $ "presignedPutObject URL failed." ++ show e
Right url -> do Right url -> do
-- We generate a curl command to demonstrate usage of the signed -- We generate a curl command to demonstrate usage of the signed
-- URL. -- URL.
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"] let
curlCmd = hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
B.intercalate " " $ curlCmd = B.intercalate " " $
["curl "] ["curl "] ++ map hdrOpt headers ++
++ map hdrOpt headers ["-T /tmp/myfile", B.concat ["'", url, "'"]]
++ ["-T /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ putStrLn $ "The following curl command would use the presigned " ++
"The following curl command would use the presigned " "URL to upload the file at \"/tmp/myfile\":"
++ "URL to upload the file at \"/tmp/myfile\":"
B.putStrLn curlCmd B.putStrLn curlCmd

View File

@ -16,36 +16,39 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import Network.Minio
import Prelude import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "test" let
bucket = "test"
object = "obj" object = "obj"
localFile = "/etc/lsb-release" localFile = "/etc/lsb-release"
kb15 = 15 * 1024 kb15 = 15 * 1024
-- Eg 1. Upload a stream of repeating "a" using putObject with default options. -- Eg 1. Upload a stream of repeating "a" using putObject with default options.
res1 <- res1 <- runMinio minioPlayCI $
runMinio minioPlayCI $ putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
case res1 of case res1 of
Left e -> putStrLn $ "putObject failed." ++ show e Left e -> putStrLn $ "putObject failed." ++ show e
Right () -> putStrLn "putObject succeeded." Right () -> putStrLn "putObject succeeded."
-- Eg 2. Upload a file using fPutObject with default options. -- Eg 2. Upload a file using fPutObject with default options.
res2 <- res2 <- runMinio minioPlayCI $
runMinio minioPlayCI $ fPutObject bucket object localFile defaultPutObjectOptions
fPutObject bucket object localFile defaultPutObjectOptions
case res2 of case res2 of
Left e -> putStrLn $ "fPutObject failed." ++ show e Left e -> putStrLn $ "fPutObject failed." ++ show e
Right () -> putStrLn "fPutObject succeeded." Right () -> putStrLn "fPutObject succeeded."

View File

@ -16,18 +16,23 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude {-# LANGUAGE OverloadedStrings #-}
import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "my-bucket" let
bucket = "my-bucket"
res <- runMinio minioPlayCI $ removeBucket bucket res <- runMinio minioPlayCI $ removeBucket bucket
print res print res

View File

@ -16,24 +16,27 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Prelude import Network.Minio
import Prelude
-- | The following example uses minio's play server at -- | The following example uses minio's play server at
-- https://play.min.io. The endpoint and associated -- https://play.min.io. The endpoint and associated
-- credentials are provided via the libary constant, -- credentials are provided via the libary constant,
-- --
-- > minioPlayCI :: ConnectInfo -- > minioPlayCI :: ConnectInfo
--
main :: IO () main :: IO ()
main = do main = do
let bucket = "mybucket" let
object = "myobject" bucket = "mybucket"
object = "myobject"
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ removeIncompleteUpload bucket object
removeIncompleteUpload bucket object
case res of case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -16,19 +16,20 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Prelude import Prelude
main :: IO () main :: IO ()
main = do main = do
let bucket = "mybucket" let
object = "myobject" bucket = "mybucket"
object = "myobject"
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ removeObject bucket object
removeObject bucket object
case res of case res of
Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object Left _ -> putStrLn $ "Failed to remove " ++ show bucket ++ "/" ++ show object

View File

@ -16,32 +16,34 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import qualified Conduit as C {-# LANGUAGE OverloadedStrings #-}
import Control.Monad (unless) import Network.Minio
import Network.Minio
import Prelude import qualified Conduit as C
import Control.Monad (when)
import Prelude
main :: IO () main :: IO ()
main = do main = do
let bucket = "selectbucket" let bucket = "selectbucket"
object = "1.csv" object = "1.csv"
content = content = "Name,Place,Temperature\n"
"Name,Place,Temperature\n" <> "James,San Jose,76\n"
<> "James,San Jose,76\n" <> "Alicia,San Leandro,88\n"
<> "Alicia,San Leandro,88\n" <> "Mark,San Carlos,90\n"
<> "Mark,San Carlos,90\n"
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
exists <- bucketExists bucket
unless exists $
makeBucket bucket Nothing
C.liftIO $ putStrLn "Uploading csv object" exists <- bucketExists bucket
putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions when (not exists) $
makeBucket bucket Nothing
let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput C.liftIO $ putStrLn "Uploading csv object"
res <- selectObjectContent bucket object sr putObject bucket object (C.sourceLazy content) Nothing defaultPutObjectOptions
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res let sr = selectRequest "Select * from s3object" defaultCsvInput defaultCsvOutput
res <- selectObjectContent bucket object sr
C.runConduit $ res C..| getPayloadBytes C..| C.stdoutC
print res

View File

@ -17,14 +17,14 @@
-- limitations under the License. -- limitations under the License.
-- --
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <- runMinio minioPlayCI $
runMinio getServerInfo
minioPlayCI
getServerInfo
print res print res

View File

@ -17,13 +17,14 @@
-- limitations under the License. -- limitations under the License.
-- --
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ serviceSendAction ServiceActionRestart
serviceSendAction ServiceActionRestart
print res print res

View File

@ -17,13 +17,14 @@
-- limitations under the License. -- limitations under the License.
-- --
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <- runMinio minioPlayCI $
runMinio minioPlayCI $ serviceSendAction ServiceActionStop
serviceSendAction ServiceActionStop
print res print res

View File

@ -17,14 +17,14 @@
-- limitations under the License. -- limitations under the License.
-- --
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <- runMinio minioPlayCI $
runMinio serviceStatus
minioPlayCI
serviceStatus
print res print res

View File

@ -16,11 +16,12 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio {-# LANGUAGE OverloadedStrings #-}
import Network.Minio.AdminAPI import Network.Minio
import Prelude import Network.Minio.AdminAPI
import Prelude
main :: IO () main :: IO ()
main = do main = do

View File

@ -1,6 +1,6 @@
cabal-version: 2.4 cabal-version: 2.2
name: minio-hs name: minio-hs
version: 1.7.0 version: 1.5.2
synopsis: A MinIO Haskell Library for Amazon S3 compatible cloud synopsis: A MinIO Haskell Library for Amazon S3 compatible cloud
storage. storage.
description: The MinIO Haskell client library provides simple APIs to description: The MinIO Haskell client library provides simple APIs to
@ -14,70 +14,29 @@ maintainer: dev@min.io
category: Network, AWS, Object Storage category: Network, AWS, Object Storage
build-type: Simple build-type: Simple
stability: Experimental stability: Experimental
extra-doc-files: extra-source-files:
CHANGELOG.md CHANGELOG.md
CONTRIBUTING.md CONTRIBUTING.md
docs/API.md docs/API.md
README.md
extra-source-files:
examples/*.hs examples/*.hs
README.md
stack.yaml stack.yaml
tested-with: GHC == 8.10.7
, GHC == 9.0.2
, GHC == 9.2.8
, GHC == 9.4.8
, GHC == 9.6.5
, GHC == 9.8.2
source-repository head
type: git
location: https://github.com/minio/minio-hs.git
Flag dev
Description: Build package in development mode
Default: False
Manual: True
common base-settings common base-settings
ghc-options: -Wall ghc-options: -Wall
-Wcompat
-Widentities
-Wincomplete-uni-patterns
-Wincomplete-record-updates
-haddock
if impl(ghc >= 8.0)
ghc-options: -Wredundant-constraints
if impl(ghc >= 8.2)
ghc-options: -fhide-source-paths
if impl(ghc >= 8.4)
ghc-options: -Wpartial-fields
-- -Wmissing-export-lists
if impl(ghc >= 8.8)
ghc-options: -Wmissing-deriving-strategies
-Werror=missing-deriving-strategies
-- if impl(ghc >= 8.10)
-- ghc-options: -Wunused-packages -- disabled due to bug related to mixin config
if impl(ghc >= 9.0)
ghc-options: -Winvalid-haddock
if impl(ghc >= 9.2)
ghc-options: -Wredundant-bang-patterns
if flag(dev)
ghc-options: -Werror
default-language: Haskell2010 default-language: Haskell2010
default-extensions: BangPatterns default-extensions: BangPatterns
, DerivingStrategies
, FlexibleContexts , FlexibleContexts
, FlexibleInstances , FlexibleInstances
, LambdaCase
, MultiParamTypeClasses , MultiParamTypeClasses
, MultiWayIf , MultiWayIf
, NoImplicitPrelude
, OverloadedStrings , OverloadedStrings
, RankNTypes , RankNTypes
, ScopedTypeVariables , ScopedTypeVariables
, TypeFamilies
, TupleSections , TupleSections
other-modules: Lib.Prelude other-modules: Lib.Prelude
, Network.Minio.API , Network.Minio.API
, Network.Minio.APICommon , Network.Minio.APICommon
@ -95,30 +54,22 @@ common base-settings
, Network.Minio.Utils , Network.Minio.Utils
, Network.Minio.XmlGenerator , Network.Minio.XmlGenerator
, Network.Minio.XmlParser , Network.Minio.XmlParser
, Network.Minio.XmlCommon
, Network.Minio.JsonParser , Network.Minio.JsonParser
, Network.Minio.Credentials.Types
, Network.Minio.Credentials.AssumeRole
, Network.Minio.Credentials
mixins: base hiding (Prelude)
, relude (Relude as Prelude)
, relude
build-depends: base >= 4.7 && < 5 build-depends: base >= 4.7 && < 5
, relude >= 0.7 && < 2 , protolude >= 0.2 && < 0.3
, aeson >= 1.2 && < 3 , aeson >= 1.2
, base64-bytestring >= 1.0 , base64-bytestring >= 1.0
, binary >= 0.8.5.0 , binary >= 0.8.5.0
, bytestring >= 0.10 , bytestring >= 0.10
, case-insensitive >= 1.2 , case-insensitive >= 1.2
, conduit >= 1.3 , conduit >= 1.3
, conduit-extra >= 1.3 , conduit-extra >= 1.3
, crypton-connection , connection
, cryptonite >= 0.25 , cryptonite >= 0.25
, cryptonite-conduit >= 0.2 , cryptonite-conduit >= 0.2
, digest >= 0.0.1 , digest >= 0.0.1
, directory , directory
, exceptions
, filepath >= 1.4 , filepath >= 1.4
, http-client >= 0.5 , http-client >= 0.5
, http-client-tls , http-client-tls
@ -126,15 +77,14 @@ common base-settings
, http-types >= 0.12 , http-types >= 0.12
, ini , ini
, memory >= 0.14 , memory >= 0.14
, network-uri , raw-strings-qq >= 1
, resourcet >= 1.2 , resourcet >= 1.2
, retry , retry
, text >= 1.2 , text >= 1.2
, time >= 1.9 , time >= 1.8
, time-units ^>= 1.0.0
, transformers >= 0.5 , transformers >= 0.5
, unliftio >= 0.2 && < 0.3 , unliftio >= 0.2
, unliftio-core >= 0.2 && < 0.3 , unliftio-core >= 0.1
, unordered-containers >= 0.2 , unordered-containers >= 0.2
, xml-conduit >= 1.8 , xml-conduit >= 1.8
@ -165,9 +115,7 @@ test-suite minio-hs-live-server-test
, Network.Minio.Utils.Test , Network.Minio.Utils.Test
, Network.Minio.XmlGenerator.Test , Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser.Test , Network.Minio.XmlParser.Test
, Network.Minio.Credentials
build-depends: minio-hs build-depends: minio-hs
, raw-strings-qq
, tasty , tasty
, tasty-hunit , tasty-hunit
, tasty-quickcheck , tasty-quickcheck
@ -182,7 +130,6 @@ test-suite minio-hs-test
hs-source-dirs: test, src hs-source-dirs: test, src
main-is: Spec.hs main-is: Spec.hs
build-depends: minio-hs build-depends: minio-hs
, raw-strings-qq
, QuickCheck , QuickCheck
, tasty , tasty
, tasty-hunit , tasty-hunit
@ -199,7 +146,6 @@ test-suite minio-hs-test
, Network.Minio.Utils.Test , Network.Minio.Utils.Test
, Network.Minio.XmlGenerator.Test , Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser.Test , Network.Minio.XmlParser.Test
, Network.Minio.Credentials
Flag examples Flag examples
Description: Build the examples Description: Build the examples
@ -346,7 +292,6 @@ executable SetConfig
scope: private scope: private
main-is: SetConfig.hs main-is: SetConfig.hs
executable AssumeRole source-repository head
import: examples-settings type: git
scope: private location: https://github.com/minio/minio-hs
main-is: AssumeRole.hs

View File

@ -15,41 +15,19 @@
-- --
module Lib.Prelude module Lib.Prelude
( module Exports, ( module Exports
both, , both
showBS, ) where
toStrictBS,
fromStrictBS,
lastMay,
)
where
import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT) import Protolude as Exports hiding (catch, catches,
import qualified Data.ByteString.Lazy as LB throwIO, try)
import Data.Time as Exports
( UTCTime (..), import Control.Monad.Trans.Maybe as Exports (MaybeT (..), runMaybeT)
diffUTCTime, import Data.Time as Exports (UTCTime (..),
) diffUTCTime)
import UnliftIO as Exports import UnliftIO as Exports (catch, catches, throwIO,
( Handler, try)
catch,
catches,
throwIO,
try,
)
-- | Apply a function on both elements of a pair -- | Apply a function on both elements of a pair
both :: (a -> b) -> (a, a) -> (b, b) both :: (a -> b) -> (a, a) -> (b, b)
both f (a, b) = (f a, f b) both f (a, b) = (f a, f b)
showBS :: (Show a) => a -> ByteString
showBS a = encodeUtf8 (show a :: Text)
toStrictBS :: LByteString -> ByteString
toStrictBS = LB.toStrict
fromStrictBS :: ByteString -> LByteString
fromStrictBS = LB.fromStrict
lastMay :: [a] -> Maybe a
lastMay a = last <$> nonEmpty a

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -16,237 +16,224 @@
-- | -- |
-- Module: Network.Minio -- Module: Network.Minio
-- Copyright: (c) 2017-2023 MinIO Dev Team -- Copyright: (c) 2017-2019 MinIO Dev Team
-- License: Apache 2.0 -- License: Apache 2.0
-- Maintainer: MinIO Dev Team <dev@min.io> -- Maintainer: MinIO Dev Team <dev@min.io>
-- --
-- Types and functions to conveniently access S3 compatible object -- Types and functions to conveniently access S3 compatible object
-- storage servers like MinIO. -- storage servers like MinIO.
module Network.Minio module Network.Minio
( -- * Credentials (
CredentialValue (..), -- * Credentials
credentialValueText, Credentials (..)
AccessKey (..),
SecretKey (..),
SessionToken (..),
-- ** Credential Loaders -- ** Credential providers
-- | Run actions that retrieve 'Credentials' from the environment or
-- files or other custom sources.
, Provider
, fromAWSConfigFile
, fromAWSEnv
, fromMinioEnv
, findFirst
-- | Run actions that retrieve 'CredentialValue's from the environment or -- * Connecting to object storage
-- files or other custom sources. , ConnectInfo
CredentialLoader, , setRegion
fromAWSConfigFile, , setCreds
fromAWSEnv, , setCredsFrom
fromMinioEnv, , isConnectInfoSecure
findFirst, , disableTLSCertValidation
, MinioConn
, mkMinioConn
-- * Connecting to object storage -- ** Connection helpers
ConnectInfo, -- | These are helpers to construct 'ConnectInfo' values for common
setRegion, -- cases.
setCreds, , minioPlayCI
setCredsFrom, , awsCI
isConnectInfoSecure, , gcsCI
disableTLSCertValidation,
MinioConn,
mkMinioConn,
-- ** Connection helpers -- * Minio Monad
----------------
-- | The Minio Monad provides connection-reuse, bucket-location
-- caching, resource management and simpler error handling
-- functionality. All actions on object storage are performed within
-- this Monad.
, Minio
, runMinioWith
, runMinio
, runMinioResWith
, runMinioRes
-- | These are helpers to construct 'ConnectInfo' values for common -- * Bucket Operations
-- cases.
minioPlayCI,
awsCI,
gcsCI,
-- ** STS Credential types -- ** Creation, removal and querying
STSAssumeRole (..), , Bucket
STSAssumeRoleOptions (..), , makeBucket
defaultSTSAssumeRoleOptions, , removeBucket
requestSTSCredential, , bucketExists
setSTSCredential, , Region
ExpiryTime (..), , getLocation
STSCredentialProvider,
-- * Minio Monad -- ** Listing buckets
, BucketInfo(..)
, listBuckets
---------------- -- ** Listing objects
, listObjects
, listObjectsV1
, ListItem(..)
-- | The Minio Monad provides connection-reuse, bucket-location , ObjectInfo
-- caching, resource management and simpler error handling , oiObject
-- functionality. All actions on object storage are performed within , oiModTime
-- this Monad. , oiETag
Minio, , oiSize
runMinioWith, , oiUserMetadata
runMinio, , oiMetadata
runMinioResWith,
runMinioRes,
-- * Bucket Operations -- ** Listing incomplete uploads
, listIncompleteUploads
, UploadId
, UploadInfo(..)
, listIncompleteParts
, ObjectPartInfo(..)
-- ** Creation, removal and querying -- ** Bucket Notifications
Bucket, , getBucketNotification
makeBucket, , putBucketNotification
removeBucket, , removeAllBucketNotification
bucketExists, , Notification(..)
Region, , defaultNotification
getLocation, , NotificationConfig(..)
, Arn
, Event(..)
, Filter(..)
, defaultFilter
, FilterKey(..)
, defaultFilterKey
, FilterRules(..)
, defaultFilterRules
, FilterRule(..)
-- ** Listing buckets -- * Object Operations
BucketInfo (..), , Object
listBuckets,
-- ** Listing objects -- ** File-based operations
listObjects, , fGetObject
listObjectsV1, , fPutObject
ListItem (..),
ObjectInfo,
oiObject,
oiModTime,
oiETag,
oiSize,
oiUserMetadata,
oiMetadata,
-- ** Listing incomplete uploads -- ** Conduit-based streaming operations
listIncompleteUploads, , putObject
UploadId, , PutObjectOptions
UploadInfo (..), , defaultPutObjectOptions
listIncompleteParts, , pooContentType
ObjectPartInfo (..), , pooContentEncoding
, pooContentDisposition
, pooContentLanguage
, pooCacheControl
, pooStorageClass
, pooUserMetadata
, pooNumThreads
, pooSSE
-- ** Bucket Notifications , getObject
getBucketNotification, , GetObjectOptions
putBucketNotification, , defaultGetObjectOptions
removeAllBucketNotification, , gooRange
Notification (..), , gooIfMatch
defaultNotification, , gooIfNoneMatch
NotificationConfig (..), , gooIfModifiedSince
Arn, , gooIfUnmodifiedSince
Event (..), , gooSSECKey
Filter (..), , GetObjectResponse
defaultFilter, , gorObjectInfo
FilterKey (..), , gorObjectStream
defaultFilterKey,
FilterRules (..),
defaultFilterRules,
FilterRule (..),
-- * Object Operations -- ** Server-side object copying
Object, , copyObject
, SourceInfo
, defaultSourceInfo
, srcBucket
, srcObject
, srcRange
, srcIfMatch
, srcIfNoneMatch
, srcIfModifiedSince
, srcIfUnmodifiedSince
, DestinationInfo
, defaultDestinationInfo
, dstBucket
, dstObject
-- ** File-based operations -- ** Querying object info
fGetObject, , statObject
fPutObject,
-- ** Conduit-based streaming operations -- ** Object removal operations
putObject, , removeObject
PutObjectOptions, , removeIncompleteUpload
defaultPutObjectOptions,
pooContentType,
pooContentEncoding,
pooContentDisposition,
pooContentLanguage,
pooCacheControl,
pooStorageClass,
pooUserMetadata,
pooNumThreads,
pooSSE,
getObject,
GetObjectOptions,
defaultGetObjectOptions,
gooRange,
gooIfMatch,
gooIfNoneMatch,
gooIfModifiedSince,
gooIfUnmodifiedSince,
gooSSECKey,
GetObjectResponse,
gorObjectInfo,
gorObjectStream,
-- ** Server-side object copying -- ** Select Object Content with SQL
copyObject, , module Network.Minio.SelectAPI
SourceInfo,
defaultSourceInfo,
srcBucket,
srcObject,
srcRange,
srcIfMatch,
srcIfNoneMatch,
srcIfModifiedSince,
srcIfUnmodifiedSince,
DestinationInfo,
defaultDestinationInfo,
dstBucket,
dstObject,
-- ** Querying object info -- * Server-Side Encryption Helpers
statObject, , mkSSECKey
, SSECKey
, SSE(..)
-- ** Object removal operations -- * Presigned Operations
removeObject, , presignedPutObjectUrl
removeIncompleteUpload, , presignedGetObjectUrl
, presignedHeadObjectUrl
, UrlExpiry
-- ** Select Object Content with SQL -- ** POST (browser) upload helpers
module Network.Minio.SelectAPI, -- | Please see
-- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-- for detailed information.
, newPostPolicy
, presignedPostPolicy
, showPostPolicy
, PostPolicy
, PostPolicyError(..)
-- * Server-Side Encryption Helpers -- *** Post Policy condition helpers
mkSSECKey, , PostPolicyCondition
SSECKey, , ppCondBucket
SSE (..), , ppCondContentLengthRange
, ppCondContentType
, ppCondKey
, ppCondKeyStartsWith
, ppCondSuccessActionStatus
-- * Presigned Operations -- * Error handling
presignedPutObjectUrl, -- | Data types representing various errors that may occur while
presignedGetObjectUrl, -- working with an object storage service.
presignedHeadObjectUrl, , MinioErr(..)
UrlExpiry, , MErrV(..)
, ServiceErr(..)
-- ** POST (browser) upload helpers ) where
-- | Please see
-- https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
-- for detailed information.
newPostPolicy,
presignedPostPolicy,
showPostPolicy,
PostPolicy,
PostPolicyError (..),
-- *** Post Policy condition helpers
PostPolicyCondition,
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
-- * Error handling
-- | Data types representing various errors that may occur while
-- working with an object storage service.
MinioErr (..),
MErrV (..),
ServiceErr (..),
)
where
{- {-
This module exports the high-level MinIO API for object storage. This module exports the high-level MinIO API for object storage.
-} -}
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import Network.Minio.API
import Network.Minio.CopyObject import Lib.Prelude
import Network.Minio.Credentials
import Network.Minio.Data import Network.Minio.CopyObject
import Network.Minio.Errors import Network.Minio.Data
import Network.Minio.ListOps import Network.Minio.Errors
import Network.Minio.PutObject import Network.Minio.ListOps
import Network.Minio.S3API import Network.Minio.PutObject
import Network.Minio.SelectAPI import Network.Minio.S3API
import Network.Minio.SelectAPI
import Network.Minio.Utils
-- | Lists buckets. -- | Lists buckets.
listBuckets :: Minio [BucketInfo] listBuckets :: Minio [BucketInfo]
@ -261,12 +248,8 @@ fGetObject bucket object fp opts = do
C.connect (gorObjectStream src) $ CB.sinkFileCautious fp C.connect (gorObjectStream src) $ CB.sinkFileCautious fp
-- | Upload the given file to the given object. -- | Upload the given file to the given object.
fPutObject :: fPutObject :: Bucket -> Object -> FilePath
Bucket -> -> PutObjectOptions -> Minio ()
Object ->
FilePath ->
PutObjectOptions ->
Minio ()
fPutObject bucket object f opts = fPutObject bucket object f opts =
void $ putObjectInternal bucket object opts $ ODFile f Nothing void $ putObjectInternal bucket object opts $ ODFile f Nothing
@ -274,13 +257,8 @@ fPutObject bucket object f opts =
-- known; this helps the library select optimal part sizes to perform -- known; this helps the library select optimal part sizes to perform
-- a multipart upload. If not specified, it is assumed that the object -- a multipart upload. If not specified, it is assumed that the object
-- can be potentially 5TiB and selects multipart sizes appropriately. -- can be potentially 5TiB and selects multipart sizes appropriately.
putObject :: putObject :: Bucket -> Object -> C.ConduitM () ByteString Minio ()
Bucket -> -> Maybe Int64 -> PutObjectOptions -> Minio ()
Object ->
C.ConduitM () ByteString Minio () ->
Maybe Int64 ->
PutObjectOptions ->
Minio ()
putObject bucket object src sizeMay opts = putObject bucket object src sizeMay opts =
void $ putObjectInternal bucket object opts $ ODStream src sizeMay void $ putObjectInternal bucket object opts $ ODStream src sizeMay
@ -290,25 +268,18 @@ putObject bucket object src sizeMay opts =
-- copy operation if the new object is to be greater than 5GiB in -- copy operation if the new object is to be greater than 5GiB in
-- size. -- size.
copyObject :: DestinationInfo -> SourceInfo -> Minio () copyObject :: DestinationInfo -> SourceInfo -> Minio ()
copyObject dstInfo srcInfo = copyObject dstInfo srcInfo = void $ copyObjectInternal (dstBucket dstInfo)
void $ (dstObject dstInfo) srcInfo
copyObjectInternal
(dstBucket dstInfo)
(dstObject dstInfo)
srcInfo
-- | Remove an object from the object store. -- | Remove an object from the object store.
removeObject :: Bucket -> Object -> Minio () removeObject :: Bucket -> Object -> Minio ()
removeObject = deleteObject removeObject = deleteObject
-- | Get an object from the object store. -- | Get an object from the object store.
getObject :: getObject :: Bucket -> Object -> GetObjectOptions
Bucket -> -> Minio GetObjectResponse
Object ->
GetObjectOptions ->
Minio GetObjectResponse
getObject bucket object opts = getObject bucket object opts =
getObject' bucket object [] $ gooToHeaders opts getObject' bucket object [] $ gooToHeaders opts
-- | Get an object's metadata from the object store. It accepts the -- | Get an object's metadata from the object store. It accepts the
-- same options as GetObject. -- same options as GetObject.
@ -338,8 +309,6 @@ bucketExists = headBucket
-- | Removes an ongoing multipart upload of an object. -- | Removes an ongoing multipart upload of an object.
removeIncompleteUpload :: Bucket -> Object -> Minio () removeIncompleteUpload :: Bucket -> Object -> Minio ()
removeIncompleteUpload bucket object = do removeIncompleteUpload bucket object = do
uploads <- uploads <- C.runConduit $ listIncompleteUploads bucket (Just object) False
C.runConduit $ C..| CC.sinkList
listIncompleteUploads bucket (Just object) False
C..| CC.sinkList
mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads) mapM_ (abortMultipartUpload bucket object) (uiUploadId <$> uploads)

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -15,260 +15,169 @@
-- --
module Network.Minio.API module Network.Minio.API
( connect, ( connect
S3ReqInfo (..), , S3ReqInfo(..)
runMinio, , runMinio
executeRequest, , executeRequest
buildRequest, , mkStreamRequest
mkStreamRequest, , getLocation
getLocation,
isValidBucketName,
checkBucketNameValidity,
isValidObjectName,
checkObjectNameValidity,
requestSTSCredential,
)
where
import Control.Retry , isValidBucketName
( fullJitterBackoff, , checkBucketNameValidity
limitRetriesByCumulativeDelay, , isValidObjectName
retrying, , checkObjectNameValidity
) ) where
import qualified Data.ByteString as B
import qualified Data.Char as C import Control.Retry (fullJitterBackoff,
import qualified Data.Conduit as C limitRetriesByCumulativeDelay,
import qualified Data.HashMap.Strict as H retrying)
import qualified Data.Text as T import qualified Data.ByteString as B
import qualified Data.Time.Clock as Time import qualified Data.Char as C
import Lib.Prelude import qualified Data.Conduit as C
import Network.HTTP.Client (defaultManagerSettings) import qualified Data.HashMap.Strict as H
import qualified Network.HTTP.Client as NClient import qualified Data.Text as T
import Network.HTTP.Conduit (Response) import qualified Data.Time.Clock as Time
import qualified Network.HTTP.Conduit as NC import Network.HTTP.Conduit (Response)
import Network.HTTP.Types (simpleQueryToQuery) import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost) import Network.HTTP.Types.Header (hHost)
import Network.Minio.APICommon
import Network.Minio.Credentials import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors import Network.Minio.APICommon
import Network.Minio.Sign.V4 import Network.Minio.Data
import Network.Minio.Utils import Network.Minio.Errors
import Network.Minio.XmlParser import Network.Minio.Sign.V4
import Network.Minio.Utils
import Network.Minio.XmlParser
-- | Fetch bucket location (region) -- | Fetch bucket location (region)
getLocation :: Bucket -> Minio Region getLocation :: Bucket -> Minio Region
getLocation bucket = do getLocation bucket = do
resp <- resp <- executeRequest $ defaultS3ReqInfo {
executeRequest $ riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = [("location", Nothing)]
{ riBucket = Just bucket, , riNeedsLocation = False
riQueryParams = [("location", Nothing)], }
riNeedsLocation = False
}
parseLocation $ NC.responseBody resp parseLocation $ NC.responseBody resp
-- | Looks for region in RegionMap and updates it using getLocation if -- | Looks for region in RegionMap and updates it using getLocation if
-- absent. -- absent.
discoverRegion :: S3ReqInfo -> Minio (Maybe Region) discoverRegion :: S3ReqInfo -> Minio (Maybe Region)
discoverRegion ri = runMaybeT $ do discoverRegion ri = runMaybeT $ do
bucket <- MaybeT $ return $ riBucket ri bucket <- MaybeT $ return $ riBucket ri
regionMay <- lift $ lookupRegionCache bucket regionMay <- lift $ lookupRegionCache bucket
maybe maybe (do
( do l <- lift $ getLocation bucket
l <- lift $ getLocation bucket lift $ addToRegionCache bucket l
lift $ addToRegionCache bucket l return l
return l ) return regionMay
)
return
regionMay
-- | Returns the region to be used for the request.
getRegion :: S3ReqInfo -> Minio (Maybe Region) getRegion :: S3ReqInfo -> Minio (Maybe Region)
getRegion ri = do getRegion ri = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
-- getService/makeBucket/getLocation -- don't need location -- getService/makeBucket/getLocation -- don't need location
if if | not $ riNeedsLocation ri ->
| not $ riNeedsLocation ri -> return $ Just $ connectRegion ci
return $ Just $ connectRegion ci
-- if autodiscovery of location is disabled by user -- if autodiscovery of location is disabled by user
| not $ connectAutoDiscoverRegion ci -> | not $ connectAutoDiscoverRegion ci ->
return $ Just $ connectRegion ci return $ Just $ connectRegion ci
-- discover the region for the request
| otherwise -> discoverRegion ri -- discover the region for the request
| otherwise -> discoverRegion ri
getRegionHost :: Region -> Minio Text getRegionHost :: Region -> Minio Text
getRegionHost r = do getRegionHost r = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
if "amazonaws.com" `T.isSuffixOf` connectHost ci if "amazonaws.com" `T.isSuffixOf` connectHost ci
then then maybe (throwIO $ MErrVRegionNotSupported r)
maybe return (H.lookup r awsRegionMap)
(throwIO $ MErrVRegionNotSupported r)
return
(H.lookup r awsRegionMap)
else return $ connectHost ci else return $ connectHost ci
-- | Computes the appropriate host, path and region for the request.
--
-- For AWS, always use virtual bucket style, unless bucket has periods. For
-- MinIO and other non-AWS, default to path style.
getHostPathRegion :: S3ReqInfo -> Minio (Text, ByteString, Maybe Region)
getHostPathRegion ri = do
ci <- asks mcConnInfo
regionMay <- getRegion ri
case riBucket ri of
Nothing ->
-- Implies a ListBuckets request.
return (connectHost ci, "/", regionMay)
Just bucket -> do
regionHost <- case regionMay of
Nothing -> return $ connectHost ci
Just "" -> return $ connectHost ci
Just r -> getRegionHost r
let pathStyle =
( regionHost,
getS3Path (riBucket ri) (riObject ri),
regionMay
)
virtualStyle =
( bucket <> "." <> regionHost,
encodeUtf8 $ "/" <> fromMaybe "" (riObject ri),
regionMay
)
( if isAWSConnectInfo ci
then
return $
if bucketHasPeriods bucket
then pathStyle
else virtualStyle
else return pathStyle
)
-- | requestSTSCredential requests temporary credentials using the Security Token
-- Service API. The returned credential will include a populated 'SessionToken'
-- and an 'ExpiryTime'.
requestSTSCredential :: (STSCredentialProvider p) => p -> IO (CredentialValue, ExpiryTime)
requestSTSCredential p = do
endpoint <- maybe (throwIO $ MErrValidation MErrVSTSEndpointNotFound) return $ getSTSEndpoint p
let endPt = NC.parseRequest_ $ toString endpoint
settings
| NC.secure endPt = NC.tlsManagerSettings
| otherwise = defaultManagerSettings
mgr <- NC.newManager settings
liftIO $ retrieveSTSCredentials p ("", 0, False) mgr
buildRequest :: S3ReqInfo -> Minio NC.Request buildRequest :: S3ReqInfo -> Minio NC.Request
buildRequest ri = do buildRequest ri = do
maybe (return ()) checkBucketNameValidity $ riBucket ri maybe (return ()) checkBucketNameValidity $ riBucket ri
maybe (return ()) checkObjectNameValidity $ riObject ri maybe (return ()) checkObjectNameValidity $ riObject ri
ci <- asks mcConnInfo ci <- asks mcConnInfo
(host, path, regionMay) <- getHostPathRegion ri regionMay <- getRegion ri
let ci' = ci {connectHost = host} regionHost <- maybe (return $ connectHost ci) getRegionHost regionMay
hostHeader = (hHost, getHostAddr ci')
ri' =
ri
{ riHeaders = hostHeader : riHeaders ri,
riRegion = regionMay
}
-- Does not contain body and auth info.
baseRequest =
NC.defaultRequest
{ NC.method = riMethod ri',
NC.secure = connectIsSecure ci',
NC.host = encodeUtf8 $ connectHost ci',
NC.port = connectPort ci',
NC.path = path,
NC.requestHeaders = riHeaders ri',
NC.queryString = HT.renderQuery False $ riQueryParams ri'
}
timeStamp <- liftIO Time.getCurrentTime let ri' = ri { riHeaders = hostHeader : riHeaders ri
, riRegion = regionMay
}
ci' = ci { connectHost = regionHost }
hostHeader = (hHost, getHostAddr ci')
mgr <- asks mcConnManager -- Does not contain body and auth info.
cv <- liftIO $ getCredential (connectCreds ci') (getEndpoint ci') mgr baseRequest = NC.defaultRequest
{ NC.method = riMethod ri'
, NC.secure = connectIsSecure ci'
, NC.host = encodeUtf8 $ connectHost ci'
, NC.port = connectPort ci'
, NC.path = getS3Path (riBucket ri') (riObject ri')
, NC.requestHeaders = riHeaders ri'
, NC.queryString = HT.renderQuery False $ riQueryParams ri'
}
let sp = timeStamp <- liftIO Time.getCurrentTime
SignParams
(coerce $ cvAccessKey cv)
(coerce $ cvSecretKey cv)
(coerce $ cvSessionToken cv)
ServiceS3
timeStamp
(riRegion ri')
(riPresignExpirySecs ri')
Nothing
-- Cases to handle: let sp = SignParams (connectAccessKey ci') (connectSecretKey ci')
-- timeStamp (riRegion ri') Nothing Nothing
-- 0. Handle presign URL case.
--
-- 1. Connection is secure: use unsigned payload
--
-- 2. Insecure connection, streaming signature is enabled via use of
-- conduit payload: use streaming signature for request.
--
-- 3. Insecure connection, non-conduit payload: compute payload
-- sha256hash, buffer request in memory and perform request.
if -- Cases to handle:
| isJust (riPresignExpirySecs ri') -> --
-- case 0 from above. -- 1. Connection is secure: use unsigned payload
do --
let signPairs = signV4QueryParams sp baseRequest -- 2. Insecure connection, streaming signature is enabled via use of
qpToAdd = simpleQueryToQuery signPairs -- conduit payload: use streaming signature for request.
existingQueryParams = HT.parseQuery (NC.queryString baseRequest) --
updatedQueryParams = existingQueryParams ++ qpToAdd -- 3. Insecure connection, non-conduit payload: compute payload
return $ NClient.setQueryString updatedQueryParams baseRequest -- sha256hash, buffer request in memory and perform request.
| isStreamingPayload (riPayload ri') && not (connectIsSecure ci') ->
-- case 2 from above. -- case 2 from above.
do if | isStreamingPayload (riPayload ri') &&
(pLen, pSrc) <- case riPayload ri of (not $ connectIsSecure ci') -> do
PayloadC l src -> return (l, src) (pLen, pSrc) <- case riPayload ri of
_ -> throwIO MErrVUnexpectedPayload PayloadC l src -> return (l, src)
let reqFn = signV4Stream pLen sp baseRequest _ -> throwIO MErrVUnexpectedPayload
return $ reqFn pSrc let reqFn = signV4Stream pLen sp baseRequest
| otherwise -> return $ reqFn pSrc
do
sp' <- | otherwise -> do
( if connectIsSecure ci' -- case 1 described above.
then -- case 1 described above. sp' <- if | connectIsSecure ci' -> return sp
return sp -- case 3 described above.
else | otherwise -> do
( -- case 3 described above. pHash <- getPayloadSHA256Hash $ riPayload ri'
do return $ sp { spPayloadHash = Just pHash }
pHash <- getPayloadSHA256Hash $ riPayload ri'
return $ sp {spPayloadHash = Just pHash} let signHeaders = signV4 sp' baseRequest
) return $ baseRequest
) { NC.requestHeaders =
NC.requestHeaders baseRequest ++
mkHeaderFromPairs signHeaders
, NC.requestBody = getRequestBody (riPayload ri')
}
let signHeaders = signV4 sp' baseRequest
return $
baseRequest
{ NC.requestHeaders =
NC.requestHeaders baseRequest ++ signHeaders,
NC.requestBody = getRequestBody (riPayload ri')
}
retryAPIRequest :: Minio a -> Minio a retryAPIRequest :: Minio a -> Minio a
retryAPIRequest apiCall = do retryAPIRequest apiCall = do
resE <- resE <- retrying retryPolicy (const shouldRetry) $
retrying retryPolicy (const shouldRetry) $ const $ try apiCall
const $
try apiCall
either throwIO return resE either throwIO return resE
where where
-- Retry using the full-jitter backoff method for up to 10 mins -- Retry using the full-jitter backoff method for up to 10 mins
-- total -- total
retryPolicy = retryPolicy = limitRetriesByCumulativeDelay tenMins
limitRetriesByCumulativeDelay tenMins $ $ fullJitterBackoff oneMilliSecond
fullJitterBackoff oneMilliSecond
oneMilliSecond = 1000 -- in microseconds oneMilliSecond = 1000 -- in microseconds
tenMins = 10 * 60 * 1000000 -- in microseconds tenMins = 10 * 60 * 1000000 -- in microseconds
-- retry on connection related failure -- retry on connection related failure
@ -280,23 +189,23 @@ retryAPIRequest apiCall = do
-- API request failed with a retryable exception -- API request failed with a retryable exception
Left httpExn@(NC.HttpExceptionRequest _ exn) -> Left httpExn@(NC.HttpExceptionRequest _ exn) ->
case (exn :: NC.HttpExceptionContent) of case (exn :: NC.HttpExceptionContent) of
NC.ResponseTimeout -> return True NC.ResponseTimeout -> return True
NC.ConnectionTimeout -> return True NC.ConnectionTimeout -> return True
NC.ConnectionFailure _ -> return True NC.ConnectionFailure _ -> return True
-- We received an unexpected exception -- We received an unexpected exception
_ -> throwIO httpExn _ -> throwIO httpExn
-- We received an unexpected exception -- We received an unexpected exception
Left someOtherExn -> throwIO someOtherExn Left someOtherExn -> throwIO someOtherExn
executeRequest :: S3ReqInfo -> Minio (Response LByteString) executeRequest :: S3ReqInfo -> Minio (Response LByteString)
executeRequest ri = do executeRequest ri = do
req <- buildRequest ri req <- buildRequest ri
mgr <- asks mcConnManager mgr <- asks mcConnManager
retryAPIRequest $ httpLbs req mgr retryAPIRequest $ httpLbs req mgr
mkStreamRequest :: mkStreamRequest :: S3ReqInfo
S3ReqInfo -> -> Minio (Response (C.ConduitM () ByteString Minio ()))
Minio (Response (C.ConduitM () ByteString Minio ()))
mkStreamRequest ri = do mkStreamRequest ri = do
req <- buildRequest ri req <- buildRequest ri
mgr <- asks mcConnManager mgr <- asks mcConnManager
@ -305,50 +214,41 @@ mkStreamRequest ri = do
-- Bucket name validity check according to AWS rules. -- Bucket name validity check according to AWS rules.
isValidBucketName :: Bucket -> Bool isValidBucketName :: Bucket -> Bool
isValidBucketName bucket = isValidBucketName bucket =
not not (or [ len < 3 || len > 63
( or , or (map labelCheck labels)
[ len < 3 || len > 63, , or (map labelCharsCheck labels)
any labelCheck labels, , isIPCheck
any labelCharsCheck labels, ])
isIPCheck
]
)
where where
len = T.length bucket len = T.length bucket
labels = T.splitOn "." bucket labels = T.splitOn "." bucket
-- does label `l` fail basic checks of length and start/end? -- does label `l` fail basic checks of length and start/end?
labelCheck l = T.length l == 0 || T.head l == '-' || T.last l == '-' labelCheck l = T.length l == 0 || T.head l == '-' || T.last l == '-'
-- does label `l` have non-allowed characters? -- does label `l` have non-allowed characters?
labelCharsCheck l = labelCharsCheck l = isJust $ T.find (\x -> not (C.isAsciiLower x ||
isJust $ x == '-' ||
T.find C.isDigit x)) l
( \x ->
not
( C.isAsciiLower x
|| x == '-'
|| C.isDigit x
)
)
l
-- does label `l` have non-digit characters? -- does label `l` have non-digit characters?
labelNonDigits l = isJust $ T.find (not . C.isDigit) l labelNonDigits l = isJust $ T.find (not . C.isDigit) l
labelAsNums = map (not . labelNonDigits) labels labelAsNums = map (not . labelNonDigits) labels
-- check if bucket name looks like an IP -- check if bucket name looks like an IP
isIPCheck = and labelAsNums && length labelAsNums == 4 isIPCheck = and labelAsNums && length labelAsNums == 4
-- Throws exception iff bucket name is invalid according to AWS rules. -- Throws exception iff bucket name is invalid according to AWS rules.
checkBucketNameValidity :: (MonadIO m) => Bucket -> m () checkBucketNameValidity :: MonadIO m => Bucket -> m ()
checkBucketNameValidity bucket = checkBucketNameValidity bucket =
unless (isValidBucketName bucket) $ when (not $ isValidBucketName bucket) $
throwIO $ throwIO $ MErrVInvalidBucketName bucket
MErrVInvalidBucketName bucket
isValidObjectName :: Object -> Bool isValidObjectName :: Object -> Bool
isValidObjectName object = isValidObjectName object =
T.length object > 0 && B.length (encodeUtf8 object) <= 1024 T.length object > 0 && B.length (encodeUtf8 object) <= 1024
checkObjectNameValidity :: (MonadIO m) => Object -> m () checkObjectNameValidity :: MonadIO m => Object -> m ()
checkObjectNameValidity object = checkObjectNameValidity object =
unless (isValidObjectName object) $ when (not $ isValidObjectName object) $
throwIO $ throwIO $ MErrVInvalidObjectName object
MErrVInvalidObjectName object

View File

@ -16,39 +16,37 @@
module Network.Minio.APICommon where module Network.Minio.APICommon where
import qualified Conduit as C import qualified Conduit as C
import qualified Data.ByteString as BS import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.Conduit.Binary (sourceHandleRange) import Data.Conduit.Binary (sourceHandleRange)
import qualified Data.Text as T import qualified Network.HTTP.Conduit as NC
import Lib.Prelude import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Crypto import Network.Minio.Data
import Network.Minio.Errors import Network.Minio.Data.Crypto
import Network.Minio.Errors
sha256Header :: ByteString -> HT.Header sha256Header :: ByteString -> HT.Header
sha256Header = ("x-amz-content-sha256",) sha256Header = ("x-amz-content-sha256", )
-- | This function throws an error if the payload is a conduit (as it -- | This function throws an error if the payload is a conduit (as it
-- will not be possible to re-read the conduit after it is consumed). -- will not be possible to re-read the conduit after it is consumed).
getPayloadSHA256Hash :: Payload -> Minio ByteString getPayloadSHA256Hash :: Payload -> Minio ByteString
getPayloadSHA256Hash (PayloadBS bs) = return $ hashSHA256 bs getPayloadSHA256Hash (PayloadBS bs) = return $ hashSHA256 bs
getPayloadSHA256Hash (PayloadH h off size) = getPayloadSHA256Hash (PayloadH h off size) = hashSHA256FromSource $
hashSHA256FromSource $ sourceHandleRange h
sourceHandleRange (return . fromIntegral $ off)
h (return . fromIntegral $ size)
(return . fromIntegral $ off)
(return . fromIntegral $ size)
getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload
getRequestBody :: Payload -> NC.RequestBody getRequestBody :: Payload -> NC.RequestBody
getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs
getRequestBody (PayloadH h off size) = getRequestBody (PayloadH h off size) =
NC.requestBodySource size $ NC.requestBodySource (fromIntegral size) $
sourceHandleRange sourceHandleRange h
h
(return . fromIntegral $ off) (return . fromIntegral $ off)
(return . fromIntegral $ size) (return . fromIntegral $ size)
getRequestBody (PayloadC n src) = NC.requestBodySource n src getRequestBody (PayloadC n src) = NC.requestBodySource n src
@ -57,24 +55,14 @@ mkStreamingPayload :: Payload -> Payload
mkStreamingPayload payload = mkStreamingPayload payload =
case payload of case payload of
PayloadBS bs -> PayloadBS bs ->
PayloadC PayloadC (fromIntegral $ BS.length bs)
(fromIntegral $ BS.length bs)
(C.sourceLazy $ LB.fromStrict bs) (C.sourceLazy $ LB.fromStrict bs)
PayloadH h off len -> PayloadH h off len ->
PayloadC len $ PayloadC len $ sourceHandleRange h
sourceHandleRange (return . fromIntegral $ off)
h (return . fromIntegral $ len)
(return . fromIntegral $ off)
(return . fromIntegral $ len)
_ -> payload _ -> payload
isStreamingPayload :: Payload -> Bool isStreamingPayload :: Payload -> Bool
isStreamingPayload (PayloadC _ _) = True isStreamingPayload (PayloadC _ _) = True
isStreamingPayload _ = False isStreamingPayload _ = False
-- | Checks if the connect info is for Amazon S3.
isAWSConnectInfo :: ConnectInfo -> Bool
isAWSConnectInfo ci = ".amazonaws.com" `T.isSuffixOf` connectHost ci
bucketHasPeriods :: Bucket -> Bool
bucketHasPeriods b = isJust $ T.find (== '.') b

File diff suppressed because it is too large Load Diff

View File

@ -16,19 +16,19 @@
module Network.Minio.CopyObject where module Network.Minio.CopyObject where
import qualified Data.List as List import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data import Lib.Prelude
import Network.Minio.Errors
import Network.Minio.S3API import Network.Minio.Data
import Network.Minio.Utils import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | Copy an object using single or multipart copy strategy. -- | Copy an object using single or multipart copy strategy.
copyObjectInternal :: copyObjectInternal :: Bucket -> Object -> SourceInfo
Bucket -> -> Minio ETag
Object ->
SourceInfo ->
Minio ETag
copyObjectInternal b' o srcInfo = do copyObjectInternal b' o srcInfo = do
let sBucket = srcBucket srcInfo let sBucket = srcBucket srcInfo
sObject = srcObject srcInfo sObject = srcObject srcInfo
@ -43,33 +43,27 @@ copyObjectInternal b' o srcInfo = do
startOffset = fst range startOffset = fst range
endOffset = snd range endOffset = snd range
when when (isJust rangeMay &&
( isJust rangeMay or [startOffset < 0, endOffset < startOffset,
&& ( (startOffset < 0) endOffset >= fromIntegral srcSize]) $
|| (endOffset < startOffset) throwIO $ MErrVInvalidSrcObjByteRange range
|| (endOffset >= srcSize)
)
)
$ throwIO
$ MErrVInvalidSrcObjByteRange range
-- 1. If sz > 64MiB (minPartSize) use multipart copy, OR -- 1. If sz > 64MiB (minPartSize) use multipart copy, OR
-- 2. If startOffset /= 0 use multipart copy -- 2. If startOffset /= 0 use multipart copy
let destSize = let destSize = (\(a, b) -> b - a + 1 ) $
(\(a, b) -> b - a + 1) $ maybe (0, srcSize - 1) identity rangeMay
maybe (0, srcSize - 1) identity rangeMay
if destSize > minPartSize || (endOffset - startOffset + 1 /= srcSize) if destSize > minPartSize || (endOffset - startOffset + 1 /= srcSize)
then multiPartCopyObject b' o srcInfo srcSize then multiPartCopyObject b' o srcInfo srcSize
else fst <$> copyObjectSingle b' o srcInfo {srcRange = Nothing} []
else fst <$> copyObjectSingle b' o srcInfo{srcRange = Nothing} []
-- | Given the input byte range of the source object, compute the -- | Given the input byte range of the source object, compute the
-- splits for a multipart copy object procedure. Minimum part size -- splits for a multipart copy object procedure. Minimum part size
-- used is minPartSize. -- used is minPartSize.
selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))] selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
selectCopyRanges (st, end) = selectCopyRanges (st, end) = zip pns $
zip pns $ map (\(x, y) -> (st + x, st + x + y - 1)) $ zip startOffsets partSizes
zipWith (\x y -> (st + x, st + x + y - 1)) startOffsets partSizes
where where
size = end - st + 1 size = end - st + 1
(pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size (pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
@ -77,30 +71,22 @@ selectCopyRanges (st, end) =
-- | Perform a multipart copy object action. Since we cannot verify -- | Perform a multipart copy object action. Since we cannot verify
-- existing parts based on the source object, there is no resuming -- existing parts based on the source object, there is no resuming
-- copy action support. -- copy action support.
multiPartCopyObject :: multiPartCopyObject :: Bucket -> Object -> SourceInfo -> Int64
Bucket -> -> Minio ETag
Object ->
SourceInfo ->
Int64 ->
Minio ETag
multiPartCopyObject b o cps srcSize = do multiPartCopyObject b o cps srcSize = do
uid <- newMultipartUpload b o [] uid <- newMultipartUpload b o []
let byteRange = maybe (0, srcSize - 1) identity $ srcRange cps let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps
partRanges = selectCopyRanges byteRange partRanges = selectCopyRanges byteRange
partSources = partSources = map (\(x, (start, end)) -> (x, cps {srcRange = Just (start, end) }))
map partRanges
(\(x, (start, end)) -> (x, cps {srcRange = Just (start, end)})) dstInfo = defaultDestinationInfo { dstBucket = b, dstObject = o}
partRanges
dstInfo = defaultDestinationInfo {dstBucket = b, dstObject = o}
copiedParts <- copiedParts <- limitedMapConcurrently 10
limitedMapConcurrently (\(pn, cps') -> do
10 (etag, _) <- copyObjectPart dstInfo cps' uid pn []
( \(pn, cps') -> do return (pn, etag)
(etag, _) <- copyObjectPart dstInfo cps' uid pn [] )
return (pn, etag) partSources
)
partSources
completeMultipartUpload b o uid copiedParts completeMultipartUpload b o uid copiedParts

View File

@ -1,77 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.Credentials
( CredentialValue (..),
credentialValueText,
STSCredentialProvider (..),
AccessKey (..),
SecretKey (..),
SessionToken (..),
ExpiryTime (..),
STSCredentialStore,
initSTSCredential,
getSTSCredential,
Creds (..),
getCredential,
Endpoint,
-- * STS Assume Role
defaultSTSAssumeRoleOptions,
STSAssumeRole (..),
STSAssumeRoleOptions (..),
)
where
import Data.Time (diffUTCTime, getCurrentTime)
import qualified Network.HTTP.Client as NC
import Network.Minio.Credentials.AssumeRole
import Network.Minio.Credentials.Types
import qualified UnliftIO.MVar as M
data STSCredentialStore = STSCredentialStore
{ cachedCredentials :: M.MVar (CredentialValue, ExpiryTime),
refreshAction :: Endpoint -> NC.Manager -> IO (CredentialValue, ExpiryTime)
}
initSTSCredential :: (STSCredentialProvider p) => p -> IO STSCredentialStore
initSTSCredential p = do
let action = retrieveSTSCredentials p
-- start with dummy credential, so that refresh happens for first request.
now <- getCurrentTime
mvar <- M.newMVar (CredentialValue mempty mempty mempty, coerce now)
return $
STSCredentialStore
{ cachedCredentials = mvar,
refreshAction = action
}
getSTSCredential :: STSCredentialStore -> Endpoint -> NC.Manager -> IO (CredentialValue, Bool)
getSTSCredential store ep mgr = M.modifyMVar (cachedCredentials store) $ \cc@(v, expiry) -> do
now <- getCurrentTime
if diffUTCTime now (coerce expiry) > 0
then do
res <- refreshAction store ep mgr
return (res, (fst res, True))
else return (cc, (v, False))
data Creds
= CredsStatic CredentialValue
| CredsSTS STSCredentialStore
getCredential :: Creds -> Endpoint -> NC.Manager -> IO CredentialValue
getCredential (CredsStatic v) _ _ = return v
getCredential (CredsSTS s) ep mgr = fst <$> getSTSCredential s ep mgr

View File

@ -1,266 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.Credentials.AssumeRole where
import qualified Data.ByteArray as BA
import qualified Data.ByteString.Lazy as LB
import qualified Data.Text as T
import qualified Data.Time as Time
import Data.Time.Units (Second)
import Lib.Prelude (UTCTime, throwIO)
import Network.HTTP.Client (RequestBody (RequestBodyBS))
import qualified Network.HTTP.Client as NC
import Network.HTTP.Types (hContentType, methodPost, renderSimpleQuery)
import Network.HTTP.Types.Header (hHost)
import Network.Minio.Credentials.Types
import Network.Minio.Data.Crypto (hashSHA256)
import Network.Minio.Errors (MErrV (..))
import Network.Minio.Sign.V4
import Network.Minio.Utils (getHostHeader, httpLbs)
import Network.Minio.XmlCommon
import Text.XML.Cursor hiding (bool)
stsVersion :: ByteString
stsVersion = "2011-06-15"
defaultDurationSeconds :: Second
defaultDurationSeconds = 3600
-- | Assume Role API argument.
--
-- @since 1.7.0
data STSAssumeRole = STSAssumeRole
{ -- | Credentials to use in the AssumeRole STS API.
sarCredentials :: CredentialValue,
-- | Optional settings.
sarOptions :: STSAssumeRoleOptions
}
-- | Options for STS Assume Role API.
data STSAssumeRoleOptions = STSAssumeRoleOptions
{ -- | STS endpoint to which the request will be made. For MinIO, this is the
-- same as the server endpoint. For AWS, this has to be the Security Token
-- Service endpoint. If using with 'setSTSCredential', this option can be
-- left as 'Nothing' and the endpoint in 'ConnectInfo' will be used.
saroEndpoint :: Maybe Text,
-- | Desired validity for the generated credentials.
saroDurationSeconds :: Maybe Second,
-- | IAM policy to apply for the generated credentials.
saroPolicyJSON :: Maybe ByteString,
-- | Location is usually required for AWS.
saroLocation :: Maybe Text,
saroRoleARN :: Maybe Text,
saroRoleSessionName :: Maybe Text
}
-- | Default STS Assume Role options - all options are Nothing, except for
-- duration which is set to 1 hour.
defaultSTSAssumeRoleOptions :: STSAssumeRoleOptions
defaultSTSAssumeRoleOptions =
STSAssumeRoleOptions
{ saroEndpoint = Nothing,
saroDurationSeconds = Just 3600,
saroPolicyJSON = Nothing,
saroLocation = Nothing,
saroRoleARN = Nothing,
saroRoleSessionName = Nothing
}
data AssumeRoleCredentials = AssumeRoleCredentials
{ arcCredentials :: CredentialValue,
arcExpiration :: UTCTime
}
deriving stock (Show, Eq)
data AssumeRoleResult = AssumeRoleResult
{ arrSourceIdentity :: Text,
arrAssumedRoleArn :: Text,
arrAssumedRoleId :: Text,
arrRoleCredentials :: AssumeRoleCredentials
}
deriving stock (Show, Eq)
-- | parseSTSAssumeRoleResult parses an XML response of the following form:
--
-- <AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
-- <AssumeRoleResult>
-- <SourceIdentity>Alice</SourceIdentity>
-- <AssumedRoleUser>
-- <Arn>arn:aws:sts::123456789012:assumed-role/demo/TestAR</Arn>
-- <AssumedRoleId>ARO123EXAMPLE123:TestAR</AssumedRoleId>
-- </AssumedRoleUser>
-- <Credentials>
-- <AccessKeyId>ASIAIOSFODNN7EXAMPLE</AccessKeyId>
-- <SecretAccessKey>wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY</SecretAccessKey>
-- <SessionToken>
-- AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQW
-- LWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGd
-- QrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU
-- 9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz
-- +scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==
-- </SessionToken>
-- <Expiration>2019-11-09T13:34:41Z</Expiration>
-- </Credentials>
-- <PackedPolicySize>6</PackedPolicySize>
-- </AssumeRoleResult>
-- <ResponseMetadata>
-- <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>
-- </ResponseMetadata>
-- </AssumeRoleResponse>
parseSTSAssumeRoleResult :: (MonadIO m) => ByteString -> Text -> m AssumeRoleResult
parseSTSAssumeRoleResult xmldata namespace = do
r <- parseRoot $ LB.fromStrict xmldata
let s3Elem' = s3Elem namespace
sourceIdentity =
T.concat $
r
$/ s3Elem' "AssumeRoleResult"
&/ s3Elem' "SourceIdentity"
&/ content
roleArn =
T.concat $
r
$/ s3Elem' "AssumeRoleResult"
&/ s3Elem' "AssumedRoleUser"
&/ s3Elem' "Arn"
&/ content
roleId =
T.concat $
r
$/ s3Elem' "AssumeRoleResult"
&/ s3Elem' "AssumedRoleUser"
&/ s3Elem' "AssumedRoleId"
&/ content
convSB :: Text -> BA.ScrubbedBytes
convSB = BA.convert . (encodeUtf8 :: Text -> ByteString)
credsInfo = do
cr <-
maybe (Left $ MErrVXmlParse "No Credentials Element found") Right $
listToMaybe $
r $/ s3Elem' "AssumeRoleResult" &/ s3Elem' "Credentials"
let cur = fromNode $ node cr
return
( CredentialValue
{ cvAccessKey =
coerce $
T.concat $
cur $/ s3Elem' "AccessKeyId" &/ content,
cvSecretKey =
coerce $
convSB $
T.concat $
cur
$/ s3Elem' "SecretAccessKey"
&/ content,
cvSessionToken =
Just $
coerce $
convSB $
T.concat $
cur
$/ s3Elem' "SessionToken"
&/ content
},
T.concat $ cur $/ s3Elem' "Expiration" &/ content
)
creds <- either throwIO pure credsInfo
expiry <- parseS3XMLTime $ snd creds
let roleCredentials =
AssumeRoleCredentials
{ arcCredentials = fst creds,
arcExpiration = expiry
}
return
AssumeRoleResult
{ arrSourceIdentity = sourceIdentity,
arrAssumedRoleArn = roleArn,
arrAssumedRoleId = roleId,
arrRoleCredentials = roleCredentials
}
instance STSCredentialProvider STSAssumeRole where
getSTSEndpoint = saroEndpoint . sarOptions
retrieveSTSCredentials sar (host', port', isSecure') mgr = do
-- Assemble STS request
let requiredParams =
[ ("Action", "AssumeRole"),
("Version", stsVersion)
]
opts = sarOptions sar
durSecs :: Int =
fromIntegral $
fromMaybe defaultDurationSeconds $
saroDurationSeconds opts
otherParams =
[ ("RoleArn",) . encodeUtf8 <$> saroRoleARN opts,
("RoleSessionName",) . encodeUtf8 <$> saroRoleSessionName opts,
Just ("DurationSeconds", show durSecs),
("Policy",) <$> saroPolicyJSON opts
]
parameters = requiredParams ++ catMaybes otherParams
(host, port, isSecure) =
case getSTSEndpoint sar of
Just ep ->
let endPt = NC.parseRequest_ $ toString ep
in (NC.host endPt, NC.port endPt, NC.secure endPt)
Nothing -> (host', port', isSecure')
reqBody = renderSimpleQuery False parameters
req =
NC.defaultRequest
{ NC.host = host,
NC.port = port,
NC.secure = isSecure,
NC.method = methodPost,
NC.requestHeaders =
[ (hHost, getHostHeader (host, port)),
(hContentType, "application/x-www-form-urlencoded")
],
NC.requestBody = RequestBodyBS reqBody
}
-- Sign the STS request.
timeStamp <- liftIO Time.getCurrentTime
let sp =
SignParams
{ spAccessKey = coerce $ cvAccessKey $ sarCredentials sar,
spSecretKey = coerce $ cvSecretKey $ sarCredentials sar,
spSessionToken = coerce $ cvSessionToken $ sarCredentials sar,
spService = ServiceSTS,
spTimeStamp = timeStamp,
spRegion = saroLocation opts,
spExpirySecs = Nothing,
spPayloadHash = Just $ hashSHA256 reqBody
}
signHeaders = signV4 sp req
signedReq =
req
{ NC.requestHeaders = NC.requestHeaders req ++ signHeaders
}
-- Make the STS request
resp <- httpLbs signedReq mgr
result <-
parseSTSAssumeRoleResult
(toStrict $ NC.responseBody resp)
"https://sts.amazonaws.com/doc/2011-06-15/"
return
( arcCredentials $ arrRoleCredentials result,
coerce $ arcExpiration $ arrRoleCredentials result
)

View File

@ -1,90 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE StrictData #-}
module Network.Minio.Credentials.Types where
import qualified Data.ByteArray as BA
import Lib.Prelude (UTCTime)
import qualified Network.HTTP.Client as NC
-- | Access Key type.
newtype AccessKey = AccessKey {unAccessKey :: Text}
deriving stock (Show)
deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Secret Key type - has a show instance that does not print the value.
newtype SecretKey = SecretKey {unSecretKey :: BA.ScrubbedBytes}
deriving stock (Show)
deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Session Token type - has a show instance that does not print the value.
newtype SessionToken = SessionToken {unSessionToken :: BA.ScrubbedBytes}
deriving stock (Show)
deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Object storage credential data type. It has support for the optional
-- [SessionToken](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
-- for using temporary credentials requested via STS.
--
-- The show instance for this type does not print the value of secrets for
-- security.
--
-- @since 1.7.0
data CredentialValue = CredentialValue
{ cvAccessKey :: AccessKey,
cvSecretKey :: SecretKey,
cvSessionToken :: Maybe SessionToken
}
deriving stock (Eq, Show)
scrubbedToText :: BA.ScrubbedBytes -> Text
scrubbedToText =
let b2t :: ByteString -> Text
b2t = decodeUtf8
s2b :: BA.ScrubbedBytes -> ByteString
s2b = BA.convert
in b2t . s2b
-- | Convert a 'CredentialValue' to a text tuple. Use this to output the
-- credential to files or other programs.
credentialValueText :: CredentialValue -> (Text, Text, Maybe Text)
credentialValueText cv =
( coerce $ cvAccessKey cv,
(scrubbedToText . coerce) $ cvSecretKey cv,
scrubbedToText . coerce <$> cvSessionToken cv
)
-- | Endpoint represented by host, port and TLS enabled flag.
type Endpoint = (ByteString, Int, Bool)
-- | Typeclass for STS credential providers.
--
-- @since 1.7.0
class STSCredentialProvider p where
retrieveSTSCredentials ::
p ->
-- | STS Endpoint (host, port, isSecure)
Endpoint ->
NC.Manager ->
IO (CredentialValue, ExpiryTime)
getSTSEndpoint :: p -> Maybe Text
-- | 'ExpiryTime' represents a time at which a credential expires.
newtype ExpiryTime = ExpiryTime {unExpiryTime :: UTCTime}
deriving stock (Show)
deriving newtype (Eq)

File diff suppressed because it is too large Load Diff

View File

@ -13,21 +13,23 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE FlexibleInstances #-}
module Network.Minio.Data.ByteString module Network.Minio.Data.ByteString
( stripBS, (
UriEncodable (..), stripBS
) , UriEncodable(..)
where ) where
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Builder as BB import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Char8 as BC8 import qualified Data.ByteString.Char8 as BC8
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.Char (isAsciiLower, isAsciiUpper, isDigit, isSpace, toUpper) import Data.Char (isSpace, toUpper, isAsciiUpper, isAsciiLower, isDigit)
import qualified Data.Text as T import qualified Data.Text as T
import Numeric (showHex) import Numeric (showHex)
import Lib.Prelude
stripBS :: ByteString -> ByteString stripBS :: ByteString -> ByteString
stripBS = BC8.dropWhile isSpace . fst . BC8.spanEnd isSpace stripBS = BC8.dropWhile isSpace . fst . BC8.spanEnd isSpace
@ -37,10 +39,8 @@ class UriEncodable s where
instance UriEncodable [Char] where instance UriEncodable [Char] where
uriEncode encodeSlash payload = uriEncode encodeSlash payload =
LB.toStrict $ LB.toStrict $ BB.toLazyByteString $ mconcat $
BB.toLazyByteString $ map (`uriEncodeChar` encodeSlash) payload
mconcat $
map (`uriEncodeChar` encodeSlash) payload
instance UriEncodable ByteString where instance UriEncodable ByteString where
-- assumes that uriEncode is passed ASCII encoded strings. -- assumes that uriEncode is passed ASCII encoded strings.
@ -59,17 +59,16 @@ uriEncodeChar '/' True = BB.byteString "%2F"
uriEncodeChar '/' False = BB.char7 '/' uriEncodeChar '/' False = BB.char7 '/'
uriEncodeChar ch _ uriEncodeChar ch _
| isAsciiUpper ch | isAsciiUpper ch
|| isAsciiLower ch || isAsciiLower ch
|| isDigit ch || isDigit ch
|| (ch == '_') || (ch == '_')
|| (ch == '-') || (ch == '-')
|| (ch == '.') || (ch == '.')
|| (ch == '~') = || (ch == '~') = BB.char7 ch
BB.char7 ch
| otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch | otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch
where where
f :: Word8 -> BB.Builder f :: Word8 -> BB.Builder
f n = BB.char7 '%' <> BB.string7 hexStr f n = BB.char7 '%' <> BB.string7 hexStr
where where
hexStr = map toUpper $ showHex q $ showHex r "" hexStr = map toUpper $ showHex q $ showHex r ""
(q, r) = divMod n (16 :: Word8) (q, r) = divMod (fromIntegral n) (16::Word8)

View File

@ -15,54 +15,55 @@
-- --
module Network.Minio.Data.Crypto module Network.Minio.Data.Crypto
( hashSHA256, (
hashSHA256FromSource, hashSHA256
hashMD5, , hashSHA256FromSource
hashMD5ToBase64,
hashMD5FromSource,
hmacSHA256,
hmacSHA256RawBS,
digestToBS,
digestToBase16,
encodeToBase64,
)
where
import Crypto.Hash , hashMD5
( Digest, , hashMD5ToBase64
MD5 (..), , hashMD5FromSource
SHA256 (..),
hashWith, , hmacSHA256
) , hmacSHA256RawBS
import Crypto.Hash.Conduit (sinkHash) , digestToBS
import Crypto.MAC.HMAC (HMAC, hmac) , digestToBase16
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase) , encodeToBase64
import qualified Data.Conduit as C ) where
import Crypto.Hash (Digest, MD5 (..), SHA256 (..),
hashWith)
import Crypto.Hash.Conduit (sinkHash)
import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C
import Lib.Prelude
hashSHA256 :: ByteString -> ByteString hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256 hashSHA256 = digestToBase16 . hashWith SHA256
hashSHA256FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString hashSHA256FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource src = do hashSHA256FromSource src = do
digest <- C.connect src sinkSHA256Hash digest <- C.connect src sinkSHA256Hash
return $ digestToBase16 digest return $ digestToBase16 digest
where where
-- To help with type inference -- To help with type inference
sinkSHA256Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest SHA256) sinkSHA256Hash :: Monad m => C.ConduitM ByteString Void m (Digest SHA256)
sinkSHA256Hash = sinkHash sinkSHA256Hash = sinkHash
-- Returns MD5 hash hex encoded. -- Returns MD5 hash hex encoded.
hashMD5 :: ByteString -> ByteString hashMD5 :: ByteString -> ByteString
hashMD5 = digestToBase16 . hashWith MD5 hashMD5 = digestToBase16 . hashWith MD5
hashMD5FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString hashMD5FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashMD5FromSource src = do hashMD5FromSource src = do
digest <- C.connect src sinkMD5Hash digest <- C.connect src sinkMD5Hash
return $ digestToBase16 digest return $ digestToBase16 digest
where where
-- To help with type inference -- To help with type inference
sinkMD5Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest MD5) sinkMD5Hash :: Monad m => C.ConduitM ByteString Void m (Digest MD5)
sinkMD5Hash = sinkHash sinkMD5Hash = sinkHash
hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256 hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256
@ -71,15 +72,15 @@ hmacSHA256 message key = hmac key message
hmacSHA256RawBS :: ByteString -> ByteString -> ByteString hmacSHA256RawBS :: ByteString -> ByteString -> ByteString
hmacSHA256RawBS message key = convert $ hmacSHA256 message key hmacSHA256RawBS message key = convert $ hmacSHA256 message key
digestToBS :: (ByteArrayAccess a) => a -> ByteString digestToBS :: ByteArrayAccess a => a -> ByteString
digestToBS = convert digestToBS = convert
digestToBase16 :: (ByteArrayAccess a) => a -> ByteString digestToBase16 :: ByteArrayAccess a => a -> ByteString
digestToBase16 = convertToBase Base16 digestToBase16 = convertToBase Base16
-- Returns MD5 hash base 64 encoded. -- Returns MD5 hash base 64 encoded.
hashMD5ToBase64 :: (ByteArrayAccess a) => a -> ByteString hashMD5ToBase64 :: ByteArrayAccess a => a -> ByteString
hashMD5ToBase64 = convertToBase Base64 . hashWith MD5 hashMD5ToBase64 = convertToBase Base64 . hashWith MD5
encodeToBase64 :: (ByteArrayAccess a) => a -> ByteString encodeToBase64 :: ByteArrayAccess a => a -> ByteString
encodeToBase64 = convertToBase Base64 encodeToBase64 = convertToBase Base64

View File

@ -15,24 +15,20 @@
-- --
module Network.Minio.Data.Time module Network.Minio.Data.Time
( awsTimeFormat, (
awsTimeFormatBS, awsTimeFormat
awsDateFormat, , awsTimeFormatBS
awsDateFormatBS, , awsDateFormat
awsParseTime, , awsDateFormatBS
iso8601TimeFormat, , awsParseTime
UrlExpiry, , iso8601TimeFormat
) ) where
where
import Data.ByteString.Char8 (pack)
import Data.ByteString.Char8 (pack)
import qualified Data.Time as Time import qualified Data.Time as Time
import Data.Time.Format.ISO8601 (iso8601Show)
import Lib.Prelude
-- | Time to expire for a presigned URL. It interpreted as a number of import Lib.Prelude
-- seconds. The maximum duration that can be specified is 7 days.
type UrlExpiry = Int
awsTimeFormat :: UTCTime -> [Char] awsTimeFormat :: UTCTime -> [Char]
awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ" awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
@ -50,4 +46,4 @@ awsParseTime :: [Char] -> Maybe UTCTime
awsParseTime = Time.parseTimeM False Time.defaultTimeLocale "%Y%m%dT%H%M%SZ" awsParseTime = Time.parseTimeM False Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
iso8601TimeFormat :: UTCTime -> [Char] iso8601TimeFormat :: UTCTime -> [Char]
iso8601TimeFormat = iso8601Show iso8601TimeFormat = Time.formatTime Time.defaultTimeLocale (Time.iso8601DateFormat $ Just "%T%QZ")

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -14,83 +14,76 @@
-- limitations under the License. -- limitations under the License.
-- --
module Network.Minio.Errors module Network.Minio.Errors where
( MErrV (..),
ServiceErr (..),
MinioErr (..),
toServiceErr,
)
where
import Control.Exception (IOException) import Control.Exception
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import Lib.Prelude
--------------------------------- ---------------------------------
-- Errors -- Errors
--------------------------------- ---------------------------------
-- | Various validation errors -- | Various validation errors
data MErrV data MErrV = MErrVSinglePUTSizeExceeded Int64
= MErrVSinglePUTSizeExceeded Int64 | MErrVPutSizeExceeded Int64
| MErrVPutSizeExceeded Int64 | MErrVETagHeaderNotFound
| MErrVETagHeaderNotFound | MErrVInvalidObjectInfoResponse
| MErrVInvalidObjectInfoResponse | MErrVInvalidSrcObjSpec Text
| MErrVInvalidSrcObjSpec Text | MErrVInvalidSrcObjByteRange (Int64, Int64)
| MErrVInvalidSrcObjByteRange (Int64, Int64) | MErrVCopyObjSingleNoRangeAccepted
| MErrVCopyObjSingleNoRangeAccepted | MErrVRegionNotSupported Text
| MErrVRegionNotSupported Text | MErrVXmlParse Text
| MErrVXmlParse Text | MErrVInvalidBucketName Text
| MErrVInvalidBucketName Text | MErrVInvalidObjectName Text
| MErrVInvalidObjectName Text | MErrVInvalidUrlExpiry Int
| MErrVInvalidUrlExpiry Int | MErrVJsonParse Text
| MErrVJsonParse Text | MErrVInvalidHealPath
| MErrVInvalidHealPath | MErrVMissingCredentials
| MErrVMissingCredentials | MErrVInvalidEncryptionKeyLength
| MErrVInvalidEncryptionKeyLength | MErrVStreamingBodyUnexpectedEOF
| MErrVStreamingBodyUnexpectedEOF | MErrVUnexpectedPayload
| MErrVUnexpectedPayload deriving (Show, Eq)
| MErrVSTSEndpointNotFound
deriving stock (Show, Eq)
instance Exception MErrV instance Exception MErrV
-- | Errors returned by S3 compatible service -- | Errors returned by S3 compatible service
data ServiceErr data ServiceErr = BucketAlreadyExists
= BucketAlreadyExists | BucketAlreadyOwnedByYou
| BucketAlreadyOwnedByYou | NoSuchBucket
| NoSuchBucket | InvalidBucketName
| InvalidBucketName | NoSuchKey
| NoSuchKey | SelectErr Text Text
| SelectErr Text Text | ServiceErr Text Text
| ServiceErr Text Text deriving (Show, Eq)
deriving stock (Show, Eq)
instance Exception ServiceErr instance Exception ServiceErr
toServiceErr :: Text -> Text -> ServiceErr toServiceErr :: Text -> Text -> ServiceErr
toServiceErr "NoSuchKey" _ = NoSuchKey toServiceErr "NoSuchKey" _ = NoSuchKey
toServiceErr "NoSuchBucket" _ = NoSuchBucket toServiceErr "NoSuchBucket" _ = NoSuchBucket
toServiceErr "InvalidBucketName" _ = InvalidBucketName toServiceErr "InvalidBucketName" _ = InvalidBucketName
toServiceErr "BucketAlreadyOwnedByYou" _ = BucketAlreadyOwnedByYou toServiceErr "BucketAlreadyOwnedByYou" _ = BucketAlreadyOwnedByYou
toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists toServiceErr "BucketAlreadyExists" _ = BucketAlreadyExists
toServiceErr code message = ServiceErr code message toServiceErr code message = ServiceErr code message
-- | Errors thrown by the library -- | Errors thrown by the library
data MinioErr data MinioErr = MErrHTTP NC.HttpException
= MErrHTTP NC.HttpException | MErrIO IOException
| MErrIO IOException | MErrService ServiceErr
| MErrService ServiceErr | MErrValidation MErrV
| MErrValidation MErrV deriving (Show)
deriving stock (Show)
instance Eq MinioErr where instance Eq MinioErr where
MErrHTTP _ == MErrHTTP _ = True MErrHTTP _ == MErrHTTP _ = True
MErrHTTP _ == _ = False MErrHTTP _ == _ = False
MErrIO _ == MErrIO _ = True MErrIO _ == MErrIO _ = True
MErrIO _ == _ = False MErrIO _ == _ = False
MErrService a == MErrService b = a == b MErrService a == MErrService b = a == b
MErrService _ == _ = False MErrService _ == _ = False
MErrValidation a == MErrValidation b = a == b MErrValidation a == MErrValidation b = a == b
MErrValidation _ == _ = False MErrValidation _ == _ = False
instance Exception MinioErr instance Exception MinioErr

View File

@ -15,35 +15,28 @@
-- --
module Network.Minio.JsonParser module Network.Minio.JsonParser
( parseErrResponseJSON, (
) parseErrResponseJSON
where ) where
import Data.Aeson import Data.Aeson (FromJSON, eitherDecode, parseJSON,
( FromJSON, withObject, (.:))
eitherDecode, import qualified Data.Text as T
parseJSON,
withObject,
(.:),
)
import qualified Data.Text as T
import Lib.Prelude
import Network.Minio.Errors
data AdminErrJSON = AdminErrJSON import Lib.Prelude
{ aeCode :: Text,
aeMessage :: Text
}
deriving stock (Eq, Show)
import Network.Minio.Errors
data AdminErrJSON = AdminErrJSON { aeCode :: Text
, aeMessage :: Text
} deriving (Eq, Show)
instance FromJSON AdminErrJSON where instance FromJSON AdminErrJSON where
parseJSON = withObject "AdminErrJSON" $ \v -> parseJSON = withObject "AdminErrJSON" $ \v -> AdminErrJSON
AdminErrJSON <$> v .: "Code"
<$> v .: "Code" <*> v .: "Message"
<*> v .: "Message"
parseErrResponseJSON :: (MonadIO m) => LByteString -> m ServiceErr parseErrResponseJSON :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponseJSON jsondata = parseErrResponseJSON jsondata =
case eitherDecode jsondata of case eitherDecode jsondata of
Right aErr -> return $ toServiceErr (aeCode aErr) (aeMessage aErr) Right aErr -> return $ toServiceErr (aeCode aErr) (aeMessage aErr)
Left err -> throwIO $ MErrVJsonParse $ T.pack err Left err -> throwIO $ MErrVJsonParse $ T.pack err

View File

@ -16,50 +16,20 @@
module Network.Minio.ListOps where module Network.Minio.ListOps where
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL import qualified Data.Conduit.List as CL
import Network.Minio.Data
( Bucket, import Lib.Prelude
ListObjectsResult
( lorCPrefixes, import Network.Minio.Data
lorHasMore, import Network.Minio.S3API
lorNextToken,
lorObjects
),
ListObjectsV1Result
( lorCPrefixes',
lorHasMore',
lorNextMarker,
lorObjects'
),
ListPartsResult (lprHasMore, lprNextPart, lprParts),
ListUploadsResult
( lurHasMore,
lurNextKey,
lurNextUpload,
lurUploads
),
Minio,
Object,
ObjectInfo,
ObjectPartInfo (opiSize),
UploadId,
UploadInfo (UploadInfo),
)
import Network.Minio.S3API
( listIncompleteParts',
listIncompleteUploads',
listObjects',
listObjectsV1',
)
-- | Represents a list output item - either an object or an object -- | Represents a list output item - either an object or an object
-- prefix (i.e. a directory). -- prefix (i.e. a directory).
data ListItem data ListItem = ListItemObject ObjectInfo
= ListItemObject ObjectInfo | ListItemPrefix Text
| ListItemPrefix Text deriving (Show, Eq)
deriving stock (Show, Eq)
-- | @'listObjects' bucket prefix recurse@ lists objects in a bucket -- | @'listObjects' bucket prefix recurse@ lists objects in a bucket
-- similar to a file system tree traversal. -- similar to a file system tree traversal.
@ -78,103 +48,73 @@ listObjects bucket prefix recurse = loop Nothing
where where
loop :: Maybe Text -> C.ConduitM () ListItem Minio () loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop nextToken = do loop nextToken = do
let delimiter = bool (Just "/") Nothing recurse let
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects res CL.sourceList $ map ListItemObject $ lorObjects res
unless recurse $ unless recurse $
CL.sourceList $ CL.sourceList $ map ListItemPrefix $ lorCPrefixes res
map ListItemPrefix $
lorCPrefixes res
when (lorHasMore res) $ when (lorHasMore res) $
loop (lorNextToken res) loop (lorNextToken res)
-- | Lists objects - similar to @listObjects@, however uses the older -- | Lists objects - similar to @listObjects@, however uses the older
-- V1 AWS S3 API. Prefer @listObjects@ to this. -- V1 AWS S3 API. Prefer @listObjects@ to this.
listObjectsV1 :: listObjectsV1 :: Bucket -> Maybe Text -> Bool
Bucket -> -> C.ConduitM () ListItem Minio ()
Maybe Text ->
Bool ->
C.ConduitM () ListItem Minio ()
listObjectsV1 bucket prefix recurse = loop Nothing listObjectsV1 bucket prefix recurse = loop Nothing
where where
loop :: Maybe Text -> C.ConduitM () ListItem Minio () loop :: Maybe Text -> C.ConduitM () ListItem Minio ()
loop nextMarker = do loop nextMarker = do
let delimiter = bool (Just "/") Nothing recurse let
delimiter = bool (Just "/") Nothing recurse
res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects' res CL.sourceList $ map ListItemObject $ lorObjects' res
unless recurse $ unless recurse $
CL.sourceList $ CL.sourceList $ map ListItemPrefix $ lorCPrefixes' res
map ListItemPrefix $
lorCPrefixes' res
when (lorHasMore' res) $ when (lorHasMore' res) $
loop (lorNextMarker res) loop (lorNextMarker res)
-- | List incomplete uploads in a bucket matching the given prefix. If -- | List incomplete uploads in a bucket matching the given prefix. If
-- recurse is set to True incomplete uploads for the given prefix are -- recurse is set to True incomplete uploads for the given prefix are
-- recursively listed. -- recursively listed.
listIncompleteUploads :: listIncompleteUploads :: Bucket -> Maybe Text -> Bool
Bucket -> -> C.ConduitM () UploadInfo Minio ()
Maybe Text ->
Bool ->
C.ConduitM () UploadInfo Minio ()
listIncompleteUploads bucket prefix recurse = loop Nothing Nothing listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
where where
loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio () loop :: Maybe Text -> Maybe Text -> C.ConduitM () UploadInfo Minio ()
loop nextKeyMarker nextUploadIdMarker = do loop nextKeyMarker nextUploadIdMarker = do
let delimiter = bool (Just "/") Nothing recurse let
delimiter = bool (Just "/") Nothing recurse
res <- res <- lift $ listIncompleteUploads' bucket prefix delimiter
lift $ nextKeyMarker nextUploadIdMarker Nothing
listIncompleteUploads'
bucket
prefix
delimiter
nextKeyMarker
nextUploadIdMarker
Nothing
aggrSizes <- lift $ aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
forM (lurUploads res) $ \(uKey, uId, _) -> do partInfos <- C.runConduit $ listIncompleteParts bucket uKey uId
partInfos <- C..| CC.sinkList
C.runConduit $ return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
listIncompleteParts bucket uKey uId
C..| CC.sinkList
return $ foldl' (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
CL.sourceList $ CL.sourceList $
zipWith map (\((uKey, uId, uInitTime), size) ->
( curry UploadInfo uKey uId uInitTime size
( \((uKey, uId, uInitTime), size) -> ) $ zip (lurUploads res) aggrSizes
UploadInfo uKey uId uInitTime size
)
)
(lurUploads res)
aggrSizes
when (lurHasMore res) $ when (lurHasMore res) $
loop (lurNextKey res) (lurNextUpload res) loop (lurNextKey res) (lurNextUpload res)
-- | List object parts of an ongoing multipart upload for given -- | List object parts of an ongoing multipart upload for given
-- bucket, object and uploadId. -- bucket, object and uploadId.
listIncompleteParts :: listIncompleteParts :: Bucket -> Object -> UploadId
Bucket -> -> C.ConduitM () ObjectPartInfo Minio ()
Object ->
UploadId ->
C.ConduitM () ObjectPartInfo Minio ()
listIncompleteParts bucket object uploadId = loop Nothing listIncompleteParts bucket object uploadId = loop Nothing
where where
loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio () loop :: Maybe Text -> C.ConduitM () ObjectPartInfo Minio ()
loop nextPartMarker = do loop nextPartMarker = do
res <- res <- lift $ listIncompleteParts' bucket object uploadId Nothing
lift $ nextPartMarker
listIncompleteParts'
bucket
object
uploadId
Nothing
nextPartMarker
CL.sourceList $ lprParts res CL.sourceList $ lprParts res
when (lprHasMore res) $ when (lprHasMore res) $
loop (show <$> lprNextPart res) loop (show <$> lprNextPart res)

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -13,51 +13,45 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE CPP #-}
module Network.Minio.PresignedOperations module Network.Minio.PresignedOperations
( UrlExpiry, ( UrlExpiry
makePresignedUrl, , makePresignedUrl
presignedPutObjectUrl, , presignedPutObjectUrl
presignedGetObjectUrl, , presignedGetObjectUrl
presignedHeadObjectUrl, , presignedHeadObjectUrl
PostPolicyCondition (..),
ppCondBucket,
ppCondContentLengthRange,
ppCondContentType,
ppCondKey,
ppCondKeyStartsWith,
ppCondSuccessActionStatus,
PostPolicy (..),
PostPolicyError (..),
newPostPolicy,
showPostPolicy,
presignedPostPolicy,
)
where
import Data.Aeson ((.=)) , PostPolicyCondition(..)
import qualified Data.Aeson as Json , ppCondBucket
import Data.ByteString.Builder (byteString, toLazyByteString) , ppCondContentLengthRange
import qualified Data.HashMap.Strict as H , ppCondContentType
import qualified Data.Text as T , ppCondKey
import qualified Data.Time as Time , ppCondKeyStartsWith
import Lib.Prelude , ppCondSuccessActionStatus
import qualified Network.HTTP.Client as NClient
import qualified Network.HTTP.Types as HT
import Network.Minio.API (buildRequest)
import Network.Minio.Credentials
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
import Network.URI (uriToString)
{- ORMOLU_DISABLE -} , PostPolicy(..)
#if MIN_VERSION_aeson(2,0,0) , PostPolicyError(..)
import qualified Data.Aeson.Key as A , newPostPolicy
#endif , showPostPolicy
{- ORMOLU_ENABLE -} , presignedPostPolicy
) where
import Data.Aeson ((.=))
import qualified Data.Aeson as Json
import Data.ByteString.Builder (byteString, toLazyByteString)
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T
import qualified Data.Time as Time
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Data.Time
import Network.Minio.Errors
import Network.Minio.Sign.V4
-- | Generate a presigned URL. This function allows for advanced usage -- | Generate a presigned URL. This function allows for advanced usage
-- - for simple cases prefer the `presigned*Url` functions. -- - for simple cases prefer the `presigned*Url` functions.
@ -67,36 +61,42 @@ import qualified Data.Aeson.Key as A
-- --
-- All extra query parameters or headers are signed, and therefore are -- All extra query parameters or headers are signed, and therefore are
-- required to be sent when the generated URL is actually used. -- required to be sent when the generated URL is actually used.
makePresignedUrl :: makePresignedUrl :: UrlExpiry -> HT.Method -> Maybe Bucket -> Maybe Object
UrlExpiry -> -> Maybe Region -> HT.Query -> HT.RequestHeaders
HT.Method -> -> Minio ByteString
Maybe Bucket ->
Maybe Object ->
Maybe Region ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
when (expiry > 7 * 24 * 3600 || expiry < 0) $ when (expiry > 7*24*3600 || expiry < 0) $
throwIO $ throwIO $ MErrVInvalidUrlExpiry expiry
MErrVInvalidUrlExpiry expiry
let s3ri = ci <- asks mcConnInfo
defaultS3ReqInfo
{ riPresignExpirySecs = Just expiry,
riMethod = method,
riBucket = bucket,
riObject = object,
riRegion = region,
riQueryParams = extraQuery,
riHeaders = extraHeaders
}
req <- buildRequest s3ri let
let uri = NClient.getUri req hostHeader = (hHost, getHostAddr ci)
uriString = uriToString identity uri "" req = NC.defaultRequest {
NC.method = method
, NC.secure = connectIsSecure ci
, NC.host = encodeUtf8 $ connectHost ci
, NC.port = connectPort ci
, NC.path = getS3Path bucket object
, NC.requestHeaders = hostHeader : extraHeaders
, NC.queryString = HT.renderQuery True extraQuery
}
ts <- liftIO Time.getCurrentTime
return $ encodeUtf8 uriString let sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
ts region (Just expiry) Nothing
signPairs = signV4 sp req
qpToAdd = (fmap . fmap) Just signPairs
queryStr = HT.renderQueryBuilder True
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
return $ toS $ toLazyByteString $ scheme
<> byteString (getHostAddr ci)
<> byteString (getS3Path bucket object)
<> queryStr
-- | Generate a URL with authentication signature to PUT (upload) an -- | Generate a URL with authentication signature to PUT (upload) an
-- object. Any extra headers if passed, are signed, and so they are -- object. Any extra headers if passed, are signed, and so they are
@ -105,22 +105,12 @@ makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
-- --
-- For a list of possible headers to pass, please refer to the PUT -- For a list of possible headers to pass, please refer to the PUT
-- object REST API AWS S3 documentation. -- object REST API AWS S3 documentation.
presignedPutObjectUrl :: presignedPutObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.RequestHeaders
Bucket -> -> Minio ByteString
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedPutObjectUrl bucket object expirySeconds extraHeaders = do presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo) region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl makePresignedUrl expirySeconds HT.methodPut
expirySeconds (Just bucket) (Just object) region [] extraHeaders
HT.methodPut
(Just bucket)
(Just object)
region
[]
extraHeaders
-- | Generate a URL with authentication signature to GET (download) an -- | Generate a URL with authentication signature to GET (download) an
-- object. All extra query parameters and headers passed here will be -- object. All extra query parameters and headers passed here will be
@ -131,23 +121,12 @@ presignedPutObjectUrl bucket object expirySeconds extraHeaders = do
-- --
-- For a list of possible request parameters and headers, please refer -- For a list of possible request parameters and headers, please refer
-- to the GET object REST API AWS S3 documentation. -- to the GET object REST API AWS S3 documentation.
presignedGetObjectUrl :: presignedGetObjectUrl :: Bucket -> Object -> UrlExpiry -> HT.Query
Bucket -> -> HT.RequestHeaders -> Minio ByteString
Object ->
UrlExpiry ->
HT.Query ->
HT.RequestHeaders ->
Minio ByteString
presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo) region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl makePresignedUrl expirySeconds HT.methodGet
expirySeconds (Just bucket) (Just object) region extraQuery extraHeaders
HT.methodGet
(Just bucket)
(Just object)
region
extraQuery
extraHeaders
-- | Generate a URL with authentication signature to make a HEAD -- | Generate a URL with authentication signature to make a HEAD
-- request on an object. This is used to fetch metadata about an -- request on an object. This is used to fetch metadata about an
@ -156,74 +135,50 @@ presignedGetObjectUrl bucket object expirySeconds extraQuery extraHeaders = do
-- --
-- For a list of possible headers to pass, please refer to the HEAD -- For a list of possible headers to pass, please refer to the HEAD
-- object REST API AWS S3 documentation. -- object REST API AWS S3 documentation.
presignedHeadObjectUrl :: presignedHeadObjectUrl :: Bucket -> Object -> UrlExpiry
Bucket -> -> HT.RequestHeaders -> Minio ByteString
Object ->
UrlExpiry ->
HT.RequestHeaders ->
Minio ByteString
presignedHeadObjectUrl bucket object expirySeconds extraHeaders = do presignedHeadObjectUrl bucket object expirySeconds extraHeaders = do
region <- asks (Just . connectRegion . mcConnInfo) region <- asks (Just . connectRegion . mcConnInfo)
makePresignedUrl makePresignedUrl expirySeconds HT.methodHead
expirySeconds (Just bucket) (Just object) region [] extraHeaders
HT.methodHead
(Just bucket)
(Just object)
region
[]
extraHeaders
-- | Represents individual conditions in a Post Policy document. -- | Represents individual conditions in a Post Policy document.
data PostPolicyCondition data PostPolicyCondition = PPCStartsWith Text Text
= PPCStartsWith Text Text | PPCEquals Text Text
| PPCEquals Text Text | PPCRange Text Int64 Int64
| PPCRange Text Int64 Int64 deriving (Show, Eq)
deriving stock (Show, Eq)
{- ORMOLU_DISABLE -}
instance Json.ToJSON PostPolicyCondition where instance Json.ToJSON PostPolicyCondition where
toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v] toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v]
#if MIN_VERSION_aeson(2,0,0)
toJSON (PPCEquals k v) = Json.object [(A.fromText k) .= v]
#else
toJSON (PPCEquals k v) = Json.object [k .= v] toJSON (PPCEquals k v) = Json.object [k .= v]
#endif
toJSON (PPCRange k minVal maxVal) = toJSON (PPCRange k minVal maxVal) =
Json.toJSON [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal] Json.toJSON [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
toEncoding (PPCStartsWith k v) = Json.foldable ["starts-with", k, v] toEncoding (PPCStartsWith k v) = Json.foldable ["starts-with", k, v]
#if MIN_VERSION_aeson(2,0,0)
toEncoding (PPCEquals k v) = Json.pairs ((A.fromText k) .= v)
#else
toEncoding (PPCEquals k v) = Json.pairs (k .= v) toEncoding (PPCEquals k v) = Json.pairs (k .= v)
#endif
toEncoding (PPCRange k minVal maxVal) = toEncoding (PPCRange k minVal maxVal) =
Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal] Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
{- ORMOLU_ENABLE -}
-- | A PostPolicy is required to perform uploads via browser forms. -- | A PostPolicy is required to perform uploads via browser forms.
data PostPolicy = PostPolicy data PostPolicy = PostPolicy {
{ expiration :: UTCTime, expiration :: UTCTime
conditions :: [PostPolicyCondition] , conditions :: [PostPolicyCondition]
} } deriving (Show, Eq)
deriving stock (Show, Eq)
instance Json.ToJSON PostPolicy where instance Json.ToJSON PostPolicy where
toJSON (PostPolicy e c) = toJSON (PostPolicy e c) =
Json.object Json.object $ [ "expiration" .= iso8601TimeFormat e
[ "expiration" .= iso8601TimeFormat e, , "conditions" .= c
"conditions" .= c ]
]
toEncoding (PostPolicy e c) = toEncoding (PostPolicy e c) =
Json.pairs ("expiration" .= iso8601TimeFormat e <> "conditions" .= c) Json.pairs ("expiration" .= iso8601TimeFormat e <> "conditions" .= c)
-- | Possible validation errors when creating a PostPolicy. -- | Possible validation errors when creating a PostPolicy.
data PostPolicyError data PostPolicyError = PPEKeyNotSpecified
= PPEKeyNotSpecified | PPEBucketNotSpecified
| PPEBucketNotSpecified | PPEConditionKeyEmpty
| PPEConditionKeyEmpty | PPERangeInvalid
| PPERangeInvalid deriving (Eq, Show)
deriving stock (Show, Eq)
-- | Set the bucket name that the upload should use. -- | Set the bucket name that the upload should use.
ppCondBucket :: Bucket -> PostPolicyCondition ppCondBucket :: Bucket -> PostPolicyCondition
@ -231,10 +186,8 @@ ppCondBucket = PPCEquals "bucket"
-- | Set the content length range constraint with minimum and maximum -- | Set the content length range constraint with minimum and maximum
-- byte count values. -- byte count values.
ppCondContentLengthRange :: ppCondContentLengthRange :: Int64 -> Int64
Int64 -> -> PostPolicyCondition
Int64 ->
PostPolicyCondition
ppCondContentLengthRange = PPCRange "content-length-range" ppCondContentLengthRange = PPCRange "content-length-range"
-- | Set the content-type header for the upload. -- | Set the content-type header for the upload.
@ -257,99 +210,83 @@ ppCondSuccessActionStatus n =
-- | This function creates a PostPolicy after validating its -- | This function creates a PostPolicy after validating its
-- arguments. -- arguments.
newPostPolicy :: newPostPolicy :: UTCTime -> [PostPolicyCondition]
UTCTime -> -> Either PostPolicyError PostPolicy
[PostPolicyCondition] ->
Either PostPolicyError PostPolicy
newPostPolicy expirationTime conds newPostPolicy expirationTime conds
-- object name condition must be present -- object name condition must be present
| not $ any (keyEquals "key") conds = | not $ any (keyEquals "key") conds =
Left PPEKeyNotSpecified Left PPEKeyNotSpecified
-- bucket name condition must be present -- bucket name condition must be present
| not $ any (keyEquals "bucket") conds = | not $ any (keyEquals "bucket") conds =
Left PPEBucketNotSpecified Left PPEBucketNotSpecified
-- a condition with an empty key is invalid -- a condition with an empty key is invalid
| any (keyEquals "") conds || any isEmptyRangeKey conds = | any (keyEquals "") conds || any isEmptyRangeKey conds =
Left PPEConditionKeyEmpty Left PPEConditionKeyEmpty
-- invalid range check -- invalid range check
| any isInvalidRange conds = | any isInvalidRange conds =
Left PPERangeInvalid Left PPERangeInvalid
-- all good! -- all good!
| otherwise = | otherwise =
return $ PostPolicy expirationTime conds return $ PostPolicy expirationTime conds
where where
keyEquals k' (PPCStartsWith k _) = k == k' keyEquals k' (PPCStartsWith k _) = k == k'
keyEquals k' (PPCEquals k _) = k == k' keyEquals k' (PPCEquals k _) = k == k'
keyEquals _ _ = False keyEquals _ _ = False
isEmptyRangeKey (PPCRange k _ _) = k == "" isEmptyRangeKey (PPCRange k _ _) = k == ""
isEmptyRangeKey _ = False isEmptyRangeKey _ = False
isInvalidRange (PPCRange _ mi ma) = mi < 0 || mi > ma isInvalidRange (PPCRange _ mi ma) = mi < 0 || mi > ma
isInvalidRange _ = False isInvalidRange _ = False
-- | Convert Post Policy to a string (e.g. for printing). -- | Convert Post Policy to a string (e.g. for printing).
showPostPolicy :: PostPolicy -> ByteString showPostPolicy :: PostPolicy -> ByteString
showPostPolicy = toStrictBS . Json.encode showPostPolicy = toS . Json.encode
-- | Generate a presigned URL and POST policy to upload files via a -- | Generate a presigned URL and POST policy to upload files via a
-- browser. On success, this function returns a URL and POST -- browser. On success, this function returns a URL and POST
-- form-data. -- form-data.
presignedPostPolicy :: presignedPostPolicy :: PostPolicy
PostPolicy -> -> Minio (ByteString, H.HashMap Text ByteString)
Minio (ByteString, H.HashMap Text ByteString)
presignedPostPolicy p = do presignedPostPolicy p = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
signTime <- liftIO Time.getCurrentTime signTime <- liftIO $ Time.getCurrentTime
mgr <- asks mcConnManager
cv <- liftIO $ getCredential (connectCreds ci) (getEndpoint ci) mgr
let extraConditions signParams = let
[ PPCEquals "x-amz-date" (toText $ awsTimeFormat signTime), extraConditions =
PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256", [ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime)
PPCEquals , PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256"
"x-amz-credential" , PPCEquals "x-amz-credential"
( T.intercalate (T.intercalate "/" [connectAccessKey ci,
"/" decodeUtf8 $ mkScope signTime region])
[ coerce $ cvAccessKey cv, ]
decodeUtf8 $ credentialScope signParams ppWithCreds = p {
] conditions = conditions p ++ extraConditions
) }
] sp = SignParams (connectAccessKey ci) (connectSecretKey ci)
ppWithCreds signParams = signTime (Just $ connectRegion ci) Nothing Nothing
p signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp
{ conditions = conditions p ++ extraConditions signParams
}
sp = -- compute form-data
SignParams mkPair (PPCStartsWith k v) = Just (k, v)
(coerce $ cvAccessKey cv) mkPair (PPCEquals k v) = Just (k, v)
(coerce $ cvSecretKey cv) mkPair _ = Nothing
(coerce $ cvSessionToken cv) formFromPolicy = H.map toS $ H.fromList $ catMaybes $
ServiceS3 mkPair <$> conditions ppWithCreds
signTime formData = formFromPolicy `H.union` signData
(Just $ connectRegion ci)
Nothing -- compute POST upload URL
Nothing bucket = H.lookupDefault "" "bucket" formData
signData = signV4PostPolicy (showPostPolicy $ ppWithCreds sp) sp scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
-- compute form-data region = connectRegion ci
mkPair (PPCStartsWith k v) = Just (k, v)
mkPair (PPCEquals k v) = Just (k, v) url = toS $ toLazyByteString $ scheme <> byteString (getHostAddr ci) <>
mkPair _ = Nothing byteString "/" <> byteString (toS bucket) <> byteString "/"
formFromPolicy =
H.map encodeUtf8 $
H.fromList $
mapMaybe
mkPair
(conditions $ ppWithCreds sp)
formData = formFromPolicy `H.union` signData
-- compute POST upload URL
bucket = H.lookupDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
url =
toStrictBS $
toLazyByteString $
scheme
<> byteString (getHostAddr ci)
<> byteString "/"
<> byteString bucket
<> byteString "/"
return (url, formData) return (url, formData)

View File

@ -15,24 +15,29 @@
-- --
module Network.Minio.PutObject module Network.Minio.PutObject
( putObjectInternal, (
ObjectData (..), putObjectInternal
selectPartSizes, , ObjectData(..)
) , selectPartSizes
where ) where
import Conduit (takeC)
import qualified Conduit as C import Conduit (takeC)
import qualified Data.ByteString.Lazy as LBS import qualified Conduit as C
import qualified Data.Conduit.Binary as CB import qualified Data.ByteString.Lazy as LBS
import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL import qualified Data.Conduit.List as CL
import qualified Data.List as List import qualified Data.List as List
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors import Lib.Prelude
import Network.Minio.S3API
import Network.Minio.Utils import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.S3API
import Network.Minio.Utils
-- | A data-type to represent the source data for an object. A -- | A data-type to represent the source data for an object. A
-- file-path or a producer-conduit may be provided. -- file-path or a producer-conduit may be provided.
@ -45,45 +50,37 @@ import Network.Minio.Utils
-- the input - if it is not provided, upload will continue until the -- the input - if it is not provided, upload will continue until the
-- stream ends or the object reaches `maxObjectSize` size. -- stream ends or the object reaches `maxObjectSize` size.
data ObjectData m data ObjectData m
= -- | Takes filepath and optional = ODFile FilePath (Maybe Int64) -- ^ Takes filepath and optional
-- size. -- size.
ODFile FilePath (Maybe Int64) | ODStream (C.ConduitM () ByteString m ()) (Maybe Int64) -- ^ Pass
| -- | Pass -- size
-- size -- (bytes)
-- (bytes) -- if
-- if -- known.
-- known.
ODStream (C.ConduitM () ByteString m ()) (Maybe Int64)
-- | Put an object from ObjectData. This high-level API handles -- | Put an object from ObjectData. This high-level API handles
-- objects of all sizes, and even if the object size is unknown. -- objects of all sizes, and even if the object size is unknown.
putObjectInternal :: putObjectInternal :: Bucket -> Object -> PutObjectOptions
Bucket -> -> ObjectData Minio -> Minio ETag
Object ->
PutObjectOptions ->
ObjectData Minio ->
Minio ETag
putObjectInternal b o opts (ODStream src sizeMay) = do putObjectInternal b o opts (ODStream src sizeMay) = do
case sizeMay of case sizeMay of
-- unable to get size, so assume non-seekable file -- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing src Nothing -> sequentialMultipartUpload b o opts Nothing src
-- got file size, so check for single/multipart upload -- got file size, so check for single/multipart upload
Just size -> Just size ->
if if | size <= 64 * oneMiB -> do
| size <= 64 * oneMiB -> do bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs | size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size | otherwise -> sequentialMultipartUpload b o opts (Just size) src
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
putObjectInternal b o opts (ODFile fp sizeMay) = do putObjectInternal b o opts (ODFile fp sizeMay) = do
hResE <- withNewHandle fp $ \h -> hResE <- withNewHandle fp $ \h ->
liftA2 (,) (isHandleSeekable h) (getFileSize h) liftM2 (,) (isHandleSeekable h) (getFileSize h)
(isSeekable, handleSizeMay) <- (isSeekable, handleSizeMay) <- either (const $ return (False, Nothing)) return
either hResE
(const $ return (False, Nothing))
return
hResE
-- prefer given size to queried size. -- prefer given size to queried size.
let finalSizeMay = listToMaybe $ catMaybes [sizeMay, handleSizeMay] let finalSizeMay = listToMaybe $ catMaybes [sizeMay, handleSizeMay]
@ -91,25 +88,18 @@ putObjectInternal b o opts (ODFile fp sizeMay) = do
case finalSizeMay of case finalSizeMay of
-- unable to get size, so assume non-seekable file -- unable to get size, so assume non-seekable file
Nothing -> sequentialMultipartUpload b o opts Nothing $ CB.sourceFile fp Nothing -> sequentialMultipartUpload b o opts Nothing $ CB.sourceFile fp
-- got file size, so check for single/multipart upload -- got file size, so check for single/multipart upload
Just size -> Just size ->
if if | size <= 64 * oneMiB -> either throwIO return =<<
| size <= 64 * oneMiB -> withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
either throwIO return | size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
=<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size) | isSeekable -> parallelMultipartUpload b o opts fp size
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size | otherwise -> sequentialMultipartUpload b o opts (Just size) $
| isSeekable -> parallelMultipartUpload b o opts fp size CB.sourceFile fp
| otherwise ->
sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp
parallelMultipartUpload :: parallelMultipartUpload :: Bucket -> Object -> PutObjectOptions
Bucket -> -> FilePath -> Int64 -> Minio ETag
Object ->
PutObjectOptions ->
FilePath ->
Int64 ->
Minio ETag
parallelMultipartUpload b o opts filePath size = do parallelMultipartUpload b o opts filePath size = do
-- get a new upload id. -- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts) uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -119,17 +109,15 @@ parallelMultipartUpload b o opts filePath size = do
let threads = fromMaybe 10 $ pooNumThreads opts let threads = fromMaybe 10 $ pooNumThreads opts
-- perform upload with 'threads' threads -- perform upload with 'threads' threads
uploadedPartsE <- uploadedPartsE <- limitedMapConcurrently (fromIntegral threads)
limitedMapConcurrently (uploadPart uploadId) partSizeInfo
(fromIntegral threads)
(uploadPart uploadId)
partSizeInfo
-- if there were any errors, rethrow exception. -- if there were any errors, rethrow exception.
mapM_ throwIO $ lefts uploadedPartsE mapM_ throwIO $ lefts uploadedPartsE
-- if we get here, all parts were successfully uploaded. -- if we get here, all parts were successfully uploaded.
completeMultipartUpload b o uploadId $ rights uploadedPartsE completeMultipartUpload b o uploadId $ rights uploadedPartsE
where where
uploadPart uploadId (partNum, offset, sz) = uploadPart uploadId (partNum, offset, sz) =
withNewHandle filePath $ \h -> do withNewHandle filePath $ \h -> do
@ -137,13 +125,10 @@ parallelMultipartUpload b o opts filePath size = do
putObjectPart b o uploadId partNum [] payload putObjectPart b o uploadId partNum [] payload
-- | Upload multipart object from conduit source sequentially -- | Upload multipart object from conduit source sequentially
sequentialMultipartUpload :: sequentialMultipartUpload :: Bucket -> Object -> PutObjectOptions
Bucket -> -> Maybe Int64
Object -> -> C.ConduitM () ByteString Minio ()
PutObjectOptions -> -> Minio ETag
Maybe Int64 ->
C.ConduitM () ByteString Minio () ->
Minio ETag
sequentialMultipartUpload b o opts sizeMay src = do sequentialMultipartUpload b o opts sizeMay src = do
-- get a new upload id. -- get a new upload id.
uploadId <- newMultipartUpload b o (pooToHeaders opts) uploadId <- newMultipartUpload b o (pooToHeaders opts)
@ -151,23 +136,22 @@ sequentialMultipartUpload b o opts sizeMay src = do
-- upload parts in loop -- upload parts in loop
let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay let partSizes = selectPartSizes $ maybe maxObjectSize identity sizeMay
(pnums, _, sizes) = List.unzip3 partSizes (pnums, _, sizes) = List.unzip3 partSizes
uploadedParts <- uploadedParts <- C.runConduit
C.runConduit $ $ src
src C..| chunkBSConduit (map fromIntegral sizes)
C..| chunkBSConduit (map fromIntegral sizes) C..| CL.map PayloadBS
C..| CL.map PayloadBS C..| uploadPart' uploadId pnums
C..| uploadPart' uploadId pnums C..| CC.sinkList
C..| CC.sinkList
-- complete multipart upload -- complete multipart upload
completeMultipartUpload b o uploadId uploadedParts completeMultipartUpload b o uploadId uploadedParts
where where
uploadPart' _ [] = return () uploadPart' _ [] = return ()
uploadPart' uid (pn : pns) = do uploadPart' uid (pn:pns) = do
payloadMay <- C.await payloadMay <- C.await
case payloadMay of case payloadMay of
Nothing -> return () Nothing -> return ()
Just payload -> do Just payload -> do pinfo <- lift $ putObjectPart b o uid pn [] payload
pinfo <- lift $ putObjectPart b o uid pn [] payload C.yield pinfo
C.yield pinfo uploadPart' uid pns
uploadPart' uid pns

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -14,185 +14,150 @@
-- limitations under the License. -- limitations under the License.
-- --
-- |
-- Module: Network.Minio.S3API
-- Copyright: (c) 2017-2023 MinIO Dev Team
-- License: Apache 2.0
-- Maintainer: MinIO Dev Team <dev@min.io>
--
-- Lower-level API for S3 compatible object stores. Start with @Network.Minio@
-- and use this only if needed.
module Network.Minio.S3API module Network.Minio.S3API
( Region, (
getLocation, Region
, getLocation
-- * Listing buckets -- * Listing buckets
--------------------
, getService
-------------------- -- * Listing objects
getService, --------------------
, ListObjectsResult(..)
, ListObjectsV1Result(..)
, listObjects'
, listObjectsV1'
-- * Listing objects -- * Retrieving buckets
, headBucket
-------------------- -- * Retrieving objects
ListObjectsResult (..), -----------------------
ListObjectsV1Result (..), , getObject'
listObjects', , headObject
listObjectsV1',
-- * Retrieving buckets -- * Creating buckets and objects
headBucket, ---------------------------------
, putBucket
, ETag
, maxSinglePutObjectSizeBytes
, putObjectSingle'
, putObjectSingle
, copyObjectSingle
-- * Retrieving objects -- * Multipart Upload APIs
--------------------------
, UploadId
, PartTuple
, Payload(..)
, PartNumber
, newMultipartUpload
, putObjectPart
, copyObjectPart
, completeMultipartUpload
, abortMultipartUpload
, ListUploadsResult(..)
, listIncompleteUploads'
, ListPartsResult(..)
, listIncompleteParts'
----------------------- -- * Deletion APIs
getObject', --------------------------
headObject, , deleteBucket
, deleteObject
-- * Creating buckets and objects -- * Presigned Operations
-----------------------------
, module Network.Minio.PresignedOperations
--------------------------------- -- ** Bucket Policies
putBucket, , getBucketPolicy
ETag, , setBucketPolicy
maxSinglePutObjectSizeBytes,
putObjectSingle',
putObjectSingle,
copyObjectSingle,
-- * Multipart Upload APIs -- * Bucket Notifications
-------------------------
, Notification(..)
, NotificationConfig(..)
, Arn
, Event(..)
, Filter(..)
, FilterKey(..)
, FilterRules(..)
, FilterRule(..)
, getBucketNotification
, putBucketNotification
, removeAllBucketNotification
) where
-------------------------- import qualified Data.ByteString as BS
UploadId, import qualified Data.Text as T
PartTuple, import qualified Network.HTTP.Conduit as NC
Payload (..), import qualified Network.HTTP.Types as HT
PartNumber, import Network.HTTP.Types.Status (status404)
newMultipartUpload, import UnliftIO (Handler (Handler))
putObjectPart,
copyObjectPart,
completeMultipartUpload,
abortMultipartUpload,
ListUploadsResult (..),
listIncompleteUploads',
ListPartsResult (..),
listIncompleteParts',
-- * Deletion APIs import Lib.Prelude
-------------------------- import Network.Minio.API
deleteBucket, import Network.Minio.APICommon
deleteObject, import Network.Minio.Data
import Network.Minio.Errors
-- * Presigned Operations import Network.Minio.PresignedOperations
import Network.Minio.Utils
----------------------------- import Network.Minio.XmlGenerator
module Network.Minio.PresignedOperations, import Network.Minio.XmlParser
-- ** Bucket Policies
getBucketPolicy,
setBucketPolicy,
-- * Bucket Notifications
-------------------------
Notification (..),
NotificationConfig (..),
Arn,
Event (..),
Filter (..),
FilterKey (..),
FilterRules (..),
FilterRule (..),
getBucketNotification,
putBucketNotification,
removeAllBucketNotification,
)
where
import qualified Data.ByteString as BS
import qualified Data.Text as T
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Status (status404)
import Network.Minio.API
import Network.Minio.APICommon
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.PresignedOperations
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (Handler (Handler))
-- | Fetch all buckets from the service. -- | Fetch all buckets from the service.
getService :: Minio [BucketInfo] getService :: Minio [BucketInfo]
getService = do getService = do
resp <- resp <- executeRequest $ defaultS3ReqInfo {
executeRequest $ riNeedsLocation = False
defaultS3ReqInfo }
{ riNeedsLocation = False
}
parseListBuckets $ NC.responseBody resp parseListBuckets $ NC.responseBody resp
-- Parse headers from getObject and headObject calls. -- Parse headers from getObject and headObject calls.
parseGetObjectHeaders :: Object -> [HT.Header] -> Maybe ObjectInfo parseGetObjectHeaders :: Object -> [HT.Header] -> Maybe ObjectInfo
parseGetObjectHeaders object headers = parseGetObjectHeaders object headers =
let metadataPairs = getMetadata headers let metadataPairs = getMetadata headers
userMetadata = getUserMetadataMap metadataPairs userMetadata = getUserMetadataMap metadataPairs
metadata = getNonUserMetadataMap metadataPairs metadata = getNonUserMetadataMap metadataPairs
in ObjectInfo in ObjectInfo <$> Just object
<$> Just object <*> getLastModifiedHeader headers
<*> getLastModifiedHeader headers <*> getETagHeader headers
<*> getETagHeader headers <*> getContentLength headers
<*> getContentLength headers <*> Just userMetadata
<*> Just userMetadata <*> Just metadata
<*> Just metadata
-- | GET an object from the service and return parsed ObjectInfo and a -- | GET an object from the service and return parsed ObjectInfo and a
-- conduit source for the object content -- conduit source for the object content
getObject' :: getObject' :: Bucket -> Object -> HT.Query -> [HT.Header]
Bucket -> -> Minio GetObjectResponse
Object ->
HT.Query ->
[HT.Header] ->
Minio GetObjectResponse
getObject' bucket object queryParams headers = do getObject' bucket object queryParams headers = do
resp <- mkStreamRequest reqInfo resp <- mkStreamRequest reqInfo
let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp let objInfoMaybe = parseGetObjectHeaders object $ NC.responseHeaders resp
objInfo <- objInfo <- maybe (throwIO MErrVInvalidObjectInfoResponse) return
maybe objInfoMaybe
(throwIO MErrVInvalidObjectInfoResponse) return $ GetObjectResponse { gorObjectInfo = objInfo
return , gorObjectStream = NC.responseBody resp
objInfoMaybe }
return $
GetObjectResponse
{ gorObjectInfo = objInfo,
gorObjectStream = NC.responseBody resp
}
where where
reqInfo = reqInfo = defaultS3ReqInfo { riBucket = Just bucket
defaultS3ReqInfo , riObject = Just object
{ riBucket = Just bucket, , riQueryParams = queryParams
riObject = Just object, , riHeaders = headers
riQueryParams = queryParams, }
riHeaders =
headers
-- This header is required for safety as otherwise http-client,
-- sends Accept-Encoding: gzip, and the server may actually gzip
-- body. In that case Content-Length header will be missing.
<> [("Accept-Encoding", "identity")]
}
-- | Creates a bucket via a PUT bucket call. -- | Creates a bucket via a PUT bucket call.
putBucket :: Bucket -> Region -> Minio () putBucket :: Bucket -> Region -> Minio ()
putBucket bucket location = do putBucket bucket location = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
void $ void $ executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodPut, , riPayload = PayloadBS $ mkCreateBucketConfig ns location
riBucket = Just bucket, , riNeedsLocation = False
riPayload = PayloadBS $ mkCreateBucketConfig ns location,
riNeedsLocation = False
} }
-- | Single PUT object size. -- | Single PUT object size.
@ -208,432 +173,314 @@ putObjectSingle' bucket object headers bs = do
let size = fromIntegral (BS.length bs) let size = fromIntegral (BS.length bs)
-- check length is within single PUT object size. -- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $ when (size > maxSinglePutObjectSizeBytes) $
throwIO $ throwIO $ MErrVSinglePUTSizeExceeded size
MErrVSinglePUTSizeExceeded size
let payload = mkStreamingPayload $ PayloadBS bs let payload = mkStreamingPayload $ PayloadBS bs
resp <- resp <- executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodPut, , riObject = Just object
riBucket = Just bucket, , riHeaders = headers
riObject = Just object, , riPayload = payload
riHeaders = headers, }
riPayload = payload
}
let rheaders = NC.responseHeaders resp let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders etag = getETagHeader rheaders
maybe maybe
(throwIO MErrVETagHeaderNotFound) (throwIO MErrVETagHeaderNotFound)
return return etag
etag
-- | PUT an object into the service. This function performs a single -- | PUT an object into the service. This function performs a single
-- PUT object call, and so can only transfer objects upto 5GiB. -- PUT object call, and so can only transfer objects upto 5GiB.
putObjectSingle :: putObjectSingle :: Bucket -> Object -> [HT.Header] -> Handle -> Int64
Bucket -> -> Int64 -> Minio ETag
Object ->
[HT.Header] ->
Handle ->
Int64 ->
Int64 ->
Minio ETag
putObjectSingle bucket object headers h offset size = do putObjectSingle bucket object headers h offset size = do
-- check length is within single PUT object size. -- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) $ when (size > maxSinglePutObjectSizeBytes) $
throwIO $ throwIO $ MErrVSinglePUTSizeExceeded size
MErrVSinglePUTSizeExceeded size
-- content-length header is automatically set by library. -- content-length header is automatically set by library.
let payload = mkStreamingPayload $ PayloadH h offset size let payload = mkStreamingPayload $ PayloadH h offset size
resp <- resp <- executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodPut, , riObject = Just object
riBucket = Just bucket, , riHeaders = headers
riObject = Just object, , riPayload = payload
riHeaders = headers, }
riPayload = payload
}
let rheaders = NC.responseHeaders resp let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders etag = getETagHeader rheaders
maybe maybe
(throwIO MErrVETagHeaderNotFound) (throwIO MErrVETagHeaderNotFound)
return return etag
etag
-- | List objects in a bucket matching prefix up to delimiter, -- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextMarker. -- starting from nextMarker.
listObjectsV1' :: listObjectsV1' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int
Bucket -> -> Minio ListObjectsV1Result
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsV1Result
listObjectsV1' bucket prefix nextMarker delimiter maxKeys = do listObjectsV1' bucket prefix nextMarker delimiter maxKeys = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = mkOptionalParams params
{ riMethod = HT.methodGet, }
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
parseListObjectsV1Response $ NC.responseBody resp parseListObjectsV1Response $ NC.responseBody resp
where where
params = params = [
[ ("marker", nextMarker), ("marker", nextMarker)
("prefix", prefix), , ("prefix", prefix)
("delimiter", delimiter), , ("delimiter", delimiter)
("max-keys", show <$> maxKeys) , ("max-keys", show <$> maxKeys)
] ]
-- | List objects in a bucket matching prefix up to delimiter, -- | List objects in a bucket matching prefix up to delimiter,
-- starting from nextToken. -- starting from nextToken.
listObjects' :: listObjects' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text -> Maybe Int
Bucket -> -> Minio ListObjectsResult
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListObjectsResult
listObjects' bucket prefix nextToken delimiter maxKeys = do listObjects' bucket prefix nextToken delimiter maxKeys = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = mkOptionalParams params
{ riMethod = HT.methodGet, }
riBucket = Just bucket,
riQueryParams = mkOptionalParams params
}
parseListObjectsResponse $ NC.responseBody resp parseListObjectsResponse $ NC.responseBody resp
where where
params = params = [
[ ("list-type", Just "2"), ("list-type", Just "2")
("continuation_token", nextToken), , ("continuation_token", nextToken)
("prefix", prefix), , ("prefix", prefix)
("delimiter", delimiter), , ("delimiter", delimiter)
("max-keys", show <$> maxKeys) , ("max-keys", show <$> maxKeys)
] ]
-- | DELETE a bucket from the service. -- | DELETE a bucket from the service.
deleteBucket :: Bucket -> Minio () deleteBucket :: Bucket -> Minio ()
deleteBucket bucket = deleteBucket bucket = void $
void $ executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodDelete,
riBucket = Just bucket
} }
-- | DELETE an object from the service. -- | DELETE an object from the service.
deleteObject :: Bucket -> Object -> Minio () deleteObject :: Bucket -> Object -> Minio ()
deleteObject bucket object = deleteObject bucket object = void $
void $ executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodDelete, , riObject = Just object
riBucket = Just bucket,
riObject = Just object
} }
-- | Create a new multipart upload. -- | Create a new multipart upload.
newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
newMultipartUpload bucket object headers = do newMultipartUpload bucket object headers = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPost
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riObject = Just object
{ riMethod = HT.methodPost, , riQueryParams = [("uploads", Nothing)]
riBucket = Just bucket, , riHeaders = headers
riObject = Just object, }
riQueryParams = [("uploads", Nothing)],
riHeaders = headers
}
parseNewMultipartUpload $ NC.responseBody resp parseNewMultipartUpload $ NC.responseBody resp
-- | PUT a part of an object as part of a multipart upload. -- | PUT a part of an object as part of a multipart upload.
putObjectPart :: putObjectPart :: Bucket -> Object -> UploadId -> PartNumber -> [HT.Header]
Bucket -> -> Payload -> Minio PartTuple
Object ->
UploadId ->
PartNumber ->
[HT.Header] ->
Payload ->
Minio PartTuple
putObjectPart bucket object uploadId partNumber headers payload = do putObjectPart bucket object uploadId partNumber headers payload = do
-- transform payload to conduit to enable streaming signature -- transform payload to conduit to enable streaming signature
let payload' = mkStreamingPayload payload let payload' = mkStreamingPayload payload
resp <- resp <- executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodPut, , riObject = Just object
riBucket = Just bucket, , riQueryParams = mkOptionalParams params
riObject = Just object, , riHeaders = headers
riQueryParams = mkOptionalParams params, , riPayload = payload'
riHeaders = headers, }
riPayload = payload'
}
let rheaders = NC.responseHeaders resp let rheaders = NC.responseHeaders resp
etag = getETagHeader rheaders etag = getETagHeader rheaders
maybe maybe
(throwIO MErrVETagHeaderNotFound) (throwIO MErrVETagHeaderNotFound)
(return . (partNumber,)) (return . (partNumber, )) etag
etag
where where
params = params = [
[ ("uploadId", Just uploadId), ("uploadId", Just uploadId)
("partNumber", Just $ show partNumber) , ("partNumber", Just $ show partNumber)
] ]
srcInfoToHeaders :: SourceInfo -> [HT.Header] srcInfoToHeaders :: SourceInfo -> [HT.Header]
srcInfoToHeaders srcInfo = srcInfoToHeaders srcInfo = ("x-amz-copy-source",
( "x-amz-copy-source", toS $ T.concat ["/", srcBucket srcInfo,
encodeUtf8 $ "/", srcObject srcInfo]
T.concat ) : rangeHdr ++ zip names values
[ "/",
srcBucket srcInfo,
"/",
srcObject srcInfo
]
)
: rangeHdr
++ zip names values
where where
names = names = ["x-amz-copy-source-if-match", "x-amz-copy-source-if-none-match",
[ "x-amz-copy-source-if-match", "x-amz-copy-source-if-unmodified-since",
"x-amz-copy-source-if-none-match", "x-amz-copy-source-if-modified-since"]
"x-amz-copy-source-if-unmodified-since", values = mapMaybe (fmap encodeUtf8 . (srcInfo &))
"x-amz-copy-source-if-modified-since" [srcIfMatch, srcIfNoneMatch,
] fmap formatRFC1123 . srcIfUnmodifiedSince,
values = fmap formatRFC1123 . srcIfModifiedSince]
mapMaybe rangeHdr = maybe [] (\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])])
(fmap encodeUtf8 . (srcInfo &)) $ toByteRange <$> srcRange srcInfo
[ srcIfMatch,
srcIfNoneMatch,
fmap formatRFC1123 . srcIfUnmodifiedSince,
fmap formatRFC1123 . srcIfModifiedSince
]
rangeHdr =
maybe [] ((\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) . toByteRange) (srcRange srcInfo)
toByteRange :: (Int64, Int64) -> HT.ByteRange toByteRange :: (Int64, Int64) -> HT.ByteRange
toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y) toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y)
-- | Performs server-side copy of an object or part of an object as an -- | Performs server-side copy of an object or part of an object as an
-- upload part of an ongoing multi-part upload. -- upload part of an ongoing multi-part upload.
copyObjectPart :: copyObjectPart :: DestinationInfo -> SourceInfo -> UploadId
DestinationInfo -> -> PartNumber -> [HT.Header] -> Minio (ETag, UTCTime)
SourceInfo ->
UploadId ->
PartNumber ->
[HT.Header] ->
Minio (ETag, UTCTime)
copyObjectPart dstInfo srcInfo uploadId partNumber headers = do copyObjectPart dstInfo srcInfo uploadId partNumber headers = do
resp <- resp <- executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
defaultS3ReqInfo , riBucket = Just $ dstBucket dstInfo
{ riMethod = HT.methodPut, , riObject = Just $ dstObject dstInfo
riBucket = Just $ dstBucket dstInfo, , riQueryParams = mkOptionalParams params
riObject = Just $ dstObject dstInfo, , riHeaders = headers ++ srcInfoToHeaders srcInfo
riQueryParams = mkOptionalParams params, }
riHeaders = headers ++ srcInfoToHeaders srcInfo
}
parseCopyObjectResponse $ NC.responseBody resp parseCopyObjectResponse $ NC.responseBody resp
where where
params = params = [
[ ("uploadId", Just uploadId), ("uploadId", Just uploadId)
("partNumber", Just $ show partNumber) , ("partNumber", Just $ show partNumber)
] ]
-- | Performs server-side copy of an object that is upto 5GiB in -- | Performs server-side copy of an object that is upto 5GiB in
-- size. If the object is greater than 5GiB, this function throws the -- size. If the object is greater than 5GiB, this function throws the
-- error returned by the server. -- error returned by the server.
copyObjectSingle :: copyObjectSingle :: Bucket -> Object -> SourceInfo -> [HT.Header]
Bucket -> -> Minio (ETag, UTCTime)
Object ->
SourceInfo ->
[HT.Header] ->
Minio (ETag, UTCTime)
copyObjectSingle bucket object srcInfo headers = do copyObjectSingle bucket object srcInfo headers = do
-- validate that srcRange is Nothing for this API. -- validate that srcRange is Nothing for this API.
when (isJust $ srcRange srcInfo) $ when (isJust $ srcRange srcInfo) $
throwIO MErrVCopyObjSingleNoRangeAccepted throwIO MErrVCopyObjSingleNoRangeAccepted
resp <- resp <- executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodPut, , riObject = Just object
riBucket = Just bucket, , riHeaders = headers ++ srcInfoToHeaders srcInfo
riObject = Just object, }
riHeaders = headers ++ srcInfoToHeaders srcInfo
}
parseCopyObjectResponse $ NC.responseBody resp parseCopyObjectResponse $ NC.responseBody resp
-- | Complete a multipart upload. -- | Complete a multipart upload.
completeMultipartUpload :: completeMultipartUpload :: Bucket -> Object -> UploadId -> [PartTuple]
Bucket -> -> Minio ETag
Object ->
UploadId ->
[PartTuple] ->
Minio ETag
completeMultipartUpload bucket object uploadId partTuple = do completeMultipartUpload bucket object uploadId partTuple = do
resp <- resp <- executeRequest $
executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPost
defaultS3ReqInfo , riBucket = Just bucket
{ riMethod = HT.methodPost, , riObject = Just object
riBucket = Just bucket, , riQueryParams = mkOptionalParams params
riObject = Just object, , riPayload = PayloadBS $
riQueryParams = mkOptionalParams params, mkCompleteMultipartUploadRequest partTuple
riPayload = }
PayloadBS $
mkCompleteMultipartUploadRequest partTuple
}
parseCompleteMultipartUploadResponse $ NC.responseBody resp parseCompleteMultipartUploadResponse $ NC.responseBody resp
where where
params = [("uploadId", Just uploadId)] params = [("uploadId", Just uploadId)]
-- | Abort a multipart upload. -- | Abort a multipart upload.
abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio () abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
abortMultipartUpload bucket object uploadId = abortMultipartUpload bucket object uploadId = void $
void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riObject = Just object
{ riMethod = HT.methodDelete, , riQueryParams = mkOptionalParams params
riBucket = Just bucket, }
riObject = Just object,
riQueryParams = mkOptionalParams params
}
where where
params = [("uploadId", Just uploadId)] params = [("uploadId", Just uploadId)]
-- | List incomplete multipart uploads. -- | List incomplete multipart uploads.
listIncompleteUploads' :: listIncompleteUploads' :: Bucket -> Maybe Text -> Maybe Text -> Maybe Text
Bucket -> -> Maybe Text -> Maybe Int -> Minio ListUploadsResult
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Text ->
Maybe Int ->
Minio ListUploadsResult
listIncompleteUploads' bucket prefix delimiter keyMarker uploadIdMarker maxKeys = do listIncompleteUploads' bucket prefix delimiter keyMarker uploadIdMarker maxKeys = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = params
{ riMethod = HT.methodGet, }
riBucket = Just bucket,
riQueryParams = params
}
parseListUploadsResponse $ NC.responseBody resp parseListUploadsResponse $ NC.responseBody resp
where where
-- build query params -- build query params
params = params = ("uploads", Nothing) : mkOptionalParams
("uploads", Nothing) [ ("prefix", prefix)
: mkOptionalParams , ("delimiter", delimiter)
[ ("prefix", prefix), , ("key-marker", keyMarker)
("delimiter", delimiter), , ("upload-id-marker", uploadIdMarker)
("key-marker", keyMarker), , ("max-uploads", show <$> maxKeys)
("upload-id-marker", uploadIdMarker), ]
("max-uploads", show <$> maxKeys)
]
-- | List parts of an ongoing multipart upload. -- | List parts of an ongoing multipart upload.
listIncompleteParts' :: listIncompleteParts' :: Bucket -> Object -> UploadId -> Maybe Text
Bucket -> -> Maybe Text -> Minio ListPartsResult
Object ->
UploadId ->
Maybe Text ->
Maybe Text ->
Minio ListPartsResult
listIncompleteParts' bucket object uploadId maxParts partNumMarker = do listIncompleteParts' bucket object uploadId maxParts partNumMarker = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riObject = Just object
{ riMethod = HT.methodGet, , riQueryParams = mkOptionalParams params
riBucket = Just bucket, }
riObject = Just object,
riQueryParams = mkOptionalParams params
}
parseListPartsResponse $ NC.responseBody resp parseListPartsResponse $ NC.responseBody resp
where where
-- build optional query params -- build optional query params
params = params = [
[ ("uploadId", Just uploadId), ("uploadId", Just uploadId)
("part-number-marker", partNumMarker), , ("part-number-marker", partNumMarker)
("max-parts", maxParts) , ("max-parts", maxParts)
] ]
-- | Get metadata of an object. -- | Get metadata of an object.
headObject :: Bucket -> Object -> [HT.Header] -> Minio ObjectInfo headObject :: Bucket -> Object -> [HT.Header] -> Minio ObjectInfo
headObject bucket object reqHeaders = do headObject bucket object reqHeaders = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodHead
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riObject = Just object
{ riMethod = HT.methodHead, , riHeaders = reqHeaders
riBucket = Just bucket, }
riObject = Just object,
riHeaders =
reqHeaders
-- This header is required for safety as otherwise http-client,
-- sends Accept-Encoding: gzip, and the server may actually gzip
-- body. In that case Content-Length header will be missing.
<> [("Accept-Encoding", "identity")]
}
maybe (throwIO MErrVInvalidObjectInfoResponse) return $ maybe (throwIO MErrVInvalidObjectInfoResponse) return $
parseGetObjectHeaders object $ parseGetObjectHeaders object $ NC.responseHeaders resp
NC.responseHeaders resp
-- | Query the object store if a given bucket exists. -- | Query the object store if a given bucket exists.
headBucket :: Bucket -> Minio Bool headBucket :: Bucket -> Minio Bool
headBucket bucket = headBucket bucket = headBucketEx `catches`
headBucketEx [ Handler handleNoSuchBucket
`catches` [ Handler handleNoSuchBucket, , Handler handleStatus404
Handler handleStatus404 ]
]
where where
handleNoSuchBucket :: ServiceErr -> Minio Bool handleNoSuchBucket :: ServiceErr -> Minio Bool
handleNoSuchBucket e handleNoSuchBucket e | e == NoSuchBucket = return False
| e == NoSuchBucket = return False | otherwise = throwIO e
| otherwise = throwIO e
handleStatus404 :: NC.HttpException -> Minio Bool handleStatus404 :: NC.HttpException -> Minio Bool
handleStatus404 e@(NC.HttpExceptionRequest _ (NC.StatusCodeException res _)) = handleStatus404 e@(NC.HttpExceptionRequest _ (NC.StatusCodeException res _)) =
if NC.responseStatus res == status404 if NC.responseStatus res == status404
then return False then return False
else throwIO e else throwIO e
handleStatus404 e = throwIO e handleStatus404 e = throwIO e
headBucketEx = do headBucketEx = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodHead
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo }
{ riMethod = HT.methodHead,
riBucket = Just bucket
}
return $ NC.responseStatus resp == HT.ok200 return $ NC.responseStatus resp == HT.ok200
-- | Set the notification configuration on a bucket. -- | Set the notification configuration on a bucket.
putBucketNotification :: Bucket -> Notification -> Minio () putBucketNotification :: Bucket -> Notification -> Minio ()
putBucketNotification bucket ncfg = do putBucketNotification bucket ncfg = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
void $ void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = [("notification", Nothing)]
{ riMethod = HT.methodPut, , riPayload = PayloadBS $
riBucket = Just bucket, mkPutNotificationRequest ns ncfg
riQueryParams = [("notification", Nothing)], }
riPayload =
PayloadBS $
mkPutNotificationRequest ns ncfg
}
-- | Retrieve the notification configuration on a bucket. -- | Retrieve the notification configuration on a bucket.
getBucketNotification :: Bucket -> Minio Notification getBucketNotification :: Bucket -> Minio Notification
getBucketNotification bucket = do getBucketNotification bucket = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = [("notification", Nothing)]
{ riMethod = HT.methodGet, }
riBucket = Just bucket,
riQueryParams = [("notification", Nothing)]
}
parseNotification $ NC.responseBody resp parseNotification $ NC.responseBody resp
-- | Remove all notifications configured on a bucket. -- | Remove all notifications configured on a bucket.
@ -643,14 +490,11 @@ removeAllBucketNotification = flip putBucketNotification defaultNotification
-- | Fetch the policy if any on a bucket. -- | Fetch the policy if any on a bucket.
getBucketPolicy :: Bucket -> Minio Text getBucketPolicy :: Bucket -> Minio Text
getBucketPolicy bucket = do getBucketPolicy bucket = do
resp <- resp <- executeRequest $ defaultS3ReqInfo { riMethod = HT.methodGet
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = [("policy", Nothing)]
{ riMethod = HT.methodGet, }
riBucket = Just bucket, return $ toS $ NC.responseBody resp
riQueryParams = [("policy", Nothing)]
}
return $ decodeUtf8Lenient $ toStrictBS $ NC.responseBody resp
-- | Set a new policy on a bucket. -- | Set a new policy on a bucket.
-- As a special condition if the policy is empty -- As a special condition if the policy is empty
@ -662,24 +506,18 @@ setBucketPolicy bucket policy = do
else putBucketPolicy bucket policy else putBucketPolicy bucket policy
-- | Save a new policy on a bucket. -- | Save a new policy on a bucket.
putBucketPolicy :: Bucket -> Text -> Minio () putBucketPolicy :: Bucket -> Text -> Minio()
putBucketPolicy bucket policy = do putBucketPolicy bucket policy = do
void $ void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodPut
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = [("policy", Nothing)]
{ riMethod = HT.methodPut, , riPayload = PayloadBS $ encodeUtf8 policy
riBucket = Just bucket, }
riQueryParams = [("policy", Nothing)],
riPayload = PayloadBS $ encodeUtf8 policy
}
-- | Delete any policy set on a bucket. -- | Delete any policy set on a bucket.
deleteBucketPolicy :: Bucket -> Minio () deleteBucketPolicy :: Bucket -> Minio()
deleteBucketPolicy bucket = do deleteBucketPolicy bucket = do
void $ void $ executeRequest $ defaultS3ReqInfo { riMethod = HT.methodDelete
executeRequest $ , riBucket = Just bucket
defaultS3ReqInfo , riQueryParams = [("policy", Nothing)]
{ riMethod = HT.methodDelete, }
riBucket = Just bucket,
riQueryParams = [("policy", Nothing)]
}

View File

@ -15,103 +15,113 @@
-- --
module Network.Minio.SelectAPI module Network.Minio.SelectAPI
( -- | The `selectObjectContent` allows querying CSV, JSON or Parquet (
-- format objects in AWS S3 and in MinIO using SQL Select
-- statements. This allows significant reduction of data transfer
-- from object storage for computation-intensive tasks, as relevant
-- data is filtered close to the storage.
selectObjectContent,
SelectRequest,
selectRequest,
-- *** Input Serialization -- | The `selectObjectContent` allows querying CSV, JSON or Parquet
InputSerialization, -- format objects in AWS S3 and in MinIO using SQL Select
defaultCsvInput, -- statements. This allows significant reduction of data transfer
linesJsonInput, -- from object storage for computation-intensive tasks, as relevant
documentJsonInput, -- data is filtered close to the storage.
defaultParquetInput,
setInputCSVProps,
CompressionType (..),
setInputCompressionType,
-- *** CSV Format details selectObjectContent
-- | CSV format options such as delimiters and quote characters are , SelectRequest
-- specified using using the functions below. Options are combined , selectRequest
-- monoidally.
CSVProp,
recordDelimiter,
fieldDelimiter,
quoteCharacter,
quoteEscapeCharacter,
commentCharacter,
allowQuotedRecordDelimiter,
FileHeaderInfo (..),
fileHeaderInfo,
QuoteFields (..),
quoteFields,
-- *** Output Serialization -- *** Input Serialization
OutputSerialization,
defaultCsvOutput,
defaultJsonOutput,
outputCSVFromProps,
outputJSONFromRecordDelimiter,
-- *** Progress messages , InputSerialization
setRequestProgressEnabled, , defaultCsvInput
, linesJsonInput
, documentJsonInput
, defaultParquetInput
, setInputCSVProps
-- *** Interpreting Select output , CompressionType(..)
, setInputCompressionType
-- | The conduit returned by `selectObjectContent` returns values of -- *** CSV Format details
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
getPayloadBytes,
EventMessage (..),
Progress (..),
Stats,
)
where
import Conduit ((.|)) -- | CSV format options such as delimiters and quote characters are
import qualified Conduit as C -- specified using using the functions below. Options are combined
import qualified Data.Binary as Bin -- monoidally.
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
import UnliftIO (MonadUnliftIO)
data EventStreamException , CSVProp
= ESEPreludeCRCFailed , recordDelimiter
| ESEMessageCRCFailed , fieldDelimiter
| ESEUnexpectedEndOfStream , quoteCharacter
| ESEDecodeFail [Char] , quoteEscapeCharacter
| ESEInvalidHeaderType , commentCharacter
| ESEInvalidHeaderValueType , allowQuotedRecordDelimiter
| ESEInvalidMessageType , FileHeaderInfo(..)
deriving stock (Eq, Show) , fileHeaderInfo
, QuoteFields(..)
, quoteFields
-- *** Output Serialization
, OutputSerialization
, defaultCsvOutput
, defaultJsonOutput
, outputCSVFromProps
, outputJSONFromRecordDelimiter
-- *** Progress messages
, setRequestProgressEnabled
-- *** Interpreting Select output
-- | The conduit returned by `selectObjectContent` returns values of
-- the `EventMessage` data type. This returns the query output
-- messages formatted according to the chosen output serialization,
-- interleaved with progress messages (if enabled by
-- `setRequestProgressEnabled`), and at the end a statistics
-- message.
--
-- If the application is interested in only the payload, then
-- `getPayloadBytes` can be used. For example to simply print the
-- payload to stdout:
--
-- > resultConduit <- selectObjectContent bucket object mySelectRequest
-- > runConduit $ resultConduit .| getPayloadBytes .| stdoutC
--
-- Note that runConduit, the connect operator (.|) and stdoutC are
-- all from the "conduit" package.
, getPayloadBytes
, EventMessage(..)
, Progress(..)
, Stats
) where
import Conduit ((.|))
import qualified Conduit as C
import qualified Data.Binary as Bin
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB
import Data.Digest.CRC32 (crc32, crc32Update)
import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT
import UnliftIO (MonadUnliftIO)
import Lib.Prelude
import Network.Minio.API
import Network.Minio.Data
import Network.Minio.Errors
import Network.Minio.Utils
import Network.Minio.XmlGenerator
import Network.Minio.XmlParser
data EventStreamException = ESEPreludeCRCFailed
| ESEMessageCRCFailed
| ESEUnexpectedEndOfStream
| ESEDecodeFail [Char]
| ESEInvalidHeaderType
| ESEInvalidHeaderValueType
| ESEInvalidMessageType
deriving (Eq, Show)
instance Exception EventStreamException instance Exception EventStreamException
@ -119,176 +129,171 @@ instance Exception EventStreamException
chunkSize :: Int chunkSize :: Int
chunkSize = 32 * 1024 chunkSize = 32 * 1024
parseBinary :: (Bin.Binary a) => ByteString -> IO a parseBinary :: Bin.Binary a => ByteString -> IO a
parseBinary b = do parseBinary b = do
case Bin.decodeOrFail $ LB.fromStrict b of case Bin.decodeOrFail $ LB.fromStrict b of
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
Right (_, _, r) -> return r Right (_, _, r) -> return r
bytesToHeaderName :: Text -> IO MsgHeaderName bytesToHeaderName :: Text -> IO MsgHeaderName
bytesToHeaderName t = case t of bytesToHeaderName t = case t of
":message-type" -> return MessageType ":message-type" -> return MessageType
":event-type" -> return EventType ":event-type" -> return EventType
":content-type" -> return ContentType ":content-type" -> return ContentType
":error-code" -> return ErrorCode ":error-code" -> return ErrorCode
":error-message" -> return ErrorMessage ":error-message" -> return ErrorMessage
_ -> throwIO ESEInvalidHeaderType _ -> throwIO ESEInvalidHeaderType
parseHeaders :: parseHeaders :: MonadUnliftIO m
(MonadUnliftIO m) => => Word32 -> C.ConduitM ByteString a m [MessageHeader]
Word32 ->
C.ConduitM ByteString a m [MessageHeader]
parseHeaders 0 = return [] parseHeaders 0 = return []
parseHeaders hdrLen = do parseHeaders hdrLen = do
bs1 <- readNBytes 1 bs1 <- readNBytes 1
n :: Word8 <- liftIO $ parseBinary bs1 n :: Word8 <- liftIO $ parseBinary bs1
headerKeyBytes <- readNBytes $ fromIntegral n headerKeyBytes <- readNBytes $ fromIntegral n
let headerKey = decodeUtf8Lenient headerKeyBytes let headerKey = decodeUtf8Lenient headerKeyBytes
headerName <- liftIO $ bytesToHeaderName headerKey headerName <- liftIO $ bytesToHeaderName headerKey
bs2 <- readNBytes 1 bs2 <- readNBytes 1
headerValueType :: Word8 <- liftIO $ parseBinary bs2 headerValueType :: Word8 <- liftIO $ parseBinary bs2
when (headerValueType /= 7) $ throwIO ESEInvalidHeaderValueType when (headerValueType /= 7) $ throwIO ESEInvalidHeaderValueType
bs3 <- readNBytes 2 bs3 <- readNBytes 2
vLen :: Word16 <- liftIO $ parseBinary bs3 vLen :: Word16 <- liftIO $ parseBinary bs3
headerValueBytes <- readNBytes $ fromIntegral vLen headerValueBytes <- readNBytes $ fromIntegral vLen
let headerValue = decodeUtf8Lenient headerValueBytes let headerValue = decodeUtf8Lenient headerValueBytes
m = (headerName, headerValue) m = (headerName, headerValue)
k = 1 + fromIntegral n + 1 + 2 + fromIntegral vLen k = 1 + fromIntegral n + 1 + 2 + fromIntegral vLen
ms <- parseHeaders (hdrLen - k) ms <- parseHeaders (hdrLen - k)
return (m : ms) return (m:ms)
-- readNBytes returns N bytes read from the string and throws an -- readNBytes returns N bytes read from the string and throws an
-- exception if N bytes are not present on the stream. -- exception if N bytes are not present on the stream.
readNBytes :: (MonadUnliftIO m) => Int -> C.ConduitM ByteString a m ByteString readNBytes :: MonadUnliftIO m => Int -> C.ConduitM ByteString a m ByteString
readNBytes n = do readNBytes n = do
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy) b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
if B.length b /= n if B.length b /= n
then throwIO ESEUnexpectedEndOfStream then throwIO ESEUnexpectedEndOfStream
else return b else return b
crcCheck :: crcCheck :: MonadUnliftIO m
(MonadUnliftIO m) => => C.ConduitM ByteString ByteString m ()
C.ConduitM ByteString ByteString m ()
crcCheck = do crcCheck = do
b <- readNBytes 12 b <- readNBytes 12
n :: Word32 <- liftIO $ parseBinary $ B.take 4 b n :: Word32 <- liftIO $ parseBinary $ B.take 4 b
preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 b preludeCRC :: Word32 <- liftIO $ parseBinary $ B.drop 8 b
when (crc32 (B.take 8 b) /= preludeCRC) $ when (crc32 (B.take 8 b) /= preludeCRC) $
throwIO ESEPreludeCRCFailed throwIO ESEPreludeCRCFailed
-- we do not yield the checksum -- we do not yield the checksum
C.yield $ B.take 8 b C.yield $ B.take 8 b
-- 12 bytes have been read off the current message. Now read the -- 12 bytes have been read off the current message. Now read the
-- next (n-12)-4 bytes and accumulate the checksum, and yield it. -- next (n-12)-4 bytes and accumulate the checksum, and yield it.
let startCrc = crc32 b let startCrc = crc32 b
finalCrc <- accumulateYield (fromIntegral n - 16) startCrc finalCrc <- accumulateYield (fromIntegral n-16) startCrc
bs <- readNBytes 4 bs <- readNBytes 4
expectedCrc :: Word32 <- liftIO $ parseBinary bs expectedCrc :: Word32 <- liftIO $ parseBinary bs
when (finalCrc /= expectedCrc) $ when (finalCrc /= expectedCrc) $
throwIO ESEMessageCRCFailed throwIO ESEMessageCRCFailed
-- we unconditionally recurse - downstream figures out when to -- we unconditionally recurse - downstream figures out when to
-- quit reading the stream -- quit reading the stream
crcCheck crcCheck
where where
accumulateYield n checkSum = do accumulateYield n checkSum = do
let toRead = min n chunkSize let toRead = min n chunkSize
b <- readNBytes toRead b <- readNBytes toRead
let c' = crc32Update checkSum b let c' = crc32Update checkSum b
n' = n - B.length b n' = n - B.length b
C.yield b C.yield b
if n' > 0 if n' > 0
then accumulateYield n' c' then accumulateYield n' c'
else return c' else return c'
handleMessage :: (MonadUnliftIO m) => C.ConduitT ByteString EventMessage m () handleMessage :: MonadUnliftIO m => C.ConduitT ByteString EventMessage m ()
handleMessage = do handleMessage = do
b1 <- readNBytes 4 b1 <- readNBytes 4
msgLen :: Word32 <- liftIO $ parseBinary b1 msgLen :: Word32 <- liftIO $ parseBinary b1
b2 <- readNBytes 4 b2 <- readNBytes 4
hdrLen :: Word32 <- liftIO $ parseBinary b2 hdrLen :: Word32 <- liftIO $ parseBinary b2
hs <- parseHeaders hdrLen hs <- parseHeaders hdrLen
let payloadLen = msgLen - hdrLen - 16 let payloadLen = msgLen - hdrLen - 16
getHdrVal h = fmap snd . find ((h ==) . fst) getHdrVal h = fmap snd . headMay . filter ((h ==) . fst)
eventHdrValue = getHdrVal EventType hs eventHdrValue = getHdrVal EventType hs
msgHdrValue = getHdrVal MessageType hs msgHdrValue = getHdrVal MessageType hs
errCode = getHdrVal ErrorCode hs errCode = getHdrVal ErrorCode hs
errMsg = getHdrVal ErrorMessage hs errMsg = getHdrVal ErrorMessage hs
case msgHdrValue of
Just "event" -> do
case eventHdrValue of
Just "Records" -> passThrough $ fromIntegral payloadLen
Just "Cont" -> return ()
Just "Progress" -> do
bs <- readNBytes $ fromIntegral payloadLen
progress <- parseSelectProgress bs
C.yield $ ProgressEventMessage progress
Just "Stats" -> do
bs <- readNBytes $ fromIntegral payloadLen
stats <- parseSelectProgress bs
C.yield $ StatsEventMessage stats
Just "End" -> return ()
_ -> throwIO ESEInvalidMessageType
when (eventHdrValue /= Just "End") handleMessage
Just "error" -> do
let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
_ -> throwIO ESEInvalidMessageType
case msgHdrValue of
Just "event" -> do
case eventHdrValue of
Just "Records" -> passThrough $ fromIntegral payloadLen
Just "Cont" -> return ()
Just "Progress" -> do
bs <- readNBytes $ fromIntegral payloadLen
progress <- parseSelectProgress bs
C.yield $ ProgressEventMessage progress
Just "Stats" -> do
bs <- readNBytes $ fromIntegral payloadLen
stats <- parseSelectProgress bs
C.yield $ StatsEventMessage stats
Just "End" -> return ()
_ -> throwIO ESEInvalidMessageType
when (eventHdrValue /= Just "End") handleMessage
Just "error" -> do
let reqMsgMay = RequestLevelErrorMessage <$> errCode <*> errMsg
maybe (throwIO ESEInvalidMessageType) C.yield reqMsgMay
_ -> throwIO ESEInvalidMessageType
where where
passThrough 0 = return () passThrough 0 = return ()
passThrough n = do passThrough n = do
let c = min n chunkSize let c = min n chunkSize
b <- readNBytes c b <- readNBytes c
C.yield $ RecordPayloadEventMessage b C.yield $ RecordPayloadEventMessage b
passThrough $ n - B.length b passThrough $ n - B.length b
selectProtoConduit ::
(MonadUnliftIO m) => selectProtoConduit :: MonadUnliftIO m
C.ConduitT ByteString EventMessage m () => C.ConduitT ByteString EventMessage m ()
selectProtoConduit = crcCheck .| handleMessage selectProtoConduit = crcCheck .| handleMessage
-- | selectObjectContent calls the SelectRequest on the given -- | selectObjectContent calls the SelectRequest on the given
-- object. It returns a Conduit of event messages that can be consumed -- object. It returns a Conduit of event messages that can be consumed
-- by the client. -- by the client.
selectObjectContent :: selectObjectContent :: Bucket -> Object -> SelectRequest
Bucket -> -> Minio (C.ConduitT () EventMessage Minio ())
Object ->
SelectRequest ->
Minio (C.ConduitT () EventMessage Minio ())
selectObjectContent b o r = do selectObjectContent b o r = do
let reqInfo = let reqInfo = defaultS3ReqInfo { riMethod = HT.methodPost
defaultS3ReqInfo , riBucket = Just b
{ riMethod = HT.methodPost, , riObject = Just o
riBucket = Just b, , riPayload = PayloadBS $ mkSelectRequest r
riObject = Just o, , riNeedsLocation = False
riPayload = PayloadBS $ mkSelectRequest r, , riQueryParams = [("select", Nothing), ("select-type", Just "2")]
riNeedsLocation = False, }
riQueryParams = [("select", Nothing), ("select-type", Just "2")] --print $ mkSelectRequest r
} resp <- mkStreamRequest reqInfo
-- print $ mkSelectRequest r return $ NC.responseBody resp .| selectProtoConduit
resp <- mkStreamRequest reqInfo
return $ NC.responseBody resp .| selectProtoConduit
-- | A helper conduit that returns only the record payload bytes. -- | A helper conduit that returns only the record payload bytes.
getPayloadBytes :: (MonadIO m) => C.ConduitT EventMessage ByteString m () getPayloadBytes :: MonadIO m => C.ConduitT EventMessage ByteString m ()
getPayloadBytes = do getPayloadBytes = do
evM <- C.await evM <- C.await
case evM of case evM of
Just v -> do Just v -> do
case v of case v of
RecordPayloadEventMessage b -> C.yield b RecordPayloadEventMessage b -> C.yield b
RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m RequestLevelErrorMessage c m -> liftIO $ throwIO $ SelectErr c m
_ -> return () _ -> return ()
getPayloadBytes getPayloadBytes
Nothing -> return () Nothing -> return ()

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -13,94 +13,88 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE BangPatterns #-}
module Network.Minio.Sign.V4 module Network.Minio.Sign.V4 where
( SignParams (..),
signV4QueryParams,
signV4,
signV4PostPolicy,
signV4Stream,
Service (..),
credentialScope,
)
where
import qualified Conduit as C import qualified Conduit as C
import qualified Data.ByteArray as BA import qualified Data.ByteString as B
import qualified Data.ByteString as B import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Base64 as Base64 import qualified Data.ByteString.Char8 as B8
import qualified Data.ByteString.Char8 as B8 import Data.CaseInsensitive (mk)
import qualified Data.ByteString.Lazy as LB import qualified Data.CaseInsensitive as CI
import Data.CaseInsensitive (mk) import qualified Data.HashMap.Strict as Map
import qualified Data.CaseInsensitive as CI import qualified Data.HashSet as Set
import qualified Data.HashMap.Strict as Map import qualified Data.Time as Time
import qualified Data.HashSet as Set import qualified Network.HTTP.Conduit as NC
import Data.List (partition) import Network.HTTP.Types (Header, parseQuery)
import qualified Data.List.NonEmpty as NE import qualified Network.HTTP.Types as H
import qualified Data.Time as Time import Text.Printf (printf)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC import Lib.Prelude
import Network.HTTP.Types (Header, SimpleQuery, hContentEncoding, parseQuery)
import qualified Network.HTTP.Types as H import Network.Minio.Data.ByteString
import Network.HTTP.Types.Header (RequestHeaders) import Network.Minio.Data.Crypto
import Network.Minio.Data.ByteString import Network.Minio.Data.Time
import Network.Minio.Data.Crypto import Network.Minio.Errors
import Network.Minio.Data.Time
import Network.Minio.Errors
import Text.Printf (printf)
-- these headers are not included in the string to sign when signing a -- these headers are not included in the string to sign when signing a
-- request -- request
ignoredHeaders :: Set.HashSet ByteString ignoredHeaders :: Set.HashSet ByteString
ignoredHeaders = ignoredHeaders = Set.fromList $ map CI.foldedCase
Set.fromList $ [ H.hAuthorization
map , H.hContentType
CI.foldedCase , H.hUserAgent
[ H.hAuthorization, ]
H.hContentType,
H.hUserAgent
]
data Service = ServiceS3 | ServiceSTS data SignV4Data = SignV4Data {
deriving stock (Eq, Show) sv4SignTime :: UTCTime
, sv4Scope :: ByteString
, sv4CanonicalRequest :: ByteString
, sv4HeadersToSign :: [(ByteString, ByteString)]
, sv4Output :: [(ByteString, ByteString)]
, sv4StringToSign :: ByteString
, sv4SigningKey :: ByteString
} deriving (Show)
toByteString :: Service -> ByteString data SignParams = SignParams {
toByteString ServiceS3 = "s3" spAccessKey :: Text
toByteString ServiceSTS = "sts" , spSecretKey :: Text
, spTimeStamp :: UTCTime
, spRegion :: Maybe Text
, spExpirySecs :: Maybe Int
, spPayloadHash :: Maybe ByteString
} deriving (Show)
data SignParams = SignParams debugPrintSignV4Data :: SignV4Data -> IO ()
{ spAccessKey :: Text, debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
spSecretKey :: BA.ScrubbedBytes, B8.putStrLn "SignV4Data:"
spSessionToken :: Maybe BA.ScrubbedBytes, B8.putStr "Timestamp: " >> print t
spService :: Service, B8.putStr "Scope: " >> B8.putStrLn s
spTimeStamp :: UTCTime, B8.putStrLn "Canonical Request:"
spRegion :: Maybe Text, B8.putStrLn cr
spExpirySecs :: Maybe UrlExpiry, B8.putStr "Headers to Sign: " >> print h2s
spPayloadHash :: Maybe ByteString B8.putStr "Output: " >> print o
} B8.putStr "StringToSign: " >> B8.putStrLn sts
deriving stock (Show) B8.putStr "SigningKey: " >> printBytes sk
B8.putStrLn "END of SignV4Data ========="
where
printBytes b = do
mapM_ (\x -> B.putStr $ B.concat [show x, " "]) $ B.unpack b
B8.putStrLn ""
mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header
mkAuthHeader accessKey scope signedHeaderKeys sign = mkAuthHeader accessKey scope signedHeaderKeys sign =
let authValue = let authValue = B.concat
B.concat [ "AWS4-HMAC-SHA256 Credential="
[ "AWS4-HMAC-SHA256 Credential=", , toS accessKey
encodeUtf8 accessKey, , "/"
"/", , scope
scope, , ", SignedHeaders="
", SignedHeaders=", , signedHeaderKeys
signedHeaderKeys, , ", Signature="
", Signature=", , sign
sign ]
] in (H.hAuthorization, authValue)
in (H.hAuthorization, authValue)
data IsStreaming = IsStreamingLength Int64 | NotStreaming
deriving stock (Eq, Show)
amzSecurityToken :: ByteString
amzSecurityToken = "X-Amz-Security-Token"
-- | Given SignParams and request details, including request method, -- | Given SignParams and request details, including request method,
-- request path, headers, query params and payload hash, generates an -- request path, headers, query params and payload hash, generates an
@ -114,212 +108,124 @@ amzSecurityToken = "X-Amz-Security-Token"
-- is being created. The expiry is interpreted as an integer number of -- is being created. The expiry is interpreted as an integer number of
-- seconds. The output will be the list of query-parameters to add to -- seconds. The output will be the list of query-parameters to add to
-- the request. -- the request.
signV4QueryParams :: SignParams -> NC.Request -> SimpleQuery signV4 :: SignParams -> NC.Request -> [(ByteString, ByteString)]
signV4QueryParams !sp !req =
let scope = credentialScope sp
expiry = spExpirySecs sp
headersToSign = getHeadersToSign $ NC.requestHeaders req
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
-- query-parameters to be added before signing for presigned URLs
-- (i.e. when `isJust expiry`)
authQP =
[ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
("X-Amz-Credential", B.concat [encodeUtf8 $ spAccessKey sp, "/", scope]),
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
("X-Amz-Expires", maybe "" showBS expiry),
("X-Amz-SignedHeaders", signedHeaderKeys)
]
++ maybeToList ((amzSecurityToken,) . BA.convert <$> spSessionToken sp)
finalQP =
parseQuery (NC.queryString req)
++ if isJust expiry
then (fmap . fmap) Just authQP
else []
-- 1. compute canonical request
canonicalRequest =
mkCanonicalRequest
False
sp
(NC.setQueryString finalQP req)
headersToSign
-- 2. compute string to sign
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
-- 3.1 compute signing key
signingKey = getSigningKey sp
-- 3.2 compute signature
signature = computeSignature stringToSign signingKey
in ("X-Amz-Signature", signature) : authQP
-- | Given SignParams and request details, including request method, request
-- path, headers, query params and payload hash, generates an updated set of
-- headers, including the x-amz-date header and the Authorization header, which
-- includes the signature.
--
-- The output is the list of headers to be added to authenticate the request.
signV4 :: SignParams -> NC.Request -> [Header]
signV4 !sp !req = signV4 !sp !req =
let scope = credentialScope sp let
region = fromMaybe "" $ spRegion sp
ts = spTimeStamp sp
scope = mkScope ts region
accessKey = toS $ spAccessKey sp
secretKey = toS $ spSecretKey sp
expiry = spExpirySecs sp
-- extra headers to be added for signing purposes. -- headers to be added to the request
extraHeaders = datePair = ("X-Amz-Date", awsTimeFormatBS ts)
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp) computedHeaders = NC.requestHeaders req ++
: ( -- payload hash is only used for S3 (not STS) if isJust $ expiry
[ ( "x-amz-content-sha256", then []
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp else [(\(x, y) -> (mk x, y)) datePair]
) headersToSign = getHeadersToSign computedHeaders
| spService sp == ServiceS3 signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
]
)
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
-- 1. compute canonical request -- query-parameters to be added before signing for presigned URLs
reqHeaders = NC.requestHeaders req ++ extraHeaders -- (i.e. when `isJust expiry`)
(canonicalRequest, signedHeaderKeys) = authQP = [ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
getCanonicalRequestAndSignedHeaders , ("X-Amz-Credential", B.concat [accessKey, "/", scope])
NotStreaming , datePair
sp , ("X-Amz-Expires", maybe "" show expiry)
req , ("X-Amz-SignedHeaders", signedHeaderKeys)
reqHeaders ]
finalQP = parseQuery (NC.queryString req) ++
if isJust expiry
then (fmap . fmap) Just authQP
else []
-- 2. compute string to sign -- 1. compute canonical request
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest canonicalRequest = mkCanonicalRequest False sp
-- 3.1 compute signing key (NC.setQueryString finalQP req)
signingKey = getSigningKey sp headersToSign
-- 3.2 compute signature
signature = computeSignature stringToSign signingKey
-- 4. compute auth header
authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
in authHeader : extraHeaders
credentialScope :: SignParams -> ByteString -- 2. compute string to sign
credentialScope sp = stringToSign = mkStringToSign ts scope canonicalRequest
let region = fromMaybe "" $ spRegion sp
in B.intercalate -- 3.1 compute signing key
"/" signingKey = mkSigningKey ts region secretKey
[ encodeUtf8 $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" $ spTimeStamp sp,
encodeUtf8 region, -- 3.2 compute signature
toByteString $ spService sp, signature = computeSignature stringToSign signingKey
"aws4_request"
] -- 4. compute auth header
authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
-- finally compute output pairs
sha256Hdr = ("x-amz-content-sha256",
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp)
output = if isJust expiry
then ("X-Amz-Signature", signature) : authQP
else [(\(x, y) -> (CI.foldedCase x, y)) authHeader,
datePair, sha256Hdr]
in output
mkScope :: UTCTime -> Text -> ByteString
mkScope ts region = B.intercalate "/"
[ toS $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" ts
, toS region
, "s3"
, "aws4_request"
]
-- Folds header name, trims whitespace in header values, skips ignored headers
-- and sorts headers.
getHeadersToSign :: [Header] -> [(ByteString, ByteString)] getHeadersToSign :: [Header] -> [(ByteString, ByteString)]
getHeadersToSign !h = getHeadersToSign !h =
filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $ filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
map (bimap CI.foldedCase stripBS) h map (\(x, y) -> (CI.foldedCase x, stripBS y)) h
-- | Given the list of headers in the request, computes the canonical headers mkCanonicalRequest :: Bool -> SignParams -> NC.Request -> [(ByteString, ByteString)]
-- and the signed headers strings. -> ByteString
getCanonicalHeaders :: NonEmpty Header -> (ByteString, ByteString)
getCanonicalHeaders h =
let -- Folds header name, trims spaces in header values, skips ignored
-- headers and sorts headers by name (we must not re-order multi-valued
-- headers).
headersToSign =
NE.toList $
NE.sortBy (\a b -> compare (fst a) (fst b)) $
NE.fromList $
NE.filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
NE.map (bimap CI.foldedCase stripBS) h
canonicalHeaders = mconcat $ map (\(a, b) -> a <> ":" <> b <> "\n") headersToSign
signedHeaderKeys = B.intercalate ";" $ map fst headersToSign
in (canonicalHeaders, signedHeaderKeys)
getCanonicalRequestAndSignedHeaders ::
IsStreaming ->
SignParams ->
NC.Request ->
[Header] ->
(ByteString, ByteString)
getCanonicalRequestAndSignedHeaders isStreaming sp req requestHeaders =
let httpMethod = NC.method req
canonicalUri = uriEncode False $ NC.path req
canonicalQueryString =
B.intercalate "&" $
map (\(x, y) -> B.concat [x, "=", y]) $
sort $
map
( bimap (uriEncode True) (maybe "" (uriEncode True))
)
(parseQuery $ NC.queryString req)
(canonicalHeaders, signedHeaderKeys) = getCanonicalHeaders $ NE.fromList requestHeaders
payloadHashStr =
case isStreaming of
IsStreamingLength _ -> "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
NotStreaming -> fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
canonicalRequest =
B.intercalate
"\n"
[ httpMethod,
canonicalUri,
canonicalQueryString,
canonicalHeaders,
signedHeaderKeys,
payloadHashStr
]
in (canonicalRequest, signedHeaderKeys)
mkCanonicalRequest ::
Bool ->
SignParams ->
NC.Request ->
[(ByteString, ByteString)] ->
ByteString
mkCanonicalRequest !isStreaming !sp !req !headersForSign = mkCanonicalRequest !isStreaming !sp !req !headersForSign =
let httpMethod = NC.method req let
canonicalUri = uriEncode False $ NC.path req canonicalQueryString = B.intercalate "&" $
canonicalQueryString = map (\(x, y) -> B.concat [x, "=", y]) $
B.intercalate "&" $ sort $ map (\(x, y) ->
map (\(x, y) -> B.concat [x, "=", y]) $ (uriEncode True x, maybe "" (uriEncode True) y)) $
sortBy (\a b -> compare (fst a) (fst b)) $ (parseQuery $ NC.queryString req)
map
( bimap (uriEncode True) (maybe "" (uriEncode True))
)
(parseQuery $ NC.queryString req)
sortedHeaders = sort headersForSign
canonicalHeaders =
B.concat $
map (\(x, y) -> B.concat [x, ":", y, "\n"]) sortedHeaders
signedHeaders = B.intercalate ";" $ map fst sortedHeaders
payloadHashStr =
if isStreaming
then "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
in B.intercalate
"\n"
[ httpMethod,
canonicalUri,
canonicalQueryString,
canonicalHeaders,
signedHeaders,
payloadHashStr
]
mkStringToSign :: UTCTime -> ByteString -> ByteString -> ByteString sortedHeaders = sort headersForSign
mkStringToSign ts !scope !canonicalRequest =
B.intercalate canonicalHeaders = B.concat $
"\n" map (\(x, y) -> B.concat [x, ":", y, "\n"]) sortedHeaders
[ "AWS4-HMAC-SHA256",
awsTimeFormatBS ts, signedHeaders = B.intercalate ";" $ map fst sortedHeaders
scope,
hashSHA256 canonicalRequest payloadHashStr =
if isStreaming
then "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
in
B.intercalate "\n"
[ NC.method req
, uriEncode False $ NC.path req
, canonicalQueryString
, canonicalHeaders
, signedHeaders
, payloadHashStr
] ]
getSigningKey :: SignParams -> ByteString mkStringToSign :: UTCTime -> ByteString -> ByteString -> ByteString
getSigningKey sp = mkStringToSign ts !scope !canonicalRequest = B.intercalate "\n"
hmacSHA256RawBS "aws4_request" [ "AWS4-HMAC-SHA256"
. hmacSHA256RawBS (toByteString $ spService sp) , awsTimeFormatBS ts
. hmacSHA256RawBS (encodeUtf8 $ fromMaybe "" $ spRegion sp) , scope
. hmacSHA256RawBS (awsDateFormatBS $ spTimeStamp sp) , hashSHA256 canonicalRequest
$ B.concat ["AWS4", BA.convert $ spSecretKey sp] ]
mkSigningKey :: UTCTime -> Text -> ByteString -> ByteString
mkSigningKey ts region !secretKey = hmacSHA256RawBS "aws4_request"
. hmacSHA256RawBS "s3"
. hmacSHA256RawBS (toS region)
. hmacSHA256RawBS (awsDateFormatBS ts)
$ B.concat ["AWS4", secretKey]
computeSignature :: ByteString -> ByteString -> ByteString computeSignature :: ByteString -> ByteString -> ByteString
computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
@ -327,168 +233,159 @@ computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
-- | Takes a validated Post Policy JSON bytestring, the signing time, -- | Takes a validated Post Policy JSON bytestring, the signing time,
-- and ConnInfo and returns form-data for the POST upload containing -- and ConnInfo and returns form-data for the POST upload containing
-- just the signature and the encoded post-policy. -- just the signature and the encoded post-policy.
signV4PostPolicy :: signV4PostPolicy :: ByteString -> SignParams
ByteString -> -> Map.HashMap Text ByteString
SignParams ->
Map.HashMap Text ByteString
signV4PostPolicy !postPolicyJSON !sp = signV4PostPolicy !postPolicyJSON !sp =
let stringToSign = Base64.encode postPolicyJSON let
signingKey = getSigningKey sp stringToSign = Base64.encode postPolicyJSON
signature = computeSignature stringToSign signingKey region = fromMaybe "" $ spRegion sp
in Map.fromList $ signingKey = mkSigningKey (spTimeStamp sp) region $ toS $ spSecretKey sp
[ ("x-amz-signature", signature), signature = computeSignature stringToSign signingKey
("policy", stringToSign) in
] Map.fromList [ ("x-amz-signature", signature)
++ maybeToList ((decodeUtf8 amzSecurityToken,) . BA.convert <$> spSessionToken sp) , ("policy", stringToSign)
]
chunkSizeConstant :: Int chunkSizeConstant :: Int
chunkSizeConstant = 64 * 1024 chunkSizeConstant = 64 * 1024
-- base16Len computes the number of bytes required to represent @n (> 0)@ in -- base16Len computes the number of bytes required to represent @n (> 0)@ in
-- hexadecimal. -- hexadecimal.
base16Len :: (Integral a) => a -> Int base16Len :: Integral a => a -> Int
base16Len n base16Len n | n == 0 = 0
| n == 0 = 0 | otherwise = 1 + base16Len (n `div` 16)
| otherwise = 1 + base16Len (n `div` 16)
signedStreamLength :: Int64 -> Int64 signedStreamLength :: Int64 -> Int64
signedStreamLength dataLen = signedStreamLength dataLen =
let chunkSzInt = fromIntegral chunkSizeConstant let
(numChunks, lastChunkLen) = quotRem dataLen chunkSzInt chunkSzInt = fromIntegral chunkSizeConstant
-- Structure of a chunk: (numChunks, lastChunkLen) = quotRem dataLen chunkSzInt
-- string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
encodedChunkLen csz = fromIntegral (base16Len csz) + 17 + 64 + 2 + csz + 2
fullChunkSize = encodedChunkLen chunkSzInt
lastChunkSize = bool 0 (encodedChunkLen lastChunkLen) $ lastChunkLen > 0
finalChunkSize = 1 + 17 + 64 + 2 + 2
in numChunks * fullChunkSize + lastChunkSize + finalChunkSize
-- For streaming S3, we need to update the content-encoding header.
addContentEncoding :: [Header] -> [Header]
addContentEncoding hs =
-- assume there is at most one content-encoding header.
let (ceHdrs, others) = partition ((== hContentEncoding) . fst) hs
in maybe
(hContentEncoding, "aws-chunked")
(\(k, v) -> (k, v <> ",aws-chunked"))
(listToMaybe ceHdrs)
: others
signV4Stream :: -- Structure of a chunk:
Int64 -> -- string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
SignParams -> encodedChunkLen csz = fromIntegral (base16Len csz) + 17 + 64 + 2 + csz + 2
NC.Request -> fullChunkSize = encodedChunkLen chunkSzInt
(C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request) lastChunkSize = bool 0 (encodedChunkLen lastChunkLen) $ lastChunkLen > 0
finalChunkSize = 1 + 17 + 64 + 2 + 2
in
numChunks * fullChunkSize + lastChunkSize + finalChunkSize
signV4Stream :: Int64 -> SignParams -> NC.Request
-> (C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request)
-- -> ([Header], C.ConduitT () ByteString (C.ResourceT IO) () -> NC.RequestBody)
signV4Stream !payloadLength !sp !req = signV4Stream !payloadLength !sp !req =
let ts = spTimeStamp sp let
ts = spTimeStamp sp
-- compute the updated list of headers to be added for signing purposes. addContentEncoding hs =
signedContentLength = signedStreamLength payloadLength let ceMay = headMay $ filter (\(x, _) -> x == "content-encoding") hs
extraHeaders = in case ceMay of
[ ("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp), Nothing -> ("content-encoding", "aws-chunked") : hs
("x-amz-decoded-content-length", showBS payloadLength), Just (_, ce) -> ("content-encoding", ce <> ",aws-chunked") :
("content-length", showBS signedContentLength), filter (\(x, _) -> x /= "content-encoding") hs
("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
-- headers to be added to the request
datePair = ("X-Amz-Date", awsTimeFormatBS ts)
computedHeaders = addContentEncoding $
datePair : NC.requestHeaders req
-- headers specific to streaming signature
signedContentLength = signedStreamLength payloadLength
streamingHeaders :: [Header]
streamingHeaders =
[ ("x-amz-decoded-content-length", show payloadLength)
, ("content-length", show signedContentLength )
, ("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
] ]
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp) headersToSign = getHeadersToSign $ computedHeaders ++ streamingHeaders
requestHeaders = signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
addContentEncoding $ finalQP = parseQuery (NC.queryString req)
foldr setHeader (NC.requestHeaders req) extraHeaders
-- 1. Compute Seed Signature -- 1. Compute Seed Signature
-- 1.1 Canonical Request -- 1.1 Canonical Request
(canonicalReq, signedHeaderKeys) = canonicalReq = mkCanonicalRequest True sp
getCanonicalRequestAndSignedHeaders (NC.setQueryString finalQP req)
(IsStreamingLength payloadLength) headersToSign
sp
req
requestHeaders
scope = credentialScope sp region = fromMaybe "" $ spRegion sp
accessKey = spAccessKey sp scope = mkScope ts region
-- 1.2 String toSign accessKey = spAccessKey sp
stringToSign = mkStringToSign ts scope canonicalReq secretKey = spSecretKey sp
-- 1.3 Compute signature
-- 1.3.1 compute signing key
signingKey = getSigningKey sp
-- 1.3.2 Compute signature
seedSignature = computeSignature stringToSign signingKey
-- 1.3.3 Compute Auth Header
authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
-- 1.4 Updated headers for the request
finalReqHeaders = authHeader : requestHeaders
-- headersToAdd = authHeader : datePair : streamingHeaders
toHexStr n = B8.pack $ printf "%x" n -- 1.2 String toSign
(numParts, lastPSize) = payloadLength `quotRem` fromIntegral chunkSizeConstant stringToSign = mkStringToSign ts scope canonicalReq
-- Function to compute string to sign for each chunk.
chunkStrToSign prevSign currChunkHash = -- 1.3 Compute signature
B.intercalate -- 1.3.1 compute signing key
"\n" signingKey = mkSigningKey ts region $ toS secretKey
[ "AWS4-HMAC-SHA256-PAYLOAD",
awsTimeFormatBS ts, -- 1.3.2 Compute signature
scope, seedSignature = computeSignature stringToSign signingKey
prevSign,
hashSHA256 "", -- 1.3.3 Compute Auth Header
currChunkHash authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
]
-- Read n byte from upstream and return a strict bytestring. -- 1.4 Updated headers for the request
mustTakeN n = do finalReqHeaders = authHeader : (computedHeaders ++ streamingHeaders)
bs <- LB.toStrict <$> (C.takeCE n C..| C.sinkLazy) -- headersToAdd = authHeader : datePair : streamingHeaders
toHexStr n = B8.pack $ printf "%x" n
(numParts, lastPSize) = payloadLength `quotRem` fromIntegral chunkSizeConstant
-- Function to compute string to sign for each chunk.
chunkStrToSign prevSign currChunkHash =
B.intercalate "\n"
[ "AWS4-HMAC-SHA256-PAYLOAD"
, awsTimeFormatBS ts
, scope
, prevSign
, hashSHA256 ""
, currChunkHash
]
-- Read n byte from upstream and return a strict bytestring.
mustTakeN n = do
bs <- toS <$> (C.takeCE n C..| C.sinkLazy)
when (B.length bs /= n) $ when (B.length bs /= n) $
throwIO MErrVStreamingBodyUnexpectedEOF throwIO MErrVStreamingBodyUnexpectedEOF
return bs return bs
signerConduit n lps prevSign =
-- First case encodes a full chunk of length
-- 'chunkSizeConstant'.
if
| n > 0 -> do
bs <- mustTakeN chunkSizeConstant
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
nextSign = computeSignature strToSign signingKey
chunkBS =
toHexStr chunkSizeConstant
<> ";chunk-signature="
<> nextSign
<> "\r\n"
<> bs
<> "\r\n"
C.yield chunkBS
signerConduit (n - 1) lps nextSign
-- Second case encodes the last chunk which is smaller than signerConduit n lps prevSign =
-- 'chunkSizeConstant' -- First case encodes a full chunk of length
| lps > 0 -> do -- 'chunkSizeConstant'.
bs <- mustTakeN $ fromIntegral lps if | n > 0 -> do
let strToSign = chunkStrToSign prevSign (hashSHA256 bs) bs <- mustTakeN chunkSizeConstant
nextSign = computeSignature strToSign signingKey let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
chunkBS = nextSign = computeSignature strToSign signingKey
toHexStr lps chunkBS = toHexStr chunkSizeConstant
<> ";chunk-signature=" <> ";chunk-signature="
<> nextSign <> nextSign <> "\r\n" <> bs <> "\r\n"
<> "\r\n" C.yield chunkBS
<> bs signerConduit (n-1) lps nextSign
<> "\r\n"
C.yield chunkBS
signerConduit 0 0 nextSign
-- Last case encodes the final signature chunk that has no -- Second case encodes the last chunk which is smaller than
-- data. -- 'chunkSizeConstant'
| otherwise -> do | lps > 0 -> do
let strToSign = chunkStrToSign prevSign (hashSHA256 "") bs <- mustTakeN $ fromIntegral lps
nextSign = computeSignature strToSign signingKey let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n" nextSign = computeSignature strToSign signingKey
C.yield lastChunkBS chunkBS = toHexStr lps <> ";chunk-signature="
in \src -> <> nextSign <> "\r\n" <> bs <> "\r\n"
req C.yield chunkBS
{ NC.requestHeaders = finalReqHeaders, signerConduit 0 0 nextSign
NC.requestBody =
NC.requestBodySource signedContentLength $
src C..| signerConduit numParts lastPSize seedSignature
}
-- "setHeader r hdr" adds the hdr to r, replacing it in r if it already exists. -- Last case encodes the final signature chunk that has no
setHeader :: Header -> RequestHeaders -> RequestHeaders -- data.
setHeader hdr r = | otherwise -> do
let r' = filter (\(name, _) -> name /= fst hdr) r let strToSign = chunkStrToSign prevSign (hashSHA256 "")
in hdr : r' nextSign = computeSignature strToSign signingKey
lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n"
C.yield lastChunkBS
in
\src -> req { NC.requestHeaders = finalReqHeaders
, NC.requestBody =
NC.requestBodySource signedContentLength $
src C..| signerConduit numParts lastPSize seedSignature
}

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -16,77 +16,70 @@
module Network.Minio.Utils where module Network.Minio.Utils where
import qualified Conduit as C import qualified Conduit as C
import Control.Monad.IO.Unlift (MonadUnliftIO) import Control.Monad.IO.Unlift (MonadUnliftIO)
import qualified Control.Monad.Trans.Resource as R import qualified Control.Monad.Trans.Resource as R
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original) import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.Text as T import qualified Data.List as List
import Data.Text.Read (decimal) import qualified Data.Text as T
import Data.Time import Data.Text.Read (decimal)
( defaultTimeLocale, import Data.Time (defaultTimeLocale, parseTimeM,
parseTimeM, rfc822DateFormat)
rfc822DateFormat, import Network.HTTP.Conduit (Response)
) import qualified Network.HTTP.Conduit as NC
import Lib.Prelude import qualified Network.HTTP.Types as HT
import Network.HTTP.Conduit (Response) import qualified Network.HTTP.Types.Header as Hdr
import qualified Network.HTTP.Conduit as NC import qualified System.IO as IO
import qualified Network.HTTP.Types as HT import qualified UnliftIO as U
import qualified Network.HTTP.Types.Header as Hdr import qualified UnliftIO.Async as A
import Network.Minio.Data.ByteString import qualified UnliftIO.MVar as UM
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlCommon (parseErrResponse)
import qualified System.IO as IO
import qualified UnliftIO as U
import qualified UnliftIO.Async as A
allocateReadFile :: import Lib.Prelude
(MonadUnliftIO m, R.MonadResource m) =>
FilePath -> import Network.Minio.Data
m (R.ReleaseKey, Handle) import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse)
allocateReadFile :: (MonadUnliftIO m, R.MonadResource m)
=> FilePath -> m (R.ReleaseKey, Handle)
allocateReadFile fp = do allocateReadFile fp = do
(rk, hdlE) <- R.allocate (openReadFile fp) cleanup (rk, hdlE) <- R.allocate (openReadFile fp) cleanup
either (\(e :: U.IOException) -> throwIO e) (return . (rk,)) hdlE either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE
where where
openReadFile f = U.try $ IO.openBinaryFile f IO.ReadMode openReadFile f = U.try $ IO.openBinaryFile f IO.ReadMode
cleanup = either (const $ return ()) IO.hClose cleanup = either (const $ return ()) IO.hClose
-- | Queries the file size from the handle. Catches any file operation -- | Queries the file size from the handle. Catches any file operation
-- exceptions and returns Nothing instead. -- exceptions and returns Nothing instead.
getFileSize :: getFileSize :: (MonadUnliftIO m, R.MonadResource m)
(MonadUnliftIO m) => => Handle -> m (Maybe Int64)
Handle ->
m (Maybe Int64)
getFileSize h = do getFileSize h = do
resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
case resE of case resE of
Left (_ :: U.IOException) -> return Nothing Left (_ :: IOException) -> return Nothing
Right s -> return $ Just s Right s -> return $ Just s
-- | Queries if handle is seekable. Catches any file operation -- | Queries if handle is seekable. Catches any file operation
-- exceptions and return False instead. -- exceptions and return False instead.
isHandleSeekable :: isHandleSeekable :: (R.MonadResource m, MonadUnliftIO m)
(R.MonadResource m) => => Handle -> m Bool
Handle ->
m Bool
isHandleSeekable h = do isHandleSeekable h = do
resE <- liftIO $ try $ IO.hIsSeekable h resE <- liftIO $ try $ IO.hIsSeekable h
case resE of case resE of
Left (_ :: U.IOException) -> return False Left (_ :: IOException) -> return False
Right v -> return v Right v -> return v
-- | Helper function that opens a handle to the filepath and performs -- | Helper function that opens a handle to the filepath and performs
-- the given action on it. Exceptions of type MError are caught and -- the given action on it. Exceptions of type MError are caught and
-- returned - both during file handle allocation and when the action -- returned - both during file handle allocation and when the action
-- is run. -- is run.
withNewHandle :: withNewHandle :: (MonadUnliftIO m, R.MonadResource m)
(MonadUnliftIO m, R.MonadResource m) => => FilePath -> (Handle -> m a) -> m (Either IOException a)
FilePath ->
(Handle -> m a) ->
m (Either U.IOException a)
withNewHandle fp fileAction = do withNewHandle fp fileAction = do
-- opening a handle can throw MError exception. -- opening a handle can throw MError exception.
handleE <- try $ allocateReadFile fp handleE <- try $ allocateReadFile fp
@ -100,61 +93,34 @@ withNewHandle fp fileAction = do
return resE return resE
mkHeaderFromPairs :: [(ByteString, ByteString)] -> [HT.Header] mkHeaderFromPairs :: [(ByteString, ByteString)] -> [HT.Header]
mkHeaderFromPairs = map (first mk) mkHeaderFromPairs = map ((\(x, y) -> (mk x, y)))
lookupHeader :: HT.HeaderName -> [HT.Header] -> Maybe ByteString lookupHeader :: HT.HeaderName -> [HT.Header] -> Maybe ByteString
lookupHeader hdr = listToMaybe . map snd . filter (\(h, _) -> h == hdr) lookupHeader hdr = headMay . map snd . filter (\(h, _) -> h == hdr)
getETagHeader :: [HT.Header] -> Maybe Text getETagHeader :: [HT.Header] -> Maybe Text
getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs
getMetadata :: [HT.Header] -> [(Text, Text)] getMetadata :: [HT.Header] -> [(Text, Text)]
getMetadata = getMetadata =
map (\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y)) map ((\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y)))
-- | If the given header name has the @X-Amz-Meta-@ prefix, it is
-- stripped and a Just is returned.
userMetadataHeaderNameMaybe :: Text -> Maybe Text
userMetadataHeaderNameMaybe k =
let prefix = T.toCaseFold "X-Amz-Meta-"
n = T.length prefix
in if T.toCaseFold (T.take n k) == prefix
then Just (T.drop n k)
else Nothing
toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text) toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text)
toMaybeMetadataHeader (k, v) = toMaybeMetadataHeader (k, v) =
(,v) <$> userMetadataHeaderNameMaybe k (, v) <$> userMetadataHeaderNameMaybe k
getNonUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text getNonUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getNonUserMetadataMap = getNonUserMetadataMap = H.fromList
H.fromList . filter ( isNothing
. filter . userMetadataHeaderNameMaybe
( isNothing . fst
. userMetadataHeaderNameMaybe )
. fst
)
addXAmzMetaPrefix :: Text -> Text
addXAmzMetaPrefix s
| isJust (userMetadataHeaderNameMaybe s) = s
| otherwise = "X-Amz-Meta-" <> s
mkHeaderFromMetadata :: [(Text, Text)] -> [HT.Header]
mkHeaderFromMetadata = map (\(x, y) -> (mk $ encodeUtf8 $ addXAmzMetaPrefix x, encodeUtf8 y))
-- | This function collects all headers starting with `x-amz-meta-` -- | This function collects all headers starting with `x-amz-meta-`
-- and strips off this prefix, and returns a map. -- and strips off this prefix, and returns a map.
getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
getUserMetadataMap = getUserMetadataMap = H.fromList
H.fromList . mapMaybe toMaybeMetadataHeader
. mapMaybe toMaybeMetadataHeader
getHostHeader :: (ByteString, Int) -> ByteString
getHostHeader (host_, port_) =
if port_ == 80 || port_ == 443
then host_
else host_ <> ":" <> show port_
getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime
getLastModifiedHeader hs = do getLastModifiedHeader hs = do
@ -164,21 +130,19 @@ getLastModifiedHeader hs = do
getContentLength :: [HT.Header] -> Maybe Int64 getContentLength :: [HT.Header] -> Maybe Int64
getContentLength hs = do getContentLength hs = do
nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs
fst <$> either (const Nothing) Just (decimal nbs) fst <$> hush (decimal nbs)
decodeUtf8Lenient :: ByteString -> Text decodeUtf8Lenient :: ByteString -> Text
decodeUtf8Lenient = decodeUtf8With lenientDecode decodeUtf8Lenient = decodeUtf8With lenientDecode
isSuccessStatus :: HT.Status -> Bool isSuccessStatus :: HT.Status -> Bool
isSuccessStatus sts = isSuccessStatus sts = let s = HT.statusCode sts
let s = HT.statusCode sts in (s >= 200 && s < 300)
in (s >= 200 && s < 300)
httpLbs :: httpLbs :: MonadIO m
(MonadIO m) => => NC.Request -> NC.Manager
NC.Request -> -> m (NC.Response LByteString)
NC.Manager ->
m (NC.Response LByteString)
httpLbs req mgr = do httpLbs req mgr = do
respE <- liftIO $ tryHttpEx $ NC.httpLbs req mgr respE <- liftIO $ tryHttpEx $ NC.httpLbs req mgr
resp <- either throwIO return respE resp <- either throwIO return respE
@ -190,26 +154,21 @@ httpLbs req mgr = do
Just "application/json" -> do Just "application/json" -> do
sErr <- parseErrResponseJSON $ NC.responseBody resp sErr <- parseErrResponseJSON $ NC.responseBody resp
throwIO sErr throwIO sErr
_ ->
throwIO $ _ -> throwIO $ NC.HttpExceptionRequest req $
NC.HttpExceptionRequest req $ NC.StatusCodeException (void resp) (show resp)
NC.StatusCodeException (void resp) (showBS resp)
return resp return resp
where where
tryHttpEx :: tryHttpEx :: IO (NC.Response LByteString)
IO (NC.Response LByteString) -> -> IO (Either NC.HttpException (NC.Response LByteString))
IO (Either NC.HttpException (NC.Response LByteString))
tryHttpEx = try tryHttpEx = try
contentTypeMay resp = contentTypeMay resp = lookupHeader Hdr.hContentType $
lookupHeader Hdr.hContentType $ NC.responseHeaders resp
NC.responseHeaders resp
http :: http :: (MonadUnliftIO m, R.MonadResource m)
(MonadUnliftIO m, R.MonadResource m) => => NC.Request -> NC.Manager
NC.Request -> -> m (Response (C.ConduitT () ByteString m ()))
NC.Manager ->
m (Response (C.ConduitT () ByteString m ()))
http req mgr = do http req mgr = do
respE <- tryHttpEx $ NC.http req mgr respE <- tryHttpEx $ NC.http req mgr
resp <- either throwIO return respE resp <- either throwIO return respE
@ -219,31 +178,25 @@ http req mgr = do
respBody <- C.connect (NC.responseBody resp) CB.sinkLbs respBody <- C.connect (NC.responseBody resp) CB.sinkLbs
sErr <- parseErrResponse respBody sErr <- parseErrResponse respBody
throwIO sErr throwIO sErr
_ -> do _ -> do
content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
throwIO $ throwIO $ NC.HttpExceptionRequest req $
NC.HttpExceptionRequest req $ NC.StatusCodeException (void resp) content
NC.StatusCodeException (void resp) content
return resp return resp
where where
tryHttpEx :: tryHttpEx :: (MonadUnliftIO m) => m a
(MonadUnliftIO m) => -> m (Either NC.HttpException a)
m a ->
m (Either NC.HttpException a)
tryHttpEx = try tryHttpEx = try
contentTypeMay resp = contentTypeMay resp = lookupHeader Hdr.hContentType $
lookupHeader Hdr.hContentType $ NC.responseHeaders resp
NC.responseHeaders resp
-- Similar to mapConcurrently but limits the number of threads that -- Similar to mapConcurrently but limits the number of threads that
-- can run using a quantity semaphore. -- can run using a quantity semaphore.
limitedMapConcurrently :: limitedMapConcurrently :: MonadUnliftIO m
(MonadUnliftIO m) => => Int -> (t -> m a) -> [t] -> m [a]
Int ->
(t -> m a) ->
[t] ->
m [a]
limitedMapConcurrently 0 _ _ = return [] limitedMapConcurrently 0 _ _ = return []
limitedMapConcurrently count act args = do limitedMapConcurrently count act args = do
t' <- U.newTVarIO count t' <- U.newTVarIO count
@ -252,15 +205,17 @@ limitedMapConcurrently count act args = do
where where
wThread t arg = wThread t arg =
U.bracket_ (waitSem t) (signalSem t) $ act arg U.bracket_ (waitSem t) (signalSem t) $ act arg
-- quantity semaphore implementation using TVar -- quantity semaphore implementation using TVar
waitSem t = U.atomically $ do waitSem t = U.atomically $ do
v <- U.readTVar t v <- U.readTVar t
if v > 0 if v > 0
then U.writeTVar t (v - 1) then U.writeTVar t (v-1)
else U.retrySTM else U.retrySTM
signalSem t = U.atomically $ do signalSem t = U.atomically $ do
v <- U.readTVar t v <- U.readTVar t
U.writeTVar t (v + 1) U.writeTVar t (v+1)
-- helper function to 'drop' empty optional parameter. -- helper function to 'drop' empty optional parameter.
mkQuery :: Text -> Maybe Text -> Maybe (Text, Text) mkQuery :: Text -> Maybe Text -> Maybe (Text, Text)
@ -269,7 +224,7 @@ mkQuery k mv = (k,) <$> mv
-- helper function to build query parameters that are optional. -- helper function to build query parameters that are optional.
-- don't use it with mandatory query params with empty value. -- don't use it with mandatory query params with empty value.
mkOptionalParams :: [(Text, Maybe Text)] -> HT.Query mkOptionalParams :: [(Text, Maybe Text)] -> HT.Query
mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params
-- | Conduit that rechunks bytestrings into the given chunk -- | Conduit that rechunks bytestrings into the given chunk
-- lengths. Stops after given chunk lengths are yielded. Stops if -- lengths. Stops after given chunk lengths are yielded. Stops if
@ -277,9 +232,41 @@ mkOptionalParams params = HT.toQuery $ uncurry mkQuery <$> params
-- received. Does not throw any errors. -- received. Does not throw any errors.
chunkBSConduit :: (Monad m) => [Int] -> C.ConduitM ByteString ByteString m () chunkBSConduit :: (Monad m) => [Int] -> C.ConduitM ByteString ByteString m ()
chunkBSConduit [] = return () chunkBSConduit [] = return ()
chunkBSConduit (s : ss) = do chunkBSConduit (s:ss) = do
bs <- fmap LB.toStrict $ C.takeCE s C..| C.sinkLazy bs <- fmap LB.toStrict $ C.takeCE s C..| C.sinkLazy
if if | B.length bs == s -> C.yield bs >> chunkBSConduit ss
| B.length bs == s -> C.yield bs >> chunkBSConduit ss | B.length bs > 0 -> C.yield bs
| B.length bs > 0 -> C.yield bs | otherwise -> return ()
| otherwise -> return ()
-- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size = uncurry (List.zip3 [1..]) $
List.unzip $ loop 0 size
where
ceil :: Double -> Int64
ceil = ceiling
partSize = max minPartSize (ceil $ fromIntegral size /
fromIntegral maxMultipartParts)
m = fromIntegral partSize
loop st sz
| st > sz = []
| st + m >= sz = [(st, sz - st)]
| otherwise = (st, m) : loop (st + m) sz
lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do
rMVar <- asks mcRegionMap
rMap <- UM.readMVar rMVar
return $ H.lookup b rMap
addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.insert b region
deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.delete b

View File

@ -1,65 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.XmlCommon where
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time (UTCTime)
import Data.Time.Format.ISO8601 (iso8601ParseM)
import Lib.Prelude (throwIO)
import Network.Minio.Errors
import Text.XML (Name (Name), def, parseLBS)
import Text.XML.Cursor (Axis, Cursor, content, element, fromDocument, laxElement, ($/), (&/))
s3Name :: Text -> Text -> Name
s3Name ns s = Name s (Just ns) Nothing
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
uncurry4 f (a, b, c, d) = f a b c d
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML
parseS3XMLTime :: (MonadIO m) => Text -> m UTCTime
parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
iso8601ParseM $
toString t
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr =
either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot =
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
let code = T.concat $ r $/ laxElement "Code" &/ content
message = T.concat $ r $/ laxElement "Message" &/ content
return $ toServiceErr code message

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -15,112 +15,89 @@
-- --
module Network.Minio.XmlGenerator module Network.Minio.XmlGenerator
( mkCreateBucketConfig, ( mkCreateBucketConfig
mkCompleteMultipartUploadRequest, , mkCompleteMultipartUploadRequest
mkPutNotificationRequest, , mkPutNotificationRequest
mkSelectRequest, , mkSelectRequest
) ) where
where
import qualified Data.ByteString.Lazy as LBS import qualified Data.ByteString.Lazy as LBS
import qualified Data.Text as T import qualified Data.HashMap.Strict as H
import Network.Minio.Data import qualified Data.Text as T
import Network.Minio.XmlCommon import Text.XML
import Text.XML
import Lib.Prelude
import Network.Minio.Data
-- | Create a bucketConfig request body XML -- | Create a bucketConfig request body XML
mkCreateBucketConfig :: Text -> Region -> ByteString mkCreateBucketConfig :: Text -> Region -> ByteString
mkCreateBucketConfig ns location = LBS.toStrict $ renderLBS def bucketConfig mkCreateBucketConfig ns location = LBS.toStrict $ renderLBS def bucketConfig
where where
s3Element n = Element (s3Name ns n) mempty s3Element n = Element (s3Name ns n) mempty
root = root = s3Element "CreateBucketConfiguration"
s3Element [ NodeElement $ s3Element "LocationConstraint"
"CreateBucketConfiguration" [ NodeContent location]
[ NodeElement $
s3Element
"LocationConstraint"
[NodeContent location]
] ]
bucketConfig = Document (Prologue [] Nothing []) root [] bucketConfig = Document (Prologue [] Nothing []) root []
-- | Create a completeMultipartUpload request body XML -- | Create a completeMultipartUpload request body XML
mkCompleteMultipartUploadRequest :: [PartTuple] -> ByteString mkCompleteMultipartUploadRequest :: [PartTuple] -> ByteString
mkCompleteMultipartUploadRequest partInfo = mkCompleteMultipartUploadRequest partInfo =
LBS.toStrict $ renderLBS def cmur LBS.toStrict $ renderLBS def cmur
where where
root = root = Element "CompleteMultipartUpload" mempty $
Element "CompleteMultipartUpload" mempty $ map (NodeElement . mkPart) partInfo
map (NodeElement . mkPart) partInfo mkPart (n, etag) = Element "Part" mempty
mkPart (n, etag) = [ NodeElement $ Element "PartNumber" mempty
Element [NodeContent $ T.pack $ show n]
"Part" , NodeElement $ Element "ETag" mempty
mempty [NodeContent etag]
[ NodeElement $ ]
Element
"PartNumber"
mempty
[NodeContent $ T.pack $ show n],
NodeElement $
Element
"ETag"
mempty
[NodeContent etag]
]
cmur = Document (Prologue [] Nothing []) root [] cmur = Document (Prologue [] Nothing []) root []
-- Simplified XML representation without element attributes. -- Simplified XML representation without element attributes.
data XNode data XNode = XNode Text [XNode]
= XNode Text [XNode] | XLeaf Text Text
| XLeaf Text Text deriving (Eq, Show)
deriving stock (Eq, Show)
toXML :: Text -> XNode -> ByteString toXML :: Text -> XNode -> ByteString
toXML ns node = toXML ns node = LBS.toStrict $ renderLBS def $
LBS.toStrict $ Document (Prologue [] Nothing []) (xmlNode node) []
renderLBS def $
Document (Prologue [] Nothing []) (xmlNode node) []
where where
xmlNode :: XNode -> Element xmlNode :: XNode -> Element
xmlNode (XNode name nodes) = xmlNode (XNode name nodes) = Element (s3Name ns name) mempty $
Element (s3Name ns name) mempty $ map (NodeElement . xmlNode) nodes
map (NodeElement . xmlNode) nodes xmlNode (XLeaf name content) = Element (s3Name ns name) mempty
xmlNode (XLeaf name content) = [NodeContent content]
Element
(s3Name ns name)
mempty
[NodeContent content]
class ToXNode a where class ToXNode a where
toXNode :: a -> XNode toXNode :: a -> XNode
instance ToXNode Event where instance ToXNode Event where
toXNode = XLeaf "Event" . toText toXNode = XLeaf "Event" . show
instance ToXNode Notification where instance ToXNode Notification where
toXNode (Notification qc tc lc) = toXNode (Notification qc tc lc) = XNode "NotificationConfiguration" $
XNode "NotificationConfiguration" $ map (toXNodesWithArnName "QueueConfiguration" "Queue") qc ++
map (toXNodesWithArnName "QueueConfiguration" "Queue") qc map (toXNodesWithArnName "TopicConfiguration" "Topic") tc ++
++ map (toXNodesWithArnName "TopicConfiguration" "Topic") tc map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode
toXNodesWithArnName eltName arnName (NotificationConfig itemId arn events fRule) = toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) =
XNode eltName $ XNode eltName $ [XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events ++
[XLeaf "Id" itemId, XLeaf arnName arn] [toXNode fRule]
++ map toXNode events
++ [toXNode fRule]
instance ToXNode Filter where instance ToXNode Filter where
toXNode (Filter (FilterKey (FilterRules rules))) = toXNode (Filter (FilterKey (FilterRules rules))) =
XNode "Filter" [XNode "S3Key" (map getFRXNode rules)] XNode "Filter" [XNode "S3Key" (map getFRXNode rules)]
getFRXNode :: FilterRule -> XNode getFRXNode :: FilterRule -> XNode
getFRXNode (FilterRule n v) = getFRXNode (FilterRule n v) = XNode "FilterRule" [ XLeaf "Name" n
XNode , XLeaf "Value" v
"FilterRule" ]
[ XLeaf "Name" n,
XLeaf "Value" v
]
mkPutNotificationRequest :: Text -> Notification -> ByteString mkPutNotificationRequest :: Text -> Notification -> ByteString
mkPutNotificationRequest ns = toXML ns . toXNode mkPutNotificationRequest ns = toXML ns . toXNode
@ -129,103 +106,60 @@ mkSelectRequest :: SelectRequest -> ByteString
mkSelectRequest r = LBS.toStrict $ renderLBS def sr mkSelectRequest r = LBS.toStrict $ renderLBS def sr
where where
sr = Document (Prologue [] Nothing []) root [] sr = Document (Prologue [] Nothing []) root []
root = root = Element "SelectRequest" mempty $
Element "SelectRequest" mempty $ [ NodeElement (Element "Expression" mempty
[ NodeElement [NodeContent $ srExpression r])
( Element , NodeElement (Element "ExpressionType" mempty
"Expression" [NodeContent $ show $ srExpressionType r])
mempty , NodeElement (Element "InputSerialization" mempty $
[NodeContent $ srExpression r] inputSerializationNodes $ srInputSerialization r)
), , NodeElement (Element "OutputSerialization" mempty $
NodeElement outputSerializationNodes $ srOutputSerialization r)
( Element ] ++ maybe [] reqProgElem (srRequestProgressEnabled r)
"ExpressionType" reqProgElem enabled = [NodeElement
mempty (Element "RequestProgress" mempty
[NodeContent $ show $ srExpressionType r] [NodeElement
), (Element "Enabled" mempty
NodeElement [NodeContent
( Element "InputSerialization" mempty $ (if enabled then "TRUE" else "FALSE")]
inputSerializationNodes $ )
srInputSerialization r ]
), )
NodeElement ]
( Element "OutputSerialization" mempty $ inputSerializationNodes is = comprTypeNode (isCompressionType is) ++
outputSerializationNodes $ [NodeElement $ formatNode (isFormatInfo is)]
srOutputSerialization r comprTypeNode (Just c) = [NodeElement $ Element "CompressionType" mempty
) [NodeContent $ case c of
] CompressionTypeNone -> "NONE"
++ maybe [] reqProgElem (srRequestProgressEnabled r) CompressionTypeGzip -> "GZIP"
reqProgElem enabled = CompressionTypeBzip2 -> "BZIP2"
[ NodeElement ]
( Element ]
"RequestProgress"
mempty
[ NodeElement
( Element
"Enabled"
mempty
[ NodeContent
(if enabled then "TRUE" else "FALSE")
]
)
]
)
]
inputSerializationNodes is =
comprTypeNode (isCompressionType is)
++ [NodeElement $ formatNode (isFormatInfo is)]
comprTypeNode (Just c) =
[ NodeElement $
Element
"CompressionType"
mempty
[ NodeContent $ case c of
CompressionTypeNone -> "NONE"
CompressionTypeGzip -> "GZIP"
CompressionTypeBzip2 -> "BZIP2"
]
]
comprTypeNode Nothing = [] comprTypeNode Nothing = []
kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v] kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v]
formatNode (InputFormatCSV c) = formatNode (InputFormatCSV (CSVProp h)) =
Element Element "CSV" mempty
"CSV" (map NodeElement $ map kvElement $ H.toList h)
mempty
(map (NodeElement . kvElement) (csvPropsList c))
formatNode (InputFormatJSON p) = formatNode (InputFormatJSON p) =
Element Element "JSON" mempty
"JSON" [NodeElement
mempty (Element "Type" mempty
[ NodeElement [NodeContent $ case jsonipType p of
( Element JSONTypeDocument -> "DOCUMENT"
"Type" JSONTypeLines -> "LINES"
mempty ]
[ NodeContent $ case jsonipType p of )
JSONTypeDocument -> "DOCUMENT" ]
JSONTypeLines -> "LINES"
]
)
]
formatNode InputFormatParquet = Element "Parquet" mempty [] formatNode InputFormatParquet = Element "Parquet" mempty []
outputSerializationNodes (OutputSerializationJSON j) = outputSerializationNodes (OutputSerializationJSON j) =
[ NodeElement [NodeElement (Element "JSON" mempty $
( Element "JSON" mempty $ rdElem $ jsonopRecordDelimiter j)]
rdElem $ outputSerializationNodes (OutputSerializationCSV (CSVProp h)) =
jsonopRecordDelimiter j [NodeElement $ Element "CSV" mempty
) (map NodeElement $ map kvElement $ H.toList h)]
]
outputSerializationNodes (OutputSerializationCSV c) =
[ NodeElement $
Element
"CSV"
mempty
(map (NodeElement . kvElement) (csvPropsList c))
]
rdElem Nothing = [] rdElem Nothing = []
rdElem (Just t) = rdElem (Just t) = [NodeElement $ Element "RecordDelimiter" mempty
[ NodeElement $ [NodeContent t]]
Element
"RecordDelimiter"
mempty
[NodeContent t]
]

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -15,38 +15,75 @@
-- --
module Network.Minio.XmlParser module Network.Minio.XmlParser
( parseListBuckets, ( parseListBuckets
parseLocation, , parseLocation
parseNewMultipartUpload, , parseNewMultipartUpload
parseCompleteMultipartUploadResponse, , parseCompleteMultipartUploadResponse
parseCopyObjectResponse, , parseCopyObjectResponse
parseListObjectsResponse, , parseListObjectsResponse
parseListObjectsV1Response, , parseListObjectsV1Response
parseListUploadsResponse, , parseListUploadsResponse
parseListPartsResponse, , parseListPartsResponse
parseErrResponse, , parseErrResponse
parseNotification, , parseNotification
parseSelectProgress, , parseSelectProgress
) ) where
where
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import Data.List (zip4, zip6) import Data.List (zip3, zip4, zip6)
import qualified Data.Text as T import qualified Data.Text as T
import Data.Time import Data.Text.Read (decimal)
import Network.Minio.Data import Data.Time
import Network.Minio.XmlCommon import Text.XML
import Text.XML.Cursor hiding (bool) import Text.XML.Cursor hiding (bool)
import Lib.Prelude
import Network.Minio.Data
import Network.Minio.Errors
-- | Represent the time format string returned by S3 API calls.
s3TimeFormat :: [Char]
s3TimeFormat = iso8601DateFormat $ Just "%T%QZ"
-- | Helper functions.
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
uncurry4 f (a, b, c, d) = f a b c d
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML
parseS3XMLTime :: MonadIO m => Text -> m UTCTime
parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
parseTimeM True defaultTimeLocale s3TimeFormat $ T.unpack t
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr = either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot = either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
-- | Parse the response XML of a list buckets call. -- | Parse the response XML of a list buckets call.
parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo] parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo]
parseListBuckets xmldata = do parseListBuckets xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let
names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content s3Elem' = s3Elem ns
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content names = r $// s3Elem' "Bucket" &// s3Elem' "Name" &/ content
timeStrings = r $// s3Elem' "Bucket" &// s3Elem' "CreationDate" &/ content
times <- mapM parseS3XMLTime timeStrings times <- mapM parseS3XMLTime timeStrings
return $ zipWith BucketInfo names times return $ zipWith BucketInfo names times
@ -79,38 +116,41 @@ parseCopyObjectResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =
parseCopyObjectResponse xmldata = do parseCopyObjectResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content s3Elem' = s3Elem ns
mtimeStr = T.concat $ r $// s3Elem' "LastModified" &/ content
mtime <- parseS3XMLTime mtimeStr mtime <- parseS3XMLTime mtimeStr
return (T.concat $ r $// s3Elem' "ETag" &/ content, mtime) return (T.concat $ r $// s3Elem' "ETag" &/ content, mtime)
-- | Parse the response XML of a list objects v1 call. -- | Parse the response XML of a list objects v1 call.
parseListObjectsV1Response :: parseListObjectsV1Response :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
(MonadReader env m, HasSvcNamespace env, MonadIO m) => => LByteString -> m ListObjectsV1Result
LByteString ->
m ListObjectsV1Result
parseListObjectsV1Response xmldata = do parseListObjectsV1Response xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) s3Elem' = s3Elem ns
nextMarker = listToMaybe $ r $/ s3Elem' "NextMarker" &/ content hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended. keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
etags = etagsList ++ repeat "" modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr sizes <- parseDecimals sizeStr
let objects = let
map (uncurry6 ObjectInfo) $ objects = map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty) zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
return $ ListObjectsV1Result hasMore nextMarker objects prefixes return $ ListObjectsV1Result hasMore nextMarker objects prefixes
@ -119,24 +159,28 @@ parseListObjectsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListObjectsResponse xmldata = do parseListObjectsResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) s3Elem' = s3Elem ns
nextToken = listToMaybe $ r $/ s3Elem' "NextContinuationToken" &/ content hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended. keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
etags = etagsList ++ repeat "" modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content etagsList = r $/ s3Elem' "Contents" &/ s3Elem' "ETag" &/ content
-- if response xml contains empty etag response fill them with as
-- many empty Text for the zip4 below to work as intended.
etags = etagsList ++ repeat ""
sizeStr = r $/ s3Elem' "Contents" &/ s3Elem' "Size" &/ content
modTimes <- mapM parseS3XMLTime modTimeStr modTimes <- mapM parseS3XMLTime modTimeStr
sizes <- parseDecimals sizeStr sizes <- parseDecimals sizeStr
let objects = let
map (uncurry6 ObjectInfo) $ objects = map (uncurry6 ObjectInfo) $
zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty) zip6 keys modTimes etags sizes (repeat H.empty) (repeat H.empty)
return $ ListObjectsResult hasMore nextToken objects prefixes return $ ListObjectsResult hasMore nextToken objects prefixes
@ -145,18 +189,20 @@ parseListUploadsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m)
parseListUploadsResponse xmldata = do parseListUploadsResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) s3Elem' = s3Elem ns
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextKey = listToMaybe $ r $/ s3Elem' "NextKeyMarker" &/ content prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextUpload = listToMaybe $ r $/ s3Elem' "NextUploadIdMarker" &/ content nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
uploadInitTimes <- mapM parseS3XMLTime uploadInitTimeStr uploadInitTimes <- mapM parseS3XMLTime uploadInitTimeStr
let uploads = zip3 uploadKeys uploadIds uploadInitTimes let
uploads = zip3 uploadKeys uploadIds uploadInitTimes
return $ ListUploadsResult hasMore nextKey nextUpload uploads prefixes return $ ListUploadsResult hasMore nextKey nextUpload uploads prefixes
@ -164,25 +210,34 @@ parseListPartsResponse :: (MonadReader env m, HasSvcNamespace env, MonadIO m) =>
parseListPartsResponse xmldata = do parseListPartsResponse xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) s3Elem' = s3Elem ns
nextPartNumStr = listToMaybe $ r $/ s3Elem' "NextPartNumberMarker" &/ content hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
partSizeStr = r $/ s3Elem' "Part" &/ s3Elem' "Size" &/ content
partModTimes <- mapM parseS3XMLTime partModTimeStr partModTimes <- mapM parseS3XMLTime partModTimeStr
partSizes <- parseDecimals partSizeStr partSizes <- parseDecimals partSizeStr
partNumbers <- parseDecimals partNumberStr partNumbers <- parseDecimals partNumberStr
nextPartNum <- parseDecimals $ maybeToList nextPartNumStr nextPartNum <- parseDecimals $ maybeToList nextPartNumStr
let partInfos = let
map (uncurry4 ObjectPartInfo) $ partInfos = map (uncurry4 ObjectPartInfo) $
zip4 partNumbers partETags partSizes partModTimes zip4 partNumbers partETags partSizes partModTimes
return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
let code = T.concat $ r $/ element "Code" &/ content
message = T.concat $ r $/ element "Message" &/ content
return $ toServiceErr code message
parseNotification :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m Notification parseNotification :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m Notification
parseNotification xmldata = do parseNotification xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
@ -191,40 +246,32 @@ parseNotification xmldata = do
qcfg = map node $ r $/ s3Elem' "QueueConfiguration" qcfg = map node $ r $/ s3Elem' "QueueConfiguration"
tcfg = map node $ r $/ s3Elem' "TopicConfiguration" tcfg = map node $ r $/ s3Elem' "TopicConfiguration"
lcfg = map node $ r $/ s3Elem' "CloudFunctionConfiguration" lcfg = map node $ r $/ s3Elem' "CloudFunctionConfiguration"
Notification Notification <$> (mapM (parseNode ns "Queue") qcfg)
<$> mapM (parseNode ns "Queue") qcfg <*> (mapM (parseNode ns "Topic") tcfg)
<*> mapM (parseNode ns "Topic") tcfg <*> (mapM (parseNode ns "CloudFunction") lcfg)
<*> mapM (parseNode ns "CloudFunction") lcfg
where where
getFilterRule ns c = getFilterRule ns c =
let name = T.concat $ c $/ s3Elem ns "Name" &/ content let name = T.concat $ c $/ s3Elem ns "Name" &/ content
value = T.concat $ c $/ s3Elem ns "Value" &/ content value = T.concat $ c $/ s3Elem ns "Value" &/ content
in FilterRule name value in FilterRule name value
parseNode ns arnName nodeData = do parseNode ns arnName nodeData = do
let c = fromNode nodeData let c = fromNode nodeData
itemId = T.concat $ c $/ s3Elem ns "Id" &/ content id = T.concat $ c $/ s3Elem ns "Id" &/ content
arn = T.concat $ c $/ s3Elem ns arnName &/ content arn = T.concat $ c $/ s3Elem ns arnName &/ content
events = mapMaybe textToEvent (c $/ s3Elem ns "Event" &/ content) events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content
rules = rules = c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key" &/
c s3Elem ns "FilterRule" &| getFilterRule ns
$/ s3Elem ns "Filter" return $ NotificationConfig id arn events
&/ s3Elem ns "S3Key" (Filter $ FilterKey $ FilterRules rules)
&/ s3Elem ns "FilterRule"
&| getFilterRule ns
return $
NotificationConfig
itemId
arn
events
(Filter $ FilterKey $ FilterRules rules)
parseSelectProgress :: (MonadIO m) => ByteString -> m Progress parseSelectProgress :: MonadIO m => ByteString -> m Progress
parseSelectProgress xmldata = do parseSelectProgress xmldata = do
r <- parseRoot $ LB.fromStrict xmldata r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content bProcessed = T.concat $ r $/element "BytesProcessed" &/ content
bReturned = T.concat $ r $/ element "BytesReturned" &/ content bReturned = T.concat $ r $/element "BytesReturned" &/ content
Progress Progress <$> parseDecimal bScanned
<$> parseDecimal bScanned <*> parseDecimal bProcessed
<*> parseDecimal bProcessed <*> parseDecimal bReturned
<*> parseDecimal bReturned

View File

@ -15,7 +15,7 @@
# resolver: # resolver:
# name: custom-snapshot # name: custom-snapshot
# location: "./custom-snapshot.yaml" # location: "./custom-snapshot.yaml"
resolver: lts-22.19 resolver: lts-14.6
# User packages to be built. # User packages to be built.
# Various formats can be used as shown in the example below. # Various formats can be used as shown in the example below.
@ -36,17 +36,17 @@ resolver: lts-22.19
# non-dependency (i.e. a user package), and its test suites and benchmarks # non-dependency (i.e. a user package), and its test suites and benchmarks
# will not be run. This is useful for tweaking upstream packages. # will not be run. This is useful for tweaking upstream packages.
packages: packages:
- "." - '.'
# Dependency packages to be pulled from upstream that are not in the resolver # Dependency packages to be pulled from upstream that are not in the resolver
# (e.g., acme-missiles-0.3) # (e.g., acme-missiles-0.3)
extra-deps: extra-deps: []
- crypton-connection-0.3.2
# Override default flag values for local packages and extra-deps # Override default flag values for local packages and extra-deps
flags: {} flags: {}
# Extra package databases containing global packages # Extra package databases containing global packages
extra-package-dbs: [] extra-package-dbs: []
# Control whether we use the GHC we find on the path # Control whether we use the GHC we find on the path
# system-ghc: true # system-ghc: true
# #

View File

@ -1,19 +0,0 @@
# This file was autogenerated by Stack.
# You should not edit this file by hand.
# For more information, please see the documentation at:
# https://docs.haskellstack.org/en/stable/lock_files
packages:
- completed:
hackage: crypton-connection-0.3.2@sha256:c7937edc25ab022bcf167703f2ec5ab73b62908e545bb587d2aa42b33cd6f6cc,1581
pantry-tree:
sha256: f986ad29b008cbe5732606e9cde1897191c486a2f1f169a4cb75fd915bce397c
size: 394
original:
hackage: crypton-connection-0.3.2
snapshots:
- completed:
sha256: e5cac927cf7ccbd52aa41476baa68b88c564ee6ddc3bc573dbf4210069287fe7
size: 713340
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/22/19.yaml
original: lts-22.19

File diff suppressed because it is too large Load Diff

View File

@ -15,102 +15,88 @@
-- --
module Network.Minio.API.Test module Network.Minio.API.Test
( bucketNameValidityTests, ( bucketNameValidityTests
objectNameValidityTests, , objectNameValidityTests
parseServerInfoJSONTest, , parseServerInfoJSONTest
parseHealStatusTest, , parseHealStatusTest
parseHealStartRespTest, , parseHealStartRespTest
) ) where
where
import Data.Aeson (eitherDecode) import Data.Aeson (eitherDecode)
import Network.Minio.API import Test.Tasty
import Network.Minio.AdminAPI import Test.Tasty.HUnit
import Test.Tasty
import Test.Tasty.HUnit import Lib.Prelude
import Network.Minio.AdminAPI
import Network.Minio.API
assertBool' :: Bool -> Assertion assertBool' :: Bool -> Assertion
assertBool' = assertBool "Test failed!" assertBool' = assertBool "Test failed!"
bucketNameValidityTests :: TestTree bucketNameValidityTests :: TestTree
bucketNameValidityTests = bucketNameValidityTests = testGroup "Bucket Name Validity Tests"
testGroup [ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName ""
"Bucket Name Validity Tests" , testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab"
[ testCase "Too short 1" $ assertBool' $ not $ isValidBucketName "", , testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
testCase "Too short 2" $ assertBool' $ not $ isValidBucketName "ab", , testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD"
testCase "Too long 1" $ assertBool' $ not $ isValidBucketName "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", , testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2"
testCase "Has upper case" $ assertBool' $ not $ isValidBucketName "ABCD", , testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-"
testCase "Has punctuation" $ assertBool' $ not $ isValidBucketName "abc,2", , testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg"
testCase "Has hyphen at end" $ assertBool' $ not $ isValidBucketName "abc-", , testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1"
testCase "Has consecutive dot" $ assertBool' $ not $ isValidBucketName "abck..eedg", , testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea"
testCase "Looks like IP" $ assertBool' $ not $ isValidBucketName "10.0.0.1", , testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d"
testCase "Valid bucket name 1" $ assertBool' $ isValidBucketName "abcd.pqeq.rea", , testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
testCase "Valid bucket name 2" $ assertBool' $ isValidBucketName "abcdedgh1d", ]
testCase "Valid bucket name 3" $ assertBool' $ isValidBucketName "abc-de-dg-h1d"
]
objectNameValidityTests :: TestTree objectNameValidityTests :: TestTree
objectNameValidityTests = objectNameValidityTests = testGroup "Object Name Validity Tests"
testGroup [ testCase "Empty name" $ assertBool' $ not $ isValidObjectName ""
"Object Name Validity Tests" , testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
[ testCase "Empty name" $ assertBool' $ not $ isValidObjectName "", ]
testCase "Has unicode characters" $ assertBool' $ isValidObjectName "日本国"
]
parseServerInfoJSONTest :: TestTree parseServerInfoJSONTest :: TestTree
parseServerInfoJSONTest = parseServerInfoJSONTest = testGroup "Parse MinIO Admin API ServerInfo JSON test" $
testGroup "Parse MinIO Admin API ServerInfo JSON test" $ map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
map tfn (eitherDecode tVal :: Either [Char] [ServerInfo])) testCases
( \(tName, tDesc, tfn, tVal) ->
testCase tName $
assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
)
testCases
where where
testCases = testCases = [ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON)
[ ("FSBackend", "Verify server info json parsing for FS backend", isRight, fsJSON), , ("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON)
("Erasure Backend", "Verify server info json parsing for Erasure backend", isRight, erasureJSON), , ("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON)
("Unknown Backend", "Verify server info json parsing for invalid backend", isLeft, invalidJSON) ]
]
fsJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":20530,\"Backend\":{\"Type\":1,\"OnlineDisks\":0,\"OfflineDisks\":0,\"StandardSCData\":0,\"StandardSCParity\":0,\"RRSCData\":0,\"RRSCParity\":0,\"Sets\":null}},\"network\":{\"transferred\":808,\"received\":1160},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":5992503019270,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]" fsJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":20530,\"Backend\":{\"Type\":1,\"OnlineDisks\":0,\"OfflineDisks\":0,\"StandardSCData\":0,\"StandardSCParity\":0,\"RRSCData\":0,\"RRSCParity\":0,\"Sets\":null}},\"network\":{\"transferred\":808,\"received\":1160},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":1,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":5992503019270,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
erasureJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":2,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]" erasureJSON = 
"[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":2,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
invalidJSON = "[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":42,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]" invalidJSON = 
"[{\"error\":\"\",\"addr\":\"192.168.1.218:9000\",\"data\":{\"storage\":{\"Used\":83084,\"Backend\":{\"Type\":42,\"OnlineDisks\":4,\"OfflineDisks\":0,\"StandardSCData\":2,\"StandardSCParity\":2,\"RRSCData\":2,\"RRSCParity\":2,\"Sets\":[[{\"uuid\":\"16ec6f2c-9197-4787-904a-36bb2c2683f8\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"4052e086-ef99-4aa5-ae2b-8e27559432f6\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"d0639950-ddd3-45b0-93ca-fd86f5d79f72\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"30ec68c0-37e1-4592-82c1-26b143c0ac10\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]]}},\"network\":{\"transferred\":404,\"received\":0},\"http\":{\"totalHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successHEADs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successGETs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPUTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successPOSTs\":{\"count\":0,\"avgDuration\":\"0s\"},\"totalDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"},\"successDELETEs\":{\"count\":0,\"avgDuration\":\"0s\"}},\"server\":{\"uptime\":2738903073,\"version\":\"DEVELOPMENT.GOGET\",\"commitID\":\"DEVELOPMENT.GOGET\",\"region\":\"\",\"sqsARN\":[]}}}]"
parseHealStatusTest :: TestTree parseHealStatusTest :: TestTree
parseHealStatusTest = parseHealStatusTest = testGroup "Parse MinIO Admin API HealStatus JSON test" $
testGroup "Parse MinIO Admin API HealStatus JSON test" $ map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
map tfn (eitherDecode tVal :: Either [Char] HealStatus)) testCases
( \(tName, tDesc, tfn, tVal) ->
testCase tName $
assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)
)
testCases
where where
testCases = testCases = [ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON')
[ ("Good", "Verify heal result item for erasure backend", isRight, erasureJSON'), , ("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON')
("Corrupted", "Verify heal result item for erasure backend", isLeft, invalidJSON'), , ("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType)
("Incorrect Value", "Verify heal result item for erasure backend", isLeft, invalidItemType) ]
]
erasureJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}" erasureJSON' = 
"{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
invalidJSON' = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]" invalidJSON' = 
"{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"metadata\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]"
invalidItemType = "{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"hello\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}" invalidItemType = 
"{\"Summary\":\"finished\",\"StartTime\":\"2018-06-05T08:09:47.644465513Z\",\"NumDisks\":4,\"Settings\":{\"recursive\":false,\"dryRun\":false},\"Items\":[{\"resultId\":1,\"type\":\"hello\",\"bucket\":\"\",\"object\":\"\",\"detail\":\"disk-format\",\"diskCount\":4,\"setCount\":1,\"before\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"after\":{\"drives\":[{\"uuid\":\"c3487166-b8a4-481a-b1e7-fb9b249e2500\",\"endpoint\":\"/tmp/1\",\"state\":\"ok\"},{\"uuid\":\"55a6e787-184f-4e4c-bf09-03dcada658a9\",\"endpoint\":\"/tmp/2\",\"state\":\"ok\"},{\"uuid\":\"f035d8c3-fca1-4407-b89c-38c2bcf4a641\",\"endpoint\":\"/tmp/3\",\"state\":\"ok\"},{\"uuid\":\"4f8b79d3-db90-4c1d-87c2-35a28b0d9a13\",\"endpoint\":\"/tmp/4\",\"state\":\"ok\"}]},\"objectSize\":0}]}"
parseHealStartRespTest :: TestTree parseHealStartRespTest :: TestTree
parseHealStartRespTest = parseHealStartRespTest = testGroup "Parse MinIO Admin API HealStartResp JSON test" $
testGroup "Parse MinIO Admin API HealStartResp JSON test" $ map (\(tName, tDesc, tfn, tVal) -> testCase tName $ assertBool tDesc $
map tfn (eitherDecode tVal :: Either [Char] HealStartResp)) testCases
( \(tName, tDesc, tfn, tVal) ->
testCase tName $
assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
)
testCases
where where
testCases = testCases = [ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON)
[ ("Good", "Verify heal start response for erasure backend", isRight, hsrJSON), , ("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON)
("Missing Token", "Verify heal start response for erasure backend", isLeft, missingTokenJSON) ]
]
hsrJSON = "{\"clientToken\":\"3a3aca49-77dd-4b78-bba7-0978f119b23e\",\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}" hsrJSON = "{\"clientToken\":\"3a3aca49-77dd-4b78-bba7-0978f119b23e\",\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"
missingTokenJSON = "{\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}" missingTokenJSON = "{\"clientAddress\":\"127.0.0.1\",\"startTime\":\"2018-06-05T08:09:47.644394493Z\"}"

View File

@ -15,26 +15,26 @@
-- --
module Network.Minio.JsonParser.Test module Network.Minio.JsonParser.Test
( jsonParserTests, (
) jsonParserTests
where ) where
import Lib.Prelude import Test.Tasty
import Network.Minio.Errors import Test.Tasty.HUnit
import Network.Minio.JsonParser import UnliftIO (MonadUnliftIO)
import Test.Tasty
import Test.Tasty.HUnit import Lib.Prelude
import UnliftIO (MonadUnliftIO)
import Network.Minio.Errors
import Network.Minio.JsonParser
jsonParserTests :: TestTree jsonParserTests :: TestTree
jsonParserTests = jsonParserTests = testGroup "JSON Parser Tests"
testGroup [ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
"JSON Parser Tests" ]
[ testCase "Test parseErrResponseJSON" testParseErrResponseJSON
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a) tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr = try tryValidationErr act = try act
assertValidationErr :: MErrV -> Assertion assertValidationErr :: MErrV -> Assertion
assertValidationErr e = assertFailure $ "Failed due to validation error => " ++ show e assertValidationErr e = assertFailure $ "Failed due to validation error => " ++ show e
@ -44,20 +44,21 @@ testParseErrResponseJSON = do
-- 1. Test parsing of an invalid error json. -- 1. Test parsing of an invalid error json.
parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON" parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
when (isRight parseResE) $ when (isRight parseResE) $
assertFailure $ assertFailure $ "Parsing should have failed => " ++ show parseResE
"Parsing should have failed => " ++ show parseResE
forM_ cases $ \(jsondata, sErr) -> do forM_ cases $ \(jsondata, sErr) -> do
parseErr <- tryValidationErr $ parseErrResponseJSON jsondata parseErr <- tryValidationErr $ parseErrResponseJSON jsondata
either assertValidationErr (@?= sErr) parseErr either assertValidationErr (@?= sErr) parseErr
where where
cases = cases = [
[ -- 2. Test parsing of a valid error json. -- 2. Test parsing of a valid error json.
( "{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}", ("{\"Code\":\"InvalidAccessKeyId\",\"Message\":\"The access key ID you provided does not exist in our records.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records." ServiceErr "InvalidAccessKeyId" "The access key ID you provided does not exist in our records."
), )
-- 3. Test parsing of a valid, empty Resource. ,
( "{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}", -- 3. Test parsing of a valid, empty Resource.
ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method." ("{\"Code\":\"SignatureDoesNotMatch\",\"Message\":\"The request signature we calculated does not match the signature you provided. Check your key and signing method.\",\"Key\":\"\",\"BucketName\":\"\",\"Resource\":\"/minio/admin/v1/info\",\"RequestId\":\"3L137\",\"HostId\":\"3L137\"}",
) ServiceErr "SignatureDoesNotMatch" "The request signature we calculated does not match the signature you provided. Check your key and signing method."
)
] ]

View File

@ -15,18 +15,18 @@
-- --
module Network.Minio.TestHelpers module Network.Minio.TestHelpers
( runTestNS, ( runTestNS
) ) where
where
import Network.Minio.Data import Network.Minio.Data
newtype TestNS = TestNS {testNamespace :: Text} import Lib.Prelude
newtype TestNS = TestNS { testNamespace :: Text }
instance HasSvcNamespace TestNS where instance HasSvcNamespace TestNS where
getSvcNamespace = testNamespace getSvcNamespace = testNamespace
runTestNS :: ReaderT TestNS m a -> m a runTestNS :: ReaderT TestNS m a -> m a
runTestNS = runTestNS = flip runReaderT $
flip runReaderT $ TestNS "http://s3.amazonaws.com/doc/2006-03-01/"
TestNS "http://s3.amazonaws.com/doc/2006-03-01/"

View File

@ -15,31 +15,33 @@
-- --
module Network.Minio.Utils.Test module Network.Minio.Utils.Test
( limitedMapConcurrentlyTests, (
) limitedMapConcurrentlyTests
where ) where
import Network.Minio.Utils import Test.Tasty
import Test.Tasty import Test.Tasty.HUnit
import Test.Tasty.HUnit
import Lib.Prelude
import Network.Minio.Utils
limitedMapConcurrentlyTests :: TestTree limitedMapConcurrentlyTests :: TestTree
limitedMapConcurrentlyTests = limitedMapConcurrentlyTests = testGroup "limitedMapConcurrently Tests"
testGroup [ testCase "Test with various thread counts" testLMC
"limitedMapConcurrently Tests" ]
[ testCase "Test with various thread counts" testLMC
]
testLMC :: Assertion testLMC :: Assertion
testLMC = do testLMC = do
let maxNum = 50 let maxNum = 50
-- test with thread count of 1 to 2*maxNum -- test with thread count of 1 to 2*maxNum
forM_ [1 .. (2 * maxNum)] $ \threads -> do forM_ [1..(2*maxNum)] $ \threads -> do
res <- limitedMapConcurrently threads compute [1 .. maxNum] res <- limitedMapConcurrently threads compute [1..maxNum]
sum res @?= overallResultCheck maxNum sum res @?= overallResultCheck maxNum
where where
-- simple function to run in each thread -- simple function to run in each thread
compute :: Int -> IO Int compute :: Int -> IO Int
compute n = return $ sum [1 .. n] compute n = return $ sum [1..n]
-- function to check overall result -- function to check overall result
overallResultCheck n = sum $ map (\t -> (t * (t + 1)) `div` 2) [1 .. n] overallResultCheck n = sum $ map (\t -> (t * (t+1)) `div` 2) [1..n]

View File

@ -13,33 +13,30 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE QuasiQuotes #-} {-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlGenerator.Test module Network.Minio.XmlGenerator.Test
( xmlGeneratorTests, ( xmlGeneratorTests
) ) where
where
import qualified Data.ByteString.Lazy as LBS import Test.Tasty
import Lib.Prelude import Test.Tasty.HUnit
import Network.Minio.Data import Text.RawString.QQ (r)
import Network.Minio.TestHelpers
import Network.Minio.XmlGenerator import Lib.Prelude
import Network.Minio.XmlParser (parseNotification)
import Test.Tasty import Network.Minio.Data
import Test.Tasty.HUnit import Network.Minio.TestHelpers
import Text.RawString.QQ (r) import Network.Minio.XmlGenerator
import Text.XML (def, parseLBS) import Network.Minio.XmlParser (parseNotification)
xmlGeneratorTests :: TestTree xmlGeneratorTests :: TestTree
xmlGeneratorTests = xmlGeneratorTests = testGroup "XML Generator Tests"
testGroup [ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig
"XML Generator Tests" , testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest
[ testCase "Test mkCreateBucketConfig" testMkCreateBucketConfig, , testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest
testCase "Test mkCompleteMultipartUploadRequest" testMkCompleteMultipartUploadRequest, , testCase "Test mkSelectRequest" testMkSelectRequest
testCase "Test mkPutNotificationRequest" testMkPutNotificationRequest, ]
testCase "Test mkSelectRequest" testMkSelectRequest
]
testMkCreateBucketConfig :: Assertion testMkCreateBucketConfig :: Assertion
testMkCreateBucketConfig = do testMkCreateBucketConfig = do
@ -47,136 +44,100 @@ testMkCreateBucketConfig = do
assertEqual "CreateBucketConfiguration xml should match: " expected $ assertEqual "CreateBucketConfiguration xml should match: " expected $
mkCreateBucketConfig ns "EU" mkCreateBucketConfig ns "EU"
where where
expected = expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ \<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<CreateBucketConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<LocationConstraint>EU</LocationConstraint>\
\<LocationConstraint>EU</LocationConstraint>\ \</CreateBucketConfiguration>"
\</CreateBucketConfiguration>"
testMkCompleteMultipartUploadRequest :: Assertion testMkCompleteMultipartUploadRequest :: Assertion
testMkCompleteMultipartUploadRequest = testMkCompleteMultipartUploadRequest =
assertEqual "completeMultipartUpload xml should match: " expected $ assertEqual "completeMultipartUpload xml should match: " expected $
mkCompleteMultipartUploadRequest [(1, "abc")] mkCompleteMultipartUploadRequest [(1, "abc")]
where where
expected = expected = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ \<CompleteMultipartUpload>\
\<CompleteMultipartUpload>\ \<Part>\
\<Part>\ \<PartNumber>1</PartNumber><ETag>abc</ETag>\
\<PartNumber>1</PartNumber><ETag>abc</ETag>\ \</Part>\
\</Part>\ \</CompleteMultipartUpload>"
\</CompleteMultipartUpload>"
testMkPutNotificationRequest :: Assertion testMkPutNotificationRequest :: Assertion
testMkPutNotificationRequest = testMkPutNotificationRequest =
forM_ cases $ \val -> do forM_ cases $ \val -> do
let ns = "http://s3.amazonaws.com/doc/2006-03-01/" let ns = "http://s3.amazonaws.com/doc/2006-03-01/"
result = fromStrictBS $ mkPutNotificationRequest ns val result = toS $ mkPutNotificationRequest ns val
ntf <- runExceptT $ runTestNS $ parseNotification result ntf <- runExceptT $ runTestNS $ parseNotification result
either either (\_ -> assertFailure "XML Parse Error!")
(\_ -> assertFailure "XML Parse Error!") (@?= val) ntf
(@?= val)
ntf
where where
cases = cases = [ Notification []
[ Notification [ NotificationConfig
[] "YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
[ NotificationConfig "arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4" [ReducedRedundancyLostObject, ObjectCreated] defaultFilter
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2" ]
[ReducedRedundancyLostObject, ObjectCreated] []
defaultFilter , Notification
] [ NotificationConfig
[], "1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
Notification [ObjectCreatedPut]
[ NotificationConfig (Filter $ FilterKey $ FilterRules
"1" [ FilterRule "prefix" "images/"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue" , FilterRule "suffix" ".jpg"])
[ObjectCreatedPut] , NotificationConfig
( Filter $ "" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
FilterKey $ [ObjectCreated] defaultFilter
FilterRules ]
[ FilterRule "prefix" "images/", [ NotificationConfig
FilterRule "suffix" ".jpg" "" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
] [ReducedRedundancyLostObject] defaultFilter
), ]
NotificationConfig [ NotificationConfig
"" "ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
"arn:aws:sqs:us-east-1:356671443308:s3notificationqueue" [ObjectCreated] defaultFilter
[ObjectCreated] ]
defaultFilter ]
]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
]
testMkSelectRequest :: Assertion testMkSelectRequest :: Assertion
testMkSelectRequest = mapM_ assertFn cases testMkSelectRequest = mapM_ assertFn cases
where where
assertFn (a, b) = assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a
let generatedReqDoc = parseLBS def $ LBS.fromStrict $ mkSelectRequest a cases = [ ( SelectRequest "Select * from S3Object" SQL
expectedReqDoc = parseLBS def $ LBS.fromStrict b (InputSerialization (Just CompressionTypeGzip)
in case (generatedReqDoc, expectedReqDoc) of (InputFormatCSV $ fileHeaderInfo FileHeaderIgnore
(Right genDoc, Right expDoc) -> assertEqual "selectRequest XML should match: " expDoc genDoc <> recordDelimiter "\n"
(Left err, _) -> assertFailure $ "Generated selectRequest failed to parse as XML" ++ show err <> fieldDelimiter ","
(_, Left err) -> assertFailure $ "Expected selectRequest failed to parse as XML" ++ show err <> quoteCharacter "\""
cases = <> quoteEscapeCharacter "\""
[ ( SelectRequest ))
"Select * from S3Object" (OutputSerializationCSV $ quoteFields QuoteFieldsAsNeeded
SQL <> recordDelimiter "\n"
( InputSerialization <> fieldDelimiter ","
(Just CompressionTypeGzip) <> quoteCharacter "\""
( InputFormatCSV $ <> quoteEscapeCharacter "\""
fileHeaderInfo FileHeaderIgnore
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
) )
) (Just False)
( OutputSerializationCSV $ , [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter>
quoteFields QuoteFieldsAsNeeded </RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
<> recordDelimiter "\n" </RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
<> fieldDelimiter "," )
<> quoteCharacter "\"" , ( setRequestProgressEnabled False $
<> quoteEscapeCharacter "\"" setInputCompressionType CompressionTypeGzip $
) selectRequest "Select * from S3Object" documentJsonInput
(Just False), (outputJSONFromRecordDelimiter "\n")
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><FieldDelimiter>,</FieldDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><RecordDelimiter> , [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></CSV></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
),
( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeGzip $
selectRequest
"Select * from S3Object"
documentJsonInput
(outputJSONFromRecordDelimiter "\n"),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
), )
( setRequestProgressEnabled False $ , ( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeNone $ setInputCompressionType CompressionTypeNone $
selectRequest selectRequest "Select * from S3Object" defaultParquetInput
"Select * from S3Object" (outputCSVFromProps $ quoteFields QuoteFieldsAsNeeded
defaultParquetInput <> recordDelimiter "\n"
( outputCSVFromProps $ <> fieldDelimiter ","
quoteFields QuoteFieldsAsNeeded <> quoteCharacter "\""
<> recordDelimiter "\n" <> quoteEscapeCharacter "\"")
<> fieldDelimiter "," , [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
<> quoteCharacter "\"" </RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
<> quoteEscapeCharacter "\"" )
), ]
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
]

View File

@ -13,49 +13,48 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE QuasiQuotes #-} {-# LANGUAGE QuasiQuotes #-}
module Network.Minio.XmlParser.Test module Network.Minio.XmlParser.Test
( xmlParserTests, ( xmlParserTests
) ) where
where
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import Data.Time (fromGregorian) import Data.Time (fromGregorian)
import Lib.Prelude import Test.Tasty
import Network.Minio.Data import Test.Tasty.HUnit
import Network.Minio.Errors import Text.RawString.QQ (r)
import Network.Minio.TestHelpers import UnliftIO (MonadUnliftIO)
import Network.Minio.XmlParser
import Test.Tasty import Lib.Prelude
import Test.Tasty.HUnit
import Text.RawString.QQ (r) import Network.Minio.Data
import UnliftIO (MonadUnliftIO) import Network.Minio.Errors
import Network.Minio.TestHelpers
import Network.Minio.XmlParser
xmlParserTests :: TestTree xmlParserTests :: TestTree
xmlParserTests = xmlParserTests = testGroup "XML Parser Tests"
testGroup [ testCase "Test parseLocation" testParseLocation
"XML Parser Tests" , testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload
[ testCase "Test parseLocation" testParseLocation, , testCase "Test parseListObjectsResponse" testParseListObjectsResult
testCase "Test parseNewMultipartUpload" testParseNewMultipartUpload, , testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result
testCase "Test parseListObjectsResponse" testParseListObjectsResult, , testCase "Test parseListUploadsresponse" testParseListIncompleteUploads
testCase "Test parseListObjectsV1Response" testParseListObjectsV1Result, , testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse
testCase "Test parseListUploadsresponse" testParseListIncompleteUploads, , testCase "Test parseListPartsResponse" testParseListPartsResponse
testCase "Test parseCompleteMultipartUploadResponse" testParseCompleteMultipartUploadResponse, , testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse
testCase "Test parseListPartsResponse" testParseListPartsResponse, , testCase "Test parseNotification" testParseNotification
testCase "Test parseCopyObjectResponse" testParseCopyObjectResponse, , testCase "Test parseSelectProgress" testParseSelectProgress
testCase "Test parseNotification" testParseNotification, ]
testCase "Test parseSelectProgress" testParseSelectProgress
]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a) tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr = try tryValidationErr act = try act
assertValidtionErr :: MErrV -> Assertion assertValidtionErr :: MErrV -> Assertion
assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e
eitherValidationErr :: Either MErrV a -> (a -> Assertion) -> Assertion eitherValidationErr :: Either MErrV a -> (a -> Assertion) -> Assertion
eitherValidationErr (Left e) _ = assertValidtionErr e eitherValidationErr (Left e) _ = assertValidtionErr e
eitherValidationErr (Right a) f = f a eitherValidationErr (Right a) f = f a
testParseLocation :: Assertion testParseLocation :: Assertion
@ -63,224 +62,224 @@ testParseLocation = do
-- 1. Test parsing of an invalid location constraint xml. -- 1. Test parsing of an invalid location constraint xml.
parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml" parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
when (isRight parseResE) $ when (isRight parseResE) $
assertFailure $ assertFailure $ "Parsing should have failed => " ++ show parseResE
"Parsing should have failed => " ++ show parseResE
forM_ cases $ \(xmldata, expectedLocation) -> do forM_ cases $ \(xmldata, expectedLocation) -> do
parseLocE <- tryValidationErr $ parseLocation xmldata parseLocE <- tryValidationErr $ parseLocation xmldata
either assertValidtionErr (@?= expectedLocation) parseLocE either assertValidtionErr (@?= expectedLocation) parseLocE
where where
cases = cases = [
[ -- 2. Test parsing of a valid location xml. -- 2. Test parsing of a valid location xml.
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>", \<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">EU</LocationConstraint>",
"EU" "EU"
), )
-- 3. Test parsing of a valid, empty location xml. ,
( "<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>", -- 3. Test parsing of a valid, empty location xml.
"us-east-1" ("<LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>",
) "us-east-1"
)
] ]
testParseNewMultipartUpload :: Assertion testParseNewMultipartUpload :: Assertion
testParseNewMultipartUpload = do testParseNewMultipartUpload = do
forM_ cases $ \(xmldata, expectedUploadId) -> do forM_ cases $ \(xmldata, expectedUploadId) -> do
parsedUploadIdE <- tryValidationErr $ runTestNS $ parseNewMultipartUpload xmldata parsedUploadIdE <- tryValidationErr $ runTestNS $ parseNewMultipartUpload xmldata
eitherValidationErr parsedUploadIdE (@?= expectedUploadId) eitherValidationErr parsedUploadIdE (@?= expectedUploadId)
where where
cases = cases = [
[ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\ \ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\ \ <Key>example-object</Key>\
\ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\ \ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\
\</InitiateMultipartUploadResult>", \</InitiateMultipartUploadResult>",
"VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA" "VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
), ),
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <Bucket>example-bucket</Bucket>\ \ <Bucket>example-bucket</Bucket>\
\ <Key>example-object</Key>\ \ <Key>example-object</Key>\
\ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\ \ <UploadId>EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-</UploadId>\
\</InitiateMultipartUploadResult>", \</InitiateMultipartUploadResult>",
"EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-" "EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbDUy4RTpMeoSMLPRp8Z5o1u8feSRonpvnWsKKG35tI2LB9VDPiCgTy.Gq2VxQLYjrue4Nq.NBdqI-"
) )
] ]
testParseListObjectsResult :: Assertion testParseListObjectsResult :: Assertion
testParseListObjectsResult = do testParseListObjectsResult = do
let xmldata = let
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\ \<Name>bucket</Name>\
\<Prefix/>\ \<Prefix/>\
\<NextContinuationToken>opaque</NextContinuationToken>\ \<NextContinuationToken>opaque</NextContinuationToken>\
\<KeyCount>1000</KeyCount>\ \<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\ \<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\ \<IsTruncated>true</IsTruncated>\
\<Contents>\ \<Contents>\
\<Key>my-image.jpg</Key>\ \<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\ \<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\ \<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\ \<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\</Contents>\ \</Contents>\
\</ListBucketResult>" \</ListBucketResult>"
expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty expectedListResult = ListObjectsResult True (Just "opaque") [object1] []
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12 object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsResult <- tryValidationErr $ runTestNS $ parseListObjectsResponse xmldata parsedListObjectsResult <- tryValidationErr $ runTestNS $ parseListObjectsResponse xmldata
eitherValidationErr parsedListObjectsResult (@?= expectedListResult) eitherValidationErr parsedListObjectsResult (@?= expectedListResult)
testParseListObjectsV1Result :: Assertion testParseListObjectsV1Result :: Assertion
testParseListObjectsV1Result = do testParseListObjectsV1Result = do
let xmldata = let
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Name>bucket</Name>\ \<Name>bucket</Name>\
\<Prefix/>\ \<Prefix/>\
\<NextMarker>my-image1.jpg</NextMarker>\ \<NextMarker>my-image1.jpg</NextMarker>\
\<KeyCount>1000</KeyCount>\ \<KeyCount>1000</KeyCount>\
\<MaxKeys>1000</MaxKeys>\ \<MaxKeys>1000</MaxKeys>\
\<IsTruncated>true</IsTruncated>\ \<IsTruncated>true</IsTruncated>\
\<Contents>\ \<Contents>\
\<Key>my-image.jpg</Key>\ \<Key>my-image.jpg</Key>\
\<LastModified>2009-10-12T17:50:30.000Z</LastModified>\ \<LastModified>2009-10-12T17:50:30.000Z</LastModified>\
\<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\ \<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>\
\<Size>434234</Size>\ \<Size>434234</Size>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\</Contents>\ \</Contents>\
\</ListBucketResult>" \</ListBucketResult>"
expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty expectedListResult = ListObjectsV1Result True (Just "my-image1.jpg") [object1] []
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12 object1 = ObjectInfo "my-image.jpg" modifiedTime1 "\"fba9dede5f27731c9771645a39863328\"" 434234 H.empty H.empty
modifiedTime1 = flip UTCTime 64230 $ fromGregorian 2009 10 12
parsedListObjectsV1Result <- tryValidationErr $ runTestNS $ parseListObjectsV1Response xmldata parsedListObjectsV1Result <- tryValidationErr $ runTestNS $ parseListObjectsV1Response xmldata
eitherValidationErr parsedListObjectsV1Result (@?= expectedListResult) eitherValidationErr parsedListObjectsV1Result (@?= expectedListResult)
testParseListIncompleteUploads :: Assertion testParseListIncompleteUploads :: Assertion
testParseListIncompleteUploads = do testParseListIncompleteUploads = do
let xmldata = let
"<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ xmldata = "<ListMultipartUploadsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\ \<Bucket>example-bucket</Bucket>\
\<KeyMarker/>\ \<KeyMarker/>\
\<UploadIdMarker/>\ \<UploadIdMarker/>\
\<NextKeyMarker>sample.jpg</NextKeyMarker>\ \<NextKeyMarker>sample.jpg</NextKeyMarker>\
\<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\ \<NextUploadIdMarker>Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</NextUploadIdMarker>\
\<Delimiter>/</Delimiter>\ \<Delimiter>/</Delimiter>\
\<Prefix/>\ \<Prefix/>\
\<MaxUploads>1000</MaxUploads>\ \<MaxUploads>1000</MaxUploads>\
\<IsTruncated>false</IsTruncated>\ \<IsTruncated>false</IsTruncated>\
\<Upload>\ \<Upload>\
\<Key>sample.jpg</Key>\ \<Key>sample.jpg</Key>\
\<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\ \<UploadId>Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--</UploadId>\
\<Initiator>\ \<Initiator>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\ \<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\ \<DisplayName>s3-nickname</DisplayName>\
\</Initiator>\ \</Initiator>\
\<Owner>\ \<Owner>\
\<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\ \<ID>314133b66967d86f031c7249d1d9a80249109428335cd0ef1cdc487b4566cb1b</ID>\
\<DisplayName>s3-nickname</DisplayName>\ \<DisplayName>s3-nickname</DisplayName>\
\</Owner>\ \</Owner>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\<Initiated>2010-11-26T19:24:17.000Z</Initiated>\ \<Initiated>2010-11-26T19:24:17.000Z</Initiated>\
\</Upload>\ \</Upload>\
\<CommonPrefixes>\ \<CommonPrefixes>\
\<Prefix>photos/</Prefix>\ \<Prefix>photos/</Prefix>\
\</CommonPrefixes>\ \</CommonPrefixes>\
\<CommonPrefixes>\ \<CommonPrefixes>\
\<Prefix>videos/</Prefix>\ \<Prefix>videos/</Prefix>\
\</CommonPrefixes>\ \</CommonPrefixes>\
\</ListMultipartUploadsResult>" \</ListMultipartUploadsResult>"
expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes expectedListResult = ListUploadsResult False (Just "sample.jpg") (Just "Xgw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1W99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--") uploads prefixes
uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)] uploads = [("sample.jpg", "Agw4MJT6ZPAVxpY0SAuGN7q4uWJJM22ZYg1N99trdp4tpO88.PT6.MhO0w2E17eutfAvQfQWoajgE_W2gpcxQw--", initTime)]
initTime = UTCTime (fromGregorian 2010 11 26) 69857 initTime = UTCTime (fromGregorian 2010 11 26) 69857
prefixes = ["photos/", "videos/"] prefixes = ["photos/", "videos/"]
parsedListUploadsResult <- tryValidationErr $ runTestNS $ parseListUploadsResponse xmldata parsedListUploadsResult <- tryValidationErr $ runTestNS $ parseListUploadsResponse xmldata
eitherValidationErr parsedListUploadsResult (@?= expectedListResult) eitherValidationErr parsedListUploadsResult (@?= expectedListResult)
testParseCompleteMultipartUploadResponse :: Assertion testParseCompleteMultipartUploadResponse :: Assertion
testParseCompleteMultipartUploadResponse = do testParseCompleteMultipartUploadResponse = do
let xmldata = let
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<CompleteMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\ \<Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>\
\<Bucket>Example-Bucket</Bucket>\ \<Bucket>Example-Bucket</Bucket>\
\<Key>Example-Object</Key>\ \<Key>Example-Object</Key>\
\<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\ \<ETag>\"3858f62230ac3c915f300c664312c11f-9\"</ETag>\
\</CompleteMultipartUploadResult>" \</CompleteMultipartUploadResult>"
expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\"" expectedETag = "\"3858f62230ac3c915f300c664312c11f-9\""
parsedETagE <- runExceptT $ runTestNS $ parseCompleteMultipartUploadResponse xmldata parsedETagE <- runExceptT $ runTestNS $ parseCompleteMultipartUploadResponse xmldata
eitherValidationErr parsedETagE (@?= expectedETag) eitherValidationErr parsedETagE (@?= expectedETag)
testParseListPartsResponse :: Assertion testParseListPartsResponse :: Assertion
testParseListPartsResponse = do testParseListPartsResponse = do
let xmldata = let
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ xmldata = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<Bucket>example-bucket</Bucket>\ \<Bucket>example-bucket</Bucket>\
\<Key>example-object</Key>\ \<Key>example-object</Key>\
\<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\ \<UploadId>XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA</UploadId>\
\<Initiator>\ \<Initiator>\
\<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\ \<ID>arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx</ID>\
\<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\ \<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
\</Initiator>\ \</Initiator>\
\<Owner>\ \<Owner>\
\<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\ \<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>\
\<DisplayName>someName</DisplayName>\ \<DisplayName>someName</DisplayName>\
\</Owner>\ \</Owner>\
\<StorageClass>STANDARD</StorageClass>\ \<StorageClass>STANDARD</StorageClass>\
\<PartNumberMarker>1</PartNumberMarker>\ \<PartNumberMarker>1</PartNumberMarker>\
\<NextPartNumberMarker>3</NextPartNumberMarker>\ \<NextPartNumberMarker>3</NextPartNumberMarker>\
\<MaxParts>2</MaxParts>\ \<MaxParts>2</MaxParts>\
\<IsTruncated>true</IsTruncated>\ \<IsTruncated>true</IsTruncated>\
\<Part>\ \<Part>\
\<PartNumber>2</PartNumber>\ \<PartNumber>2</PartNumber>\
\<LastModified>2010-11-10T20:48:34.000Z</LastModified>\ \<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
\<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\ \<ETag>\"7778aef83f66abc1fa1e8477f296d394\"</ETag>\
\<Size>10485760</Size>\ \<Size>10485760</Size>\
\</Part>\ \</Part>\
\<Part>\ \<Part>\
\<PartNumber>3</PartNumber>\ \<PartNumber>3</PartNumber>\
\<LastModified>2010-11-10T20:48:33.000Z</LastModified>\ \<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
\<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\ \<ETag>\"aaaa18db4cc2f85cedef654fccc4a4x8\"</ETag>\
\<Size>10485760</Size>\ \<Size>10485760</Size>\
\</Part>\ \</Part>\
\</ListPartsResult>" \</ListPartsResult>"
expectedListResult = ListPartsResult True (Just 3) [part1, part2]
part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1 expectedListResult = ListPartsResult True (Just 3) [part1, part2]
modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10 part1 = ObjectPartInfo 2 "\"7778aef83f66abc1fa1e8477f296d394\"" 10485760 modifiedTime1
part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2 modifiedTime1 = flip UTCTime 74914 $ fromGregorian 2010 11 10
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10 part2 = ObjectPartInfo 3 "\"aaaa18db4cc2f85cedef654fccc4a4x8\"" 10485760 modifiedTime2
modifiedTime2 = flip UTCTime 74913 $ fromGregorian 2010 11 10
parsedListPartsResult <- runExceptT $ runTestNS $ parseListPartsResponse xmldata parsedListPartsResult <- runExceptT $ runTestNS $ parseListPartsResponse xmldata
eitherValidationErr parsedListPartsResult (@?= expectedListResult) eitherValidationErr parsedListPartsResult (@?= expectedListResult)
testParseCopyObjectResponse :: Assertion testParseCopyObjectResponse :: Assertion
testParseCopyObjectResponse = do testParseCopyObjectResponse = do
let cases = let
[ ( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ cases = [ ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
\<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<CopyObjectResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\ \<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\ \<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\</CopyObjectResult>", \</CopyObjectResult>",
( "\"9b2cf535f27731c974343645a3985328\"", ("\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120 UTCTime (fromGregorian 2009 10 28) 81120))
) , ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
), \<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\ \<LastModified>2009-10-28T22:32:00.000Z</LastModified>\
\<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\
\<LastModified>2009-10-28T22:32:00.000Z</LastModified>\ \</CopyPartResult>",
\<ETag>\"9b2cf535f27731c974343645a3985328\"</ETag>\ ("\"9b2cf535f27731c974343645a3985328\"",
\</CopyPartResult>", UTCTime (fromGregorian 2009 10 28) 81120))]
( "\"9b2cf535f27731c974343645a3985328\"",
UTCTime (fromGregorian 2009 10 28) 81120
)
)
]
forM_ cases $ \(xmldata, (etag, modTime)) -> do forM_ cases $ \(xmldata, (etag, modTime)) -> do
parseResult <- runExceptT $ runTestNS $ parseCopyObjectResponse xmldata parseResult <- runExceptT $ runTestNS $ parseCopyObjectResponse xmldata
@ -288,89 +287,73 @@ testParseCopyObjectResponse = do
testParseNotification :: Assertion testParseNotification :: Assertion
testParseNotification = do testParseNotification = do
let cases = let
[ ( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ cases = [ ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
\ <TopicConfiguration>\ \ <TopicConfiguration>\
\ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\ \ <Id>YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4</Id>\
\ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\ \ <Topic>arn:aws:sns:us-east-1:account-id:s3notificationtopic2</Topic>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\ \ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Event>s3:ObjectCreated:*</Event>\ \ <Event>s3:ObjectCreated:*</Event>\
\ </TopicConfiguration>\ \ </TopicConfiguration>\
\</NotificationConfiguration>", \</NotificationConfiguration>",
Notification Notification []
[] [ NotificationConfig
[ NotificationConfig
"YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4" "YjVkM2Y0YmUtNGI3NC00ZjQyLWEwNGItNDIyYWUxY2I0N2M4"
"arn:aws:sns:us-east-1:account-id:s3notificationtopic2" "arn:aws:sns:us-east-1:account-id:s3notificationtopic2"
[ReducedRedundancyLostObject, ObjectCreated] [ReducedRedundancyLostObject, ObjectCreated] defaultFilter
defaultFilter ]
] [])
[] , ("<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
), \ <CloudFunctionConfiguration>\
( "<NotificationConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\ \ <Id>ObjectCreatedEvents</Id>\
\ <CloudFunctionConfiguration>\ \ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\
\ <Id>ObjectCreatedEvents</Id>\ \ <Event>s3:ObjectCreated:*</Event>\
\ <CloudFunction>arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail</CloudFunction>\ \ </CloudFunctionConfiguration>\
\ <Event>s3:ObjectCreated:*</Event>\ \ <QueueConfiguration>\
\ </CloudFunctionConfiguration>\ \ <Id>1</Id>\
\ <QueueConfiguration>\ \ <Filter>\
\ <Id>1</Id>\ \ <S3Key>\
\ <Filter>\ \ <FilterRule>\
\ <S3Key>\ \ <Name>prefix</Name>\
\ <FilterRule>\ \ <Value>images/</Value>\
\ <Name>prefix</Name>\ \ </FilterRule>\
\ <Value>images/</Value>\ \ <FilterRule>\
\ </FilterRule>\ \ <Name>suffix</Name>\
\ <FilterRule>\ \ <Value>.jpg</Value>\
\ <Name>suffix</Name>\ \ </FilterRule>\
\ <Value>.jpg</Value>\ \ </S3Key>\
\ </FilterRule>\ \ </Filter>\
\ </S3Key>\ \ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\
\ </Filter>\ \ <Event>s3:ObjectCreated:Put</Event>\
\ <Queue>arn:aws:sqs:us-west-2:444455556666:s3notificationqueue</Queue>\ \ </QueueConfiguration>\
\ <Event>s3:ObjectCreated:Put</Event>\ \ <TopicConfiguration>\
\ </QueueConfiguration>\ \ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\
\ <TopicConfiguration>\ \ <Event>s3:ReducedRedundancyLostObject</Event>\
\ <Topic>arn:aws:sns:us-east-1:356671443308:s3notificationtopic2</Topic>\ \ </TopicConfiguration>\
\ <Event>s3:ReducedRedundancyLostObject</Event>\ \ <QueueConfiguration>\
\ </TopicConfiguration>\ \ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\
\ <QueueConfiguration>\ \ <Event>s3:ObjectCreated:*</Event>\
\ <Queue>arn:aws:sqs:us-east-1:356671443308:s3notificationqueue</Queue>\ \ </QueueConfiguration>)\
\ <Event>s3:ObjectCreated:*</Event>\ \</NotificationConfiguration>",
\ </QueueConfiguration>)\ Notification [ NotificationConfig
\</NotificationConfiguration>", "1" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
Notification [ObjectCreatedPut]
[ NotificationConfig (Filter $ FilterKey $ FilterRules
"1" [FilterRule "prefix" "images/",
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue" FilterRule "suffix" ".jpg"])
[ObjectCreatedPut] , NotificationConfig
( Filter $ "" "arn:aws:sqs:us-east-1:356671443308:s3notificationqueue"
FilterKey $ [ObjectCreated] defaultFilter
FilterRules ]
[ FilterRule "prefix" "images/", [ NotificationConfig
FilterRule "suffix" ".jpg" "" "arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
] [ReducedRedundancyLostObject] defaultFilter
), ]
NotificationConfig [ NotificationConfig
"" "ObjectCreatedEvents" "arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
"arn:aws:sqs:us-east-1:356671443308:s3notificationqueue" [ObjectCreated] defaultFilter
[ObjectCreated] ])
defaultFilter ]
]
[ NotificationConfig
""
"arn:aws:sns:us-east-1:356671443308:s3notificationtopic2"
[ReducedRedundancyLostObject]
defaultFilter
]
[ NotificationConfig
"ObjectCreatedEvents"
"arn:aws:lambda:us-west-2:35667example:function:CreateThumbnail"
[ObjectCreated]
defaultFilter
]
)
]
forM_ cases $ \(xmldata, val) -> do forM_ cases $ \(xmldata, val) -> do
result <- runExceptT $ runTestNS $ parseNotification xmldata result <- runExceptT $ runTestNS $ parseNotification xmldata
@ -379,25 +362,20 @@ testParseNotification = do
-- | Tests parsing of both progress and stats -- | Tests parsing of both progress and stats
testParseSelectProgress :: Assertion testParseSelectProgress :: Assertion
testParseSelectProgress = do testParseSelectProgress = do
let cases = let cases = [ ([r|<?xml version="1.0" encoding="UTF-8"?>
[ ( [r|<?xml version="1.0" encoding="UTF-8"?>
<Progress> <Progress>
<BytesScanned>512</BytesScanned> <BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed> <BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned> <BytesReturned>1024</BytesReturned>
</Progress>|], </Progress>|] , Progress 512 1024 1024)
Progress 512 1024 1024 , ([r|<?xml version="1.0" encoding="UTF-8"?>
),
( [r|<?xml version="1.0" encoding="UTF-8"?>
<Stats> <Stats>
<BytesScanned>512</BytesScanned> <BytesScanned>512</BytesScanned>
<BytesProcessed>1024</BytesProcessed> <BytesProcessed>1024</BytesProcessed>
<BytesReturned>1024</BytesReturned> <BytesReturned>1024</BytesReturned>
</Stats>|], </Stats>|], Progress 512 1024 1024)
Progress 512 1024 1024 ]
)
]
forM_ cases $ \(xmldata, progress) -> do forM_ cases $ \(xmldata, progress) -> do
result <- runExceptT $ parseSelectProgress xmldata result <- runExceptT $ parseSelectProgress xmldata
eitherValidationErr result (@?= progress) eitherValidationErr result (@?= progress)

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -14,17 +14,21 @@
-- limitations under the License. -- limitations under the License.
-- --
import qualified Data.ByteString as B import Test.Tasty
import qualified Data.List as L import Test.Tasty.QuickCheck as QC
import Lib.Prelude
import Network.Minio.API.Test import qualified Data.ByteString as B
import Network.Minio.CopyObject import qualified Data.List as L
import Network.Minio.Data
import Network.Minio.Utils.Test import Lib.Prelude
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test import Network.Minio.API.Test
import Test.Tasty import Network.Minio.CopyObject
import Test.Tasty.QuickCheck as QC import Network.Minio.Data
import Network.Minio.PutObject
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
main :: IO () main :: IO ()
main = defaultMain tests main = defaultMain tests
@ -47,87 +51,82 @@ properties = testGroup "Properties" [qcProps] -- [scProps]
-- ] -- ]
qcProps :: TestTree qcProps :: TestTree
qcProps = qcProps = testGroup "(checked by QuickCheck)"
testGroup [ QC.testProperty "selectPartSizes:" $
"(checked by QuickCheck)" \n -> let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
[ QC.testProperty "selectPartSizes:" $
\n ->
let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
-- check that pns increments from 1. -- check that pns increments from 1.
isPNumsAscendingFrom1 = all (uncurry (==)) $ zip pns [1 ..] isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1..]
consPairs [] = []
consPairs [_] = [] consPairs [] = []
consPairs (a : (b : c)) = (a, b) : consPairs (b : c) consPairs [_] = []
consPairs (a:(b:c)) = (a, b):(consPairs (b:c))
-- check `offs` is monotonically increasing. -- check `offs` is monotonically increasing.
isOffsetsAsc = all (uncurry (<)) $ consPairs offs isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs
-- check sizes sums to n. -- check sizes sums to n.
isSumSizeOk = sum sizes == n isSumSizeOk = sum sizes == n
-- check sizes are constant except last -- check sizes are constant except last
isSizesConstantExceptLast = isSizesConstantExceptLast =
all (uncurry (==)) (consPairs $ L.init sizes) all (\(a, b) -> a == b) (consPairs $ L.init sizes)
-- check each part except last is at least minPartSize; -- check each part except last is at least minPartSize;
-- last part may be 0 only if it is the only part. -- last part may be 0 only if it is the only part.
nparts = length sizes nparts = length sizes
isMinPartSizeOk = isMinPartSizeOk =
if if | nparts > 1 -> -- last part can be smaller but > 0
| nparts > 1 -> -- last part can be smaller but > 0 all (>= minPartSize) (take (nparts - 1) sizes) &&
all (>= minPartSize) (take (nparts - 1) sizes) all (\s -> s > 0) (drop (nparts - 1) sizes)
&& all (> 0) (drop (nparts - 1) sizes) | nparts == 1 -> -- size may be 0 here.
| nparts == 1 -> -- size may be 0 here. maybe True (\x -> x >= 0 && x <= minPartSize) $
maybe True (\x -> x >= 0 && x <= minPartSize) $ headMay sizes
listToMaybe sizes | otherwise -> False
| otherwise -> False
in n < 0 in n < 0 ||
|| ( isPNumsAscendingFrom1 (isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk &&
&& isOffsetsAsc isSizesConstantExceptLast && isMinPartSizeOk)
&& isSumSizeOk
&& isSizesConstantExceptLast , QC.testProperty "selectCopyRanges:" $
&& isMinPartSizeOk \(start, end) ->
), let (_, pairs) = L.unzip (selectCopyRanges (start, end))
QC.testProperty "selectCopyRanges:" $
\(start, end) -> -- is last part's snd offset end?
let (_, pairs) = L.unzip (selectCopyRanges (start, end)) isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is last part's snd offset end? -- is first part's fst offset start
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs
-- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ listToMaybe pairs -- each pair is >=64MiB except last, and all those parts
-- each pair is >=64MiB except last, and all those parts -- have same size.
-- have same size. initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs
initSizes = maybe [] (map (\(a, b) -> b - a + 1) . init) (nonEmpty pairs) isPartSizesOk = all (>= minPartSize) initSizes &&
isPartSizesOk = maybe True (\k -> all (== k) initSizes)
all (>= minPartSize) initSizes (headMay initSizes)
&& maybe
True -- returned offsets are contiguous.
(\k -> all (== k) initSizes) fsts = drop 1 $ map fst pairs
(listToMaybe initSizes) snds = take (length pairs - 1) $ map snd pairs
-- returned offsets are contiguous. isContParts = length fsts == length snds &&
fsts = drop 1 $ map fst pairs and (map (\(a, b) -> a == b + 1) $ zip fsts snds)
snds = take (length pairs - 1) $ map snd pairs
isContParts = in start < 0 || start > end ||
length fsts == length snds (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts)
&& all (\(a, b) -> a == b + 1) (zip fsts snds)
in start < 0 , QC.testProperty "mkSSECKey:" $
|| start > end \w8s -> let bs = B.pack w8s
|| (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts), r = mkSSECKey bs
QC.testProperty "mkSSECKey:" $ in case r of
\w8s -> Just _ -> B.length bs == 32
let bs = B.pack w8s
r = mkSSECKey bs
in case r of
Just _ -> B.length bs == 32
Nothing -> B.length bs /= 32 Nothing -> B.length bs /= 32
] ]
unitTests :: TestTree unitTests :: TestTree
unitTests = unitTests = testGroup "Unit tests" [ xmlGeneratorTests, xmlParserTests
testGroup , bucketNameValidityTests
"Unit tests" , objectNameValidityTests
[ xmlGeneratorTests, , parseServerInfoJSONTest
xmlParserTests, , parseHealStatusTest
bucketNameValidityTests, , parseHealStartRespTest
objectNameValidityTests, , limitedMapConcurrentlyTests
parseServerInfoJSONTest, ]
parseHealStatusTest,
parseHealStartRespTest,
limitedMapConcurrentlyTests
]

View File

@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC3G9IiC+adjf0p
i/2KYc+4dizeuzUFN7wraSdhiOMdQgCnu9Dc3t2YEsQhNdrARjOTyXd36KeM3TwI
rPJ61dRGQSuN12l+mzngFJQjE0sysZHUJOLQC3rVvIrHSQ57utPg8ifxt/SunlPY
fhcUcq03onMGq44yOfE6mIhoe0Y9wcPQ3RjjNNS44bgmXiXwa+Do0h2hEn6/essq
5KjHL8WW2vGg7G9edpYdxINA/A2fdLtr8BwPNrZhOx84eee2XcUNdBuTtUUxE+0L
9yRqItqddriRxJFwOXb5OPW8xx2WGaV2a0wbE4gB2PTwwDvfo72mo9HXHZUHM1A8
4TD/RXMbAgMBAAECggEBAJ7r1oUWLyGvinn0tijUm6RNbMQjVvEgXoCO008jr3pF
PqxVpgEMrOa/4tmwFBus0jcCNF4t3r2zhddBw3I5A/O1vEdvHnBz6NdDBQ8sP6fP
1fF50iEe1Y2MBibQkXFxxVMG2QRB1Gt5nuvXA9ELdqtCovK3EsMk5ukkWb/UvjH5
8hcmQsaSqvzFEF4wJSY2mkeGSGIJTphPhhuA22xbhaBMInQyhZu8EHsn0h6s/Wgy
C4Cp2+4qZTKaaf6x3/ZjJ8CuKiSX+ZsJKjOEv8sqx7j/Y7QFOmJPewInKDhwazr/
xIK+N0KXPbUzeSEz6ZvExNDTxtR5ZlQP2UrRDg28yQECgYEA4Is1O2BvKVzNFOkj
bTVz25a/bb0Xrcfgi0Y9rdfLzlNdItFjAkxLTVRSW2Hv9ICl0RDDAG+wTlktXRdh
rfvDjwG2CvLQo1VEdMWTTkKVg03SwMEy2hFiWV69lENFGSaY8Y6unZDbia5HQinA
EgSS4sCojS+a2jtzG5FVVHJDKlkCgYEA0MKhMhD4SUhr2y1idPBrmLxuW5mVozuW
8bYaBeSzmfS0BRsN4fP9JGODPBPDdNbfGfGC9ezWLgD/lmCgjIEyBOq8EmqWSsiS
Kihds1+Z7hXtbzGsFGAFJJTIh7blBCsK5QFuyuih2UG0fL9z6K/dy+UUJkzrYqph
vSfKixyM8pMCgYEAmUPLsNyw4325aeV8TeWnUCJERaZFDFQa21W1cfyS2yEhuEtN
llr3JzBACqn9vFk3VU1onNqfb8sE4L696KCpKeqUFEMK0AG6eS4Gzus53Gb5TKJS
kHA/PhshsZp9Bp7G1FJ8s4YVo5N2hh2zQVkn3Wh9Y+kzfHQJrK51nO9lEvkCgYBi
BuKWle1gzAcJdnhDHRoJMIJJtQbVDYhFnBMALXJAmu1lcFzGe0GlMq1PKqCfXr6I
eiXawQmZtJJP1LPPBmOsd2U06KQGHcS00xucvQmVCOrjSdnZ/3SqxsqbH8DOgj+t
ZUzXLwHA+N99rJEK9Hob4kfh7ECjpgobPnIXfKKazQKBgQChAuiXHtf/Qq18hY3u
x48zFWjGgfd6GpOBZYkXOwGdCJgnYjZbE26LZEnYbwPh8ZUA2vp7mgHRJkD5e3Fj
ERuJLCw86WqyYZmLEuBciYGjCZqR5nbavfwsziWD00jeNruds2ZwKxRfFm4V7o2S
WLd/RUatd2Uu9f3B2J78OUdnxg==
-----END PRIVATE KEY-----

View File

@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDCzCCAfOgAwIBAgIUaIUOMI78LCu+r1zl0mmFHK8n5/AwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE5MTAyNDE5NTMxOVoYDzIxMTkw
OTMwMTk1MzE5WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC3G9IiC+adjf0pi/2KYc+4dizeuzUFN7wraSdhiOMd
QgCnu9Dc3t2YEsQhNdrARjOTyXd36KeM3TwIrPJ61dRGQSuN12l+mzngFJQjE0sy
sZHUJOLQC3rVvIrHSQ57utPg8ifxt/SunlPYfhcUcq03onMGq44yOfE6mIhoe0Y9
wcPQ3RjjNNS44bgmXiXwa+Do0h2hEn6/essq5KjHL8WW2vGg7G9edpYdxINA/A2f
dLtr8BwPNrZhOx84eee2XcUNdBuTtUUxE+0L9yRqItqddriRxJFwOXb5OPW8xx2W
GaV2a0wbE4gB2PTwwDvfo72mo9HXHZUHM1A84TD/RXMbAgMBAAGjUzBRMB0GA1Ud
DgQWBBSEWXQ2JRD+OK7/KTmlD+OW16pGmzAfBgNVHSMEGDAWgBSEWXQ2JRD+OK7/
KTmlD+OW16pGmzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCF
0zYRaabB3X0jzGI9/Lr3Phrb90GvoL1DFLRuiOuTlDkz0vrm/HrZskwHCgMNrkCj
OTD9Vpas4D1QZBbQbRzfnf3OOoG4bgmcCwLFZl3dy27yIDAhrmbUP++g9l1Jmy4v
vBR/M4lt2scQ8LcZYEPqhEaE5EzFQEjtaxDcKdWDNKY9W1NUzSIABhF9eHiAUNdH
AFNJlYeBlCHxcWIeqgon184Dqp/CsvKtz3z3Ni+rlwPM/zuJCFHh1VF+z++0LJjG
roBCV0Tro4XyiEz9yp7Cb5kQYMaj1KL9TqBG0tZx0pmv7y+lXc4TT6DEllXz6USy
rbIba9/uUet3BqeIMTqj
-----END CERTIFICATE-----