Merge branch 'master' into uni2work

This commit is contained in:
Sarah Vaupel 2024-01-18 02:22:46 +01:00
commit 3dcb276521
58 changed files with 2341 additions and 1404 deletions

230
.github/workflows/ci.yml vendored Normal file
View File

@ -0,0 +1,230 @@
name: CI

# Trigger the workflow on push or pull request, but only for the master branch
on:
  pull_request:
    branches: [master]
  push:
    branches: [master]

# This ensures that previous jobs for the PR are canceled when the PR is
# updated. NOTE: github.head_ref is only set on pull_request events; fall
# back to github.ref so push builds don't all collapse into one group.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

# Env vars for tests
env:
  MINIO_ACCESS_KEY: minio
  MINIO_SECRET_KEY: minio123
  MINIO_LOCAL: 1
  MINIO_SECURE: 1

jobs:
  # Code-formatting gate: fails if the tree is not ormolu-formatted.
  ormolu:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: haskell-actions/run-ormolu@v12
        with:
          version: "0.5.0.1"

  # Lint gate over library, tests and examples.
  hlint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: 'Set up HLint'
        # haskell/actions is archived; haskell-actions is its successor org.
        uses: haskell-actions/hlint-setup@v2
        with:
          version: '3.5'
      - name: 'Run HLint'
        uses: haskell-actions/hlint-run@v2
        with:
          path: '["src/", "test/", "examples"]'
          fail-on: warning

  # Build + test with cabal across an OS/GHC/cabal matrix.
  cabal:
    name: ${{ matrix.os }} / ghc-${{ matrix.ghc }} / cabal-${{ matrix.cabal }}
    runs-on: ${{ matrix.os }}
    needs: ormolu
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest] # Removed macos-latest due to cert issues.
        cabal: ["3.6", "3.8", "latest"]
        ghc:
          - "9.4"
          - "9.2"
          - "9.0"
          - "8.10"
          - "8.8"
          - "8.6"
        exclude:
          # cabal 3.6 cannot drive GHC 9.4 on Windows.
          - os: windows-latest
            ghc: "9.4"
            cabal: "3.6"
    steps:
      - uses: actions/checkout@v3
        if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
      - uses: haskell-actions/setup@v2
        id: setup-haskell-cabal
        name: Setup Haskell
        with:
          ghc-version: ${{ matrix.ghc }}
          cabal-version: ${{ matrix.cabal }}
      - name: Configure
        run: |
          cabal configure --enable-tests --enable-benchmarks --test-show-details=direct -fexamples -fdev -flive-test
      - name: Freeze
        run: |
          cabal freeze
      - uses: actions/cache@v3
        name: Cache ~/.cabal/packages, ~/.cabal/store and dist-newstyle
        with:
          path: |
            ~/.cabal/packages
            ~/.cabal/store
            dist-newstyle
          key: ${{ runner.os }}-${{ matrix.ghc }}-${{ hashFiles('**/*.cabal', '**/cabal.project', '**/cabal.project.freeze') }}
          restore-keys: ${{ runner.os }}-${{ matrix.ghc }}-
      - name: Install dependencies
        run: |
          cabal build --only-dependencies
      - name: Build
        run: |
          cabal build
      - name: Setup MinIO for testing (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
          sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
          sudo update-ca-certificates
      - name: Setup MinIO for testing (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
          sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
      - name: Setup MinIO for testing (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
          Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
          Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
          Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
      - name: Test (Non-Windows)
        if: matrix.os != 'windows-latest'
        run: |
          /tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
          ghc --version
          cabal --version
          cabal test
      - name: Test (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
          ghc --version
          cabal --version
          cabal test

  # Build + test with stack on Linux for a few GHC versions.
  stack:
    name: stack / ghc ${{ matrix.ghc }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        ghc:
          - "8.10.7"
          - "9.0.2"
          - "9.2.4"
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v3
        if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
      - uses: haskell-actions/setup@v2
        name: Setup Haskell Stack
        with:
          enable-stack: true
          ghc-version: ${{ matrix.ghc }}
          stack-version: 'latest'
      - uses: actions/cache@v3
        name: Cache ~/.stack
        with:
          path: ~/.stack
          key: ${{ runner.os }}-stack-global-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}
          restore-keys: |
            ${{ runner.os }}-stack-global-
      - uses: actions/cache@v3
        name: Cache .stack-work
        with:
          path: .stack-work
          key: ${{ runner.os }}-stack-work-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}-${{ hashFiles('**/*.hs') }}
          restore-keys: |
            ${{ runner.os }}-stack-work-
      - name: Install dependencies
        run: |
          stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --only-dependencies
      - name: Build
        run: |
          stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples --flag minio-hs:live-test --flag minio-hs:dev
      - name: Setup MinIO for testing (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
          sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
          sudo update-ca-certificates
      - name: Setup MinIO for testing (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
          sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
      - name: Setup MinIO for testing (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
          Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
          Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
          Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
      - name: Test (Non-Windows)
        if: matrix.os != 'windows-latest'
        run: |
          /tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
          ghc --version
          stack --version
          stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev
      - name: Test (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
          ghc --version
          # Was `cabal --version` — copy-paste slip from the cabal job; this
          # is the stack job (and stack runs the tests below).
          stack --version
          stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev

View File

@ -1,122 +0,0 @@
# Legacy cabal CI workflow (removed in this commit in favor of ci.yml).
# Indentation restored; content otherwise unchanged.
name: Haskell CI (Cabal)

on:
  schedule:
    # Run every weekday
    - cron: '0 0 * * 1-5'
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  cabal-build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        ghc: ['8.4', '8.6', '8.8', '8.10']
        cabal: ['3.2']
        os: [ubuntu-latest, macOS-latest]
        experimental: [false]
        include:
          - ghc: '8.6'
            cabal: '3.2'
            os: windows-latest
            experimental: false
          - ghc: '8.10'
            cabal: '3.2'
            os: windows-latest
            experimental: false
          # Appears to be buggy to build in windows with ghc 8.4 and 8.8
          - ghc: '8.4'
            cabal: '3.2'
            os: windows-latest
            experimental: true
          - ghc: '8.8'
            cabal: '3.2'
            os: windows-latest
            experimental: true
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-haskell@v1.1
        with:
          ghc-version: ${{ matrix.ghc }}
          cabal-version: ${{ matrix.cabal }}
      - name: Cache
        uses: actions/cache@v2
        env:
          cache-name: cabal-cache-${{ matrix.ghc }}-${{ matrix.cabal }}
        with:
          path: |
            ~/.cabal
            ~/.stack
            %appdata%\cabal
            %LOCALAPPDATA%\Programs\stack
          key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
          restore-keys: |
            ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
            ${{ runner.os }}-build-${{ env.cache-name }}-
            ${{ runner.os }}-build-
            ${{ runner.os }}-
      - name: Before install (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
          sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
          sudo update-ca-certificates
      - name: Before install (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
          sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
      - name: Before install (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
          Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
          Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
          Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
      - name: Install dependencies, build and test (Non-Windows)
        if: matrix.os != 'windows-latest'
        env:
          MINIO_ACCESS_KEY: minio
          MINIO_SECRET_KEY: minio123
          MINIO_LOCAL: 1
          MINIO_SECURE: 1
        continue-on-error: ${{ matrix.experimental }}
        run: |
          /tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
          ghc --version
          cabal --version
          cabal new-update
          cabal new-build --enable-tests --enable-benchmarks -fexamples
          cabal new-test --enable-tests -flive-test
      - name: Install dependencies, build and test (Windows)
        if: matrix.os == 'windows-latest'
        env:
          MINIO_ACCESS_KEY: minio
          MINIO_SECRET_KEY: minio123
          MINIO_LOCAL: 1
          MINIO_SECURE: 1
        continue-on-error: ${{ matrix.experimental }}
        run: |
          Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
          ghc --version
          cabal --version
          cabal new-update
          cabal new-build --enable-tests --enable-benchmarks -fexamples
          cabal new-test --enable-tests -flive-test

View File

@ -1,108 +0,0 @@
# Legacy stack CI workflow (removed in this commit in favor of ci.yml).
# Indentation restored; content otherwise unchanged.
name: Haskell CI (Stack)

on:
  schedule:
    # Run every weekday
    - cron: '0 0 * * 1-5'
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  stack-build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        ghc: ['8.8']
        cabal: ['3.2']
        os: [ubuntu-latest, macOS-latest]
        experimental: [false]
        include:
          # Appears to be buggy to build in windows with ghc 8.8
          - ghc: '8.8'
            cabal: '3.2'
            os: windows-latest
            experimental: true
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-haskell@v1.1
        with:
          ghc-version: ${{ matrix.ghc }}
          cabal-version: ${{ matrix.cabal }}
          enable-stack: true
      - name: Cache
        uses: actions/cache@v2
        env:
          cache-name: stack-cache-${{ matrix.ghc }}-${{ matrix.cabal }}
        with:
          path: |
            ~/.cabal
            ~/.stack
            %appdata%\cabal
            %LOCALAPPDATA%\Programs\stack
          key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
          restore-keys: |
            ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
            ${{ runner.os }}-build-${{ env.cache-name }}-
            ${{ runner.os }}-build-
            ${{ runner.os }}-
      - name: Before install (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
          sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
          sudo update-ca-certificates
      - name: Before install (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          mkdir -p /tmp/minio /tmp/minio-config/certs
          cp test/cert/* /tmp/minio-config/certs/
          (cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
          sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
      - name: Before install (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
          Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
          Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
          Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
      - name: Install dependencies, build and test (Non-Windows)
        if: matrix.os != 'windows-latest'
        env:
          MINIO_ACCESS_KEY: minio
          MINIO_SECRET_KEY: minio123
          MINIO_LOCAL: 1
          MINIO_SECURE: 1
        continue-on-error: ${{ matrix.experimental }}
        run: |
          /tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
          ghc --version
          stack --version
          stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
          stack test --system-ghc --flag minio-hs:live-test
      - name: Install dependencies, build and test (Windows)
        if: matrix.os == 'windows-latest'
        env:
          MINIO_ACCESS_KEY: minio
          MINIO_SECRET_KEY: minio123
          MINIO_LOCAL: 1
          MINIO_SECURE: 1
        continue-on-error: ${{ matrix.experimental }}
        run: |
          Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
          ghc --version
          stack --version
          stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
          stack test --system-ghc --flag minio-hs:live-test

View File

@ -1,61 +0,0 @@
# Legacy Travis CI configuration (removed in this commit).
# Indentation restored; content otherwise unchanged.
sudo: true
language: haskell
git:
  depth: 5
cabal: "3.0"
cache:
  directories:
    - "$HOME/.cabal/store"
    - "$HOME/.stack"
    - "$TRAVIS_BUILD_DIR/.stack-work"
matrix:
  include:
    # Cabal
    - ghc: 8.4.4
    - ghc: 8.6.5
    - ghc: 8.8.3
    # Stack
    - ghc: 8.6.5
      env: STACK_YAML="$TRAVIS_BUILD_DIR/stack.yaml"
before_install:
  - sudo apt-get install devscripts
  - mkdir /tmp/minio /tmp/certs
  - (cd /tmp/minio; wget https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
  - (cd /tmp/certs; openssl req -newkey rsa:2048 -nodes -keyout private.key -x509 -days 36500 -out public.crt -subj "/C=US/ST=NRW/L=Earth/O=CompanyName/OU=IT/CN=localhost/emailAddress=email@example.com")
  - sudo cp /tmp/certs/public.crt /usr/local/share/ca-certificates/
  - sudo update-ca-certificates
  - MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 /tmp/minio/minio server --quiet --certs-dir /tmp/certs data 2>&1 > minio.log &
install:
  - |
    if [ -z "$STACK_YAML" ]; then
      ghc --version
      cabal --version
      cabal new-update
      cabal new-build --enable-tests --enable-benchmarks -fexamples
    else
      # install stack
      curl -sSL https://get.haskellstack.org/ | sh
      # build project with stack
      stack --version
      stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
    fi
script:
  - |
    if [ -z "$STACK_YAML" ]; then
      MINIO_LOCAL=1 MINIO_SECURE=1 cabal new-test --enable-tests -flive-test
    else
      MINIO_LOCAL=1 MINIO_SECURE=1 stack test --system-ghc --flag minio-hs:live-test
    fi
notifications:
  email: false

View File

@ -1,6 +1,37 @@
Changelog Changelog
========== ==========
## Version 1.7.0 -- Unreleased
* Fix data type `EventMessage` to not export partial fields (#179)
* Bump up min bound on time dep and fix deprecation warnings (#181)
* Add `dev` flag to cabal for building with warnings as errors (#182)
* Fix AWS region map (#185)
* Fix XML generator tests (#187)
* Add support for STS Assume Role API (#188)
### Breaking changes in 1.7.0
* `Credentials` type has been removed. Use `CredentialValue` instead.
* `Provider` type has been replaced with `CredentialLoader`.
* `EventMessage` data type is updated.
## Version 1.6.0
* HLint fixes - some types were changed to newtype (#173)
* Fix XML generation test for S3 SELECT (#161)
* Use region specific endpoints for AWS S3 in presigned Urls (#164)
* Replace protolude with relude and build with GHC 9.0.2 (#168)
* Support aeson 2 (#169)
* CI updates and code formatting changes with ormolu 0.5.0.0
## Version 1.5.3
* Fix windows build
* Fix support for Yandex Storage (#147)
* Fix for HEAD requests to S3/Minio (#155)
* Bump up some dependencies, new code formatting, Github CI, example fixes and other minor improvements.
## Version 1.5.2 ## Version 1.5.2
* Fix region `us-west-2` for AWS S3 (#139) * Fix region `us-west-2` for AWS S3 (#139)

View File

@ -1,10 +1,8 @@
# MinIO Client SDK for Haskell [![Build Status](https://travis-ci.org/minio/minio-hs.svg?branch=master)](https://travis-ci.org/minio/minio-hs)[![Hackage](https://img.shields.io/hackage/v/minio-hs.svg)](https://hackage.haskell.org/package/minio-hs)[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) # MinIO Haskell Client SDK for Amazon S3 Compatible Cloud Storage [![CI](https://github.com/minio/minio-hs/actions/workflows/ci.yml/badge.svg)](https://github.com/minio/minio-hs/actions/workflows/ci.yml)[![Hackage](https://img.shields.io/hackage/v/minio-hs.svg)](https://hackage.haskell.org/package/minio-hs)[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and Amazon S3 compatible object storage server. The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and any Amazon S3 compatible object storage.
## Minimum Requirements This guide assumes that you have a working [Haskell development environment](https://www.haskell.org/downloads/).
- The Haskell [stack](https://docs.haskellstack.org/en/stable/README/)
## Installation ## Installation
@ -12,20 +10,35 @@ The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.
Simply add `minio-hs` to your project's `.cabal` dependencies section or if you are using hpack, to your `package.yaml` file as usual. Simply add `minio-hs` to your project's `.cabal` dependencies section or if you are using hpack, to your `package.yaml` file as usual.
### Try it out directly with `ghci` ### Try it out in a [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop)
#### For a cabal based environment
Download the library source and change to the extracted directory:
``` sh
$ cabal get minio-hs
$ cd minio-hs-1.6.0/ # directory name could be different
```
Then load the `ghci` REPL environment with the library and browse the available APIs:
``` sh
$ cabal repl
ghci> :browse Network.Minio
```
#### For a stack based environment
From your home folder or any non-haskell project directory, just run: From your home folder or any non-haskell project directory, just run:
```sh ```sh
stack install minio-hs stack install minio-hs
``` ```
Then start an interpreter session and browse the available APIs with: Then start an interpreter session and browse the available APIs with:
```sh ```sh
$ stack ghci $ stack ghci
> :browse Network.Minio > :browse Network.Minio
``` ```
@ -134,44 +147,52 @@ main = do
### Development ### Development
To setup: #### Download the source
```sh ```sh
git clone https://github.com/minio/minio-hs.git $ git clone https://github.com/minio/minio-hs.git
$ cd minio-hs/
```
cd minio-hs/ #### Build the package:
stack install With `cabal`:
```
Tests can be run with:
```sh ```sh
$ # Configure cabal for development enabling all optional flags defined by the package.
stack test $ cabal configure --enable-tests --test-show-details=direct -fexamples -fdev -flive-test
$ cabal build
``` ```
A section of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server maybe slow. To run the tests against a locally running MinIO live server at `http://localhost:9000`, just set the environment `MINIO_LOCAL` to any value (and unset it to switch back to Play). With `stack`:
To run the live server tests, set a build flag as shown below: ``` sh
$ stack build --test --no-run-tests --flag minio-hs:live-test --flag minio-hs:dev --flag minio-hs:examples
```
#### Running tests:
A section of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server maybe slow. To run the tests against a locally running MinIO live server at `http://localhost:9000` with the credentials `access_key=minio` and `secret_key=minio123`, just set the environment `MINIO_LOCAL` to any value (and unset it to switch back to Play).
With `cabal`:
```sh ```sh
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
stack test --flag minio-hs:live-test $ cabal test
# OR against a local MinIO server with:
MINIO_LOCAL=1 stack test --flag minio-hs:live-test
``` ```
The configured CI system always runs both test-suites for every change. With `stack`:
Documentation can be locally built with: ``` sh
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
stack test --flag minio-hs:live-test --flag minio-hs:dev
```
This will run all the test suites.
#### Building documentation:
```sh ```sh
$ cabal haddock
stack haddock $ # OR
$ stack haddock
``` ```

View File

@ -1,19 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
import Distribution.Simple
main = defaultMain

47
examples/AssumeRole.hs Normal file
View File

@ -0,0 +1,47 @@
--
-- MinIO Haskell SDK, (C) 2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}
import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude
main :: IO ()
main = do
-- Use play credentials for example.
let assumeRole =
STSAssumeRole
( CredentialValue
"Q3AM3UQ867SPQQA43P2F"
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
Nothing
)
$ defaultSTSAssumeRoleOptions
{ saroLocation = Just "us-east-1",
saroEndpoint = Just "https://play.min.io:9000"
}
-- Retrieve temporary credentials and print them.
cv <- requestSTSCredential assumeRole
print $ "Temporary credentials" ++ show (credentialValueText $ fst cv)
print $ "Expiry" ++ show (snd cv)
-- Configure 'ConnectInfo' to request temporary credentials on demand.
ci <- setSTSCredential assumeRole "https://play.min.io"
res <- runMinio ci $ do
buckets <- listBuckets
liftIO $ print $ "Top 5 buckets: " ++ show (take 5 buckets)
print res

View File

@ -19,7 +19,6 @@
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ScopedTypeVariables #-}
import Data.Monoid ((<>))
import Data.Text (pack) import Data.Text (pack)
import Network.Minio import Network.Minio
import Options.Applicative import Options.Applicative
@ -71,5 +70,5 @@ main = do
fPutObject bucket object filepath defaultPutObjectOptions fPutObject bucket object filepath defaultPutObjectOptions
case res of case res of
Left e -> putStrLn $ "file upload failed due to " ++ (show e) Left e -> putStrLn $ "file upload failed due to " ++ show e
Right () -> putStrLn "file upload succeeded." Right () -> putStrLn "file upload succeeded."

View File

@ -16,7 +16,6 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Network.Minio.AdminAPI import Network.Minio.AdminAPI
@ -25,6 +24,7 @@ import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <-
runMinio minioPlayCI $ runMinio
minioPlayCI
getConfig getConfig
print res print res

View File

@ -37,5 +37,5 @@ main = do
C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object" C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object"
case res of case res of
Left e -> putStrLn $ "getObject failed." ++ (show e) Left e -> putStrLn $ "getObject failed." ++ show e
Right _ -> putStrLn "getObject succeeded." Right _ -> putStrLn "getObject succeeded."

View File

@ -16,7 +16,6 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Network.Minio.AdminAPI import Network.Minio.AdminAPI

View File

@ -34,9 +34,9 @@ main = do
-- Performs a recursive listing of incomplete uploads under bucket "test" -- Performs a recursive listing of incomplete uploads under bucket "test"
-- on a local minio server. -- on a local minio server.
res <- res <-
runMinio minioPlayCI runMinio minioPlayCI $
$ runConduit runConduit $
$ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v)) listIncompleteUploads bucket Nothing True .| mapM_C (liftIO . print)
print res print res
{- {-

View File

@ -34,9 +34,9 @@ main = do
-- Performs a recursive listing of all objects under bucket "test" -- Performs a recursive listing of all objects under bucket "test"
-- on play.min.io. -- on play.min.io.
res <- res <-
runMinio minioPlayCI runMinio minioPlayCI $
$ runConduit runConduit $
$ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v)) listObjects bucket Nothing True .| mapM_C (liftIO . print)
print res print res
{- {-

View File

@ -46,7 +46,7 @@ main = do
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..." liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..."
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
liftIO $ putStrLn $ "Done. Object created at: my-bucket/my-object" liftIO $ putStrLn "Done. Object created at: my-bucket/my-object"
-- Extract Etag of uploaded object -- Extract Etag of uploaded object
oi <- statObject bucket object defaultGetObjectOptions oi <- statObject bucket object defaultGetObjectOptions
@ -77,7 +77,8 @@ main = do
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"] let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd = curlCmd =
B.intercalate " " $ B.intercalate " " $
["curl --fail"] ++ map hdrOpt headers ["curl --fail"]
++ map hdrOpt headers
++ ["-o /tmp/myfile", B.concat ["'", url, "'"]] ++ ["-o /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ putStrLn $

View File

@ -55,7 +55,7 @@ main = do
] ]
case policyE of case policyE of
Left err -> putStrLn $ show err Left err -> print err
Right policy -> do Right policy -> do
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
(url, formData) <- presignedPostPolicy policy (url, formData) <- presignedPostPolicy policy
@ -73,13 +73,15 @@ main = do
] ]
formOptions = B.intercalate " " $ map formFn $ H.toList formData formOptions = B.intercalate " " $ map formFn $ H.toList formData
return $ B.intercalate " " $ return $
["curl", formOptions, "-F file=@/tmp/photo.jpg", url] B.intercalate
" "
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
case res of case res of
Left e -> putStrLn $ "post-policy error: " ++ (show e) Left e -> putStrLn $ "post-policy error: " ++ show e
Right cmd -> do Right cmd -> do
putStrLn $ "Put a photo at /tmp/photo.jpg and run command:\n" putStrLn "Put a photo at /tmp/photo.jpg and run command:\n"
-- print the generated curl command -- print the generated curl command
Char8.putStrLn cmd Char8.putStrLn cmd

View File

@ -48,7 +48,8 @@ main = do
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"] let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
curlCmd = curlCmd =
B.intercalate " " $ B.intercalate " " $
["curl "] ++ map hdrOpt headers ["curl "]
++ map hdrOpt headers
++ ["-T /tmp/myfile", B.concat ["'", url, "'"]] ++ ["-T /tmp/myfile", B.concat ["'", url, "'"]]
putStrLn $ putStrLn $

View File

@ -19,7 +19,7 @@
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE OverloadedStrings #-}
import qualified Conduit as C import qualified Conduit as C
import Control.Monad (when) import Control.Monad (unless)
import Network.Minio import Network.Minio
import Prelude import Prelude
@ -35,7 +35,7 @@ main = do
res <- runMinio minioPlayCI $ do res <- runMinio minioPlayCI $ do
exists <- bucketExists bucket exists <- bucketExists bucket
when (not exists) $ unless exists $
makeBucket bucket Nothing makeBucket bucket Nothing
C.liftIO $ putStrLn "Uploading csv object" C.liftIO $ putStrLn "Uploading csv object"

View File

@ -16,7 +16,6 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Network.Minio.AdminAPI import Network.Minio.AdminAPI
@ -25,6 +24,7 @@ import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <-
runMinio minioPlayCI $ runMinio
minioPlayCI
getServerInfo getServerInfo
print res print res

View File

@ -16,7 +16,6 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Network.Minio.AdminAPI import Network.Minio.AdminAPI

View File

@ -16,7 +16,6 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Network.Minio.AdminAPI import Network.Minio.AdminAPI

View File

@ -16,7 +16,6 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE OverloadedStrings #-}
import Network.Minio import Network.Minio
import Network.Minio.AdminAPI import Network.Minio.AdminAPI
@ -25,6 +24,7 @@ import Prelude
main :: IO () main :: IO ()
main = do main = do
res <- res <-
runMinio minioPlayCI $ runMinio
minioPlayCI
serviceStatus serviceStatus
print res print res

View File

@ -1,6 +1,6 @@
cabal-version: 2.2 cabal-version: 2.4
name: minio-hs name: minio-hs
version: 1.5.2 version: 1.7.0
synopsis: A MinIO Haskell Library for Amazon S3 compatible cloud synopsis: A MinIO Haskell Library for Amazon S3 compatible cloud
storage. storage.
description: The MinIO Haskell client library provides simple APIs to description: The MinIO Haskell client library provides simple APIs to
@ -14,29 +14,70 @@ maintainer: dev@min.io
category: Network, AWS, Object Storage category: Network, AWS, Object Storage
build-type: Simple build-type: Simple
stability: Experimental stability: Experimental
extra-source-files: extra-doc-files:
CHANGELOG.md CHANGELOG.md
CONTRIBUTING.md CONTRIBUTING.md
docs/API.md docs/API.md
examples/*.hs
README.md README.md
extra-source-files:
examples/*.hs
stack.yaml stack.yaml
tested-with: GHC == 8.6.5
, GHC == 8.8.4
, GHC == 8.10.7
, GHC == 9.0.2
, GHC == 9.2.7
, GHC == 9.4.5
source-repository head
type: git
location: https://github.com/minio/minio-hs.git
Flag dev
Description: Build package in development mode
Default: False
Manual: True
common base-settings common base-settings
ghc-options: -Wall ghc-options: -Wall
-Wcompat
-Widentities
-Wincomplete-uni-patterns
-Wincomplete-record-updates
-haddock
if impl(ghc >= 8.0)
ghc-options: -Wredundant-constraints
if impl(ghc >= 8.2)
ghc-options: -fhide-source-paths
if impl(ghc >= 8.4)
ghc-options: -Wpartial-fields
-- -Wmissing-export-lists
if impl(ghc >= 8.8)
ghc-options: -Wmissing-deriving-strategies
-Werror=missing-deriving-strategies
-- if impl(ghc >= 8.10)
-- ghc-options: -Wunused-packages -- disabled due to bug related to mixin config
if impl(ghc >= 9.0)
ghc-options: -Winvalid-haddock
if impl(ghc >= 9.2)
ghc-options: -Wredundant-bang-patterns
if flag(dev)
ghc-options: -Werror
default-language: Haskell2010 default-language: Haskell2010
default-extensions: BangPatterns default-extensions: BangPatterns
, DerivingStrategies
, FlexibleContexts , FlexibleContexts
, FlexibleInstances , FlexibleInstances
, LambdaCase
, MultiParamTypeClasses , MultiParamTypeClasses
, MultiWayIf , MultiWayIf
, NoImplicitPrelude
, OverloadedStrings , OverloadedStrings
, RankNTypes , RankNTypes
, ScopedTypeVariables , ScopedTypeVariables
, TypeFamilies
, TupleSections , TupleSections
other-modules: Lib.Prelude other-modules: Lib.Prelude
, Network.Minio.API , Network.Minio.API
, Network.Minio.APICommon , Network.Minio.APICommon
@ -54,10 +95,19 @@ common base-settings
, Network.Minio.Utils , Network.Minio.Utils
, Network.Minio.XmlGenerator , Network.Minio.XmlGenerator
, Network.Minio.XmlParser , Network.Minio.XmlParser
, Network.Minio.XmlCommon
, Network.Minio.JsonParser , Network.Minio.JsonParser
, Network.Minio.Credentials.Types
, Network.Minio.Credentials.AssumeRole
, Network.Minio.Credentials
mixins: base hiding (Prelude)
, relude (Relude as Prelude)
, relude
build-depends: base >= 4.7 && < 5 build-depends: base >= 4.7 && < 5
, protolude >= 0.3 && < 0.4 , relude >= 0.7 && < 2
, aeson >= 1.2 , aeson >= 1.2 && < 3
, base64-bytestring >= 1.0 , base64-bytestring >= 1.0
, binary >= 0.8.5.0 , binary >= 0.8.5.0
, bytestring >= 0.10 , bytestring >= 0.10
@ -69,7 +119,6 @@ common base-settings
, cryptonite-conduit >= 0.2 , cryptonite-conduit >= 0.2
, digest >= 0.0.1 , digest >= 0.0.1
, directory , directory
, exceptions
, filepath >= 1.4 , filepath >= 1.4
, http-client >= 0.5 , http-client >= 0.5
, http-client-tls , http-client-tls
@ -77,11 +126,12 @@ common base-settings
, http-types >= 0.12 , http-types >= 0.12
, ini , ini
, memory >= 0.14 , memory >= 0.14
, raw-strings-qq >= 1 , network-uri
, resourcet >= 1.2 , resourcet >= 1.2
, retry , retry
, text >= 1.2 , text >= 1.2
, time >= 1.8 , time >= 1.9
, time-units ^>= 1.0.0
, transformers >= 0.5 , transformers >= 0.5
, unliftio >= 0.2 && < 0.3 , unliftio >= 0.2 && < 0.3
, unliftio-core >= 0.2 && < 0.3 , unliftio-core >= 0.2 && < 0.3
@ -115,7 +165,9 @@ test-suite minio-hs-live-server-test
, Network.Minio.Utils.Test , Network.Minio.Utils.Test
, Network.Minio.XmlGenerator.Test , Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser.Test , Network.Minio.XmlParser.Test
, Network.Minio.Credentials
build-depends: minio-hs build-depends: minio-hs
, raw-strings-qq
, tasty , tasty
, tasty-hunit , tasty-hunit
, tasty-quickcheck , tasty-quickcheck
@ -130,6 +182,7 @@ test-suite minio-hs-test
hs-source-dirs: test, src hs-source-dirs: test, src
main-is: Spec.hs main-is: Spec.hs
build-depends: minio-hs build-depends: minio-hs
, raw-strings-qq
, QuickCheck , QuickCheck
, tasty , tasty
, tasty-hunit , tasty-hunit
@ -146,6 +199,7 @@ test-suite minio-hs-test
, Network.Minio.Utils.Test , Network.Minio.Utils.Test
, Network.Minio.XmlGenerator.Test , Network.Minio.XmlGenerator.Test
, Network.Minio.XmlParser.Test , Network.Minio.XmlParser.Test
, Network.Minio.Credentials
Flag examples Flag examples
Description: Build the examples Description: Build the examples
@ -292,6 +346,7 @@ executable SetConfig
scope: private scope: private
main-is: SetConfig.hs main-is: SetConfig.hs
source-repository head executable AssumeRole
type: git import: examples-settings
location: https://github.com/minio/minio-hs scope: private
main-is: AssumeRole.hs

View File

@ -20,6 +20,7 @@ module Lib.Prelude
showBS, showBS,
toStrictBS, toStrictBS,
fromStrictBS, fromStrictBS,
lastMay,
) )
where where
@ -29,14 +30,6 @@ import Data.Time as Exports
( UTCTime (..), ( UTCTime (..),
diffUTCTime, diffUTCTime,
) )
import Protolude as Exports hiding
( Handler,
catch,
catches,
throwIO,
try,
yield,
)
import UnliftIO as Exports import UnliftIO as Exports
( Handler, ( Handler,
catch, catch,
@ -58,3 +51,6 @@ toStrictBS = LB.toStrict
fromStrictBS :: ByteString -> LByteString fromStrictBS :: ByteString -> LByteString
fromStrictBS = LB.fromStrict fromStrictBS = LB.fromStrict
lastMay :: [a] -> Maybe a
lastMay a = last <$> nonEmpty a

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -16,7 +16,7 @@
-- | -- |
-- Module: Network.Minio -- Module: Network.Minio
-- Copyright: (c) 2017-2019 MinIO Dev Team -- Copyright: (c) 2017-2023 MinIO Dev Team
-- License: Apache 2.0 -- License: Apache 2.0
-- Maintainer: MinIO Dev Team <dev@min.io> -- Maintainer: MinIO Dev Team <dev@min.io>
-- --
@ -24,13 +24,17 @@
-- storage servers like MinIO. -- storage servers like MinIO.
module Network.Minio module Network.Minio
( -- * Credentials ( -- * Credentials
Credentials (..), CredentialValue (..),
credentialValueText,
AccessKey (..),
SecretKey (..),
SessionToken (..),
-- ** Credential providers -- ** Credential Loaders
-- | Run actions that retrieve 'Credentials' from the environment or -- | Run actions that retrieve 'CredentialValue's from the environment or
-- files or other custom sources. -- files or other custom sources.
Provider, CredentialLoader,
fromAWSConfigFile, fromAWSConfigFile,
fromAWSEnv, fromAWSEnv,
fromMinioEnv, fromMinioEnv,
@ -55,7 +59,17 @@ module Network.Minio
awsCI, awsCI,
gcsCI, gcsCI,
-- ** STS Credential types
STSAssumeRole (..),
STSAssumeRoleOptions (..),
defaultSTSAssumeRoleOptions,
requestSTSCredential,
setSTSCredential,
ExpiryTime (..),
STSCredentialProvider,
-- * Minio Monad -- * Minio Monad
---------------- ----------------
-- | The Minio Monad provides connection-reuse, bucket-location -- | The Minio Monad provides connection-reuse, bucket-location
@ -225,15 +239,15 @@ This module exports the high-level MinIO API for object storage.
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import Lib.Prelude import Network.Minio.API
import Network.Minio.CopyObject import Network.Minio.CopyObject
import Network.Minio.Credentials
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.Errors import Network.Minio.Errors
import Network.Minio.ListOps import Network.Minio.ListOps
import Network.Minio.PutObject import Network.Minio.PutObject
import Network.Minio.S3API import Network.Minio.S3API
import Network.Minio.SelectAPI import Network.Minio.SelectAPI
import Network.Minio.Utils
-- | Lists buckets. -- | Lists buckets.
listBuckets :: Minio [BucketInfo] listBuckets :: Minio [BucketInfo]

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -19,12 +19,14 @@ module Network.Minio.API
S3ReqInfo (..), S3ReqInfo (..),
runMinio, runMinio,
executeRequest, executeRequest,
buildRequest,
mkStreamRequest, mkStreamRequest,
getLocation, getLocation,
isValidBucketName, isValidBucketName,
checkBucketNameValidity, checkBucketNameValidity,
isValidObjectName, isValidObjectName,
checkObjectNameValidity, checkObjectNameValidity,
requestSTSCredential,
) )
where where
@ -40,11 +42,15 @@ import qualified Data.HashMap.Strict as H
import qualified Data.Text as T import qualified Data.Text as T
import qualified Data.Time.Clock as Time import qualified Data.Time.Clock as Time
import Lib.Prelude import Lib.Prelude
import Network.HTTP.Client (defaultManagerSettings)
import qualified Network.HTTP.Client as NClient
import Network.HTTP.Conduit (Response) import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (simpleQueryToQuery)
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost) import Network.HTTP.Types.Header (hHost)
import Network.Minio.APICommon import Network.Minio.APICommon
import Network.Minio.Credentials
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.Errors import Network.Minio.Errors
import Network.Minio.Sign.V4 import Network.Minio.Sign.V4
@ -78,6 +84,7 @@ discoverRegion ri = runMaybeT $ do
return return
regionMay regionMay
-- | Returns the region to be used for the request.
getRegion :: S3ReqInfo -> Minio (Maybe Region) getRegion :: S3ReqInfo -> Minio (Maybe Region)
getRegion ri = do getRegion ri = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
@ -85,10 +92,10 @@ getRegion ri = do
-- getService/makeBucket/getLocation -- don't need location -- getService/makeBucket/getLocation -- don't need location
if if
| not $ riNeedsLocation ri -> | not $ riNeedsLocation ri ->
return $ Just $ connectRegion ci return $ Just $ connectRegion ci
-- if autodiscovery of location is disabled by user -- if autodiscovery of location is disabled by user
| not $ connectAutoDiscoverRegion ci -> | not $ connectAutoDiscoverRegion ci ->
return $ Just $ connectRegion ci return $ Just $ connectRegion ci
-- discover the region for the request -- discover the region for the request
| otherwise -> discoverRegion ri | otherwise -> discoverRegion ri
@ -104,6 +111,56 @@ getRegionHost r = do
(H.lookup r awsRegionMap) (H.lookup r awsRegionMap)
else return $ connectHost ci else return $ connectHost ci
-- | Computes the appropriate host, path and region for the request.
--
-- For AWS, always use virtual bucket style, unless bucket has periods. For
-- MinIO and other non-AWS, default to path style.
getHostPathRegion :: S3ReqInfo -> Minio (Text, ByteString, Maybe Region)
getHostPathRegion ri = do
ci <- asks mcConnInfo
regionMay <- getRegion ri
case riBucket ri of
Nothing ->
-- Implies a ListBuckets request.
return (connectHost ci, "/", regionMay)
Just bucket -> do
regionHost <- case regionMay of
Nothing -> return $ connectHost ci
Just "" -> return $ connectHost ci
Just r -> getRegionHost r
let pathStyle =
( regionHost,
getS3Path (riBucket ri) (riObject ri),
regionMay
)
virtualStyle =
( bucket <> "." <> regionHost,
encodeUtf8 $ "/" <> fromMaybe "" (riObject ri),
regionMay
)
( if isAWSConnectInfo ci
then
return $
if bucketHasPeriods bucket
then pathStyle
else virtualStyle
else return pathStyle
)
-- | requestSTSCredential requests temporary credentials using the Security Token
-- Service API. The returned credential will include a populated 'SessionToken'
-- and an 'ExpiryTime'.
requestSTSCredential :: (STSCredentialProvider p) => p -> IO (CredentialValue, ExpiryTime)
requestSTSCredential p = do
endpoint <- maybe (throwIO $ MErrValidation MErrVSTSEndpointNotFound) return $ getSTSEndpoint p
let endPt = NC.parseRequest_ $ toString endpoint
settings
| NC.secure endPt = NC.tlsManagerSettings
| otherwise = defaultManagerSettings
mgr <- NC.newManager settings
liftIO $ retrieveSTSCredentials p ("", 0, False) mgr
buildRequest :: S3ReqInfo -> Minio NC.Request buildRequest :: S3ReqInfo -> Minio NC.Request
buildRequest ri = do buildRequest ri = do
maybe (return ()) checkBucketNameValidity $ riBucket ri maybe (return ()) checkBucketNameValidity $ riBucket ri
@ -111,17 +168,15 @@ buildRequest ri = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
regionMay <- getRegion ri (host, path, regionMay) <- getHostPathRegion ri
regionHost <- maybe (return $ connectHost ci) getRegionHost regionMay let ci' = ci {connectHost = host}
hostHeader = (hHost, getHostAddr ci')
let ri' = ri' =
ri ri
{ riHeaders = hostHeader : riHeaders ri, { riHeaders = hostHeader : riHeaders ri,
riRegion = regionMay riRegion = regionMay
} }
ci' = ci {connectHost = regionHost}
hostHeader = (hHost, getHostAddr ci')
-- Does not contain body and auth info. -- Does not contain body and auth info.
baseRequest = baseRequest =
NC.defaultRequest NC.defaultRequest
@ -129,24 +184,31 @@ buildRequest ri = do
NC.secure = connectIsSecure ci', NC.secure = connectIsSecure ci',
NC.host = encodeUtf8 $ connectHost ci', NC.host = encodeUtf8 $ connectHost ci',
NC.port = connectPort ci', NC.port = connectPort ci',
NC.path = getS3Path (riBucket ri') (riObject ri'), NC.path = path,
NC.requestHeaders = riHeaders ri', NC.requestHeaders = riHeaders ri',
NC.queryString = HT.renderQuery False $ riQueryParams ri' NC.queryString = HT.renderQuery False $ riQueryParams ri'
} }
timeStamp <- liftIO Time.getCurrentTime timeStamp <- liftIO Time.getCurrentTime
mgr <- asks mcConnManager
cv <- liftIO $ getCredential (connectCreds ci') (getEndpoint ci') mgr
let sp = let sp =
SignParams SignParams
(connectAccessKey ci') (coerce $ cvAccessKey cv)
(connectSecretKey ci') (coerce $ cvSecretKey cv)
(coerce $ cvSessionToken cv)
ServiceS3
timeStamp timeStamp
(riRegion ri') (riRegion ri')
Nothing (riPresignExpirySecs ri')
Nothing Nothing
-- Cases to handle: -- Cases to handle:
-- --
-- 0. Handle presign URL case.
--
-- 1. Connection is secure: use unsigned payload -- 1. Connection is secure: use unsigned payload
-- --
-- 2. Insecure connection, streaming signature is enabled via use of -- 2. Insecure connection, streaming signature is enabled via use of
@ -155,40 +217,51 @@ buildRequest ri = do
-- 3. Insecure connection, non-conduit payload: compute payload -- 3. Insecure connection, non-conduit payload: compute payload
-- sha256hash, buffer request in memory and perform request. -- sha256hash, buffer request in memory and perform request.
-- case 2 from above.
if if
| isStreamingPayload (riPayload ri') | isJust (riPresignExpirySecs ri') ->
&& (not $ connectIsSecure ci') -> do -- case 0 from above.
(pLen, pSrc) <- case riPayload ri of do
PayloadC l src -> return (l, src) let signPairs = signV4QueryParams sp baseRequest
_ -> throwIO MErrVUnexpectedPayload qpToAdd = simpleQueryToQuery signPairs
let reqFn = signV4Stream pLen sp baseRequest existingQueryParams = HT.parseQuery (NC.queryString baseRequest)
return $ reqFn pSrc updatedQueryParams = existingQueryParams ++ qpToAdd
| otherwise -> do return $ NClient.setQueryString updatedQueryParams baseRequest
-- case 1 described above. | isStreamingPayload (riPayload ri') && not (connectIsSecure ci') ->
sp' <- -- case 2 from above.
if do
| connectIsSecure ci' -> return sp (pLen, pSrc) <- case riPayload ri of
-- case 3 described above. PayloadC l src -> return (l, src)
| otherwise -> do _ -> throwIO MErrVUnexpectedPayload
pHash <- getPayloadSHA256Hash $ riPayload ri' let reqFn = signV4Stream pLen sp baseRequest
return $ sp {spPayloadHash = Just pHash} return $ reqFn pSrc
| otherwise ->
do
sp' <-
( if connectIsSecure ci'
then -- case 1 described above.
return sp
else
( -- case 3 described above.
do
pHash <- getPayloadSHA256Hash $ riPayload ri'
return $ sp {spPayloadHash = Just pHash}
)
)
let signHeaders = signV4 sp' baseRequest let signHeaders = signV4 sp' baseRequest
return $ return $
baseRequest baseRequest
{ NC.requestHeaders = { NC.requestHeaders =
NC.requestHeaders baseRequest NC.requestHeaders baseRequest ++ signHeaders,
++ mkHeaderFromPairs signHeaders, NC.requestBody = getRequestBody (riPayload ri')
NC.requestBody = getRequestBody (riPayload ri') }
}
retryAPIRequest :: Minio a -> Minio a retryAPIRequest :: Minio a -> Minio a
retryAPIRequest apiCall = do retryAPIRequest apiCall = do
resE <- resE <-
retrying retryPolicy (const shouldRetry) retrying retryPolicy (const shouldRetry) $
$ const const $
$ try apiCall try apiCall
either throwIO return resE either throwIO return resE
where where
-- Retry using the full-jitter backoff method for up to 10 mins -- Retry using the full-jitter backoff method for up to 10 mins
@ -235,8 +308,8 @@ isValidBucketName bucket =
not not
( or ( or
[ len < 3 || len > 63, [ len < 3 || len > 63,
or (map labelCheck labels), any labelCheck labels,
or (map labelCharsCheck labels), any labelCharsCheck labels,
isIPCheck isIPCheck
] ]
) )
@ -264,18 +337,18 @@ isValidBucketName bucket =
isIPCheck = and labelAsNums && length labelAsNums == 4 isIPCheck = and labelAsNums && length labelAsNums == 4
-- Throws exception iff bucket name is invalid according to AWS rules. -- Throws exception iff bucket name is invalid according to AWS rules.
checkBucketNameValidity :: MonadIO m => Bucket -> m () checkBucketNameValidity :: (MonadIO m) => Bucket -> m ()
checkBucketNameValidity bucket = checkBucketNameValidity bucket =
when (not $ isValidBucketName bucket) unless (isValidBucketName bucket) $
$ throwIO throwIO $
$ MErrVInvalidBucketName bucket MErrVInvalidBucketName bucket
isValidObjectName :: Object -> Bool isValidObjectName :: Object -> Bool
isValidObjectName object = isValidObjectName object =
T.length object > 0 && B.length (encodeUtf8 object) <= 1024 T.length object > 0 && B.length (encodeUtf8 object) <= 1024
checkObjectNameValidity :: MonadIO m => Object -> m () checkObjectNameValidity :: (MonadIO m) => Object -> m ()
checkObjectNameValidity object = checkObjectNameValidity object =
when (not $ isValidObjectName object) unless (isValidObjectName object) $
$ throwIO throwIO $
$ MErrVInvalidObjectName object MErrVInvalidObjectName object

View File

@ -20,6 +20,7 @@ import qualified Conduit as C
import qualified Data.ByteString as BS import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.Conduit.Binary (sourceHandleRange) import Data.Conduit.Binary (sourceHandleRange)
import qualified Data.Text as T
import Lib.Prelude import Lib.Prelude
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
@ -45,7 +46,7 @@ getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload
getRequestBody :: Payload -> NC.RequestBody getRequestBody :: Payload -> NC.RequestBody
getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs
getRequestBody (PayloadH h off size) = getRequestBody (PayloadH h off size) =
NC.requestBodySource (fromIntegral size) $ NC.requestBodySource size $
sourceHandleRange sourceHandleRange
h h
(return . fromIntegral $ off) (return . fromIntegral $ off)
@ -70,3 +71,10 @@ mkStreamingPayload payload =
isStreamingPayload :: Payload -> Bool isStreamingPayload :: Payload -> Bool
isStreamingPayload (PayloadC _ _) = True isStreamingPayload (PayloadC _ _) = True
isStreamingPayload _ = False isStreamingPayload _ = False
-- | Checks if the connect info is for Amazon S3.
isAWSConnectInfo :: ConnectInfo -> Bool
isAWSConnectInfo ci = ".amazonaws.com" `T.isSuffixOf` connectHost ci
bucketHasPeriods :: Bucket -> Bool
bucketHasPeriods b = isJust $ T.find (== '.') b

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2018 MinIO, Inc. -- MinIO Haskell SDK, (C) 2018-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -16,7 +16,8 @@
module Network.Minio.AdminAPI module Network.Minio.AdminAPI
( -- * MinIO Admin API ( -- * MinIO Admin API
--------------------
--------------------
-- | Provides MinIO admin API and related types. It is in -- | Provides MinIO admin API and related types. It is in
-- experimental state. -- experimental state.
@ -52,10 +53,7 @@ module Network.Minio.AdminAPI
where where
import Data.Aeson import Data.Aeson
( (.:), ( FromJSON,
(.:?),
(.=),
FromJSON,
ToJSON, ToJSON,
Value (Object), Value (Object),
eitherDecode, eitherDecode,
@ -66,6 +64,9 @@ import Data.Aeson
toJSON, toJSON,
withObject, withObject,
withText, withText,
(.:),
(.:?),
(.=),
) )
import qualified Data.Aeson as A import qualified Data.Aeson as A
import Data.Aeson.Types (typeMismatch) import Data.Aeson.Types (typeMismatch)
@ -79,6 +80,7 @@ import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost) import Network.HTTP.Types.Header (hHost)
import Network.Minio.APICommon import Network.Minio.APICommon
import Network.Minio.Credentials
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.Errors import Network.Minio.Errors
import Network.Minio.Sign.V4 import Network.Minio.Sign.V4
@ -89,20 +91,23 @@ data DriveInfo = DriveInfo
diEndpoint :: Text, diEndpoint :: Text,
diState :: Text diState :: Text
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON DriveInfo where instance FromJSON DriveInfo where
parseJSON = withObject "DriveInfo" $ \v -> parseJSON = withObject "DriveInfo" $ \v ->
DriveInfo DriveInfo
<$> v .: "uuid" <$> v
<*> v .: "endpoint" .: "uuid"
<*> v .: "state" <*> v
.: "endpoint"
<*> v
.: "state"
data StorageClass = StorageClass data StorageClass = StorageClass
{ scParity :: Int, { scParity :: Int,
scData :: Int scData :: Int
} }
deriving (Eq, Show) deriving stock (Show, Eq)
data ErasureInfo = ErasureInfo data ErasureInfo = ErasureInfo
{ eiOnlineDisks :: Int, { eiOnlineDisks :: Int,
@ -111,7 +116,7 @@ data ErasureInfo = ErasureInfo
eiReducedRedundancy :: StorageClass, eiReducedRedundancy :: StorageClass,
eiSets :: [[DriveInfo]] eiSets :: [[DriveInfo]]
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON ErasureInfo where instance FromJSON ErasureInfo where
parseJSON = withObject "ErasureInfo" $ \v -> do parseJSON = withObject "ErasureInfo" $ \v -> do
@ -119,19 +124,23 @@ instance FromJSON ErasureInfo where
offlineDisks <- v .: "OfflineDisks" offlineDisks <- v .: "OfflineDisks"
stdClass <- stdClass <-
StorageClass StorageClass
<$> v .: "StandardSCData" <$> v
<*> v .: "StandardSCParity" .: "StandardSCData"
<*> v
.: "StandardSCParity"
rrClass <- rrClass <-
StorageClass StorageClass
<$> v .: "RRSCData" <$> v
<*> v .: "RRSCParity" .: "RRSCData"
<*> v
.: "RRSCParity"
sets <- v .: "Sets" sets <- v .: "Sets"
return $ ErasureInfo onlineDisks offlineDisks stdClass rrClass sets return $ ErasureInfo onlineDisks offlineDisks stdClass rrClass sets
data Backend data Backend
= BackendFS = BackendFS
| BackendErasure ErasureInfo | BackendErasure ErasureInfo
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON Backend where instance FromJSON Backend where
parseJSON = withObject "Backend" $ \v -> do parseJSON = withObject "Backend" $ \v -> do
@ -145,13 +154,15 @@ data ConnStats = ConnStats
{ csTransferred :: Int64, { csTransferred :: Int64,
csReceived :: Int64 csReceived :: Int64
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON ConnStats where instance FromJSON ConnStats where
parseJSON = withObject "ConnStats" $ \v -> parseJSON = withObject "ConnStats" $ \v ->
ConnStats ConnStats
<$> v .: "transferred" <$> v
<*> v .: "received" .: "transferred"
<*> v
.: "received"
data ServerProps = ServerProps data ServerProps = ServerProps
{ spUptime :: NominalDiffTime, { spUptime :: NominalDiffTime,
@ -160,7 +171,7 @@ data ServerProps = ServerProps
spRegion :: Text, spRegion :: Text,
spSqsArns :: [Text] spSqsArns :: [Text]
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON ServerProps where instance FromJSON ServerProps where
parseJSON = withObject "SIServer" $ \v -> do parseJSON = withObject "SIServer" $ \v -> do
@ -176,25 +187,29 @@ data StorageInfo = StorageInfo
{ siUsed :: Int64, { siUsed :: Int64,
siBackend :: Backend siBackend :: Backend
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON StorageInfo where instance FromJSON StorageInfo where
parseJSON = withObject "StorageInfo" $ \v -> parseJSON = withObject "StorageInfo" $ \v ->
StorageInfo StorageInfo
<$> v .: "Used" <$> v
<*> v .: "Backend" .: "Used"
<*> v
.: "Backend"
data CountNAvgTime = CountNAvgTime data CountNAvgTime = CountNAvgTime
{ caCount :: Int64, { caCount :: Int64,
caAvgDuration :: Text caAvgDuration :: Text
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON CountNAvgTime where instance FromJSON CountNAvgTime where
parseJSON = withObject "CountNAvgTime" $ \v -> parseJSON = withObject "CountNAvgTime" $ \v ->
CountNAvgTime CountNAvgTime
<$> v .: "count" <$> v
<*> v .: "avgDuration" .: "count"
<*> v
.: "avgDuration"
data HttpStats = HttpStats data HttpStats = HttpStats
{ hsTotalHeads :: CountNAvgTime, { hsTotalHeads :: CountNAvgTime,
@ -208,21 +223,31 @@ data HttpStats = HttpStats
hsTotalDeletes :: CountNAvgTime, hsTotalDeletes :: CountNAvgTime,
hsSuccessDeletes :: CountNAvgTime hsSuccessDeletes :: CountNAvgTime
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON HttpStats where instance FromJSON HttpStats where
parseJSON = withObject "HttpStats" $ \v -> parseJSON = withObject "HttpStats" $ \v ->
HttpStats HttpStats
<$> v .: "totalHEADs" <$> v
<*> v .: "successHEADs" .: "totalHEADs"
<*> v .: "totalGETs" <*> v
<*> v .: "successGETs" .: "successHEADs"
<*> v .: "totalPUTs" <*> v
<*> v .: "successPUTs" .: "totalGETs"
<*> v .: "totalPOSTs" <*> v
<*> v .: "successPOSTs" .: "successGETs"
<*> v .: "totalDELETEs" <*> v
<*> v .: "successDELETEs" .: "totalPUTs"
<*> v
.: "successPUTs"
<*> v
.: "totalPOSTs"
<*> v
.: "successPOSTs"
<*> v
.: "totalDELETEs"
<*> v
.: "successDELETEs"
data SIData = SIData data SIData = SIData
{ sdStorage :: StorageInfo, { sdStorage :: StorageInfo,
@ -230,47 +255,56 @@ data SIData = SIData
sdHttpStats :: HttpStats, sdHttpStats :: HttpStats,
sdProps :: ServerProps sdProps :: ServerProps
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON SIData where instance FromJSON SIData where
parseJSON = withObject "SIData" $ \v -> parseJSON = withObject "SIData" $ \v ->
SIData SIData
<$> v .: "storage" <$> v
<*> v .: "network" .: "storage"
<*> v .: "http" <*> v
<*> v .: "server" .: "network"
<*> v
.: "http"
<*> v
.: "server"
data ServerInfo = ServerInfo data ServerInfo = ServerInfo
{ siError :: Text, { siError :: Text,
siAddr :: Text, siAddr :: Text,
siData :: SIData siData :: SIData
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON ServerInfo where instance FromJSON ServerInfo where
parseJSON = withObject "ServerInfo" $ \v -> parseJSON = withObject "ServerInfo" $ \v ->
ServerInfo ServerInfo
<$> v .: "error" <$> v
<*> v .: "addr" .: "error"
<*> v .: "data" <*> v
.: "addr"
<*> v
.: "data"
data ServerVersion = ServerVersion data ServerVersion = ServerVersion
{ svVersion :: Text, { svVersion :: Text,
svCommitId :: Text svCommitId :: Text
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON ServerVersion where instance FromJSON ServerVersion where
parseJSON = withObject "ServerVersion" $ \v -> parseJSON = withObject "ServerVersion" $ \v ->
ServerVersion ServerVersion
<$> v .: "version" <$> v
<*> v .: "commitID" .: "version"
<*> v
.: "commitID"
data ServiceStatus = ServiceStatus data ServiceStatus = ServiceStatus
{ ssVersion :: ServerVersion, { ssVersion :: ServerVersion,
ssUptime :: NominalDiffTime ssUptime :: NominalDiffTime
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON ServiceStatus where instance FromJSON ServiceStatus where
parseJSON = withObject "ServiceStatus" $ \v -> do parseJSON = withObject "ServiceStatus" $ \v -> do
@ -282,7 +316,7 @@ instance FromJSON ServiceStatus where
data ServiceAction data ServiceAction
= ServiceActionRestart = ServiceActionRestart
| ServiceActionStop | ServiceActionStop
deriving (Eq, Show) deriving stock (Show, Eq)
instance ToJSON ServiceAction where instance ToJSON ServiceAction where
toJSON a = object ["action" .= serviceActionToText a] toJSON a = object ["action" .= serviceActionToText a]
@ -300,20 +334,23 @@ data HealStartResp = HealStartResp
hsrClientAddr :: Text, hsrClientAddr :: Text,
hsrStartTime :: UTCTime hsrStartTime :: UTCTime
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON HealStartResp where instance FromJSON HealStartResp where
parseJSON = withObject "HealStartResp" $ \v -> parseJSON = withObject "HealStartResp" $ \v ->
HealStartResp HealStartResp
<$> v .: "clientToken" <$> v
<*> v .: "clientAddress" .: "clientToken"
<*> v .: "startTime" <*> v
.: "clientAddress"
<*> v
.: "startTime"
data HealOpts = HealOpts data HealOpts = HealOpts
{ hoRecursive :: Bool, { hoRecursive :: Bool,
hoDryRun :: Bool hoDryRun :: Bool
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance ToJSON HealOpts where instance ToJSON HealOpts where
toJSON (HealOpts r d) = toJSON (HealOpts r d) =
@ -324,15 +361,17 @@ instance ToJSON HealOpts where
instance FromJSON HealOpts where instance FromJSON HealOpts where
parseJSON = withObject "HealOpts" $ \v -> parseJSON = withObject "HealOpts" $ \v ->
HealOpts HealOpts
<$> v .: "recursive" <$> v
<*> v .: "dryRun" .: "recursive"
<*> v
.: "dryRun"
data HealItemType data HealItemType
= HealItemMetadata = HealItemMetadata
| HealItemBucket | HealItemBucket
| HealItemBucketMetadata | HealItemBucketMetadata
| HealItemObject | HealItemObject
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON HealItemType where instance FromJSON HealItemType where
parseJSON = withText "HealItemType" $ \v -> case v of parseJSON = withText "HealItemType" $ \v -> case v of
@ -347,26 +386,31 @@ data NodeSummary = NodeSummary
nsErrSet :: Bool, nsErrSet :: Bool,
nsErrMessage :: Text nsErrMessage :: Text
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON NodeSummary where instance FromJSON NodeSummary where
parseJSON = withObject "NodeSummary" $ \v -> parseJSON = withObject "NodeSummary" $ \v ->
NodeSummary NodeSummary
<$> v .: "name" <$> v
<*> v .: "errSet" .: "name"
<*> v .: "errMsg" <*> v
.: "errSet"
<*> v
.: "errMsg"
data SetConfigResult = SetConfigResult data SetConfigResult = SetConfigResult
{ scrStatus :: Bool, { scrStatus :: Bool,
scrNodeSummary :: [NodeSummary] scrNodeSummary :: [NodeSummary]
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON SetConfigResult where instance FromJSON SetConfigResult where
parseJSON = withObject "SetConfigResult" $ \v -> parseJSON = withObject "SetConfigResult" $ \v ->
SetConfigResult SetConfigResult
<$> v .: "status" <$> v
<*> v .: "nodeResults" .: "status"
<*> v
.: "nodeResults"
data HealResultItem = HealResultItem data HealResultItem = HealResultItem
{ hriResultIdx :: Int, { hriResultIdx :: Int,
@ -382,21 +426,31 @@ data HealResultItem = HealResultItem
hriBefore :: [DriveInfo], hriBefore :: [DriveInfo],
hriAfter :: [DriveInfo] hriAfter :: [DriveInfo]
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON HealResultItem where instance FromJSON HealResultItem where
parseJSON = withObject "HealResultItem" $ \v -> parseJSON = withObject "HealResultItem" $ \v ->
HealResultItem HealResultItem
<$> v .: "resultId" <$> v
<*> v .: "type" .: "resultId"
<*> v .: "bucket" <*> v
<*> v .: "object" .: "type"
<*> v .: "detail" <*> v
<*> v .:? "parityBlocks" .: "bucket"
<*> v .:? "dataBlocks" <*> v
<*> v .: "diskCount" .: "object"
<*> v .: "setCount" <*> v
<*> v .: "objectSize" .: "detail"
<*> v
.:? "parityBlocks"
<*> v
.:? "dataBlocks"
<*> v
.: "diskCount"
<*> v
.: "setCount"
<*> v
.: "objectSize"
<*> ( do <*> ( do
before <- v .: "before" before <- v .: "before"
before .: "drives" before .: "drives"
@ -414,26 +468,34 @@ data HealStatus = HealStatus
hsFailureDetail :: Maybe Text, hsFailureDetail :: Maybe Text,
hsItems :: Maybe [HealResultItem] hsItems :: Maybe [HealResultItem]
} }
deriving (Eq, Show) deriving stock (Show, Eq)
instance FromJSON HealStatus where instance FromJSON HealStatus where
parseJSON = withObject "HealStatus" $ \v -> parseJSON = withObject "HealStatus" $ \v ->
HealStatus HealStatus
<$> v .: "Summary" <$> v
<*> v .: "StartTime" .: "Summary"
<*> v .: "Settings" <*> v
<*> v .: "NumDisks" .: "StartTime"
<*> v .:? "Detail" <*> v
<*> v .: "Items" .: "Settings"
<*> v
.: "NumDisks"
<*> v
.:? "Detail"
<*> v
.: "Items"
healPath :: Maybe Bucket -> Maybe Text -> ByteString healPath :: Maybe Bucket -> Maybe Text -> ByteString
healPath bucket prefix = do healPath bucket prefix = do
if (isJust bucket) if isJust bucket
then then
encodeUtf8 $ encodeUtf8 $
"v1/heal/" <> fromMaybe "" bucket <> "/" "v1/heal/"
<> fromMaybe "" bucket
<> "/"
<> fromMaybe "" prefix <> fromMaybe "" prefix
else encodeUtf8 $ "v1/heal/" else encodeUtf8 ("v1/heal/" :: Text)
-- | Get server version and uptime. -- | Get server version and uptime.
serviceStatus :: Minio ServiceStatus serviceStatus :: Minio ServiceStatus
@ -596,15 +658,17 @@ buildAdminRequest :: AdminReqInfo -> Minio NC.Request
buildAdminRequest areq = do buildAdminRequest areq = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
sha256Hash <- sha256Hash <-
if if connectIsSecure ci
| connectIsSecure ci -> then -- if secure connection
-- if secure connection return "UNSIGNED-PAYLOAD"
return "UNSIGNED-PAYLOAD" else -- otherwise compute sha256
-- otherwise compute sha256 getPayloadSHA256Hash (ariPayload areq)
| otherwise -> getPayloadSHA256Hash (ariPayload areq)
timeStamp <- liftIO getCurrentTime timeStamp <- liftIO getCurrentTime
mgr <- asks mcConnManager
cv <- liftIO $ getCredential (connectCreds ci) (getEndpoint ci) mgr
let hostHeader = (hHost, getHostAddr ci) let hostHeader = (hHost, getHostAddr ci)
newAreq = newAreq =
areq areq
@ -617,8 +681,10 @@ buildAdminRequest areq = do
signReq = toRequest ci newAreq signReq = toRequest ci newAreq
sp = sp =
SignParams SignParams
(connectAccessKey ci) (coerce $ cvAccessKey cv)
(connectSecretKey ci) (coerce $ cvSecretKey cv)
(coerce $ cvSessionToken cv)
ServiceS3
timeStamp timeStamp
Nothing Nothing
Nothing Nothing
@ -628,7 +694,7 @@ buildAdminRequest areq = do
-- Update signReq with Authorization header containing v4 signature -- Update signReq with Authorization header containing v4 signature
return return
signReq signReq
{ NC.requestHeaders = ariHeaders newAreq ++ mkHeaderFromPairs signHeaders { NC.requestHeaders = ariHeaders newAreq ++ signHeaders
} }
where where
toRequest :: ConnectInfo -> AdminReqInfo -> NC.Request toRequest :: ConnectInfo -> AdminReqInfo -> NC.Request

View File

@ -45,11 +45,10 @@ copyObjectInternal b' o srcInfo = do
when when
( isJust rangeMay ( isJust rangeMay
&& or && ( (startOffset < 0)
[ startOffset < 0, || (endOffset < startOffset)
endOffset < startOffset, || (endOffset >= srcSize)
endOffset >= fromIntegral srcSize )
]
) )
$ throwIO $ throwIO
$ MErrVInvalidSrcObjByteRange range $ MErrVInvalidSrcObjByteRange range
@ -69,9 +68,8 @@ copyObjectInternal b' o srcInfo = do
-- used is minPartSize. -- used is minPartSize.
selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))] selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
selectCopyRanges (st, end) = selectCopyRanges (st, end) =
zip pns zip pns $
$ map (\(x, y) -> (st + x, st + x + y - 1)) zipWith (\x y -> (st + x, st + x + y - 1)) startOffsets partSizes
$ zip startOffsets partSizes
where where
size = end - st + 1 size = end - st + 1
(pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size (pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
@ -88,7 +86,7 @@ multiPartCopyObject ::
multiPartCopyObject b o cps srcSize = do multiPartCopyObject b o cps srcSize = do
uid <- newMultipartUpload b o [] uid <- newMultipartUpload b o []
let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps let byteRange = maybe (0, srcSize - 1) identity $ srcRange cps
partRanges = selectCopyRanges byteRange partRanges = selectCopyRanges byteRange
partSources = partSources =
map map

View File

@ -0,0 +1,77 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.Credentials
( CredentialValue (..),
credentialValueText,
STSCredentialProvider (..),
AccessKey (..),
SecretKey (..),
SessionToken (..),
ExpiryTime (..),
STSCredentialStore,
initSTSCredential,
getSTSCredential,
Creds (..),
getCredential,
Endpoint,
-- * STS Assume Role
defaultSTSAssumeRoleOptions,
STSAssumeRole (..),
STSAssumeRoleOptions (..),
)
where
import Data.Time (diffUTCTime, getCurrentTime)
import qualified Network.HTTP.Client as NC
import Network.Minio.Credentials.AssumeRole
import Network.Minio.Credentials.Types
import qualified UnliftIO.MVar as M
-- | A cache holding the most recently fetched STS credential together with
-- the action used to refresh it. The 'M.MVar' also serialises concurrent
-- refresh attempts.
data STSCredentialStore = STSCredentialStore
  { -- | Cached credential and the time at which it expires.
    cachedCredentials :: M.MVar (CredentialValue, ExpiryTime),
    -- | Action that fetches a fresh credential from the STS endpoint.
    refreshAction :: Endpoint -> NC.Manager -> IO (CredentialValue, ExpiryTime)
  }
-- | Build an 'STSCredentialStore' for the given provider. The cache is
-- seeded with an empty credential whose expiry is the current time, so the
-- first call to 'getSTSCredential' always performs a refresh.
initSTSCredential :: (STSCredentialProvider p) => p -> IO STSCredentialStore
initSTSCredential provider = do
  seedTime <- getCurrentTime
  -- Placeholder credential; already "expired" at creation time.
  cache <- M.newMVar (CredentialValue mempty mempty mempty, coerce seedTime)
  pure
    STSCredentialStore
      { cachedCredentials = cache,
        refreshAction = retrieveSTSCredentials provider
      }
-- | Return the cached credential, refreshing it via the store's refresh
-- action when it has expired. The returned 'Bool' is 'True' exactly when a
-- refresh was performed.
getSTSCredential :: STSCredentialStore -> Endpoint -> NC.Manager -> IO (CredentialValue, Bool)
getSTSCredential store endpoint manager =
  M.modifyMVar (cachedCredentials store) $ \cached@(credential, expiresAt) -> do
    currentTime <- getCurrentTime
    if diffUTCTime currentTime (coerce expiresAt) > 0
      then do
        -- Expired: fetch a fresh credential and store it in the cache.
        fresh@(freshCredential, _) <- refreshAction store endpoint manager
        pure (fresh, (freshCredential, True))
      else
        -- Still valid: keep the cache unchanged.
        pure (cached, (credential, False))
-- | Credential configuration: either a fixed credential or an STS store
-- that provides (and refreshes) temporary credentials on demand.
data Creds
  = -- | A static, caller-supplied credential.
    CredsStatic CredentialValue
  | -- | Temporary credentials managed via an 'STSCredentialStore'.
    CredsSTS STSCredentialStore
-- | Resolve the effective credential: static values are returned as-is,
-- while STS-backed credentials are fetched (refreshing if expired) from the
-- store via 'getSTSCredential'.
getCredential :: Creds -> Endpoint -> NC.Manager -> IO CredentialValue
getCredential creds endpoint manager = case creds of
  CredsStatic value -> pure value
  CredsSTS store -> fst <$> getSTSCredential store endpoint manager

View File

@ -0,0 +1,266 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.Credentials.AssumeRole where
import qualified Data.ByteArray as BA
import qualified Data.ByteString.Lazy as LB
import qualified Data.Text as T
import qualified Data.Time as Time
import Data.Time.Units (Second)
import Lib.Prelude (UTCTime, throwIO)
import Network.HTTP.Client (RequestBody (RequestBodyBS))
import qualified Network.HTTP.Client as NC
import Network.HTTP.Types (hContentType, methodPost, renderSimpleQuery)
import Network.HTTP.Types.Header (hHost)
import Network.Minio.Credentials.Types
import Network.Minio.Data.Crypto (hashSHA256)
import Network.Minio.Errors (MErrV (..))
import Network.Minio.Sign.V4
import Network.Minio.Utils (getHostHeader, httpLbs)
import Network.Minio.XmlCommon
import Text.XML.Cursor hiding (bool)
-- | STS API version, sent as the @Version@ request parameter.
stsVersion :: ByteString
stsVersion = "2011-06-15"

-- | Credential validity requested when the caller does not specify a
-- duration (one hour).
defaultDurationSeconds :: Second
defaultDurationSeconds = 3600
-- | Assume Role API argument.
--
-- @since 1.7.0
data STSAssumeRole = STSAssumeRole
  { -- | Credentials to use in the AssumeRole STS API.
    sarCredentials :: CredentialValue,
    -- | Optional request settings; see 'defaultSTSAssumeRoleOptions'.
    sarOptions :: STSAssumeRoleOptions
  }
-- | Options for STS Assume Role API.
data STSAssumeRoleOptions = STSAssumeRoleOptions
  { -- | STS endpoint to which the request will be made. For MinIO, this is the
    -- same as the server endpoint. For AWS, this has to be the Security Token
    -- Service endpoint. If using with 'setSTSCredential', this option can be
    -- left as 'Nothing' and the endpoint in 'ConnectInfo' will be used.
    saroEndpoint :: Maybe Text,
    -- | Desired validity for the generated credentials.
    saroDurationSeconds :: Maybe Second,
    -- | IAM policy to apply for the generated credentials.
    saroPolicyJSON :: Maybe ByteString,
    -- | Location is usually required for AWS.
    saroLocation :: Maybe Text,
    -- | Sent as the @RoleArn@ request parameter when present.
    saroRoleARN :: Maybe Text,
    -- | Sent as the @RoleSessionName@ request parameter when present.
    saroRoleSessionName :: Maybe Text
  }
-- | Default STS Assume Role options: every optional field is 'Nothing',
-- except the credential duration, which defaults to one hour
-- ('defaultDurationSeconds').
defaultSTSAssumeRoleOptions :: STSAssumeRoleOptions
defaultSTSAssumeRoleOptions =
  STSAssumeRoleOptions
    { saroEndpoint = Nothing,
      saroDurationSeconds = Just defaultDurationSeconds,
      saroPolicyJSON = Nothing,
      saroLocation = Nothing,
      saroRoleARN = Nothing,
      saroRoleSessionName = Nothing
    }
-- | Credentials returned by the AssumeRole API call, together with their
-- expiry time.
data AssumeRoleCredentials = AssumeRoleCredentials
  { -- | The temporary credential (access key, secret key, session token).
    arcCredentials :: CredentialValue,
    -- | Instant after which the credential is no longer valid.
    arcExpiration :: UTCTime
  }
  deriving stock (Show, Eq)

-- | Parsed result of the STS AssumeRole API call.
data AssumeRoleResult = AssumeRoleResult
  { -- | Source identity reported in the XML response.
    arrSourceIdentity :: Text,
    -- | ARN of the assumed role user.
    arrAssumedRoleArn :: Text,
    -- | Identifier of the assumed role user.
    arrAssumedRoleId :: Text,
    -- | The temporary credentials and their expiry.
    arrRoleCredentials :: AssumeRoleCredentials
  }
  deriving stock (Show, Eq)
-- | parseSTSAssumeRoleResult parses an XML response of the following form:
--
-- <AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
-- <AssumeRoleResult>
-- <SourceIdentity>Alice</SourceIdentity>
-- <AssumedRoleUser>
-- <Arn>arn:aws:sts::123456789012:assumed-role/demo/TestAR</Arn>
-- <AssumedRoleId>ARO123EXAMPLE123:TestAR</AssumedRoleId>
-- </AssumedRoleUser>
-- <Credentials>
-- <AccessKeyId>ASIAIOSFODNN7EXAMPLE</AccessKeyId>
-- <SecretAccessKey>wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY</SecretAccessKey>
-- <SessionToken>
-- AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQW
-- LWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGd
-- QrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU
-- 9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz
-- +scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==
-- </SessionToken>
-- <Expiration>2019-11-09T13:34:41Z</Expiration>
-- </Credentials>
-- <PackedPolicySize>6</PackedPolicySize>
-- </AssumeRoleResult>
-- <ResponseMetadata>
-- <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>
-- </ResponseMetadata>
-- </AssumeRoleResponse>
-- | Parse the XML response of the STS AssumeRole call (format shown in the
-- example above) into an 'AssumeRoleResult'. Throws 'MErrVXmlParse' when the
-- @Credentials@ element is missing from the response.
parseSTSAssumeRoleResult :: (MonadIO m) => ByteString -> Text -> m AssumeRoleResult
parseSTSAssumeRoleResult xmldata namespace = do
  r <- parseRoot $ LB.fromStrict xmldata
  let s3Elem' = s3Elem namespace
      -- Text content of AssumeRoleResult/SourceIdentity.
      sourceIdentity =
        T.concat $
          r
            $/ s3Elem' "AssumeRoleResult"
            &/ s3Elem' "SourceIdentity"
            &/ content
      -- Text content of AssumeRoleResult/AssumedRoleUser/Arn.
      roleArn =
        T.concat $
          r
            $/ s3Elem' "AssumeRoleResult"
            &/ s3Elem' "AssumedRoleUser"
            &/ s3Elem' "Arn"
            &/ content
      -- Text content of AssumeRoleResult/AssumedRoleUser/AssumedRoleId.
      roleId =
        T.concat $
          r
            $/ s3Elem' "AssumeRoleResult"
            &/ s3Elem' "AssumedRoleUser"
            &/ s3Elem' "AssumedRoleId"
            &/ content
      -- Copy secret material into scrubbed memory.
      convSB :: Text -> BA.ScrubbedBytes
      convSB = BA.convert . (encodeUtf8 :: Text -> ByteString)
      -- Either an error (missing Credentials element) or the parsed
      -- credential paired with the raw Expiration timestamp text.
      credsInfo = do
        cr <-
          maybe (Left $ MErrVXmlParse "No Credentials Element found") Right $
            listToMaybe $
              r $/ s3Elem' "AssumeRoleResult" &/ s3Elem' "Credentials"
        let cur = fromNode $ node cr
        return
          ( CredentialValue
              { cvAccessKey =
                  coerce $
                    T.concat $
                      cur $/ s3Elem' "AccessKeyId" &/ content,
                cvSecretKey =
                  coerce $
                    convSB $
                      T.concat $
                        cur
                          $/ s3Elem' "SecretAccessKey"
                          &/ content,
                cvSessionToken =
                  Just $
                    coerce $
                      convSB $
                        T.concat $
                          cur
                            $/ s3Elem' "SessionToken"
                            &/ content
              },
            T.concat $ cur $/ s3Elem' "Expiration" &/ content
          )
  creds <- either throwIO pure credsInfo
  -- Convert the Expiration timestamp text into a UTCTime.
  expiry <- parseS3XMLTime $ snd creds
  let roleCredentials =
        AssumeRoleCredentials
          { arcCredentials = fst creds,
            arcExpiration = expiry
          }
  return
    AssumeRoleResult
      { arrSourceIdentity = sourceIdentity,
        arrAssumedRoleArn = roleArn,
        arrAssumedRoleId = roleId,
        arrRoleCredentials = roleCredentials
      }
-- Provider instance: performs a signed AssumeRole POST against the STS
-- endpoint and returns the temporary credential plus its expiry.
instance STSCredentialProvider STSAssumeRole where
  getSTSEndpoint = saroEndpoint . sarOptions
  retrieveSTSCredentials sar (host', port', isSecure') mgr = do
    -- Assemble STS request
    let requiredParams =
          [ ("Action", "AssumeRole"),
            ("Version", stsVersion)
          ]
        opts = sarOptions sar
        -- Duration falls back to 'defaultDurationSeconds' when unset.
        durSecs :: Int =
          fromIntegral $
            fromMaybe defaultDurationSeconds $
              saroDurationSeconds opts
        -- Optional request parameters; 'Nothing's are dropped below.
        otherParams =
          [ ("RoleArn",) . encodeUtf8 <$> saroRoleARN opts,
            ("RoleSessionName",) . encodeUtf8 <$> saroRoleSessionName opts,
            Just ("DurationSeconds", show durSecs),
            ("Policy",) <$> saroPolicyJSON opts
          ]
        parameters = requiredParams ++ catMaybes otherParams
        -- Use the endpoint configured in the options if present, otherwise
        -- the endpoint passed by the caller.
        (host, port, isSecure) =
          case getSTSEndpoint sar of
            Just ep ->
              let endPt = NC.parseRequest_ $ toString ep
               in (NC.host endPt, NC.port endPt, NC.secure endPt)
            Nothing -> (host', port', isSecure')
        -- Parameters are sent form-encoded in the POST body.
        reqBody = renderSimpleQuery False parameters
        req =
          NC.defaultRequest
            { NC.host = host,
              NC.port = port,
              NC.secure = isSecure,
              NC.method = methodPost,
              NC.requestHeaders =
                [ (hHost, getHostHeader (host, port)),
                  (hContentType, "application/x-www-form-urlencoded")
                ],
              NC.requestBody = RequestBodyBS reqBody
            }
    -- Sign the STS request.
    timeStamp <- liftIO Time.getCurrentTime
    let sp =
          SignParams
            { spAccessKey = coerce $ cvAccessKey $ sarCredentials sar,
              spSecretKey = coerce $ cvSecretKey $ sarCredentials sar,
              spSessionToken = coerce $ cvSessionToken $ sarCredentials sar,
              spService = ServiceSTS,
              spTimeStamp = timeStamp,
              spRegion = saroLocation opts,
              spExpirySecs = Nothing,
              spPayloadHash = Just $ hashSHA256 reqBody
            }
        signHeaders = signV4 sp req
        signedReq =
          req
            { NC.requestHeaders = NC.requestHeaders req ++ signHeaders
            }
    -- Make the STS request
    resp <- httpLbs signedReq mgr
    result <-
      parseSTSAssumeRoleResult
        (toStrict $ NC.responseBody resp)
        "https://sts.amazonaws.com/doc/2011-06-15/"
    return
      ( arcCredentials $ arrRoleCredentials result,
        coerce $ arcExpiration $ arrRoleCredentials result
      )

View File

@ -0,0 +1,90 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE StrictData #-}
module Network.Minio.Credentials.Types where
import qualified Data.ByteArray as BA
import Lib.Prelude (UTCTime)
import qualified Network.HTTP.Client as NC
-- | Access Key type. Wraps the plain-text access key identifier.
newtype AccessKey = AccessKey {unAccessKey :: Text}
  deriving stock (Show)
  deriving newtype (Eq, IsString, Semigroup, Monoid)

-- | Secret Key type - has a show instance that does not print the value.
-- The key material is held in scrubbed memory ('BA.ScrubbedBytes').
newtype SecretKey = SecretKey {unSecretKey :: BA.ScrubbedBytes}
  deriving stock (Show)
  deriving newtype (Eq, IsString, Semigroup, Monoid)

-- | Session Token type - has a show instance that does not print the value.
-- The token is held in scrubbed memory ('BA.ScrubbedBytes').
newtype SessionToken = SessionToken {unSessionToken :: BA.ScrubbedBytes}
  deriving stock (Show)
  deriving newtype (Eq, IsString, Semigroup, Monoid)
-- | Object storage credential data type. It has support for the optional
-- [SessionToken](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
-- for using temporary credentials requested via STS.
--
-- The show instance for this type does not print the value of secrets for
-- security.
--
-- @since 1.7.0
data CredentialValue = CredentialValue
  { -- | Access key identifying the credential.
    cvAccessKey :: AccessKey,
    -- | Secret key; held in scrubbed memory.
    cvSecretKey :: SecretKey,
    -- | Optional session token for temporary (STS) credentials.
    cvSessionToken :: Maybe SessionToken
  }
  deriving stock (Eq, Show)
-- | Render a scrubbed byte sequence as 'Text' (UTF-8 decoded). Note that
-- this copies the secret out of scrubbed memory.
scrubbedToText :: BA.ScrubbedBytes -> Text
scrubbedToText scrubbed =
  let asBytes :: ByteString
      asBytes = BA.convert scrubbed
   in decodeUtf8 asBytes
-- | Convert a 'CredentialValue' to a text tuple. Use this to output the
-- credential to files or other programs.
credentialValueText :: CredentialValue -> (Text, Text, Maybe Text)
credentialValueText cv = (accessKey, secretKey, sessionToken)
  where
    accessKey = coerce $ cvAccessKey cv
    secretKey = scrubbedToText $ coerce $ cvSecretKey cv
    sessionToken = scrubbedToText . coerce <$> cvSessionToken cv
-- | Endpoint represented by host, port and TLS enabled flag.
type Endpoint = (ByteString, Int, Bool)
-- | Typeclass for STS credential providers.
--
-- @since 1.7.0
class STSCredentialProvider p where
  -- | Request a fresh credential, and its expiry time, from the STS
  -- endpoint.
  retrieveSTSCredentials ::
    p ->
    -- | STS Endpoint (host, port, isSecure)
    Endpoint ->
    NC.Manager ->
    IO (CredentialValue, ExpiryTime)

  -- | Optional STS endpoint configured on the provider.
  getSTSEndpoint :: p -> Maybe Text
-- | 'ExpiryTime' represents a time at which a credential expires.
-- Newtype wrapper over 'UTCTime'.
newtype ExpiryTime = ExpiryTime {unExpiryTime :: UTCTime}
  deriving stock (Show)
  deriving newtype (Eq)

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -16,26 +16,32 @@
{-# LANGUAGE CPP #-} {-# LANGUAGE CPP #-}
{-# LANGUAGE GADTs #-} {-# LANGUAGE GADTs #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeFamilies #-}
module Network.Minio.Data where module Network.Minio.Data where
import qualified Conduit as C import qualified Conduit as C
import qualified Control.Concurrent.MVar as M import qualified Control.Concurrent.MVar as M
import Control.Monad.Trans.Except (throwE)
import Control.Monad.Trans.Resource import Control.Monad.Trans.Resource
( MonadResource,
MonadThrow (..),
MonadUnliftIO,
ResourceT,
runResourceT,
)
import qualified Data.Aeson as A import qualified Data.Aeson as A
import qualified Data.ByteArray as BA import qualified Data.ByteArray as BA
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk)
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.Ini as Ini import qualified Data.Ini as Ini
import Data.String (IsString (..)) import qualified Data.List as List
import qualified Data.Text as T import qualified Data.Text as T
import qualified Data.Text.Encoding as TE import qualified Data.Text.Encoding as TE
import Data.Time (defaultTimeLocale, formatTime) import Data.Time (defaultTimeLocale, formatTime)
import GHC.Show (Show (show)) import Lib.Prelude (UTCTime, throwIO)
import Lib.Prelude
import qualified Network.Connection as Conn import qualified Network.Connection as Conn
import Network.HTTP.Client (defaultManagerSettings) import Network.HTTP.Client (defaultManagerSettings)
import qualified Network.HTTP.Client.TLS as TLS import qualified Network.HTTP.Client.TLS as TLS
@ -48,13 +54,22 @@ import Network.HTTP.Types
hRange, hRange,
) )
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import Network.Minio.Credentials
import Network.Minio.Data.Crypto import Network.Minio.Data.Crypto
( encodeToBase64,
hashMD5ToBase64,
)
import Network.Minio.Data.Time (UrlExpiry)
import Network.Minio.Errors import Network.Minio.Errors
( MErrV (MErrVInvalidEncryptionKeyLength, MErrVMissingCredentials),
MinioErr (..),
)
import Network.Minio.Utils
import System.Directory (doesFileExist, getHomeDirectory) import System.Directory (doesFileExist, getHomeDirectory)
import qualified System.Environment as Env import qualified System.Environment as Env
import System.FilePath.Posix (combine) import System.FilePath.Posix (combine)
import Text.XML
import qualified UnliftIO as U import qualified UnliftIO as U
import qualified UnliftIO.MVar as UM
-- | max obj size is 5TiB -- | max obj size is 5TiB
maxObjectSize :: Int64 maxObjectSize :: Int64
@ -79,20 +94,36 @@ maxMultipartParts = 10000
awsRegionMap :: H.HashMap Text Text awsRegionMap :: H.HashMap Text Text
awsRegionMap = awsRegionMap =
H.fromList H.fromList
[ ("us-east-1", "s3.amazonaws.com"), [ ("us-east-1", "s3.us-east-1.amazonaws.com"),
("us-east-2", "s3-us-east-2.amazonaws.com"), ("us-east-2", "s3.us-east-2.amazonaws.com"),
("us-west-1", "s3-us-west-1.amazonaws.com"), ("us-west-1", "s3.us-west-1.amazonaws.com"),
("us-west-2", "s3-us-west-2.amazonaws.com"), ("us-west-2", "s3.us-west-2.amazonaws.com"),
("ca-central-1", "s3-ca-central-1.amazonaws.com"), ("ca-central-1", "s3.ca-central-1.amazonaws.com"),
("ap-south-1", "s3-ap-south-1.amazonaws.com"), ("ap-south-1", "s3.ap-south-1.amazonaws.com"),
("ap-northeast-1", "s3-ap-northeast-1.amazonaws.com"), ("ap-south-2", "s3.ap-south-2.amazonaws.com"),
("ap-northeast-2", "s3-ap-northeast-2.amazonaws.com"), ("ap-northeast-1", "s3.ap-northeast-1.amazonaws.com"),
("ap-southeast-1", "s3-ap-southeast-1.amazonaws.com"), ("ap-northeast-2", "s3.ap-northeast-2.amazonaws.com"),
("ap-southeast-2", "s3-ap-southeast-2.amazonaws.com"), ("ap-northeast-3", "s3.ap-northeast-3.amazonaws.com"),
("eu-west-1", "s3-eu-west-1.amazonaws.com"), ("ap-southeast-1", "s3.ap-southeast-1.amazonaws.com"),
("eu-west-2", "s3-eu-west-2.amazonaws.com"), ("ap-southeast-2", "s3.ap-southeast-2.amazonaws.com"),
("eu-central-1", "s3-eu-central-1.amazonaws.com"), ("ap-southeast-3", "s3.ap-southeast-3.amazonaws.com"),
("sa-east-1", "s3-sa-east-1.amazonaws.com") ("eu-west-1", "s3.eu-west-1.amazonaws.com"),
("eu-west-2", "s3.eu-west-2.amazonaws.com"),
("eu-west-3", "s3.eu-west-3.amazonaws.com"),
("eu-central-1", "s3.eu-central-1.amazonaws.com"),
("eu-central-2", "s3.eu-central-2.amazonaws.com"),
("eu-south-1", "s3.eu-south-1.amazonaws.com"),
("eu-south-2", "s3.eu-south-2.amazonaws.com"),
("af-south-1", "s3.af-south-1.amazonaws.com"),
("ap-east-1", "s3.ap-east-1.amazonaws.com"),
("cn-north-1", "s3.cn-north-1.amazonaws.com.cn"),
("cn-northwest-1", "s3.cn-northwest-1.amazonaws.com.cn"),
("eu-north-1", "s3.eu-north-1.amazonaws.com"),
("me-south-1", "s3.me-south-1.amazonaws.com"),
("me-central-1", "s3.me-central-1.amazonaws.com"),
("us-gov-east-1", "s3.us-gov-east-1.amazonaws.com"),
("us-gov-west-1", "s3.us-gov-west-1.amazonaws.com"),
("sa-east-1", "s3.sa-east-1.amazonaws.com")
] ]
-- | Connection Info data type. To create a 'ConnectInfo' value, -- | Connection Info data type. To create a 'ConnectInfo' value,
@ -103,14 +134,15 @@ awsRegionMap =
data ConnectInfo = ConnectInfo data ConnectInfo = ConnectInfo
{ connectHost :: Text, { connectHost :: Text,
connectPort :: Int, connectPort :: Int,
connectAccessKey :: Text, connectCreds :: Creds,
connectSecretKey :: Text,
connectIsSecure :: Bool, connectIsSecure :: Bool,
connectRegion :: Region, connectRegion :: Region,
connectAutoDiscoverRegion :: Bool, connectAutoDiscoverRegion :: Bool,
connectDisableTLSCertValidation :: Bool connectDisableTLSCertValidation :: Bool
} }
deriving (Eq, Show)
getEndpoint :: ConnectInfo -> Endpoint
getEndpoint ci = (encodeUtf8 $ connectHost ci, connectPort ci, connectIsSecure ci)
instance IsString ConnectInfo where instance IsString ConnectInfo where
fromString str = fromString str =
@ -118,86 +150,89 @@ instance IsString ConnectInfo where
in ConnectInfo in ConnectInfo
{ connectHost = TE.decodeUtf8 $ NC.host req, { connectHost = TE.decodeUtf8 $ NC.host req,
connectPort = NC.port req, connectPort = NC.port req,
connectAccessKey = "", connectCreds = CredsStatic $ CredentialValue mempty mempty mempty,
connectSecretKey = "",
connectIsSecure = NC.secure req, connectIsSecure = NC.secure req,
connectRegion = "", connectRegion = "",
connectAutoDiscoverRegion = True, connectAutoDiscoverRegion = True,
connectDisableTLSCertValidation = False connectDisableTLSCertValidation = False
} }
-- | Contains access key and secret key to access object storage. -- | A 'CredentialLoader' is an action that may return a 'CredentialValue'.
data Credentials = Credentials -- Loaders may be chained together using 'findFirst'.
{ cAccessKey :: Text, --
cSecretKey :: Text -- @since 1.7.0
} type CredentialLoader = IO (Maybe CredentialValue)
deriving (Eq, Show)
-- | A Provider is an action that may return Credentials. Providers -- | Combines the given list of loaders, by calling each one in
-- may be chained together using 'findFirst'. -- order until a 'CredentialValue' is returned.
type Provider = IO (Maybe Credentials) findFirst :: [CredentialLoader] -> IO (Maybe CredentialValue)
-- | Combines the given list of providers, by calling each one in
-- order until Credentials are found.
findFirst :: [Provider] -> Provider
findFirst [] = return Nothing findFirst [] = return Nothing
findFirst (f : fs) = do findFirst (f : fs) = do
c <- f c <- f
maybe (findFirst fs) (return . Just) c maybe (findFirst fs) (return . Just) c
-- | This Provider loads `Credentials` from @~\/.aws\/credentials@ -- | This action returns a 'CredentialValue' populated from
fromAWSConfigFile :: Provider -- @~\/.aws\/credentials@
fromAWSConfigFile :: CredentialLoader
fromAWSConfigFile = do fromAWSConfigFile = do
credsE <- runExceptT $ do credsE <- runExceptT $ do
homeDir <- lift $ getHomeDirectory homeDir <- lift getHomeDirectory
let awsCredsFile = homeDir `combine` ".aws" `combine` "credentials" let awsCredsFile = homeDir `combine` ".aws" `combine` "credentials"
fileExists <- lift $ doesFileExist awsCredsFile fileExists <- lift $ doesFileExist awsCredsFile
bool (throwE "FileNotFound") (return ()) fileExists bool (throwE "FileNotFound") (return ()) fileExists
ini <- ExceptT $ Ini.readIniFile awsCredsFile ini <- ExceptT $ Ini.readIniFile awsCredsFile
akey <- akey <-
ExceptT $ return $ ExceptT $
Ini.lookupValue "default" "aws_access_key_id" ini return $
Ini.lookupValue "default" "aws_access_key_id" ini
skey <- skey <-
ExceptT $ return $ ExceptT $
Ini.lookupValue "default" "aws_secret_access_key" ini return $
return $ Credentials akey skey Ini.lookupValue "default" "aws_secret_access_key" ini
return $ hush credsE return $ CredentialValue (coerce akey) (fromString $ T.unpack skey) Nothing
return $ either (const Nothing) Just credsE
-- | This Provider loads `Credentials` from @AWS_ACCESS_KEY_ID@ and -- | This action returns a 'CredentialValue` populated from @AWS_ACCESS_KEY_ID@
-- @AWS_SECRET_ACCESS_KEY@ environment variables. -- and @AWS_SECRET_ACCESS_KEY@ environment variables.
fromAWSEnv :: Provider fromAWSEnv :: CredentialLoader
fromAWSEnv = runMaybeT $ do fromAWSEnv = runMaybeT $ do
akey <- MaybeT $ Env.lookupEnv "AWS_ACCESS_KEY_ID" akey <- MaybeT $ Env.lookupEnv "AWS_ACCESS_KEY_ID"
skey <- MaybeT $ Env.lookupEnv "AWS_SECRET_ACCESS_KEY" skey <- MaybeT $ Env.lookupEnv "AWS_SECRET_ACCESS_KEY"
return $ Credentials (T.pack akey) (T.pack skey) return $ CredentialValue (fromString akey) (fromString skey) Nothing
-- | This Provider loads `Credentials` from @MINIO_ACCESS_KEY@ and -- | This action returns a 'CredentialValue' populated from @MINIO_ACCESS_KEY@
-- @MINIO_SECRET_KEY@ environment variables. -- and @MINIO_SECRET_KEY@ environment variables.
fromMinioEnv :: Provider fromMinioEnv :: CredentialLoader
fromMinioEnv = runMaybeT $ do fromMinioEnv = runMaybeT $ do
akey <- MaybeT $ Env.lookupEnv "MINIO_ACCESS_KEY" akey <- MaybeT $ Env.lookupEnv "MINIO_ACCESS_KEY"
skey <- MaybeT $ Env.lookupEnv "MINIO_SECRET_KEY" skey <- MaybeT $ Env.lookupEnv "MINIO_SECRET_KEY"
return $ Credentials (T.pack akey) (T.pack skey) return $ CredentialValue (fromString akey) (fromString skey) Nothing
-- | setCredsFrom retrieves access credentials from the first -- | setCredsFrom retrieves access credentials from the first action in the
-- `Provider` form the given list that succeeds and sets it in the -- given list that succeeds and sets it in the 'ConnectInfo'.
-- `ConnectInfo`. setCredsFrom :: [CredentialLoader] -> ConnectInfo -> IO ConnectInfo
setCredsFrom :: [Provider] -> ConnectInfo -> IO ConnectInfo
setCredsFrom ps ci = do setCredsFrom ps ci = do
pMay <- findFirst ps pMay <- findFirst ps
maybe maybe
(throwIO MErrVMissingCredentials) (throwIO MErrVMissingCredentials)
(return . (flip setCreds ci)) (return . (`setCreds` ci))
pMay pMay
-- | setCreds sets the given `Credentials` in the `ConnectInfo`. -- | setCreds sets the given `CredentialValue` in the `ConnectInfo`.
setCreds :: Credentials -> ConnectInfo -> ConnectInfo setCreds :: CredentialValue -> ConnectInfo -> ConnectInfo
setCreds (Credentials accessKey secretKey) connInfo = setCreds cv connInfo =
connInfo connInfo
{ connectAccessKey = accessKey, { connectCreds = CredsStatic cv
connectSecretKey = secretKey
} }
-- | 'setSTSCredential' configures `ConnectInfo` to retrieve temporary
-- credentials via the STS API on demand. It is automatically refreshed on
-- expiry.
setSTSCredential :: (STSCredentialProvider p) => p -> ConnectInfo -> IO ConnectInfo
setSTSCredential p ci = do
store <- initSTSCredential p
return ci {connectCreds = CredsSTS store}
-- | Set the S3 region parameter in the `ConnectInfo` -- | Set the S3 region parameter in the `ConnectInfo`
setRegion :: Region -> ConnectInfo -> ConnectInfo setRegion :: Region -> ConnectInfo -> ConnectInfo
setRegion r connInfo = setRegion r connInfo =
@ -219,15 +254,7 @@ disableTLSCertValidation :: ConnectInfo -> ConnectInfo
disableTLSCertValidation c = c {connectDisableTLSCertValidation = True} disableTLSCertValidation c = c {connectDisableTLSCertValidation = True}
getHostAddr :: ConnectInfo -> ByteString getHostAddr :: ConnectInfo -> ByteString
getHostAddr ci = getHostAddr ci = getHostHeader (encodeUtf8 $ connectHost ci, connectPort ci)
if
| port == 80 || port == 443 -> TE.encodeUtf8 host
| otherwise ->
TE.encodeUtf8 $
T.concat [host, ":", Lib.Prelude.show port]
where
port = connectPort ci
host = connectHost ci
-- | Default Google Compute Storage ConnectInfo. Works only for -- | Default Google Compute Storage ConnectInfo. Works only for
-- "Simple Migration" use-case with interoperability mode enabled on -- "Simple Migration" use-case with interoperability mode enabled on
@ -250,7 +277,7 @@ awsCI = "https://s3.amazonaws.com"
-- ConnectInfo. Credentials are already filled in. -- ConnectInfo. Credentials are already filled in.
minioPlayCI :: ConnectInfo minioPlayCI :: ConnectInfo
minioPlayCI = minioPlayCI =
let playCreds = Credentials "Q3AM3UQ867SPQQA43P2F" "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" let playCreds = CredentialValue "Q3AM3UQ867SPQQA43P2F" "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" Nothing
in setCreds playCreds $ in setCreds playCreds $
setRegion setRegion
"us-east-1" "us-east-1"
@ -273,16 +300,16 @@ type ETag = Text
-- | Data type to represent an object encryption key. Create one using -- | Data type to represent an object encryption key. Create one using
-- the `mkSSECKey` function. -- the `mkSSECKey` function.
newtype SSECKey = SSECKey BA.ScrubbedBytes newtype SSECKey = SSECKey BA.ScrubbedBytes
deriving (Eq, Show) deriving stock (Eq, Show)
-- | Validates that the given ByteString is 32 bytes long and creates -- | Validates that the given ByteString is 32 bytes long and creates
-- an encryption key. -- an encryption key.
mkSSECKey :: MonadThrow m => ByteString -> m SSECKey mkSSECKey :: (MonadThrow m) => ByteString -> m SSECKey
mkSSECKey keyBytes mkSSECKey keyBytes
| B.length keyBytes /= 32 = | B.length keyBytes /= 32 =
throwM MErrVInvalidEncryptionKeyLength throwM MErrVInvalidEncryptionKeyLength
| otherwise = | otherwise =
return $ SSECKey $ BA.convert keyBytes return $ SSECKey $ BA.convert keyBytes
-- | Data type to represent Server-Side-Encryption settings -- | Data type to represent Server-Side-Encryption settings
data SSE where data SSE where
@ -294,7 +321,7 @@ data SSE where
-- argument is the optional KMS context that must have a -- argument is the optional KMS context that must have a
-- `A.ToJSON` instance - please refer to the AWS S3 documentation -- `A.ToJSON` instance - please refer to the AWS S3 documentation
-- for detailed information. -- for detailed information.
SSEKMS :: A.ToJSON a => Maybe ByteString -> Maybe a -> SSE SSEKMS :: (A.ToJSON a) => Maybe ByteString -> Maybe a -> SSE
-- | Specifies server-side encryption with customer provided -- | Specifies server-side encryption with customer provided
-- key. The argument is the encryption key to be used. -- key. The argument is the encryption key to be used.
SSEC :: SSECKey -> SSE SSEC :: SSECKey -> SSE
@ -352,28 +379,10 @@ data PutObjectOptions = PutObjectOptions
defaultPutObjectOptions :: PutObjectOptions defaultPutObjectOptions :: PutObjectOptions
defaultPutObjectOptions = PutObjectOptions Nothing Nothing Nothing Nothing Nothing Nothing [] Nothing Nothing defaultPutObjectOptions = PutObjectOptions Nothing Nothing Nothing Nothing Nothing Nothing [] Nothing Nothing
-- | If the given header name has the @X-Amz-Meta-@ prefix, it is
-- stripped and a Just is returned.
userMetadataHeaderNameMaybe :: Text -> Maybe Text
userMetadataHeaderNameMaybe k =
let prefix = T.toCaseFold "X-Amz-Meta-"
n = T.length prefix
in if T.toCaseFold (T.take n k) == prefix
then Just (T.drop n k)
else Nothing
addXAmzMetaPrefix :: Text -> Text
addXAmzMetaPrefix s
| isJust (userMetadataHeaderNameMaybe s) = s
| otherwise = "X-Amz-Meta-" <> s
mkHeaderFromMetadata :: [(Text, Text)] -> [HT.Header]
mkHeaderFromMetadata = map (\(x, y) -> (mk $ encodeUtf8 $ addXAmzMetaPrefix $ x, encodeUtf8 y))
pooToHeaders :: PutObjectOptions -> [HT.Header] pooToHeaders :: PutObjectOptions -> [HT.Header]
pooToHeaders poo = pooToHeaders poo =
userMetadata userMetadata
++ (catMaybes $ map tupToMaybe (zipWith (,) names values)) ++ mapMaybe tupToMaybe (zip names values)
++ maybe [] toPutObjectHeaders (pooSSE poo) ++ maybe [] toPutObjectHeaders (pooSSE poo)
where where
tupToMaybe (k, Just v) = Just (k, v) tupToMaybe (k, Just v) = Just (k, v)
@ -404,11 +413,34 @@ data BucketInfo = BucketInfo
{ biName :: Bucket, { biName :: Bucket,
biCreationDate :: UTCTime biCreationDate :: UTCTime
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | A type alias to represent a part-number for multipart upload -- | A type alias to represent a part-number for multipart upload
type PartNumber = Int16 type PartNumber = Int16
-- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size =
uncurry (List.zip3 [1 ..]) $
List.unzip $
loop 0 size
where
ceil :: Double -> Int64
ceil = ceiling
partSize =
max
minPartSize
( ceil $
fromIntegral size
/ fromIntegral maxMultipartParts
)
m = partSize
loop st sz
| st > sz = []
| st + m >= sz = [(st, sz - st)]
| otherwise = (st, m) : loop (st + m) sz
-- | A type alias to represent an upload-id for multipart upload -- | A type alias to represent an upload-id for multipart upload
type UploadId = Text type UploadId = Text
@ -422,7 +454,7 @@ data ListPartsResult = ListPartsResult
lprNextPart :: Maybe Int, lprNextPart :: Maybe Int,
lprParts :: [ObjectPartInfo] lprParts :: [ObjectPartInfo]
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents information about an object part in an ongoing -- | Represents information about an object part in an ongoing
-- multipart upload. -- multipart upload.
@ -432,7 +464,7 @@ data ObjectPartInfo = ObjectPartInfo
opiSize :: Int64, opiSize :: Int64,
opiModTime :: UTCTime opiModTime :: UTCTime
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents result from a listing of incomplete uploads to a -- | Represents result from a listing of incomplete uploads to a
-- bucket. -- bucket.
@ -443,7 +475,7 @@ data ListUploadsResult = ListUploadsResult
lurUploads :: [(Object, UploadId, UTCTime)], lurUploads :: [(Object, UploadId, UTCTime)],
lurCPrefixes :: [Text] lurCPrefixes :: [Text]
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents information about a multipart upload. -- | Represents information about a multipart upload.
data UploadInfo = UploadInfo data UploadInfo = UploadInfo
@ -452,7 +484,7 @@ data UploadInfo = UploadInfo
uiInitTime :: UTCTime, uiInitTime :: UTCTime,
uiSize :: Int64 uiSize :: Int64
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents result from a listing of objects in a bucket. -- | Represents result from a listing of objects in a bucket.
data ListObjectsResult = ListObjectsResult data ListObjectsResult = ListObjectsResult
@ -461,7 +493,7 @@ data ListObjectsResult = ListObjectsResult
lorObjects :: [ObjectInfo], lorObjects :: [ObjectInfo],
lorCPrefixes :: [Text] lorCPrefixes :: [Text]
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents result from a listing of objects version 1 in a bucket. -- | Represents result from a listing of objects version 1 in a bucket.
data ListObjectsV1Result = ListObjectsV1Result data ListObjectsV1Result = ListObjectsV1Result
@ -470,7 +502,7 @@ data ListObjectsV1Result = ListObjectsV1Result
lorObjects' :: [ObjectInfo], lorObjects' :: [ObjectInfo],
lorCPrefixes' :: [Text] lorCPrefixes' :: [Text]
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents information about an object. -- | Represents information about an object.
data ObjectInfo = ObjectInfo data ObjectInfo = ObjectInfo
@ -494,7 +526,7 @@ data ObjectInfo = ObjectInfo
-- user-metadata pairs) -- user-metadata pairs)
oiMetadata :: H.HashMap Text Text oiMetadata :: H.HashMap Text Text
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Represents source object in server-side copy object -- | Represents source object in server-side copy object
data SourceInfo = SourceInfo data SourceInfo = SourceInfo
@ -526,7 +558,7 @@ data SourceInfo = SourceInfo
-- given time. -- given time.
srcIfUnmodifiedSince :: Maybe UTCTime srcIfUnmodifiedSince :: Maybe UTCTime
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Provide a default for `SourceInfo` -- | Provide a default for `SourceInfo`
defaultSourceInfo :: SourceInfo defaultSourceInfo :: SourceInfo
@ -539,7 +571,7 @@ data DestinationInfo = DestinationInfo
-- | Destination object key -- | Destination object key
dstObject :: Text dstObject :: Text
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Provide a default for `DestinationInfo` -- | Provide a default for `DestinationInfo`
defaultDestinationInfo :: DestinationInfo defaultDestinationInfo :: DestinationInfo
@ -573,7 +605,8 @@ defaultGetObjectOptions =
gooToHeaders :: GetObjectOptions -> [HT.Header] gooToHeaders :: GetObjectOptions -> [HT.Header]
gooToHeaders goo = gooToHeaders goo =
rangeHdr ++ zip names values rangeHdr
++ zip names values
++ maybe [] (toPutObjectHeaders . SSEC) (gooSSECKey goo) ++ maybe [] (toPutObjectHeaders . SSEC) (gooSSECKey goo)
where where
names = names =
@ -616,18 +649,18 @@ data Event
| ObjectRemovedDelete | ObjectRemovedDelete
| ObjectRemovedDeleteMarkerCreated | ObjectRemovedDeleteMarkerCreated
| ReducedRedundancyLostObject | ReducedRedundancyLostObject
deriving (Eq) deriving stock (Eq, Show)
instance Show Event where instance ToText Event where
show ObjectCreated = "s3:ObjectCreated:*" toText ObjectCreated = "s3:ObjectCreated:*"
show ObjectCreatedPut = "s3:ObjectCreated:Put" toText ObjectCreatedPut = "s3:ObjectCreated:Put"
show ObjectCreatedPost = "s3:ObjectCreated:Post" toText ObjectCreatedPost = "s3:ObjectCreated:Post"
show ObjectCreatedCopy = "s3:ObjectCreated:Copy" toText ObjectCreatedCopy = "s3:ObjectCreated:Copy"
show ObjectCreatedMultipartUpload = "s3:ObjectCreated:MultipartUpload" toText ObjectCreatedMultipartUpload = "s3:ObjectCreated:MultipartUpload"
show ObjectRemoved = "s3:ObjectRemoved:*" toText ObjectRemoved = "s3:ObjectRemoved:*"
show ObjectRemovedDelete = "s3:ObjectRemoved:Delete" toText ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
show ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" toText ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
show ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" toText ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
textToEvent :: Text -> Maybe Event textToEvent :: Text -> Maybe Event
textToEvent t = case t of textToEvent t = case t of
@ -643,10 +676,10 @@ textToEvent t = case t of
_ -> Nothing _ -> Nothing
-- | Filter data type - part of notification configuration -- | Filter data type - part of notification configuration
data Filter = Filter newtype Filter = Filter
{ fFilter :: FilterKey { fFilter :: FilterKey
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | defaultFilter is empty, used to create a notification -- | defaultFilter is empty, used to create a notification
-- configuration. -- configuration.
@ -654,10 +687,10 @@ defaultFilter :: Filter
defaultFilter = Filter defaultFilterKey defaultFilter = Filter defaultFilterKey
-- | FilterKey contains FilterRules, and is part of a Filter. -- | FilterKey contains FilterRules, and is part of a Filter.
data FilterKey = FilterKey newtype FilterKey = FilterKey
{ fkKey :: FilterRules { fkKey :: FilterRules
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | defaultFilterKey is empty, used to create notification -- | defaultFilterKey is empty, used to create notification
-- configuration. -- configuration.
@ -665,10 +698,10 @@ defaultFilterKey :: FilterKey
defaultFilterKey = FilterKey defaultFilterRules defaultFilterKey = FilterKey defaultFilterRules
-- | FilterRules represents a collection of `FilterRule`s. -- | FilterRules represents a collection of `FilterRule`s.
data FilterRules = FilterRules newtype FilterRules = FilterRules
{ frFilterRules :: [FilterRule] { frFilterRules :: [FilterRule]
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | defaultFilterRules is empty, used to create notification -- | defaultFilterRules is empty, used to create notification
-- configuration. -- configuration.
@ -688,7 +721,7 @@ data FilterRule = FilterRule
{ frName :: Text, { frName :: Text,
frValue :: Text frValue :: Text
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | Arn is an alias of Text -- | Arn is an alias of Text
type Arn = Text type Arn = Text
@ -702,7 +735,7 @@ data NotificationConfig = NotificationConfig
ncEvents :: [Event], ncEvents :: [Event],
ncFilter :: Filter ncFilter :: Filter
} }
deriving (Show, Eq) deriving stock (Show, Eq)
-- | A data-type to represent bucket notification configuration. It is -- | A data-type to represent bucket notification configuration. It is
-- a collection of queue, topic or lambda function configurations. The -- a collection of queue, topic or lambda function configurations. The
@ -714,7 +747,7 @@ data Notification = Notification
nTopicConfigurations :: [NotificationConfig], nTopicConfigurations :: [NotificationConfig],
nCloudFunctionConfigurations :: [NotificationConfig] nCloudFunctionConfigurations :: [NotificationConfig]
} }
deriving (Eq, Show) deriving stock (Show, Eq)
-- | The default notification configuration is empty. -- | The default notification configuration is empty.
defaultNotification :: Notification defaultNotification :: Notification
@ -733,10 +766,10 @@ data SelectRequest = SelectRequest
srOutputSerialization :: OutputSerialization, srOutputSerialization :: OutputSerialization,
srRequestProgressEnabled :: Maybe Bool srRequestProgressEnabled :: Maybe Bool
} }
deriving (Eq, Show) deriving stock (Show, Eq)
data ExpressionType = SQL data ExpressionType = SQL
deriving (Eq, Show) deriving stock (Show, Eq)
-- | InputSerialization represents format information of the input -- | InputSerialization represents format information of the input
-- object being queried. Use one of the smart constructors such as -- object being queried. Use one of the smart constructors such as
@ -746,7 +779,7 @@ data InputSerialization = InputSerialization
{ isCompressionType :: Maybe CompressionType, { isCompressionType :: Maybe CompressionType,
isFormatInfo :: InputFormatInfo isFormatInfo :: InputFormatInfo
} }
deriving (Eq, Show) deriving stock (Show, Eq)
-- | Data type representing the compression setting in a Select -- | Data type representing the compression setting in a Select
-- request. -- request.
@ -754,7 +787,7 @@ data CompressionType
= CompressionTypeNone = CompressionTypeNone
| CompressionTypeGzip | CompressionTypeGzip
| CompressionTypeBzip2 | CompressionTypeBzip2
deriving (Eq, Show) deriving stock (Show, Eq)
-- | Data type representing input object format information in a -- | Data type representing input object format information in a
-- Select request. -- Select request.
@ -762,7 +795,7 @@ data InputFormatInfo
= InputFormatCSV CSVInputProp = InputFormatCSV CSVInputProp
| InputFormatJSON JSONInputProp | InputFormatJSON JSONInputProp
| InputFormatParquet | InputFormatParquet
deriving (Eq, Show) deriving stock (Show, Eq)
-- | defaultCsvInput returns InputSerialization with default CSV -- | defaultCsvInput returns InputSerialization with default CSV
-- format, and without any compression setting. -- format, and without any compression setting.
@ -841,20 +874,17 @@ type CSVInputProp = CSVProp
-- | CSVProp represents CSV format properties. It is built up using -- | CSVProp represents CSV format properties. It is built up using
-- the Monoid instance. -- the Monoid instance.
data CSVProp = CSVProp (H.HashMap Text Text) newtype CSVProp = CSVProp (H.HashMap Text Text)
deriving (Eq, Show) deriving stock (Show, Eq)
#if (__GLASGOW_HASKELL__ >= 804)
instance Semigroup CSVProp where instance Semigroup CSVProp where
(CSVProp a) <> (CSVProp b) = CSVProp (b <> a) (CSVProp a) <> (CSVProp b) = CSVProp (b <> a)
#endif
instance Monoid CSVProp where instance Monoid CSVProp where
mempty = CSVProp mempty mempty = CSVProp mempty
#if (__GLASGOW_HASKELL__ < 804) csvPropsList :: CSVProp -> [(Text, Text)]
mappend (CSVProp a) (CSVProp b) = CSVProp (b <> a) csvPropsList (CSVProp h) = sort $ H.toList h
#endif
defaultCSVProp :: CSVProp defaultCSVProp :: CSVProp
defaultCSVProp = mempty defaultCSVProp = mempty
@ -884,15 +914,15 @@ data FileHeaderInfo
FileHeaderUse FileHeaderUse
| -- | Header are present, but should be ignored | -- | Header are present, but should be ignored
FileHeaderIgnore FileHeaderIgnore
deriving (Eq, Show) deriving stock (Show, Eq)
-- | Specify the CSV file header info property. -- | Specify the CSV file header info property.
fileHeaderInfo :: FileHeaderInfo -> CSVProp fileHeaderInfo :: FileHeaderInfo -> CSVProp
fileHeaderInfo = CSVProp . H.singleton "FileHeaderInfo" . toString fileHeaderInfo = CSVProp . H.singleton "FileHeaderInfo" . toStr
where where
toString FileHeaderNone = "NONE" toStr FileHeaderNone = "NONE"
toString FileHeaderUse = "USE" toStr FileHeaderUse = "USE"
toString FileHeaderIgnore = "IGNORE" toStr FileHeaderIgnore = "IGNORE"
-- | Specify the CSV comment character property. Lines starting with -- | Specify the CSV comment character property. Lines starting with
-- this character are ignored by the server. -- this character are ignored by the server.
@ -909,13 +939,13 @@ setInputCSVProps p is = is {isFormatInfo = InputFormatCSV p}
-- | Set the CSV format properties in the OutputSerialization. -- | Set the CSV format properties in the OutputSerialization.
outputCSVFromProps :: CSVProp -> OutputSerialization outputCSVFromProps :: CSVProp -> OutputSerialization
outputCSVFromProps p = OutputSerializationCSV p outputCSVFromProps = OutputSerializationCSV
data JSONInputProp = JSONInputProp {jsonipType :: JSONType} newtype JSONInputProp = JSONInputProp {jsonipType :: JSONType}
deriving (Eq, Show) deriving stock (Show, Eq)
data JSONType = JSONTypeDocument | JSONTypeLines data JSONType = JSONTypeDocument | JSONTypeLines
deriving (Eq, Show) deriving stock (Show, Eq)
-- | OutputSerialization represents output serialization settings for -- | OutputSerialization represents output serialization settings for
-- the SelectRequest. Use `defaultCsvOutput` or `defaultJsonOutput` as -- the SelectRequest. Use `defaultCsvOutput` or `defaultJsonOutput` as
@ -923,23 +953,24 @@ data JSONType = JSONTypeDocument | JSONTypeLines
data OutputSerialization data OutputSerialization
= OutputSerializationJSON JSONOutputProp = OutputSerializationJSON JSONOutputProp
| OutputSerializationCSV CSVOutputProp | OutputSerializationCSV CSVOutputProp
deriving (Eq, Show) deriving stock (Show, Eq)
type CSVOutputProp = CSVProp type CSVOutputProp = CSVProp
-- | quoteFields is an output serialization parameter -- | quoteFields is an output serialization parameter
quoteFields :: QuoteFields -> CSVProp quoteFields :: QuoteFields -> CSVProp
quoteFields q = CSVProp $ H.singleton "QuoteFields" $ quoteFields q = CSVProp $
case q of H.singleton "QuoteFields" $
QuoteFieldsAsNeeded -> "ASNEEDED" case q of
QuoteFieldsAlways -> "ALWAYS" QuoteFieldsAsNeeded -> "ASNEEDED"
QuoteFieldsAlways -> "ALWAYS"
-- | Represent the QuoteField setting. -- | Represent the QuoteField setting.
data QuoteFields = QuoteFieldsAsNeeded | QuoteFieldsAlways data QuoteFields = QuoteFieldsAsNeeded | QuoteFieldsAlways
deriving (Eq, Show) deriving stock (Show, Eq)
data JSONOutputProp = JSONOutputProp {jsonopRecordDelimiter :: Maybe Text} newtype JSONOutputProp = JSONOutputProp {jsonopRecordDelimiter :: Maybe Text}
deriving (Eq, Show) deriving stock (Show, Eq)
-- | Set the output record delimiter for JSON format -- | Set the output record delimiter for JSON format
outputJSONFromRecordDelimiter :: Text -> OutputSerialization outputJSONFromRecordDelimiter :: Text -> OutputSerialization
@ -950,14 +981,15 @@ outputJSONFromRecordDelimiter t =
-- | An EventMessage represents each kind of message received from the server. -- | An EventMessage represents each kind of message received from the server.
data EventMessage data EventMessage
= ProgressEventMessage {emProgress :: Progress} = ProgressEventMessage Progress
| StatsEventMessage {emStats :: Stats} | StatsEventMessage Stats
| RequestLevelErrorMessage | RequestLevelErrorMessage
{ emErrorCode :: Text, Text
emErrorMessage :: Text -- ^ Error code
} Text
| RecordPayloadEventMessage {emPayloadBytes :: ByteString} -- ^ Error message
deriving (Eq, Show) | RecordPayloadEventMessage ByteString
deriving stock (Show, Eq)
data MsgHeaderName data MsgHeaderName
= MessageType = MessageType
@ -965,7 +997,7 @@ data MsgHeaderName
| ContentType | ContentType
| ErrorCode | ErrorCode
| ErrorMessage | ErrorMessage
deriving (Eq, Show) deriving stock (Show, Eq)
msgHeaderValueType :: Word8 msgHeaderValueType :: Word8
msgHeaderValueType = 7 msgHeaderValueType = 7
@ -978,7 +1010,7 @@ data Progress = Progress
pBytesProcessed :: Int64, pBytesProcessed :: Int64,
pBytesReturned :: Int64 pBytesReturned :: Int64
} }
deriving (Eq, Show) deriving stock (Show, Eq)
-- | Represent the stats event returned at the end of the Select -- | Represent the stats event returned at the end of the Select
-- response. -- response.
@ -1016,7 +1048,8 @@ data S3ReqInfo = S3ReqInfo
riPayload :: Payload, riPayload :: Payload,
riPayloadHash :: Maybe ByteString, riPayloadHash :: Maybe ByteString,
riRegion :: Maybe Region, riRegion :: Maybe Region,
riNeedsLocation :: Bool riNeedsLocation :: Bool,
riPresignExpirySecs :: Maybe UrlExpiry
} }
defaultS3ReqInfo :: S3ReqInfo defaultS3ReqInfo :: S3ReqInfo
@ -1031,16 +1064,13 @@ defaultS3ReqInfo =
Nothing Nothing
Nothing Nothing
True True
Nothing
getS3Path :: Maybe Bucket -> Maybe Object -> ByteString getS3Path :: Maybe Bucket -> Maybe Object -> ByteString
getS3Path b o = getS3Path b o =
let segments = map TE.encodeUtf8 $ catMaybes $ b : bool [] [o] (isJust b) let segments = map TE.encodeUtf8 $ catMaybes $ b : bool [] [o] (isJust b)
in B.concat ["/", B.intercalate "/" segments] in B.concat ["/", B.intercalate "/" segments]
-- | Time to expire for a presigned URL. It interpreted as a number of
-- seconds. The maximum duration that can be specified is 7 days.
type UrlExpiry = Int
type RegionMap = H.HashMap Bucket Region type RegionMap = H.HashMap Bucket Region
-- | The Minio Monad - all computations accessing object storage -- | The Minio Monad - all computations accessing object storage
@ -1048,7 +1078,7 @@ type RegionMap = H.HashMap Bucket Region
newtype Minio a = Minio newtype Minio a = Minio
{ unMinio :: ReaderT MinioConn (ResourceT IO) a { unMinio :: ReaderT MinioConn (ResourceT IO) a
} }
deriving deriving newtype
( Functor, ( Functor,
Applicative, Applicative,
Monad, Monad,
@ -1074,11 +1104,10 @@ class HasSvcNamespace env where
instance HasSvcNamespace MinioConn where instance HasSvcNamespace MinioConn where
getSvcNamespace env = getSvcNamespace env =
let host = connectHost $ mcConnInfo env let host = connectHost $ mcConnInfo env
in if in ( if host == "storage.googleapis.com"
| host == "storage.googleapis.com" -> then "http://doc.s3.amazonaws.com/2006-03-01"
"http://doc.s3.amazonaws.com/2006-03-01" else "http://s3.amazonaws.com/doc/2006-03-01/"
| otherwise -> )
"http://s3.amazonaws.com/doc/2006-03-01/"
-- | Takes connection information and returns a connection object to -- | Takes connection information and returns a connection object to
-- be passed to 'runMinio'. The returned value can be kept in the -- be passed to 'runMinio'. The returned value can be kept in the
@ -1088,8 +1117,8 @@ connect :: ConnectInfo -> IO MinioConn
connect ci = do connect ci = do
let settings let settings
| connectIsSecure ci && connectDisableTLSCertValidation ci = | connectIsSecure ci && connectDisableTLSCertValidation ci =
let badTlsSettings = Conn.TLSSettingsSimple True False False let badTlsSettings = Conn.TLSSettingsSimple True False False
in TLS.mkManagerSettings badTlsSettings Nothing in TLS.mkManagerSettings badTlsSettings Nothing
| connectIsSecure ci = NC.tlsManagerSettings | connectIsSecure ci = NC.tlsManagerSettings
| otherwise = defaultManagerSettings | otherwise = defaultManagerSettings
mgr <- NC.newManager settings mgr <- NC.newManager settings
@ -1138,9 +1167,22 @@ runMinioRes ci m = do
conn <- liftIO $ connect ci conn <- liftIO $ connect ci
runMinioResWith conn m runMinioResWith conn m
s3Name :: Text -> Text -> Name
s3Name ns s = Name s (Just ns) Nothing
-- | Format as per RFC 1123. -- | Format as per RFC 1123.
formatRFC1123 :: UTCTime -> T.Text formatRFC1123 :: UTCTime -> T.Text
formatRFC1123 = T.pack . formatTime defaultTimeLocale "%a, %d %b %Y %X %Z" formatRFC1123 = T.pack . formatTime defaultTimeLocale "%a, %d %b %Y %X %Z"
lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do
rMVar <- asks mcRegionMap
rMap <- UM.readMVar rMVar
return $ H.lookup b rMap
addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.insert b region
deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.delete b

View File

@ -25,9 +25,8 @@ import qualified Data.ByteString as B
import qualified Data.ByteString.Builder as BB import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Char8 as BC8 import qualified Data.ByteString.Char8 as BC8
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import Data.Char (isAsciiLower, isAsciiUpper, isSpace, isDigit, toUpper) import Data.Char (isAsciiLower, isAsciiUpper, isDigit, isSpace, toUpper)
import qualified Data.Text as T import qualified Data.Text as T
import Lib.Prelude
import Numeric (showHex) import Numeric (showHex)
stripBS :: ByteString -> ByteString stripBS :: ByteString -> ByteString
@ -38,8 +37,10 @@ class UriEncodable s where
instance UriEncodable [Char] where instance UriEncodable [Char] where
uriEncode encodeSlash payload = uriEncode encodeSlash payload =
LB.toStrict $ BB.toLazyByteString $ mconcat $ LB.toStrict $
map (`uriEncodeChar` encodeSlash) payload BB.toLazyByteString $
mconcat $
map (`uriEncodeChar` encodeSlash) payload
instance UriEncodable ByteString where instance UriEncodable ByteString where
-- assumes that uriEncode is passed ASCII encoded strings. -- assumes that uriEncode is passed ASCII encoded strings.
@ -64,11 +65,11 @@ uriEncodeChar ch _
|| (ch == '-') || (ch == '-')
|| (ch == '.') || (ch == '.')
|| (ch == '~') = || (ch == '~') =
BB.char7 ch BB.char7 ch
| otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch | otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch
where where
f :: Word8 -> BB.Builder f :: Word8 -> BB.Builder
f n = BB.char7 '%' <> BB.string7 hexStr f n = BB.char7 '%' <> BB.string7 hexStr
where where
hexStr = map toUpper $ showHex q $ showHex r "" hexStr = map toUpper $ showHex q $ showHex r ""
(q, r) = divMod (fromIntegral n) (16 :: Word8) (q, r) = divMod n (16 :: Word8)

View File

@ -39,31 +39,30 @@ import Crypto.MAC.HMAC (HMAC, hmac)
import Data.ByteArray (ByteArrayAccess, convert) import Data.ByteArray (ByteArrayAccess, convert)
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase) import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
import qualified Data.Conduit as C import qualified Data.Conduit as C
import Lib.Prelude
hashSHA256 :: ByteString -> ByteString hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256 hashSHA256 = digestToBase16 . hashWith SHA256
hashSHA256FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString hashSHA256FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource src = do hashSHA256FromSource src = do
digest <- C.connect src sinkSHA256Hash digest <- C.connect src sinkSHA256Hash
return $ digestToBase16 digest return $ digestToBase16 digest
where where
-- To help with type inference -- To help with type inference
sinkSHA256Hash :: Monad m => C.ConduitM ByteString Void m (Digest SHA256) sinkSHA256Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest SHA256)
sinkSHA256Hash = sinkHash sinkSHA256Hash = sinkHash
-- Returns MD5 hash hex encoded. -- Returns MD5 hash hex encoded.
hashMD5 :: ByteString -> ByteString hashMD5 :: ByteString -> ByteString
hashMD5 = digestToBase16 . hashWith MD5 hashMD5 = digestToBase16 . hashWith MD5
hashMD5FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString hashMD5FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString
hashMD5FromSource src = do hashMD5FromSource src = do
digest <- C.connect src sinkMD5Hash digest <- C.connect src sinkMD5Hash
return $ digestToBase16 digest return $ digestToBase16 digest
where where
-- To help with type inference -- To help with type inference
sinkMD5Hash :: Monad m => C.ConduitM ByteString Void m (Digest MD5) sinkMD5Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest MD5)
sinkMD5Hash = sinkHash sinkMD5Hash = sinkHash
hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256 hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256
@ -72,15 +71,15 @@ hmacSHA256 message key = hmac key message
hmacSHA256RawBS :: ByteString -> ByteString -> ByteString hmacSHA256RawBS :: ByteString -> ByteString -> ByteString
hmacSHA256RawBS message key = convert $ hmacSHA256 message key hmacSHA256RawBS message key = convert $ hmacSHA256 message key
digestToBS :: ByteArrayAccess a => a -> ByteString digestToBS :: (ByteArrayAccess a) => a -> ByteString
digestToBS = convert digestToBS = convert
digestToBase16 :: ByteArrayAccess a => a -> ByteString digestToBase16 :: (ByteArrayAccess a) => a -> ByteString
digestToBase16 = convertToBase Base16 digestToBase16 = convertToBase Base16
-- Returns MD5 hash base 64 encoded. -- Returns MD5 hash base 64 encoded.
hashMD5ToBase64 :: ByteArrayAccess a => a -> ByteString hashMD5ToBase64 :: (ByteArrayAccess a) => a -> ByteString
hashMD5ToBase64 = convertToBase Base64 . hashWith MD5 hashMD5ToBase64 = convertToBase Base64 . hashWith MD5
encodeToBase64 :: ByteArrayAccess a => a -> ByteString encodeToBase64 :: (ByteArrayAccess a) => a -> ByteString
encodeToBase64 = convertToBase Base64 encodeToBase64 = convertToBase Base64

View File

@ -21,13 +21,19 @@ module Network.Minio.Data.Time
awsDateFormatBS, awsDateFormatBS,
awsParseTime, awsParseTime,
iso8601TimeFormat, iso8601TimeFormat,
UrlExpiry,
) )
where where
import Data.ByteString.Char8 (pack) import Data.ByteString.Char8 (pack)
import qualified Data.Time as Time import qualified Data.Time as Time
import Data.Time.Format.ISO8601 (iso8601Show)
import Lib.Prelude import Lib.Prelude
-- | Time to expire for a presigned URL. It interpreted as a number of
-- seconds. The maximum duration that can be specified is 7 days.
type UrlExpiry = Int
awsTimeFormat :: UTCTime -> [Char] awsTimeFormat :: UTCTime -> [Char]
awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ" awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
@ -44,4 +50,4 @@ awsParseTime :: [Char] -> Maybe UTCTime
awsParseTime = Time.parseTimeM False Time.defaultTimeLocale "%Y%m%dT%H%M%SZ" awsParseTime = Time.parseTimeM False Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
iso8601TimeFormat :: UTCTime -> [Char] iso8601TimeFormat :: UTCTime -> [Char]
iso8601TimeFormat = Time.formatTime Time.defaultTimeLocale (Time.iso8601DateFormat $ Just "%T%QZ") iso8601TimeFormat = iso8601Show

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -14,10 +14,15 @@
-- limitations under the License. -- limitations under the License.
-- --
module Network.Minio.Errors where module Network.Minio.Errors
( MErrV (..),
ServiceErr (..),
MinioErr (..),
toServiceErr,
)
where
import Control.Exception import Control.Exception (IOException)
import Lib.Prelude
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
--------------------------------- ---------------------------------
@ -44,7 +49,8 @@ data MErrV
| MErrVInvalidEncryptionKeyLength | MErrVInvalidEncryptionKeyLength
| MErrVStreamingBodyUnexpectedEOF | MErrVStreamingBodyUnexpectedEOF
| MErrVUnexpectedPayload | MErrVUnexpectedPayload
deriving (Show, Eq) | MErrVSTSEndpointNotFound
deriving stock (Show, Eq)
instance Exception MErrV instance Exception MErrV
@ -57,7 +63,7 @@ data ServiceErr
| NoSuchKey | NoSuchKey
| SelectErr Text Text | SelectErr Text Text
| ServiceErr Text Text | ServiceErr Text Text
deriving (Show, Eq) deriving stock (Show, Eq)
instance Exception ServiceErr instance Exception ServiceErr
@ -75,7 +81,7 @@ data MinioErr
| MErrIO IOException | MErrIO IOException
| MErrService ServiceErr | MErrService ServiceErr
| MErrValidation MErrV | MErrValidation MErrV
deriving (Show) deriving stock (Show)
instance Eq MinioErr where instance Eq MinioErr where
MErrHTTP _ == MErrHTTP _ = True MErrHTTP _ == MErrHTTP _ = True

View File

@ -20,11 +20,11 @@ module Network.Minio.JsonParser
where where
import Data.Aeson import Data.Aeson
( (.:), ( FromJSON,
FromJSON,
eitherDecode, eitherDecode,
parseJSON, parseJSON,
withObject, withObject,
(.:),
) )
import qualified Data.Text as T import qualified Data.Text as T
import Lib.Prelude import Lib.Prelude
@ -34,7 +34,7 @@ data AdminErrJSON = AdminErrJSON
{ aeCode :: Text, { aeCode :: Text,
aeMessage :: Text aeMessage :: Text
} }
deriving (Eq, Show) deriving stock (Eq, Show)
instance FromJSON AdminErrJSON where instance FromJSON AdminErrJSON where
parseJSON = withObject "AdminErrJSON" $ \v -> parseJSON = withObject "AdminErrJSON" $ \v ->

View File

@ -19,16 +19,47 @@ module Network.Minio.ListOps where
import qualified Data.Conduit as C import qualified Data.Conduit as C
import qualified Data.Conduit.Combinators as CC import qualified Data.Conduit.Combinators as CC
import qualified Data.Conduit.List as CL import qualified Data.Conduit.List as CL
import Lib.Prelude
import Network.Minio.Data import Network.Minio.Data
( Bucket,
ListObjectsResult
( lorCPrefixes,
lorHasMore,
lorNextToken,
lorObjects
),
ListObjectsV1Result
( lorCPrefixes',
lorHasMore',
lorNextMarker,
lorObjects'
),
ListPartsResult (lprHasMore, lprNextPart, lprParts),
ListUploadsResult
( lurHasMore,
lurNextKey,
lurNextUpload,
lurUploads
),
Minio,
Object,
ObjectInfo,
ObjectPartInfo (opiSize),
UploadId,
UploadInfo (UploadInfo),
)
import Network.Minio.S3API import Network.Minio.S3API
( listIncompleteParts',
listIncompleteUploads',
listObjects',
listObjectsV1',
)
-- | Represents a list output item - either an object or an object -- | Represents a list output item - either an object or an object
-- prefix (i.e. a directory). -- prefix (i.e. a directory).
data ListItem data ListItem
= ListItemObject ObjectInfo = ListItemObject ObjectInfo
| ListItemPrefix Text | ListItemPrefix Text
deriving (Show, Eq) deriving stock (Show, Eq)
-- | @'listObjects' bucket prefix recurse@ lists objects in a bucket -- | @'listObjects' bucket prefix recurse@ lists objects in a bucket
-- similar to a file system tree traversal. -- similar to a file system tree traversal.
@ -51,10 +82,10 @@ listObjects bucket prefix recurse = loop Nothing
res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects res CL.sourceList $ map ListItemObject $ lorObjects res
unless recurse unless recurse $
$ CL.sourceList CL.sourceList $
$ map ListItemPrefix map ListItemPrefix $
$ lorCPrefixes res lorCPrefixes res
when (lorHasMore res) $ when (lorHasMore res) $
loop (lorNextToken res) loop (lorNextToken res)
@ -73,10 +104,10 @@ listObjectsV1 bucket prefix recurse = loop Nothing
res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
CL.sourceList $ map ListItemObject $ lorObjects' res CL.sourceList $ map ListItemObject $ lorObjects' res
unless recurse unless recurse $
$ CL.sourceList CL.sourceList $
$ map ListItemPrefix map ListItemPrefix $
$ lorCPrefixes' res lorCPrefixes' res
when (lorHasMore' res) $ when (lorHasMore' res) $
loop (lorNextMarker res) loop (lorNextMarker res)
@ -104,19 +135,23 @@ listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
nextUploadIdMarker nextUploadIdMarker
Nothing Nothing
aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do aggrSizes <- lift $
partInfos <- forM (lurUploads res) $ \(uKey, uId, _) -> do
C.runConduit $ partInfos <-
listIncompleteParts bucket uKey uId C.runConduit $
C..| CC.sinkList listIncompleteParts bucket uKey uId
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos C..| CC.sinkList
return $ foldl' (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
CL.sourceList CL.sourceList $
$ map zipWith
( \((uKey, uId, uInitTime), size) -> ( curry
UploadInfo uKey uId uInitTime size ( \((uKey, uId, uInitTime), size) ->
UploadInfo uKey uId uInitTime size
)
) )
$ zip (lurUploads res) aggrSizes (lurUploads res)
aggrSizes
when (lurHasMore res) $ when (lurHasMore res) $
loop (lurNextKey res) (lurNextUpload res) loop (lurNextKey res) (lurNextUpload res)

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -13,6 +13,7 @@
-- See the License for the specific language governing permissions and -- See the License for the specific language governing permissions and
-- limitations under the License. -- limitations under the License.
-- --
{-# LANGUAGE CPP #-}
module Network.Minio.PresignedOperations module Network.Minio.PresignedOperations
( UrlExpiry, ( UrlExpiry,
@ -43,13 +44,21 @@ import qualified Data.Text as T
import qualified Data.Text.Encoding as TE import qualified Data.Text.Encoding as TE
import qualified Data.Time as Time import qualified Data.Time as Time
import Lib.Prelude import Lib.Prelude
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Client as NClient
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import Network.HTTP.Types.Header (hHost) import Network.Minio.API (buildRequest)
import Network.Minio.Credentials
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.Data.Time import Network.Minio.Data.Time
import Network.Minio.Errors import Network.Minio.Errors
import Network.Minio.Sign.V4 import Network.Minio.Sign.V4
import Network.URI (uriToString)
{- ORMOLU_DISABLE -}
#if MIN_VERSION_aeson(2,0,0)
import qualified Data.Aeson.Key as A
#endif
{- ORMOLU_ENABLE -}
-- | Generate a presigned URL. This function allows for advanced usage -- | Generate a presigned URL. This function allows for advanced usage
-- - for simple cases prefer the `presigned*Url` functions. -- - for simple cases prefer the `presigned*Url` functions.
@ -69,46 +78,26 @@ makePresignedUrl ::
HT.RequestHeaders -> HT.RequestHeaders ->
Minio ByteString Minio ByteString
makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
when (expiry > 7 * 24 * 3600 || expiry < 0) when (expiry > 7 * 24 * 3600 || expiry < 0) $
$ throwIO throwIO $
$ MErrVInvalidUrlExpiry expiry MErrVInvalidUrlExpiry expiry
ci <- asks mcConnInfo let s3ri =
defaultS3ReqInfo
let hostHeader = (hHost, getHostAddr ci) { riPresignExpirySecs = Just expiry,
req = riMethod = method,
NC.defaultRequest riBucket = bucket,
{ NC.method = method, riObject = object,
NC.secure = connectIsSecure ci, riRegion = region,
NC.host = encodeUtf8 $ connectHost ci, riQueryParams = extraQuery,
NC.port = connectPort ci, riHeaders = extraHeaders
NC.path = getS3Path bucket object,
NC.requestHeaders = hostHeader : extraHeaders,
NC.queryString = HT.renderQuery True extraQuery
} }
ts <- liftIO Time.getCurrentTime
let sp = req <- buildRequest s3ri
SignParams let uri = NClient.getUri req
(connectAccessKey ci) uriString = uriToString identity uri ""
(connectSecretKey ci)
ts
region
(Just expiry)
Nothing
signPairs = signV4 sp req
qpToAdd = (fmap . fmap) Just signPairs
queryStr =
HT.renderQueryBuilder
True
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
return $ toStrictBS $ toLazyByteString $ return $ encodeUtf8 uriString
scheme
<> byteString (getHostAddr ci)
<> byteString (getS3Path bucket object)
<> queryStr
-- | Generate a URL with authentication signature to PUT (upload) an -- | Generate a URL with authentication signature to PUT (upload) an
-- object. Any extra headers if passed, are signed, and so they are -- object. Any extra headers if passed, are signed, and so they are
@ -190,29 +179,39 @@ data PostPolicyCondition
= PPCStartsWith Text Text = PPCStartsWith Text Text
| PPCEquals Text Text | PPCEquals Text Text
| PPCRange Text Int64 Int64 | PPCRange Text Int64 Int64
deriving (Show, Eq) deriving stock (Show, Eq)
{- ORMOLU_DISABLE -}
instance Json.ToJSON PostPolicyCondition where instance Json.ToJSON PostPolicyCondition where
toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v] toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v]
#if MIN_VERSION_aeson(2,0,0)
toJSON (PPCEquals k v) = Json.object [(A.fromText k) .= v]
#else
toJSON (PPCEquals k v) = Json.object [k .= v] toJSON (PPCEquals k v) = Json.object [k .= v]
#endif
toJSON (PPCRange k minVal maxVal) = toJSON (PPCRange k minVal maxVal) =
Json.toJSON [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal] Json.toJSON [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
toEncoding (PPCStartsWith k v) = Json.foldable ["starts-with", k, v] toEncoding (PPCStartsWith k v) = Json.foldable ["starts-with", k, v]
#if MIN_VERSION_aeson(2,0,0)
toEncoding (PPCEquals k v) = Json.pairs ((A.fromText k) .= v)
#else
toEncoding (PPCEquals k v) = Json.pairs (k .= v) toEncoding (PPCEquals k v) = Json.pairs (k .= v)
#endif
toEncoding (PPCRange k minVal maxVal) = toEncoding (PPCRange k minVal maxVal) =
Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal] Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
{- ORMOLU_ENABLE -}
-- | A PostPolicy is required to perform uploads via browser forms. -- | A PostPolicy is required to perform uploads via browser forms.
data PostPolicy = PostPolicy data PostPolicy = PostPolicy
{ expiration :: UTCTime, { expiration :: UTCTime,
conditions :: [PostPolicyCondition] conditions :: [PostPolicyCondition]
} }
deriving (Show, Eq) deriving stock (Show, Eq)
instance Json.ToJSON PostPolicy where instance Json.ToJSON PostPolicy where
toJSON (PostPolicy e c) = toJSON (PostPolicy e c) =
Json.object $ Json.object
[ "expiration" .= iso8601TimeFormat e, [ "expiration" .= iso8601TimeFormat e,
"conditions" .= c "conditions" .= c
] ]
@ -225,7 +224,7 @@ data PostPolicyError
| PPEBucketNotSpecified | PPEBucketNotSpecified
| PPEConditionKeyEmpty | PPEConditionKeyEmpty
| PPERangeInvalid | PPERangeInvalid
deriving (Eq, Show) deriving stock (Show, Eq)
-- | Set the bucket name that the upload should use. -- | Set the bucket name that the upload should use.
ppCondBucket :: Bucket -> PostPolicyCondition ppCondBucket :: Bucket -> PostPolicyCondition
@ -266,19 +265,19 @@ newPostPolicy ::
newPostPolicy expirationTime conds newPostPolicy expirationTime conds
-- object name condition must be present -- object name condition must be present
| not $ any (keyEquals "key") conds = | not $ any (keyEquals "key") conds =
Left PPEKeyNotSpecified Left PPEKeyNotSpecified
-- bucket name condition must be present -- bucket name condition must be present
| not $ any (keyEquals "bucket") conds = | not $ any (keyEquals "bucket") conds =
Left PPEBucketNotSpecified Left PPEBucketNotSpecified
-- a condition with an empty key is invalid -- a condition with an empty key is invalid
| any (keyEquals "") conds || any isEmptyRangeKey conds = | any (keyEquals "") conds || any isEmptyRangeKey conds =
Left PPEConditionKeyEmpty Left PPEConditionKeyEmpty
-- invalid range check -- invalid range check
| any isInvalidRange conds = | any isInvalidRange conds =
Left PPERangeInvalid Left PPERangeInvalid
-- all good! -- all good!
| otherwise = | otherwise =
return $ PostPolicy expirationTime conds return $ PostPolicy expirationTime conds
where where
keyEquals k' (PPCStartsWith k _) = k == k' keyEquals k' (PPCStartsWith k _) = k == k'
keyEquals k' (PPCEquals k _) = k == k' keyEquals k' (PPCEquals k _) = k == k'
@ -300,50 +299,58 @@ presignedPostPolicy ::
Minio (ByteString, H.HashMap Text ByteString) Minio (ByteString, H.HashMap Text ByteString)
presignedPostPolicy p = do presignedPostPolicy p = do
ci <- asks mcConnInfo ci <- asks mcConnInfo
signTime <- liftIO $ Time.getCurrentTime signTime <- liftIO Time.getCurrentTime
mgr <- asks mcConnManager
cv <- liftIO $ getCredential (connectCreds ci) (getEndpoint ci) mgr
let extraConditions = let extraConditions signParams =
[ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime), [ PPCEquals "x-amz-date" (toText $ awsTimeFormat signTime),
PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256", PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256",
PPCEquals PPCEquals
"x-amz-credential" "x-amz-credential"
( T.intercalate ( T.intercalate
"/" "/"
[ connectAccessKey ci, [ coerce $ cvAccessKey cv,
decodeUtf8 $ mkScope signTime region decodeUtf8 $ credentialScope signParams
] ]
) )
] ]
ppWithCreds = ppWithCreds signParams =
p p
{ conditions = conditions p ++ extraConditions { conditions = conditions p ++ extraConditions signParams
} }
sp = sp =
SignParams SignParams
(connectAccessKey ci) (coerce $ cvAccessKey cv)
(connectSecretKey ci) (coerce $ cvSecretKey cv)
(coerce $ cvSessionToken cv)
ServiceS3
signTime signTime
(Just $ connectRegion ci) (Just $ connectRegion ci)
Nothing Nothing
Nothing Nothing
signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp signData = signV4PostPolicy (showPostPolicy $ ppWithCreds sp) sp
-- compute form-data -- compute form-data
mkPair (PPCStartsWith k v) = Just (k, v) mkPair (PPCStartsWith k v) = Just (k, v)
mkPair (PPCEquals k v) = Just (k, v) mkPair (PPCEquals k v) = Just (k, v)
mkPair _ = Nothing mkPair _ = Nothing
formFromPolicy = formFromPolicy =
H.map TE.encodeUtf8 $ H.fromList $ catMaybes $ H.map TE.encodeUtf8 $
mkPair <$> conditions ppWithCreds H.fromList $
mapMaybe
mkPair
(conditions $ ppWithCreds sp)
formData = formFromPolicy `H.union` signData formData = formFromPolicy `H.union` signData
-- compute POST upload URL -- compute POST upload URL
bucket = H.lookupDefault "" "bucket" formData bucket = H.lookupDefault "" "bucket" formData
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
region = connectRegion ci
url = url =
toStrictBS $ toLazyByteString $ toStrictBS $
scheme <> byteString (getHostAddr ci) toLazyByteString $
<> byteString "/" scheme
<> byteString bucket <> byteString (getHostAddr ci)
<> byteString "/" <> byteString "/"
<> byteString bucket
<> byteString "/"
return (url, formData) return (url, formData)

View File

@ -71,13 +71,13 @@ putObjectInternal b o opts (ODStream src sizeMay) = do
Just size -> Just size ->
if if
| size <= 64 * oneMiB -> do | size <= 64 * oneMiB -> do
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size | size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| otherwise -> sequentialMultipartUpload b o opts (Just size) src | otherwise -> sequentialMultipartUpload b o opts (Just size) src
putObjectInternal b o opts (ODFile fp sizeMay) = do putObjectInternal b o opts (ODFile fp sizeMay) = do
hResE <- withNewHandle fp $ \h -> hResE <- withNewHandle fp $ \h ->
liftM2 (,) (isHandleSeekable h) (getFileSize h) liftA2 (,) (isHandleSeekable h) (getFileSize h)
(isSeekable, handleSizeMay) <- (isSeekable, handleSizeMay) <-
either either
@ -95,13 +95,13 @@ putObjectInternal b o opts (ODFile fp sizeMay) = do
Just size -> Just size ->
if if
| size <= 64 * oneMiB -> | size <= 64 * oneMiB ->
either throwIO return either throwIO return
=<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size) =<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size | size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
| isSeekable -> parallelMultipartUpload b o opts fp size | isSeekable -> parallelMultipartUpload b o opts fp size
| otherwise -> | otherwise ->
sequentialMultipartUpload b o opts (Just size) $ sequentialMultipartUpload b o opts (Just size) $
CB.sourceFile fp CB.sourceFile fp
parallelMultipartUpload :: parallelMultipartUpload ::
Bucket -> Bucket ->

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -14,15 +14,25 @@
-- limitations under the License. -- limitations under the License.
-- --
-- |
-- Module: Network.Minio.S3API
-- Copyright: (c) 2017-2023 MinIO Dev Team
-- License: Apache 2.0
-- Maintainer: MinIO Dev Team <dev@min.io>
--
-- Lower-level API for S3 compatible object stores. Start with @Network.Minio@
-- and use this only if needed.
module Network.Minio.S3API module Network.Minio.S3API
( Region, ( Region,
getLocation, getLocation,
-- * Listing buckets -- * Listing buckets
-------------------- --------------------
getService, getService,
-- * Listing objects -- * Listing objects
-------------------- --------------------
ListObjectsResult (..), ListObjectsResult (..),
ListObjectsV1Result (..), ListObjectsV1Result (..),
@ -33,11 +43,13 @@ module Network.Minio.S3API
headBucket, headBucket,
-- * Retrieving objects -- * Retrieving objects
----------------------- -----------------------
getObject', getObject',
headObject, headObject,
-- * Creating buckets and objects -- * Creating buckets and objects
--------------------------------- ---------------------------------
putBucket, putBucket,
ETag, ETag,
@ -47,6 +59,7 @@ module Network.Minio.S3API
copyObjectSingle, copyObjectSingle,
-- * Multipart Upload APIs -- * Multipart Upload APIs
-------------------------- --------------------------
UploadId, UploadId,
PartTuple, PartTuple,
@ -63,11 +76,13 @@ module Network.Minio.S3API
listIncompleteParts', listIncompleteParts',
-- * Deletion APIs -- * Deletion APIs
-------------------------- --------------------------
deleteBucket, deleteBucket,
deleteObject, deleteObject,
-- * Presigned Operations -- * Presigned Operations
----------------------------- -----------------------------
module Network.Minio.PresignedOperations, module Network.Minio.PresignedOperations,
@ -76,6 +91,7 @@ module Network.Minio.S3API
setBucketPolicy, setBucketPolicy,
-- * Bucket Notifications -- * Bucket Notifications
------------------------- -------------------------
Notification (..), Notification (..),
NotificationConfig (..), NotificationConfig (..),
@ -124,7 +140,8 @@ parseGetObjectHeaders object headers =
let metadataPairs = getMetadata headers let metadataPairs = getMetadata headers
userMetadata = getUserMetadataMap metadataPairs userMetadata = getUserMetadataMap metadataPairs
metadata = getNonUserMetadataMap metadataPairs metadata = getNonUserMetadataMap metadataPairs
in ObjectInfo <$> Just object in ObjectInfo
<$> Just object
<*> getLastModifiedHeader headers <*> getLastModifiedHeader headers
<*> getETagHeader headers <*> getETagHeader headers
<*> getContentLength headers <*> getContentLength headers
@ -158,24 +175,26 @@ getObject' bucket object queryParams headers = do
{ riBucket = Just bucket, { riBucket = Just bucket,
riObject = Just object, riObject = Just object,
riQueryParams = queryParams, riQueryParams = queryParams,
riHeaders = headers riHeaders =
-- This header is required for safety as otherwise http-client, headers
-- sends Accept-Encoding: gzip, and the server may actually gzip -- This header is required for safety as otherwise http-client,
-- body. In that case Content-Length header will be missing. -- sends Accept-Encoding: gzip, and the server may actually gzip
<> [("Accept-Encoding", "identity")] -- body. In that case Content-Length header will be missing.
<> [("Accept-Encoding", "identity")]
} }
-- | Creates a bucket via a PUT bucket call. -- | Creates a bucket via a PUT bucket call.
putBucket :: Bucket -> Region -> Minio () putBucket :: Bucket -> Region -> Minio ()
putBucket bucket location = do putBucket bucket location = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
void $ executeRequest $ void $
defaultS3ReqInfo executeRequest $
{ riMethod = HT.methodPut, defaultS3ReqInfo
riBucket = Just bucket, { riMethod = HT.methodPut,
riPayload = PayloadBS $ mkCreateBucketConfig ns location, riBucket = Just bucket,
riNeedsLocation = False riPayload = PayloadBS $ mkCreateBucketConfig ns location,
} riNeedsLocation = False
}
-- | Single PUT object size. -- | Single PUT object size.
maxSinglePutObjectSizeBytes :: Int64 maxSinglePutObjectSizeBytes :: Int64
@ -189,9 +208,9 @@ putObjectSingle' :: Bucket -> Object -> [HT.Header] -> ByteString -> Minio ETag
putObjectSingle' bucket object headers bs = do putObjectSingle' bucket object headers bs = do
let size = fromIntegral (BS.length bs) let size = fromIntegral (BS.length bs)
-- check length is within single PUT object size. -- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) when (size > maxSinglePutObjectSizeBytes) $
$ throwIO throwIO $
$ MErrVSinglePUTSizeExceeded size MErrVSinglePUTSizeExceeded size
let payload = mkStreamingPayload $ PayloadBS bs let payload = mkStreamingPayload $ PayloadBS bs
resp <- resp <-
@ -223,9 +242,9 @@ putObjectSingle ::
Minio ETag Minio ETag
putObjectSingle bucket object headers h offset size = do putObjectSingle bucket object headers h offset size = do
-- check length is within single PUT object size. -- check length is within single PUT object size.
when (size > maxSinglePutObjectSizeBytes) when (size > maxSinglePutObjectSizeBytes) $
$ throwIO throwIO $
$ MErrVSinglePUTSizeExceeded size MErrVSinglePUTSizeExceeded size
-- content-length header is automatically set by library. -- content-length header is automatically set by library.
let payload = mkStreamingPayload $ PayloadH h offset size let payload = mkStreamingPayload $ PayloadH h offset size
@ -302,23 +321,23 @@ listObjects' bucket prefix nextToken delimiter maxKeys = do
-- | DELETE a bucket from the service. -- | DELETE a bucket from the service.
deleteBucket :: Bucket -> Minio () deleteBucket :: Bucket -> Minio ()
deleteBucket bucket = deleteBucket bucket =
void void $
$ executeRequest executeRequest $
$ defaultS3ReqInfo defaultS3ReqInfo
{ riMethod = HT.methodDelete, { riMethod = HT.methodDelete,
riBucket = Just bucket riBucket = Just bucket
} }
-- | DELETE an object from the service. -- | DELETE an object from the service.
deleteObject :: Bucket -> Object -> Minio () deleteObject :: Bucket -> Object -> Minio ()
deleteObject bucket object = deleteObject bucket object =
void void $
$ executeRequest executeRequest $
$ defaultS3ReqInfo defaultS3ReqInfo
{ riMethod = HT.methodDelete, { riMethod = HT.methodDelete,
riBucket = Just bucket, riBucket = Just bucket,
riObject = Just object riObject = Just object
} }
-- | Create a new multipart upload. -- | Create a new multipart upload.
newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
@ -397,8 +416,7 @@ srcInfoToHeaders srcInfo =
fmap formatRFC1123 . srcIfModifiedSince fmap formatRFC1123 . srcIfModifiedSince
] ]
rangeHdr = rangeHdr =
maybe [] (\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) $ maybe [] ((\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) . toByteRange) (srcRange srcInfo)
toByteRange <$> srcRange srcInfo
toByteRange :: (Int64, Int64) -> HT.ByteRange toByteRange :: (Int64, Int64) -> HT.ByteRange
toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y) toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y)
@ -478,14 +496,14 @@ completeMultipartUpload bucket object uploadId partTuple = do
-- | Abort a multipart upload. -- | Abort a multipart upload.
abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio () abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
abortMultipartUpload bucket object uploadId = abortMultipartUpload bucket object uploadId =
void void $
$ executeRequest executeRequest $
$ defaultS3ReqInfo defaultS3ReqInfo
{ riMethod = HT.methodDelete, { riMethod = HT.methodDelete,
riBucket = Just bucket, riBucket = Just bucket,
riObject = Just object, riObject = Just object,
riQueryParams = mkOptionalParams params riQueryParams = mkOptionalParams params
} }
where where
params = [("uploadId", Just uploadId)] params = [("uploadId", Just uploadId)]
@ -554,15 +572,16 @@ headObject bucket object reqHeaders = do
{ riMethod = HT.methodHead, { riMethod = HT.methodHead,
riBucket = Just bucket, riBucket = Just bucket,
riObject = Just object, riObject = Just object,
riHeaders = reqHeaders riHeaders =
-- This header is required for safety as otherwise http-client, reqHeaders
-- sends Accept-Encoding: gzip, and the server may actually gzip -- This header is required for safety as otherwise http-client,
-- body. In that case Content-Length header will be missing. -- sends Accept-Encoding: gzip, and the server may actually gzip
<> [("Accept-Encoding", "identity")] -- body. In that case Content-Length header will be missing.
<> [("Accept-Encoding", "identity")]
} }
maybe (throwIO MErrVInvalidObjectInfoResponse) return maybe (throwIO MErrVInvalidObjectInfoResponse) return $
$ parseGetObjectHeaders object parseGetObjectHeaders object $
$ NC.responseHeaders resp NC.responseHeaders resp
-- | Query the object store if a given bucket exists. -- | Query the object store if a given bucket exists.
headBucket :: Bucket -> Minio Bool headBucket :: Bucket -> Minio Bool
@ -595,15 +614,16 @@ headBucket bucket =
putBucketNotification :: Bucket -> Notification -> Minio () putBucketNotification :: Bucket -> Notification -> Minio ()
putBucketNotification bucket ncfg = do putBucketNotification bucket ncfg = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
void $ executeRequest $ void $
defaultS3ReqInfo executeRequest $
{ riMethod = HT.methodPut, defaultS3ReqInfo
riBucket = Just bucket, { riMethod = HT.methodPut,
riQueryParams = [("notification", Nothing)], riBucket = Just bucket,
riPayload = riQueryParams = [("notification", Nothing)],
PayloadBS $ riPayload =
mkPutNotificationRequest ns ncfg PayloadBS $
} mkPutNotificationRequest ns ncfg
}
-- | Retrieve the notification configuration on a bucket. -- | Retrieve the notification configuration on a bucket.
getBucketNotification :: Bucket -> Minio Notification getBucketNotification :: Bucket -> Minio Notification
@ -645,20 +665,22 @@ setBucketPolicy bucket policy = do
-- | Save a new policy on a bucket. -- | Save a new policy on a bucket.
putBucketPolicy :: Bucket -> Text -> Minio () putBucketPolicy :: Bucket -> Text -> Minio ()
putBucketPolicy bucket policy = do putBucketPolicy bucket policy = do
void $ executeRequest $ void $
defaultS3ReqInfo executeRequest $
{ riMethod = HT.methodPut, defaultS3ReqInfo
riBucket = Just bucket, { riMethod = HT.methodPut,
riQueryParams = [("policy", Nothing)], riBucket = Just bucket,
riPayload = PayloadBS $ encodeUtf8 policy riQueryParams = [("policy", Nothing)],
} riPayload = PayloadBS $ encodeUtf8 policy
}
-- | Delete any policy set on a bucket. -- | Delete any policy set on a bucket.
deleteBucketPolicy :: Bucket -> Minio () deleteBucketPolicy :: Bucket -> Minio ()
deleteBucketPolicy bucket = do deleteBucketPolicy bucket = do
void $ executeRequest $ void $
defaultS3ReqInfo executeRequest $
{ riMethod = HT.methodDelete, defaultS3ReqInfo
riBucket = Just bucket, { riMethod = HT.methodDelete,
riQueryParams = [("policy", Nothing)] riBucket = Just bucket,
} riQueryParams = [("policy", Nothing)]
}

View File

@ -111,7 +111,7 @@ data EventStreamException
| ESEInvalidHeaderType | ESEInvalidHeaderType
| ESEInvalidHeaderValueType | ESEInvalidHeaderValueType
| ESEInvalidMessageType | ESEInvalidMessageType
deriving (Eq, Show) deriving stock (Eq, Show)
instance Exception EventStreamException instance Exception EventStreamException
@ -119,7 +119,7 @@ instance Exception EventStreamException
chunkSize :: Int chunkSize :: Int
chunkSize = 32 * 1024 chunkSize = 32 * 1024
parseBinary :: Bin.Binary a => ByteString -> IO a parseBinary :: (Bin.Binary a) => ByteString -> IO a
parseBinary b = do parseBinary b = do
case Bin.decodeOrFail $ LB.fromStrict b of case Bin.decodeOrFail $ LB.fromStrict b of
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
@ -135,7 +135,7 @@ bytesToHeaderName t = case t of
_ -> throwIO ESEInvalidHeaderType _ -> throwIO ESEInvalidHeaderType
parseHeaders :: parseHeaders ::
MonadUnliftIO m => (MonadUnliftIO m) =>
Word32 -> Word32 ->
C.ConduitM ByteString a m [MessageHeader] C.ConduitM ByteString a m [MessageHeader]
parseHeaders 0 = return [] parseHeaders 0 = return []
@ -163,7 +163,7 @@ parseHeaders hdrLen = do
-- readNBytes returns N bytes read from the string and throws an -- readNBytes returns N bytes read from the string and throws an
-- exception if N bytes are not present on the stream. -- exception if N bytes are not present on the stream.
readNBytes :: MonadUnliftIO m => Int -> C.ConduitM ByteString a m ByteString readNBytes :: (MonadUnliftIO m) => Int -> C.ConduitM ByteString a m ByteString
readNBytes n = do readNBytes n = do
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy) b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
if B.length b /= n if B.length b /= n
@ -171,7 +171,7 @@ readNBytes n = do
else return b else return b
crcCheck :: crcCheck ::
MonadUnliftIO m => (MonadUnliftIO m) =>
C.ConduitM ByteString ByteString m () C.ConduitM ByteString ByteString m ()
crcCheck = do crcCheck = do
b <- readNBytes 12 b <- readNBytes 12
@ -186,7 +186,7 @@ crcCheck = do
-- 12 bytes have been read off the current message. Now read the -- 12 bytes have been read off the current message. Now read the
-- next (n-12)-4 bytes and accumulate the checksum, and yield it. -- next (n-12)-4 bytes and accumulate the checksum, and yield it.
let startCrc = crc32 b let startCrc = crc32 b
finalCrc <- accumulateYield (fromIntegral n -16) startCrc finalCrc <- accumulateYield (fromIntegral n - 16) startCrc
bs <- readNBytes 4 bs <- readNBytes 4
expectedCrc :: Word32 <- liftIO $ parseBinary bs expectedCrc :: Word32 <- liftIO $ parseBinary bs
@ -208,7 +208,7 @@ crcCheck = do
then accumulateYield n' c' then accumulateYield n' c'
else return c' else return c'
handleMessage :: MonadUnliftIO m => C.ConduitT ByteString EventMessage m () handleMessage :: (MonadUnliftIO m) => C.ConduitT ByteString EventMessage m ()
handleMessage = do handleMessage = do
b1 <- readNBytes 4 b1 <- readNBytes 4
msgLen :: Word32 <- liftIO $ parseBinary b1 msgLen :: Word32 <- liftIO $ parseBinary b1
@ -219,7 +219,7 @@ handleMessage = do
hs <- parseHeaders hdrLen hs <- parseHeaders hdrLen
let payloadLen = msgLen - hdrLen - 16 let payloadLen = msgLen - hdrLen - 16
getHdrVal h = fmap snd . headMay . filter ((h ==) . fst) getHdrVal h = fmap snd . find ((h ==) . fst)
eventHdrValue = getHdrVal EventType hs eventHdrValue = getHdrVal EventType hs
msgHdrValue = getHdrVal MessageType hs msgHdrValue = getHdrVal MessageType hs
errCode = getHdrVal ErrorCode hs errCode = getHdrVal ErrorCode hs
@ -254,7 +254,7 @@ handleMessage = do
passThrough $ n - B.length b passThrough $ n - B.length b
selectProtoConduit :: selectProtoConduit ::
MonadUnliftIO m => (MonadUnliftIO m) =>
C.ConduitT ByteString EventMessage m () C.ConduitT ByteString EventMessage m ()
selectProtoConduit = crcCheck .| handleMessage selectProtoConduit = crcCheck .| handleMessage
@ -276,12 +276,12 @@ selectObjectContent b o r = do
riNeedsLocation = False, riNeedsLocation = False,
riQueryParams = [("select", Nothing), ("select-type", Just "2")] riQueryParams = [("select", Nothing), ("select-type", Just "2")]
} }
--print $ mkSelectRequest r -- print $ mkSelectRequest r
resp <- mkStreamRequest reqInfo resp <- mkStreamRequest reqInfo
return $ NC.responseBody resp .| selectProtoConduit return $ NC.responseBody resp .| selectProtoConduit
-- | A helper conduit that returns only the record payload bytes. -- | A helper conduit that returns only the record payload bytes.
getPayloadBytes :: MonadIO m => C.ConduitT EventMessage ByteString m () getPayloadBytes :: (MonadIO m) => C.ConduitT EventMessage ByteString m ()
getPayloadBytes = do getPayloadBytes = do
evM <- C.await evM <- C.await
case evM of case evM of

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -15,9 +15,19 @@
-- --
{-# LANGUAGE BangPatterns #-} {-# LANGUAGE BangPatterns #-}
module Network.Minio.Sign.V4 where module Network.Minio.Sign.V4
( SignParams (..),
signV4QueryParams,
signV4,
signV4PostPolicy,
signV4Stream,
Service (..),
credentialScope,
)
where
import qualified Conduit as C import qualified Conduit as C
import qualified Data.ByteArray as BA
import qualified Data.ByteString as B import qualified Data.ByteString as B
import qualified Data.ByteString.Base64 as Base64 import qualified Data.ByteString.Base64 as Base64
import qualified Data.ByteString.Char8 as B8 import qualified Data.ByteString.Char8 as B8
@ -26,11 +36,14 @@ import Data.CaseInsensitive (mk)
import qualified Data.CaseInsensitive as CI import qualified Data.CaseInsensitive as CI
import qualified Data.HashMap.Strict as Map import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set import qualified Data.HashSet as Set
import Data.List (partition)
import qualified Data.List.NonEmpty as NE
import qualified Data.Time as Time import qualified Data.Time as Time
import Lib.Prelude import Lib.Prelude
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import Network.HTTP.Types (Header, parseQuery) import Network.HTTP.Types (Header, SimpleQuery, hContentEncoding, parseQuery)
import qualified Network.HTTP.Types as H import qualified Network.HTTP.Types as H
import Network.HTTP.Types.Header (RequestHeaders)
import Network.Minio.Data.ByteString import Network.Minio.Data.ByteString
import Network.Minio.Data.Crypto import Network.Minio.Data.Crypto
import Network.Minio.Data.Time import Network.Minio.Data.Time
@ -51,43 +64,24 @@ ignoredHeaders =
H.hUserAgent H.hUserAgent
] ]
data SignV4Data = SignV4Data data Service = ServiceS3 | ServiceSTS
{ sv4SignTime :: UTCTime, deriving stock (Eq, Show)
sv4Scope :: ByteString,
sv4CanonicalRequest :: ByteString, toByteString :: Service -> ByteString
sv4HeadersToSign :: [(ByteString, ByteString)], toByteString ServiceS3 = "s3"
sv4Output :: [(ByteString, ByteString)], toByteString ServiceSTS = "sts"
sv4StringToSign :: ByteString,
sv4SigningKey :: ByteString
}
deriving (Show)
data SignParams = SignParams data SignParams = SignParams
{ spAccessKey :: Text, { spAccessKey :: Text,
spSecretKey :: Text, spSecretKey :: BA.ScrubbedBytes,
spSessionToken :: Maybe BA.ScrubbedBytes,
spService :: Service,
spTimeStamp :: UTCTime, spTimeStamp :: UTCTime,
spRegion :: Maybe Text, spRegion :: Maybe Text,
spExpirySecs :: Maybe Int, spExpirySecs :: Maybe UrlExpiry,
spPayloadHash :: Maybe ByteString spPayloadHash :: Maybe ByteString
} }
deriving (Show) deriving stock (Show)
debugPrintSignV4Data :: SignV4Data -> IO ()
debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
B8.putStrLn "SignV4Data:"
B8.putStr "Timestamp: " >> print t
B8.putStr "Scope: " >> B8.putStrLn s
B8.putStrLn "Canonical Request:"
B8.putStrLn cr
B8.putStr "Headers to Sign: " >> print h2s
B8.putStr "Output: " >> print o
B8.putStr "StringToSign: " >> B8.putStrLn sts
B8.putStr "SigningKey: " >> printBytes sk
B8.putStrLn "END of SignV4Data ========="
where
printBytes b = do
mapM_ (\x -> B.putStr $ B.singleton x <> " ") $ B.unpack b
B8.putStrLn ""
mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header
mkAuthHeader accessKey scope signedHeaderKeys sign = mkAuthHeader accessKey scope signedHeaderKeys sign =
@ -104,6 +98,12 @@ mkAuthHeader accessKey scope signedHeaderKeys sign =
] ]
in (H.hAuthorization, authValue) in (H.hAuthorization, authValue)
data IsStreaming = IsStreamingLength Int64 | NotStreaming
deriving stock (Eq, Show)
amzSecurityToken :: ByteString
amzSecurityToken = "X-Amz-Security-Token"
-- | Given SignParams and request details, including request method, -- | Given SignParams and request details, including request method,
-- request path, headers, query params and payload hash, generates an -- request path, headers, query params and payload hash, generates an
-- updated set of headers, including the x-amz-date header and the -- updated set of headers, including the x-amz-date header and the
@ -116,36 +116,23 @@ mkAuthHeader accessKey scope signedHeaderKeys sign =
-- is being created. The expiry is interpreted as an integer number of -- is being created. The expiry is interpreted as an integer number of
-- seconds. The output will be the list of query-parameters to add to -- seconds. The output will be the list of query-parameters to add to
-- the request. -- the request.
signV4 :: SignParams -> NC.Request -> [(ByteString, ByteString)] signV4QueryParams :: SignParams -> NC.Request -> SimpleQuery
signV4 !sp !req = signV4QueryParams !sp !req =
let region = fromMaybe "" $ spRegion sp let scope = credentialScope sp
ts = spTimeStamp sp
scope = mkScope ts region
accessKey = TE.encodeUtf8 $ spAccessKey sp
secretKey = TE.encodeUtf8 $ spSecretKey sp
expiry = spExpirySecs sp expiry = spExpirySecs sp
sha256Hdr =
( "x-amz-content-sha256", headersToSign = getHeadersToSign $ NC.requestHeaders req
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
)
-- headers to be added to the request
datePair = ("X-Amz-Date", awsTimeFormatBS ts)
computedHeaders =
NC.requestHeaders req
++ if isJust $ expiry
then []
else map (\(x, y) -> (mk x, y)) [datePair, sha256Hdr]
headersToSign = getHeadersToSign computedHeaders
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
-- query-parameters to be added before signing for presigned URLs -- query-parameters to be added before signing for presigned URLs
-- (i.e. when `isJust expiry`) -- (i.e. when `isJust expiry`)
authQP = authQP =
[ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"), [ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
("X-Amz-Credential", B.concat [accessKey, "/", scope]), ("X-Amz-Credential", B.concat [encodeUtf8 $ spAccessKey sp, "/", scope]),
datePair, ("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
("X-Amz-Expires", maybe "" showBS expiry), ("X-Amz-Expires", maybe "" showBS expiry),
("X-Amz-SignedHeaders", signedHeaderKeys) ("X-Amz-SignedHeaders", signedHeaderKeys)
] ]
++ maybeToList ((amzSecurityToken,) . BA.convert <$> spSessionToken sp)
finalQP = finalQP =
parseQuery (NC.queryString req) parseQuery (NC.queryString req)
++ if isJust expiry ++ if isJust expiry
@ -158,39 +145,129 @@ signV4 !sp !req =
sp sp
(NC.setQueryString finalQP req) (NC.setQueryString finalQP req)
headersToSign headersToSign
-- 2. compute string to sign -- 2. compute string to sign
stringToSign = mkStringToSign ts scope canonicalRequest stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
-- 3.1 compute signing key -- 3.1 compute signing key
signingKey = mkSigningKey ts region secretKey signingKey = getSigningKey sp
-- 3.2 compute signature
signature = computeSignature stringToSign signingKey
in ("X-Amz-Signature", signature) : authQP
-- | Given SignParams and request details, including request method, request
-- path, headers, query params and payload hash, generates an updated set of
-- headers, including the x-amz-date header and the Authorization header, which
-- includes the signature.
--
-- The output is the list of headers to be added to authenticate the request.
signV4 :: SignParams -> NC.Request -> [Header]
signV4 !sp !req =
let scope = credentialScope sp
-- extra headers to be added for signing purposes.
extraHeaders =
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp)
: ( -- payload hash is only used for S3 (not STS)
[ ( "x-amz-content-sha256",
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
)
| spService sp == ServiceS3
]
)
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
-- 1. compute canonical request
reqHeaders = NC.requestHeaders req ++ extraHeaders
(canonicalRequest, signedHeaderKeys) =
getCanonicalRequestAndSignedHeaders
NotStreaming
sp
req
reqHeaders
-- 2. compute string to sign
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
-- 3.1 compute signing key
signingKey = getSigningKey sp
-- 3.2 compute signature -- 3.2 compute signature
signature = computeSignature stringToSign signingKey signature = computeSignature stringToSign signingKey
-- 4. compute auth header -- 4. compute auth header
authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
-- finally compute output pairs in authHeader : extraHeaders
output =
if isJust expiry
then ("X-Amz-Signature", signature) : authQP
else
[ (\(x, y) -> (CI.foldedCase x, y)) authHeader,
datePair,
sha256Hdr
]
in output
mkScope :: UTCTime -> Text -> ByteString credentialScope :: SignParams -> ByteString
mkScope ts region = credentialScope sp =
B.intercalate let region = fromMaybe "" $ spRegion sp
"/" in B.intercalate
[ TE.encodeUtf8 . T.pack $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" ts, "/"
TE.encodeUtf8 region, [ TE.encodeUtf8 $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" $ spTimeStamp sp,
"s3", TE.encodeUtf8 region,
"aws4_request" toByteString $ spService sp,
] "aws4_request"
]
-- Folds header name, trims whitespace in header values, skips ignored headers
-- and sorts headers.
getHeadersToSign :: [Header] -> [(ByteString, ByteString)] getHeadersToSign :: [Header] -> [(ByteString, ByteString)]
getHeadersToSign !h = getHeadersToSign !h =
filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $ filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
map (\(x, y) -> (CI.foldedCase x, stripBS y)) h map (bimap CI.foldedCase stripBS) h
-- | Given the list of headers in the request, computes the canonical headers
-- and the signed headers strings.
getCanonicalHeaders :: NonEmpty Header -> (ByteString, ByteString)
getCanonicalHeaders h =
let -- Folds header name, trims spaces in header values, skips ignored
-- headers and sorts headers by name (we must not re-order multi-valued
-- headers).
headersToSign =
NE.toList $
NE.sortBy (\a b -> compare (fst a) (fst b)) $
NE.fromList $
NE.filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
NE.map (bimap CI.foldedCase stripBS) h
canonicalHeaders = mconcat $ map (\(a, b) -> a <> ":" <> b <> "\n") headersToSign
signedHeaderKeys = B.intercalate ";" $ map fst headersToSign
in (canonicalHeaders, signedHeaderKeys)
getCanonicalRequestAndSignedHeaders ::
IsStreaming ->
SignParams ->
NC.Request ->
[Header] ->
(ByteString, ByteString)
getCanonicalRequestAndSignedHeaders isStreaming sp req requestHeaders =
let httpMethod = NC.method req
canonicalUri = uriEncode False $ NC.path req
canonicalQueryString =
B.intercalate "&" $
map (\(x, y) -> B.concat [x, "=", y]) $
sort $
map
( bimap (uriEncode True) (maybe "" (uriEncode True))
)
(parseQuery $ NC.queryString req)
(canonicalHeaders, signedHeaderKeys) = getCanonicalHeaders $ NE.fromList requestHeaders
payloadHashStr =
case isStreaming of
IsStreamingLength _ -> "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
NotStreaming -> fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
canonicalRequest =
B.intercalate
"\n"
[ httpMethod,
canonicalUri,
canonicalQueryString,
canonicalHeaders,
signedHeaderKeys,
payloadHashStr
]
in (canonicalRequest, signedHeaderKeys)
mkCanonicalRequest :: mkCanonicalRequest ::
Bool -> Bool ->
@ -199,15 +276,16 @@ mkCanonicalRequest ::
[(ByteString, ByteString)] -> [(ByteString, ByteString)] ->
ByteString ByteString
mkCanonicalRequest !isStreaming !sp !req !headersForSign = mkCanonicalRequest !isStreaming !sp !req !headersForSign =
let canonicalQueryString = let httpMethod = NC.method req
B.intercalate "&" canonicalUri = uriEncode False $ NC.path req
$ map (\(x, y) -> B.concat [x, "=", y]) canonicalQueryString =
$ sort B.intercalate "&" $
$ map map (\(x, y) -> B.concat [x, "=", y]) $
( \(x, y) -> sortBy (\a b -> compare (fst a) (fst b)) $
(uriEncode True x, maybe "" (uriEncode True) y) map
) ( bimap (uriEncode True) (maybe "" (uriEncode True))
$ (parseQuery $ NC.queryString req) )
(parseQuery $ NC.queryString req)
sortedHeaders = sort headersForSign sortedHeaders = sort headersForSign
canonicalHeaders = canonicalHeaders =
B.concat $ B.concat $
@ -219,8 +297,8 @@ mkCanonicalRequest !isStreaming !sp !req !headersForSign =
else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
in B.intercalate in B.intercalate
"\n" "\n"
[ NC.method req, [ httpMethod,
uriEncode False $ NC.path req, canonicalUri,
canonicalQueryString, canonicalQueryString,
canonicalHeaders, canonicalHeaders,
signedHeaders, signedHeaders,
@ -237,13 +315,13 @@ mkStringToSign ts !scope !canonicalRequest =
hashSHA256 canonicalRequest hashSHA256 canonicalRequest
] ]
mkSigningKey :: UTCTime -> Text -> ByteString -> ByteString getSigningKey :: SignParams -> ByteString
mkSigningKey ts region !secretKey = getSigningKey sp =
hmacSHA256RawBS "aws4_request" hmacSHA256RawBS "aws4_request"
. hmacSHA256RawBS "s3" . hmacSHA256RawBS (toByteString $ spService sp)
. hmacSHA256RawBS (TE.encodeUtf8 region) . hmacSHA256RawBS (TE.encodeUtf8 $ fromMaybe "" $ spRegion sp)
. hmacSHA256RawBS (awsDateFormatBS ts) . hmacSHA256RawBS (awsDateFormatBS $ spTimeStamp sp)
$ B.concat ["AWS4", secretKey] $ B.concat ["AWS4", BA.convert $ spSecretKey sp]
computeSignature :: ByteString -> ByteString -> ByteString computeSignature :: ByteString -> ByteString -> ByteString
computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
@ -257,20 +335,20 @@ signV4PostPolicy ::
Map.HashMap Text ByteString Map.HashMap Text ByteString
signV4PostPolicy !postPolicyJSON !sp = signV4PostPolicy !postPolicyJSON !sp =
let stringToSign = Base64.encode postPolicyJSON let stringToSign = Base64.encode postPolicyJSON
region = fromMaybe "" $ spRegion sp signingKey = getSigningKey sp
signingKey = mkSigningKey (spTimeStamp sp) region $ TE.encodeUtf8 $ spSecretKey sp
signature = computeSignature stringToSign signingKey signature = computeSignature stringToSign signingKey
in Map.fromList in Map.fromList $
[ ("x-amz-signature", signature), [ ("x-amz-signature", signature),
("policy", stringToSign) ("policy", stringToSign)
] ]
++ maybeToList ((decodeUtf8 amzSecurityToken,) . BA.convert <$> spSessionToken sp)
chunkSizeConstant :: Int chunkSizeConstant :: Int
chunkSizeConstant = 64 * 1024 chunkSizeConstant = 64 * 1024
-- base16Len computes the number of bytes required to represent @n (> 0)@ in -- base16Len computes the number of bytes required to represent @n (> 0)@ in
-- hexadecimal. -- hexadecimal.
base16Len :: Integral a => a -> Int base16Len :: (Integral a) => a -> Int
base16Len n base16Len n
| n == 0 = 0 | n == 0 = 0
| otherwise = 1 + base16Len (n `div` 16) | otherwise = 1 + base16Len (n `div` 16)
@ -287,60 +365,60 @@ signedStreamLength dataLen =
finalChunkSize = 1 + 17 + 64 + 2 + 2 finalChunkSize = 1 + 17 + 64 + 2 + 2
in numChunks * fullChunkSize + lastChunkSize + finalChunkSize in numChunks * fullChunkSize + lastChunkSize + finalChunkSize
-- For streaming S3, we need to update the content-encoding header.
addContentEncoding :: [Header] -> [Header]
addContentEncoding hs =
-- assume there is at most one content-encoding header.
let (ceHdrs, others) = partition ((== hContentEncoding) . fst) hs
in maybe
(hContentEncoding, "aws-chunked")
(\(k, v) -> (k, v <> ",aws-chunked"))
(listToMaybe ceHdrs)
: others
signV4Stream :: signV4Stream ::
Int64 -> Int64 ->
SignParams -> SignParams ->
NC.Request -> NC.Request ->
(C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request) (C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request)
-- -> ([Header], C.ConduitT () ByteString (C.ResourceT IO) () -> NC.RequestBody)
signV4Stream !payloadLength !sp !req = signV4Stream !payloadLength !sp !req =
let ts = spTimeStamp sp let ts = spTimeStamp sp
addContentEncoding hs =
let ceMay = headMay $ filter (\(x, _) -> x == "content-encoding") hs -- compute the updated list of headers to be added for signing purposes.
in case ceMay of
Nothing -> ("content-encoding", "aws-chunked") : hs
Just (_, ce) ->
("content-encoding", ce <> ",aws-chunked")
: filter (\(x, _) -> x /= "content-encoding") hs
-- headers to be added to the request
datePair = ("X-Amz-Date", awsTimeFormatBS ts)
computedHeaders =
addContentEncoding $
datePair : NC.requestHeaders req
-- headers specific to streaming signature
signedContentLength = signedStreamLength payloadLength signedContentLength = signedStreamLength payloadLength
streamingHeaders :: [Header] extraHeaders =
streamingHeaders = [ ("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
[ ("x-amz-decoded-content-length", showBS payloadLength), ("x-amz-decoded-content-length", showBS payloadLength),
("content-length", showBS signedContentLength), ("content-length", showBS signedContentLength),
("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD") ("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
] ]
headersToSign = getHeadersToSign $ computedHeaders ++ streamingHeaders ++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign requestHeaders =
finalQP = parseQuery (NC.queryString req) addContentEncoding $
foldr setHeader (NC.requestHeaders req) extraHeaders
-- 1. Compute Seed Signature -- 1. Compute Seed Signature
-- 1.1 Canonical Request -- 1.1 Canonical Request
canonicalReq = (canonicalReq, signedHeaderKeys) =
mkCanonicalRequest getCanonicalRequestAndSignedHeaders
True (IsStreamingLength payloadLength)
sp sp
(NC.setQueryString finalQP req) req
headersToSign requestHeaders
region = fromMaybe "" $ spRegion sp
scope = mkScope ts region scope = credentialScope sp
accessKey = spAccessKey sp accessKey = spAccessKey sp
secretKey = spSecretKey sp
-- 1.2 String toSign -- 1.2 String toSign
stringToSign = mkStringToSign ts scope canonicalReq stringToSign = mkStringToSign ts scope canonicalReq
-- 1.3 Compute signature -- 1.3 Compute signature
-- 1.3.1 compute signing key -- 1.3.1 compute signing key
signingKey = mkSigningKey ts region $ TE.encodeUtf8 secretKey signingKey = getSigningKey sp
-- 1.3.2 Compute signature -- 1.3.2 Compute signature
seedSignature = computeSignature stringToSign signingKey seedSignature = computeSignature stringToSign signingKey
-- 1.3.3 Compute Auth Header -- 1.3.3 Compute Auth Header
authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
-- 1.4 Updated headers for the request -- 1.4 Updated headers for the request
finalReqHeaders = authHeader : (computedHeaders ++ streamingHeaders) finalReqHeaders = authHeader : requestHeaders
-- headersToAdd = authHeader : datePair : streamingHeaders -- headersToAdd = authHeader : datePair : streamingHeaders
toHexStr n = B8.pack $ printf "%x" n toHexStr n = B8.pack $ printf "%x" n
@ -367,41 +445,42 @@ signV4Stream !payloadLength !sp !req =
-- 'chunkSizeConstant'. -- 'chunkSizeConstant'.
if if
| n > 0 -> do | n > 0 -> do
bs <- mustTakeN chunkSizeConstant bs <- mustTakeN chunkSizeConstant
let strToSign = chunkStrToSign prevSign (hashSHA256 bs) let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
nextSign = computeSignature strToSign signingKey nextSign = computeSignature strToSign signingKey
chunkBS = chunkBS =
toHexStr chunkSizeConstant toHexStr chunkSizeConstant
<> ";chunk-signature=" <> ";chunk-signature="
<> nextSign <> nextSign
<> "\r\n" <> "\r\n"
<> bs <> bs
<> "\r\n" <> "\r\n"
C.yield chunkBS C.yield chunkBS
signerConduit (n -1) lps nextSign signerConduit (n - 1) lps nextSign
-- Second case encodes the last chunk which is smaller than -- Second case encodes the last chunk which is smaller than
-- 'chunkSizeConstant' -- 'chunkSizeConstant'
| lps > 0 -> do | lps > 0 -> do
bs <- mustTakeN $ fromIntegral lps bs <- mustTakeN $ fromIntegral lps
let strToSign = chunkStrToSign prevSign (hashSHA256 bs) let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
nextSign = computeSignature strToSign signingKey nextSign = computeSignature strToSign signingKey
chunkBS = chunkBS =
toHexStr lps <> ";chunk-signature=" toHexStr lps
<> nextSign <> ";chunk-signature="
<> "\r\n" <> nextSign
<> bs <> "\r\n"
<> "\r\n" <> bs
C.yield chunkBS <> "\r\n"
signerConduit 0 0 nextSign C.yield chunkBS
signerConduit 0 0 nextSign
-- Last case encodes the final signature chunk that has no -- Last case encodes the final signature chunk that has no
-- data. -- data.
| otherwise -> do | otherwise -> do
let strToSign = chunkStrToSign prevSign (hashSHA256 "") let strToSign = chunkStrToSign prevSign (hashSHA256 "")
nextSign = computeSignature strToSign signingKey nextSign = computeSignature strToSign signingKey
lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n" lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n"
C.yield lastChunkBS C.yield lastChunkBS
in \src -> in \src ->
req req
{ NC.requestHeaders = finalReqHeaders, { NC.requestHeaders = finalReqHeaders,
@ -409,3 +488,9 @@ signV4Stream !payloadLength !sp !req =
NC.requestBodySource signedContentLength $ NC.requestBodySource signedContentLength $
src C..| signerConduit numParts lastPSize seedSignature src C..| signerConduit numParts lastPSize seedSignature
} }
-- "setHeader r hdr" adds the hdr to r, replacing it in r if it already exists.
setHeader :: Header -> RequestHeaders -> RequestHeaders
setHeader hdr r =
let r' = filter (\(name, _) -> name /= fst hdr) r
in hdr : r'

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -24,7 +24,6 @@ import qualified Data.ByteString.Lazy as LB
import Data.CaseInsensitive (mk, original) import Data.CaseInsensitive (mk, original)
import qualified Data.Conduit.Binary as CB import qualified Data.Conduit.Binary as CB
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import qualified Data.List as List
import qualified Data.Text as T import qualified Data.Text as T
import Data.Text.Read (decimal) import Data.Text.Read (decimal)
import Data.Time import Data.Time
@ -37,14 +36,12 @@ import Network.HTTP.Conduit (Response)
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import qualified Network.HTTP.Types.Header as Hdr import qualified Network.HTTP.Types.Header as Hdr
import Network.Minio.Data
import Network.Minio.Data.ByteString import Network.Minio.Data.ByteString
import Network.Minio.JsonParser (parseErrResponseJSON) import Network.Minio.JsonParser (parseErrResponseJSON)
import Network.Minio.XmlParser (parseErrResponse) import Network.Minio.XmlCommon (parseErrResponse)
import qualified System.IO as IO import qualified System.IO as IO
import qualified UnliftIO as U import qualified UnliftIO as U
import qualified UnliftIO.Async as A import qualified UnliftIO.Async as A
import qualified UnliftIO.MVar as UM
allocateReadFile :: allocateReadFile ::
(MonadUnliftIO m, R.MonadResource m) => (MonadUnliftIO m, R.MonadResource m) =>
@ -52,7 +49,7 @@ allocateReadFile ::
m (R.ReleaseKey, Handle) m (R.ReleaseKey, Handle)
allocateReadFile fp = do allocateReadFile fp = do
(rk, hdlE) <- R.allocate (openReadFile fp) cleanup (rk, hdlE) <- R.allocate (openReadFile fp) cleanup
either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE either (\(e :: U.IOException) -> throwIO e) (return . (rk,)) hdlE
where where
openReadFile f = U.try $ IO.openBinaryFile f IO.ReadMode openReadFile f = U.try $ IO.openBinaryFile f IO.ReadMode
cleanup = either (const $ return ()) IO.hClose cleanup = either (const $ return ()) IO.hClose
@ -60,25 +57,25 @@ allocateReadFile fp = do
-- | Queries the file size from the handle. Catches any file operation -- | Queries the file size from the handle. Catches any file operation
-- exceptions and returns Nothing instead. -- exceptions and returns Nothing instead.
getFileSize :: getFileSize ::
(MonadUnliftIO m, R.MonadResource m) => (MonadUnliftIO m) =>
Handle -> Handle ->
m (Maybe Int64) m (Maybe Int64)
getFileSize h = do getFileSize h = do
resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
case resE of case resE of
Left (_ :: IOException) -> return Nothing Left (_ :: U.IOException) -> return Nothing
Right s -> return $ Just s Right s -> return $ Just s
-- | Queries if handle is seekable. Catches any file operation -- | Queries if handle is seekable. Catches any file operation
-- exceptions and return False instead. -- exceptions and return False instead.
isHandleSeekable :: isHandleSeekable ::
(R.MonadResource m, MonadUnliftIO m) => (R.MonadResource m) =>
Handle -> Handle ->
m Bool m Bool
isHandleSeekable h = do isHandleSeekable h = do
resE <- liftIO $ try $ IO.hIsSeekable h resE <- liftIO $ try $ IO.hIsSeekable h
case resE of case resE of
Left (_ :: IOException) -> return False Left (_ :: U.IOException) -> return False
Right v -> return v Right v -> return v
-- | Helper function that opens a handle to the filepath and performs -- | Helper function that opens a handle to the filepath and performs
@ -89,7 +86,7 @@ withNewHandle ::
(MonadUnliftIO m, R.MonadResource m) => (MonadUnliftIO m, R.MonadResource m) =>
FilePath -> FilePath ->
(Handle -> m a) -> (Handle -> m a) ->
m (Either IOException a) m (Either U.IOException a)
withNewHandle fp fileAction = do withNewHandle fp fileAction = do
-- opening a handle can throw MError exception. -- opening a handle can throw MError exception.
handleE <- try $ allocateReadFile fp handleE <- try $ allocateReadFile fp
@ -103,17 +100,27 @@ withNewHandle fp fileAction = do
return resE return resE
mkHeaderFromPairs :: [(ByteString, ByteString)] -> [HT.Header] mkHeaderFromPairs :: [(ByteString, ByteString)] -> [HT.Header]
mkHeaderFromPairs = map ((\(x, y) -> (mk x, y))) mkHeaderFromPairs = map (first mk)
lookupHeader :: HT.HeaderName -> [HT.Header] -> Maybe ByteString lookupHeader :: HT.HeaderName -> [HT.Header] -> Maybe ByteString
lookupHeader hdr = headMay . map snd . filter (\(h, _) -> h == hdr) lookupHeader hdr = listToMaybe . map snd . filter (\(h, _) -> h == hdr)
getETagHeader :: [HT.Header] -> Maybe Text getETagHeader :: [HT.Header] -> Maybe Text
getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs
getMetadata :: [HT.Header] -> [(Text, Text)] getMetadata :: [HT.Header] -> [(Text, Text)]
getMetadata = getMetadata =
map ((\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y))) map (\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y))
-- | If the given header name has the @X-Amz-Meta-@ prefix, it is
-- stripped and a Just is returned.
userMetadataHeaderNameMaybe :: Text -> Maybe Text
userMetadataHeaderNameMaybe k =
let prefix = T.toCaseFold "X-Amz-Meta-"
n = T.length prefix
in if T.toCaseFold (T.take n k) == prefix
then Just (T.drop n k)
else Nothing
toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text) toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text)
toMaybeMetadataHeader (k, v) = toMaybeMetadataHeader (k, v) =
@ -128,6 +135,14 @@ getNonUserMetadataMap =
. fst . fst
) )
addXAmzMetaPrefix :: Text -> Text
addXAmzMetaPrefix s
| isJust (userMetadataHeaderNameMaybe s) = s
| otherwise = "X-Amz-Meta-" <> s
mkHeaderFromMetadata :: [(Text, Text)] -> [HT.Header]
mkHeaderFromMetadata = map (\(x, y) -> (mk $ encodeUtf8 $ addXAmzMetaPrefix x, encodeUtf8 y))
-- | This function collects all headers starting with `x-amz-meta-` -- | This function collects all headers starting with `x-amz-meta-`
-- and strips off this prefix, and returns a map. -- and strips off this prefix, and returns a map.
getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
@ -135,6 +150,12 @@ getUserMetadataMap =
H.fromList H.fromList
. mapMaybe toMaybeMetadataHeader . mapMaybe toMaybeMetadataHeader
getHostHeader :: (ByteString, Int) -> ByteString
getHostHeader (host_, port_) =
if port_ == 80 || port_ == 443
then host_
else host_ <> ":" <> show port_
getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime
getLastModifiedHeader hs = do getLastModifiedHeader hs = do
modTimebs <- decodeUtf8Lenient <$> lookupHeader Hdr.hLastModified hs modTimebs <- decodeUtf8Lenient <$> lookupHeader Hdr.hLastModified hs
@ -143,7 +164,7 @@ getLastModifiedHeader hs = do
getContentLength :: [HT.Header] -> Maybe Int64 getContentLength :: [HT.Header] -> Maybe Int64
getContentLength hs = do getContentLength hs = do
nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs
fst <$> hush (decimal nbs) fst <$> either (const Nothing) Just (decimal nbs)
decodeUtf8Lenient :: ByteString -> Text decodeUtf8Lenient :: ByteString -> Text
decodeUtf8Lenient = decodeUtf8With lenientDecode decodeUtf8Lenient = decodeUtf8With lenientDecode
@ -154,7 +175,7 @@ isSuccessStatus sts =
in (s >= 200 && s < 300) in (s >= 200 && s < 300)
httpLbs :: httpLbs ::
MonadIO m => (MonadIO m) =>
NC.Request -> NC.Request ->
NC.Manager -> NC.Manager ->
m (NC.Response LByteString) m (NC.Response LByteString)
@ -170,8 +191,9 @@ httpLbs req mgr = do
sErr <- parseErrResponseJSON $ NC.responseBody resp sErr <- parseErrResponseJSON $ NC.responseBody resp
throwIO sErr throwIO sErr
_ -> _ ->
throwIO $ NC.HttpExceptionRequest req $ throwIO $
NC.StatusCodeException (void resp) (showBS resp) NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) (showBS resp)
return resp return resp
where where
@ -199,8 +221,9 @@ http req mgr = do
throwIO sErr throwIO sErr
_ -> do _ -> do
content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
throwIO $ NC.HttpExceptionRequest req $ throwIO $
NC.StatusCodeException (void resp) content NC.HttpExceptionRequest req $
NC.StatusCodeException (void resp) content
return resp return resp
where where
@ -216,7 +239,7 @@ http req mgr = do
-- Similar to mapConcurrently but limits the number of threads that -- Similar to mapConcurrently but limits the number of threads that
-- can run using a quantity semaphore. -- can run using a quantity semaphore.
limitedMapConcurrently :: limitedMapConcurrently ::
MonadUnliftIO m => (MonadUnliftIO m) =>
Int -> Int ->
(t -> m a) -> (t -> m a) ->
[t] -> [t] ->
@ -233,7 +256,7 @@ limitedMapConcurrently count act args = do
waitSem t = U.atomically $ do waitSem t = U.atomically $ do
v <- U.readTVar t v <- U.readTVar t
if v > 0 if v > 0
then U.writeTVar t (v -1) then U.writeTVar t (v - 1)
else U.retrySTM else U.retrySTM
signalSem t = U.atomically $ do signalSem t = U.atomically $ do
v <- U.readTVar t v <- U.readTVar t
@ -260,42 +283,3 @@ chunkBSConduit (s : ss) = do
| B.length bs == s -> C.yield bs >> chunkBSConduit ss | B.length bs == s -> C.yield bs >> chunkBSConduit ss
| B.length bs > 0 -> C.yield bs | B.length bs > 0 -> C.yield bs
| otherwise -> return () | otherwise -> return ()
-- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size =
uncurry (List.zip3 [1 ..])
$ List.unzip
$ loop 0 size
where
ceil :: Double -> Int64
ceil = ceiling
partSize =
max
minPartSize
( ceil $
fromIntegral size
/ fromIntegral maxMultipartParts
)
m = fromIntegral partSize
loop st sz
| st > sz = []
| st + m >= sz = [(st, sz - st)]
| otherwise = (st, m) : loop (st + m) sz
lookupRegionCache :: Bucket -> Minio (Maybe Region)
lookupRegionCache b = do
rMVar <- asks mcRegionMap
rMap <- UM.readMVar rMVar
return $ H.lookup b rMap
addToRegionCache :: Bucket -> Region -> Minio ()
addToRegionCache b region = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.insert b region
deleteFromRegionCache :: Bucket -> Minio ()
deleteFromRegionCache b = do
rMVar <- asks mcRegionMap
UM.modifyMVar_ rMVar $ return . H.delete b

View File

@ -0,0 +1,65 @@
--
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
module Network.Minio.XmlCommon where
import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time (UTCTime)
import Data.Time.Format.ISO8601 (iso8601ParseM)
import Lib.Prelude (throwIO)
import Network.Minio.Errors
import Text.XML (Name (Name), def, parseLBS)
import Text.XML.Cursor (Axis, Cursor, content, element, fromDocument, laxElement, ($/), (&/))
s3Name :: Text -> Text -> Name
s3Name ns s = Name s (Just ns) Nothing
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
uncurry4 f (a, b, c, d) = f a b c d
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML
parseS3XMLTime :: (MonadIO m) => Text -> m UTCTime
parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
iso8601ParseM $
toString t
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr =
either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot =
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
let code = T.concat $ r $/ laxElement "Code" &/ content
message = T.concat $ r $/ laxElement "Message" &/ content
return $ toServiceErr code message

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -23,10 +23,9 @@ module Network.Minio.XmlGenerator
where where
import qualified Data.ByteString.Lazy as LBS import qualified Data.ByteString.Lazy as LBS
import qualified Data.HashMap.Strict as H
import qualified Data.Text as T import qualified Data.Text as T
import Lib.Prelude
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.XmlCommon
import Text.XML import Text.XML
-- | Create a bucketConfig request body XML -- | Create a bucketConfig request body XML
@ -73,12 +72,13 @@ mkCompleteMultipartUploadRequest partInfo =
data XNode data XNode
= XNode Text [XNode] = XNode Text [XNode]
| XLeaf Text Text | XLeaf Text Text
deriving (Eq, Show) deriving stock (Eq, Show)
toXML :: Text -> XNode -> ByteString toXML :: Text -> XNode -> ByteString
toXML ns node = toXML ns node =
LBS.toStrict $ renderLBS def $ LBS.toStrict $
Document (Prologue [] Nothing []) (xmlNode node) [] renderLBS def $
Document (Prologue [] Nothing []) (xmlNode node) []
where where
xmlNode :: XNode -> Element xmlNode :: XNode -> Element
xmlNode (XNode name nodes) = xmlNode (XNode name nodes) =
@ -94,7 +94,7 @@ class ToXNode a where
toXNode :: a -> XNode toXNode :: a -> XNode
instance ToXNode Event where instance ToXNode Event where
toXNode = XLeaf "Event" . show toXNode = XLeaf "Event" . toText
instance ToXNode Notification where instance ToXNode Notification where
toXNode (Notification qc tc lc) = toXNode (Notification qc tc lc) =
@ -104,9 +104,10 @@ instance ToXNode Notification where
++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc ++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode
toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) = toXNodesWithArnName eltName arnName (NotificationConfig itemId arn events fRule) =
XNode eltName $ XNode eltName $
[XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events [XLeaf "Id" itemId, XLeaf arnName arn]
++ map toXNode events
++ [toXNode fRule] ++ [toXNode fRule]
instance ToXNode Filter where instance ToXNode Filter where
@ -143,14 +144,14 @@ mkSelectRequest r = LBS.toStrict $ renderLBS def sr
[NodeContent $ show $ srExpressionType r] [NodeContent $ show $ srExpressionType r]
), ),
NodeElement NodeElement
( Element "InputSerialization" mempty ( Element "InputSerialization" mempty $
$ inputSerializationNodes inputSerializationNodes $
$ srInputSerialization r srInputSerialization r
), ),
NodeElement NodeElement
( Element "OutputSerialization" mempty ( Element "OutputSerialization" mempty $
$ outputSerializationNodes outputSerializationNodes $
$ srOutputSerialization r srOutputSerialization r
) )
] ]
++ maybe [] reqProgElem (srRequestProgressEnabled r) ++ maybe [] reqProgElem (srRequestProgressEnabled r)
@ -186,11 +187,11 @@ mkSelectRequest r = LBS.toStrict $ renderLBS def sr
] ]
comprTypeNode Nothing = [] comprTypeNode Nothing = []
kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v] kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v]
formatNode (InputFormatCSV (CSVProp h)) = formatNode (InputFormatCSV c) =
Element Element
"CSV" "CSV"
mempty mempty
(map NodeElement $ map kvElement $ H.toList h) (map (NodeElement . kvElement) (csvPropsList c))
formatNode (InputFormatJSON p) = formatNode (InputFormatJSON p) =
Element Element
"JSON" "JSON"
@ -208,17 +209,17 @@ mkSelectRequest r = LBS.toStrict $ renderLBS def sr
formatNode InputFormatParquet = Element "Parquet" mempty [] formatNode InputFormatParquet = Element "Parquet" mempty []
outputSerializationNodes (OutputSerializationJSON j) = outputSerializationNodes (OutputSerializationJSON j) =
[ NodeElement [ NodeElement
( Element "JSON" mempty ( Element "JSON" mempty $
$ rdElem rdElem $
$ jsonopRecordDelimiter j jsonopRecordDelimiter j
) )
] ]
outputSerializationNodes (OutputSerializationCSV (CSVProp h)) = outputSerializationNodes (OutputSerializationCSV c) =
[ NodeElement $ [ NodeElement $
Element Element
"CSV" "CSV"
mempty mempty
(map NodeElement $ map kvElement $ H.toList h) (map (NodeElement . kvElement) (csvPropsList c))
] ]
rdElem Nothing = [] rdElem Nothing = []
rdElem (Just t) = rdElem (Just t) =

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -32,50 +32,13 @@ where
import qualified Data.ByteString.Lazy as LB import qualified Data.ByteString.Lazy as LB
import qualified Data.HashMap.Strict as H import qualified Data.HashMap.Strict as H
import Data.List (zip3, zip4, zip6) import Data.List (zip4, zip6)
import qualified Data.Text as T import qualified Data.Text as T
import Data.Text.Read (decimal)
import Data.Time import Data.Time
import Lib.Prelude
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.Errors import Network.Minio.XmlCommon
import Text.XML
import Text.XML.Cursor hiding (bool) import Text.XML.Cursor hiding (bool)
-- | Represent the time format string returned by S3 API calls.
s3TimeFormat :: [Char]
s3TimeFormat = iso8601DateFormat $ Just "%T%QZ"
-- | Helper functions.
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
uncurry4 f (a, b, c, d) = f a b c d
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
-- | Parse time strings from XML
parseS3XMLTime :: MonadIO m => Text -> m UTCTime
parseS3XMLTime t =
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return
$ parseTimeM True defaultTimeLocale s3TimeFormat
$ T.unpack t
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
parseDecimal numStr =
either (throwIO . MErrVXmlParse . show) return $
fst <$> decimal numStr
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
parseDecimals numStr = forM numStr parseDecimal
s3Elem :: Text -> Text -> Axis
s3Elem ns = element . s3Name ns
parseRoot :: (MonadIO m) => LByteString -> m Cursor
parseRoot =
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
. parseLBS def
-- | Parse the response XML of a list buckets call. -- | Parse the response XML of a list buckets call.
parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo] parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo]
parseListBuckets xmldata = do parseListBuckets xmldata = do
@ -132,7 +95,7 @@ parseListObjectsV1Response xmldata = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content nextMarker = listToMaybe $ r $/ s3Elem' "NextMarker" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
@ -158,7 +121,7 @@ parseListObjectsResponse xmldata = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content nextToken = listToMaybe $ r $/ s3Elem' "NextContinuationToken" &/ content
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
@ -185,8 +148,8 @@ parseListUploadsResponse xmldata = do
let s3Elem' = s3Elem ns let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content nextKey = listToMaybe $ r $/ s3Elem' "NextKeyMarker" &/ content
nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content nextUpload = listToMaybe $ r $/ s3Elem' "NextUploadIdMarker" &/ content
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
@ -203,7 +166,7 @@ parseListPartsResponse xmldata = do
ns <- asks getSvcNamespace ns <- asks getSvcNamespace
let s3Elem' = s3Elem ns let s3Elem' = s3Elem ns
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content) hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content nextPartNumStr = listToMaybe $ r $/ s3Elem' "NextPartNumberMarker" &/ content
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
@ -220,13 +183,6 @@ parseListPartsResponse xmldata = do
return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
parseErrResponse xmldata = do
r <- parseRoot xmldata
let code = T.concat $ r $/ element "Code" &/ content
message = T.concat $ r $/ element "Message" &/ content
return $ toServiceErr code message
parseNotification :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m Notification parseNotification :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m Notification
parseNotification xmldata = do parseNotification xmldata = do
r <- parseRoot xmldata r <- parseRoot xmldata
@ -235,9 +191,10 @@ parseNotification xmldata = do
qcfg = map node $ r $/ s3Elem' "QueueConfiguration" qcfg = map node $ r $/ s3Elem' "QueueConfiguration"
tcfg = map node $ r $/ s3Elem' "TopicConfiguration" tcfg = map node $ r $/ s3Elem' "TopicConfiguration"
lcfg = map node $ r $/ s3Elem' "CloudFunctionConfiguration" lcfg = map node $ r $/ s3Elem' "CloudFunctionConfiguration"
Notification <$> (mapM (parseNode ns "Queue") qcfg) Notification
<*> (mapM (parseNode ns "Topic") tcfg) <$> mapM (parseNode ns "Queue") qcfg
<*> (mapM (parseNode ns "CloudFunction") lcfg) <*> mapM (parseNode ns "Topic") tcfg
<*> mapM (parseNode ns "CloudFunction") lcfg
where where
getFilterRule ns c = getFilterRule ns c =
let name = T.concat $ c $/ s3Elem ns "Name" &/ content let name = T.concat $ c $/ s3Elem ns "Name" &/ content
@ -245,25 +202,29 @@ parseNotification xmldata = do
in FilterRule name value in FilterRule name value
parseNode ns arnName nodeData = do parseNode ns arnName nodeData = do
let c = fromNode nodeData let c = fromNode nodeData
id = T.concat $ c $/ s3Elem ns "Id" &/ content itemId = T.concat $ c $/ s3Elem ns "Id" &/ content
arn = T.concat $ c $/ s3Elem ns arnName &/ content arn = T.concat $ c $/ s3Elem ns arnName &/ content
events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content events = mapMaybe textToEvent (c $/ s3Elem ns "Event" &/ content)
rules = rules =
c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key" c
&/ s3Elem ns "FilterRule" &| getFilterRule ns $/ s3Elem ns "Filter"
&/ s3Elem ns "S3Key"
&/ s3Elem ns "FilterRule"
&| getFilterRule ns
return $ return $
NotificationConfig NotificationConfig
id itemId
arn arn
events events
(Filter $ FilterKey $ FilterRules rules) (Filter $ FilterKey $ FilterRules rules)
parseSelectProgress :: MonadIO m => ByteString -> m Progress parseSelectProgress :: (MonadIO m) => ByteString -> m Progress
parseSelectProgress xmldata = do parseSelectProgress xmldata = do
r <- parseRoot $ LB.fromStrict xmldata r <- parseRoot $ LB.fromStrict xmldata
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content
bReturned = T.concat $ r $/ element "BytesReturned" &/ content bReturned = T.concat $ r $/ element "BytesReturned" &/ content
Progress <$> parseDecimal bScanned Progress
<$> parseDecimal bScanned
<*> parseDecimal bProcessed <*> parseDecimal bProcessed
<*> parseDecimal bReturned <*> parseDecimal bReturned

View File

@ -15,7 +15,7 @@
# resolver: # resolver:
# name: custom-snapshot # name: custom-snapshot
# location: "./custom-snapshot.yaml" # location: "./custom-snapshot.yaml"
resolver: lts-16.0 resolver: lts-19.7
# User packages to be built. # User packages to be built.
# Various formats can be used as shown in the example below. # Various formats can be used as shown in the example below.
@ -39,9 +39,7 @@ packages:
- '.' - '.'
# Dependency packages to be pulled from upstream that are not in the resolver # Dependency packages to be pulled from upstream that are not in the resolver
# (e.g., acme-missiles-0.3) # (e.g., acme-missiles-0.3)
extra-deps: extra-deps: []
- unliftio-core-0.2.0.1
- protolude-0.3.0
# Override default flag values for local packages and extra-deps # Override default flag values for local packages and extra-deps
flags: {} flags: {}

View File

@ -3,24 +3,10 @@
# For more information, please see the documentation at: # For more information, please see the documentation at:
# https://docs.haskellstack.org/en/stable/lock_files # https://docs.haskellstack.org/en/stable/lock_files
packages: packages: []
- completed:
hackage: unliftio-core-0.2.0.1@sha256:9b3e44ea9aacacbfc35b3b54015af450091916ac3618a41868ebf6546977659a,1082
pantry-tree:
size: 328
sha256: e81c5a1e82ec2cd68cbbbec9cd60567363abe02257fa1370a906f6754b6818b8
original:
hackage: unliftio-core-0.2.0.1
- completed:
hackage: protolude-0.3.0@sha256:8361b811b420585b122a7ba715aa5923834db6e8c36309bf267df2dbf66b95ef,2693
pantry-tree:
size: 1644
sha256: babf32b414f25f790b7a4ce6bae5c960bc51a11a289e7c47335b222e6762560c
original:
hackage: protolude-0.3.0
snapshots: snapshots:
- completed: - completed:
size: 531237 size: 618884
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/16/0.yaml url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/19/7.yaml
sha256: 210e15b7043e2783115afe16b0d54914b1611cdaa73f3ca3ca7f8e0847ff54e5 sha256: 57d4ce67cc097fea2058446927987bc1f7408890e3a6df0da74e5e318f051c20
original: lts-16.0 original: lts-19.7

View File

@ -1,7 +1,5 @@
{-# LANGUAGE OverloadedStrings #-}
-- --
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -32,13 +30,13 @@ import qualified Network.HTTP.Client.MultipartFormData as Form
import qualified Network.HTTP.Conduit as NC import qualified Network.HTTP.Conduit as NC
import qualified Network.HTTP.Types as HT import qualified Network.HTTP.Types as HT
import Network.Minio import Network.Minio
import Network.Minio.Credentials (Creds (CredsStatic))
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.Data.Crypto import Network.Minio.Data.Crypto
import Network.Minio.PutObject
import Network.Minio.S3API import Network.Minio.S3API
import Network.Minio.Utils import Network.Minio.Utils
import System.Directory (getTemporaryDirectory) import System.Directory (getTemporaryDirectory)
import System.Environment (lookupEnv) import qualified System.Environment as Env
import qualified Test.QuickCheck as Q import qualified Test.QuickCheck as Q
import Test.Tasty import Test.Tasty
import Test.Tasty.HUnit import Test.Tasty.HUnit
@ -52,8 +50,8 @@ tests :: TestTree
tests = testGroup "Tests" [liveServerUnitTests] tests = testGroup "Tests" [liveServerUnitTests]
-- conduit that generates random binary stream of given length -- conduit that generates random binary stream of given length
randomDataSrc :: MonadIO m => Int64 -> C.ConduitM () ByteString m () randomDataSrc :: (MonadIO m) => Int64 -> C.ConduitM () ByteString m ()
randomDataSrc s' = genBS s' randomDataSrc = genBS
where where
concatIt bs n = concatIt bs n =
BS.concat $ BS.concat $
@ -70,7 +68,7 @@ randomDataSrc s' = genBS s'
yield $ concatIt byteArr64 oneMiB yield $ concatIt byteArr64 oneMiB
genBS (s - oneMiB) genBS (s - oneMiB)
mkRandFile :: R.MonadResource m => Int64 -> m FilePath mkRandFile :: (R.MonadResource m) => Int64 -> m FilePath
mkRandFile size = do mkRandFile size = do
dir <- liftIO getTemporaryDirectory dir <- liftIO getTemporaryDirectory
C.runConduit $ randomDataSrc size C..| CB.sinkTempFile dir "miniohstest.random" C.runConduit $ randomDataSrc size C..| CB.sinkTempFile dir "miniohstest.random"
@ -78,15 +76,35 @@ mkRandFile size = do
funTestBucketPrefix :: Text funTestBucketPrefix :: Text
funTestBucketPrefix = "miniohstest-" funTestBucketPrefix = "miniohstest-"
loadTestServer :: IO ConnectInfo loadTestServerConnInfo :: IO ConnectInfo
loadTestServer = do loadTestServerConnInfo = do
val <- lookupEnv "MINIO_LOCAL" val <- Env.lookupEnv "MINIO_LOCAL"
isSecure <- lookupEnv "MINIO_SECURE" isSecure <- Env.lookupEnv "MINIO_SECURE"
return $ case (val, isSecure) of return $ case (val, isSecure) of
(Just _, Just _) -> setCreds (Credentials "minio" "minio123") "https://localhost:9000" (Just _, Just _) -> setCreds (CredentialValue "minio" "minio123" mempty) "https://localhost:9000"
(Just _, Nothing) -> setCreds (Credentials "minio" "minio123") "http://localhost:9000" (Just _, Nothing) -> setCreds (CredentialValue "minio" "minio123" mempty) "http://localhost:9000"
(Nothing, _) -> minioPlayCI (Nothing, _) -> minioPlayCI
loadTestServerConnInfoSTS :: IO ConnectInfo
loadTestServerConnInfoSTS = do
val <- Env.lookupEnv "MINIO_LOCAL"
isSecure <- Env.lookupEnv "MINIO_SECURE"
let cv = CredentialValue "minio" "minio123" mempty
assumeRole =
STSAssumeRole
{ sarCredentials = cv,
sarOptions = defaultSTSAssumeRoleOptions
}
case (val, isSecure) of
(Just _, Just _) -> setSTSCredential assumeRole "https://localhost:9000"
(Just _, Nothing) -> setSTSCredential assumeRole "http://localhost:9000"
(Nothing, _) -> do
cv' <- case connectCreds minioPlayCI of
CredsStatic c -> return c
_ -> error "unexpected play creds"
let assumeRole' = assumeRole {sarCredentials = cv'}
setSTSCredential assumeRole' minioPlayCI
funTestWithBucket :: funTestWithBucket ::
TestName -> TestName ->
(([Char] -> Minio ()) -> Bucket -> Minio ()) -> (([Char] -> Minio ()) -> Bucket -> Minio ()) ->
@ -96,7 +114,7 @@ funTestWithBucket t minioTest = testCaseSteps t $ \step -> do
bktSuffix <- liftIO $ generate $ Q.vectorOf 10 (Q.choose ('a', 'z')) bktSuffix <- liftIO $ generate $ Q.vectorOf 10 (Q.choose ('a', 'z'))
let b = T.concat [funTestBucketPrefix, T.pack bktSuffix] let b = T.concat [funTestBucketPrefix, T.pack bktSuffix]
liftStep = liftIO . step liftStep = liftIO . step
connInfo <- loadTestServer connInfo <- loadTestServerConnInfo
ret <- runMinio connInfo $ do ret <- runMinio connInfo $ do
liftStep $ "Creating bucket for test - " ++ t liftStep $ "Creating bucket for test - " ++ t
foundBucket <- bucketExists b foundBucket <- bucketExists b
@ -106,6 +124,17 @@ funTestWithBucket t minioTest = testCaseSteps t $ \step -> do
deleteBucket b deleteBucket b
isRight ret @? ("Functional test " ++ t ++ " failed => " ++ show ret) isRight ret @? ("Functional test " ++ t ++ " failed => " ++ show ret)
connInfoSTS <- loadTestServerConnInfoSTS
let t' = t ++ " (with AssumeRole Credentials)"
ret' <- runMinio connInfoSTS $ do
liftStep $ "Creating bucket for test - " ++ t'
foundBucket <- bucketExists b
liftIO $ foundBucket @?= False
makeBucket b Nothing
minioTest liftStep b
deleteBucket b
isRight ret' @? ("Functional test " ++ t' ++ " failed => " ++ show ret')
liveServerUnitTests :: TestTree liveServerUnitTests :: TestTree
liveServerUnitTests = liveServerUnitTests =
testGroup testGroup
@ -126,7 +155,8 @@ liveServerUnitTests =
presignedUrlFunTest, presignedUrlFunTest,
presignedPostPolicyFunTest, presignedPostPolicyFunTest,
bucketPolicyFunTest, bucketPolicyFunTest,
getNPutSSECTest getNPutSSECTest,
assumeRoleRequestTest
] ]
basicTests :: TestTree basicTests :: TestTree
@ -134,12 +164,13 @@ basicTests = funTestWithBucket "Basic tests" $
\step bucket -> do \step bucket -> do
step "getService works and contains the test bucket." step "getService works and contains the test bucket."
buckets <- getService buckets <- getService
unless (length (filter (== bucket) $ map biName buckets) == 1) unless (length (filter (== bucket) $ map biName buckets) == 1) $
$ liftIO liftIO $
$ assertFailure assertFailure
( "The bucket " ++ show bucket ( "The bucket "
++ " was expected to exist." ++ show bucket
) ++ " was expected to exist."
)
step "makeBucket again to check if BucketAlreadyOwnedByYou exception is raised." step "makeBucket again to check if BucketAlreadyOwnedByYou exception is raised."
mbE <- try $ makeBucket bucket Nothing mbE <- try $ makeBucket bucket Nothing
@ -180,7 +211,7 @@ basicTests = funTestWithBucket "Basic tests" $
"test-file" "test-file"
outFile outFile
defaultGetObjectOptions defaultGetObjectOptions
{ gooIfUnmodifiedSince = (Just unmodifiedTime) { gooIfUnmodifiedSince = Just unmodifiedTime
} }
case resE of case resE of
Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold" Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold"
@ -194,7 +225,7 @@ basicTests = funTestWithBucket "Basic tests" $
"test-file" "test-file"
outFile outFile
defaultGetObjectOptions defaultGetObjectOptions
{ gooIfMatch = (Just "invalid-etag") { gooIfMatch = Just "invalid-etag"
} }
case resE1 of case resE1 of
Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold" Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold"
@ -208,7 +239,7 @@ basicTests = funTestWithBucket "Basic tests" $
"test-file" "test-file"
outFile outFile
defaultGetObjectOptions defaultGetObjectOptions
{ gooRange = (Just $ HT.ByteRangeFromTo 100 300) { gooRange = Just $ HT.ByteRangeFromTo 100 300
} }
case resE2 of case resE2 of
Left exn -> liftIO $ exn @?= ServiceErr "InvalidRange" "The requested range is not satisfiable" Left exn -> liftIO $ exn @?= ServiceErr "InvalidRange" "The requested range is not satisfiable"
@ -220,7 +251,7 @@ basicTests = funTestWithBucket "Basic tests" $
"test-file" "test-file"
outFile outFile
defaultGetObjectOptions defaultGetObjectOptions
{ gooRange = (Just $ HT.ByteRangeFrom 1) { gooRange = Just $ HT.ByteRangeFrom 1
} }
step "fGetObject a non-existent object and check for NoSuchKey exception" step "fGetObject a non-existent object and check for NoSuchKey exception"
@ -231,7 +262,7 @@ basicTests = funTestWithBucket "Basic tests" $
step "create new multipart upload works" step "create new multipart upload works"
uid <- newMultipartUpload bucket "newmpupload" [] uid <- newMultipartUpload bucket "newmpupload" []
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.") liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
step "abort a new multipart upload works" step "abort a new multipart upload works"
abortMultipartUpload bucket "newmpupload" uid abortMultipartUpload bucket "newmpupload" uid
@ -247,7 +278,7 @@ basicTests = funTestWithBucket "Basic tests" $
step "get metadata of the object" step "get metadata of the object"
res <- statObject bucket object defaultGetObjectOptions res <- statObject bucket object defaultGetObjectOptions
liftIO $ (oiSize res) @?= 0 liftIO $ oiSize res @?= 0
step "delete object" step "delete object"
deleteObject bucket object deleteObject bucket object
@ -262,7 +293,7 @@ lowLevelMultipartTest = funTestWithBucket "Low-level Multipart Test" $
step "Prepare for low-level multipart tests." step "Prepare for low-level multipart tests."
step "create new multipart upload" step "create new multipart upload"
uid <- newMultipartUpload bucket object [] uid <- newMultipartUpload bucket object []
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.") liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
randFile <- mkRandFile mb15 randFile <- mkRandFile mb15
@ -279,7 +310,8 @@ lowLevelMultipartTest = funTestWithBucket "Low-level Multipart Test" $
fGetObject bucket object destFile defaultGetObjectOptions fGetObject bucket object destFile defaultGetObjectOptions
gotSize <- withNewHandle destFile getFileSize gotSize <- withNewHandle destFile getFileSize
liftIO $ liftIO $
gotSize == Right (Just mb15) gotSize
== Right (Just mb15)
@? "Wrong file size of put file after getting" @? "Wrong file size of put file after getting"
step "Cleanup actions" step "Cleanup actions"
@ -303,7 +335,8 @@ putObjectSizeTest = funTestWithBucket "PutObject of conduit source with size" $
fGetObject bucket obj destFile defaultGetObjectOptions fGetObject bucket obj destFile defaultGetObjectOptions
gotSize <- withNewHandle destFile getFileSize gotSize <- withNewHandle destFile getFileSize
liftIO $ liftIO $
gotSize == Right (Just mb1) gotSize
== Right (Just mb1)
@? "Wrong file size of put file after getting" @? "Wrong file size of put file after getting"
step "Cleanup actions" step "Cleanup actions"
@ -327,7 +360,8 @@ putObjectNoSizeTest = funTestWithBucket "PutObject of conduit source with no siz
fGetObject bucket obj destFile defaultGetObjectOptions fGetObject bucket obj destFile defaultGetObjectOptions
gotSize <- withNewHandle destFile getFileSize gotSize <- withNewHandle destFile getFileSize
liftIO $ liftIO $
gotSize == Right (Just mb70) gotSize
== Right (Just mb70)
@? "Wrong file size of put file after getting" @? "Wrong file size of put file after getting"
step "Cleanup actions" step "Cleanup actions"
@ -338,22 +372,20 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
\step bucket -> do \step bucket -> do
step "High-level listObjects Test" step "High-level listObjects Test"
step "put 3 objects" step "put 3 objects"
let expectedObjects = ["dir/o1", "dir/dir1/o2", "dir/dir2/o3", "o4"] let extractObjectsFromList =
extractObjectsFromList os =
mapM mapM
( \t -> case t of ( \case
ListItemObject o -> Just $ oiObject o ListItemObject o -> Just $ oiObject o
_ -> Nothing _ -> Nothing
) )
os extractObjectsAndDirsFromList =
expectedNonRecList = ["o4", "dir/"]
extractObjectsAndDirsFromList os =
map map
( \t -> case t of ( \case
ListItemObject o -> oiObject o ListItemObject o -> oiObject o
ListItemPrefix d -> d ListItemPrefix d -> d
) )
os expectedObjects = ["dir/o1", "dir/dir1/o2", "dir/dir2/o3", "o4"]
expectedNonRecList = ["o4", "dir/"]
testFilepath <- mkRandFile 200 testFilepath <- mkRandFile 200
forM_ expectedObjects $ forM_ expectedObjects $
@ -361,8 +393,9 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
step "High-level listing of objects" step "High-level listing of objects"
items <- C.runConduit $ listObjects bucket Nothing False C..| sinkList items <- C.runConduit $ listObjects bucket Nothing False C..| sinkList
liftIO $ assertEqual "Objects/Dirs match failed!" expectedNonRecList $ liftIO $
extractObjectsAndDirsFromList items assertEqual "Objects/Dirs match failed!" expectedNonRecList $
extractObjectsAndDirsFromList items
step "High-level recursive listing of objects" step "High-level recursive listing of objects"
objects <- C.runConduit $ listObjects bucket Nothing True C..| sinkList objects <- C.runConduit $ listObjects bucket Nothing True C..| sinkList
@ -375,8 +408,9 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
step "High-level listing of objects (version 1)" step "High-level listing of objects (version 1)"
itemsV1 <- C.runConduit $ listObjectsV1 bucket Nothing False C..| sinkList itemsV1 <- C.runConduit $ listObjectsV1 bucket Nothing False C..| sinkList
liftIO $ assertEqual "Objects/Dirs match failed!" expectedNonRecList $ liftIO $
extractObjectsAndDirsFromList itemsV1 assertEqual "Objects/Dirs match failed!" expectedNonRecList $
extractObjectsAndDirsFromList itemsV1
step "High-level recursive listing of objects (version 1)" step "High-level recursive listing of objects (version 1)"
objectsV1 <- objectsV1 <-
@ -433,7 +467,7 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
step "create 10 multipart uploads" step "create 10 multipart uploads"
forM_ [1 .. 10 :: Int] $ \_ -> do forM_ [1 .. 10 :: Int] $ \_ -> do
uid <- newMultipartUpload bucket object [] uid <- newMultipartUpload bucket object []
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.") liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
step "High-level listing of incomplete multipart uploads" step "High-level listing of incomplete multipart uploads"
uploads <- uploads <-
@ -495,7 +529,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
map map
( T.concat ( T.concat
. ("test-file-" :) . ("test-file-" :)
. (\x -> [x]) . (: [])
. T.pack . T.pack
. show . show
) )
@ -514,7 +548,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
let object = "newmpupload" let object = "newmpupload"
forM_ [1 .. 10 :: Int] $ \_ -> do forM_ [1 .. 10 :: Int] $ \_ -> do
uid <- newMultipartUpload bucket object [] uid <- newMultipartUpload bucket object []
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.") liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
step "list incomplete multipart uploads" step "list incomplete multipart uploads"
incompleteUploads <- incompleteUploads <-
@ -525,7 +559,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
Nothing Nothing
Nothing Nothing
Nothing Nothing
liftIO $ (length $ lurUploads incompleteUploads) @?= 10 liftIO $ length (lurUploads incompleteUploads) @?= 10
step "cleanup" step "cleanup"
forM_ (lurUploads incompleteUploads) $ forM_ (lurUploads incompleteUploads) $
@ -536,7 +570,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
step "create a multipart upload" step "create a multipart upload"
uid <- newMultipartUpload bucket object [] uid <- newMultipartUpload bucket object []
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.") liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
step "put object parts 1..10" step "put object parts 1..10"
inputFile <- mkRandFile mb5 inputFile <- mkRandFile mb5
@ -546,7 +580,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
step "fetch list parts" step "fetch list parts"
listPartsResult <- listIncompleteParts' bucket object uid Nothing Nothing listPartsResult <- listIncompleteParts' bucket object uid Nothing Nothing
liftIO $ (length $ lprParts listPartsResult) @?= 10 liftIO $ length (lprParts listPartsResult) @?= 10
abortMultipartUpload bucket object uid abortMultipartUpload bucket object uid
presignedUrlFunTest :: TestTree presignedUrlFunTest :: TestTree
@ -569,6 +603,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
[] []
[] []
print putUrl
let size1 = 1000 :: Int64 let size1 = 1000 :: Int64
inputFile <- mkRandFile size1 inputFile <- mkRandFile size1
@ -615,7 +650,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
headUrl <- presignedHeadObjectUrl bucket obj2 3600 [] headUrl <- presignedHeadObjectUrl bucket obj2 3600 []
headResp <- do headResp <- do
let req = NC.parseRequest_ $ toS $ decodeUtf8 headUrl let req = NC.parseRequest_ $ decodeUtf8 headUrl
NC.httpLbs (req {NC.method = HT.methodHead}) mgr NC.httpLbs (req {NC.method = HT.methodHead}) mgr
liftIO $ liftIO $
(NC.responseStatus headResp == HT.status200) (NC.responseStatus headResp == HT.status200)
@ -643,7 +678,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
mapM_ (removeObject bucket) [obj, obj2] mapM_ (removeObject bucket) [obj, obj2]
where where
putR size filePath mgr url = do putR size filePath mgr url = do
let req = NC.parseRequest_ $ toS $ decodeUtf8 url let req = NC.parseRequest_ $ decodeUtf8 url
let req' = let req' =
req req
{ NC.method = HT.methodPut, { NC.method = HT.methodPut,
@ -653,14 +688,14 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
} }
NC.httpLbs req' mgr NC.httpLbs req' mgr
getR mgr url = do getR mgr url = do
let req = NC.parseRequest_ $ toS $ decodeUtf8 url let req = NC.parseRequest_ $ decodeUtf8 url
NC.httpLbs req mgr NC.httpLbs req mgr
presignedPostPolicyFunTest :: TestTree presignedPostPolicyFunTest :: TestTree
presignedPostPolicyFunTest = funTestWithBucket "Presigned Post Policy tests" $ presignedPostPolicyFunTest = funTestWithBucket "Presigned Post Policy tests" $
\step bucket -> do \step bucket -> do
step "presignedPostPolicy basic test" step "presignedPostPolicy basic test"
now <- liftIO $ Time.getCurrentTime now <- liftIO Time.getCurrentTime
let key = "presignedPostPolicyTest/myfile" let key = "presignedPostPolicyTest/myfile"
policyConds = policyConds =
@ -689,9 +724,9 @@ presignedPostPolicyFunTest = funTestWithBucket "Presigned Post Policy tests" $
mapM_ (removeObject bucket) [key] mapM_ (removeObject bucket) [key]
where where
postForm url formData inputFile = do postForm url formData inputFile = do
req <- NC.parseRequest $ toS $ decodeUtf8 url req <- NC.parseRequest $ decodeUtf8 url
let parts = let parts =
map (\(x, y) -> Form.partBS x y) $ map (uncurry Form.partBS) $
H.toList formData H.toList formData
parts' = parts ++ [Form.partFile "file" inputFile] parts' = parts ++ [Form.partFile "file" inputFile]
req' <- Form.formDataBody parts' req req' <- Form.formDataBody parts' req
@ -738,17 +773,17 @@ bucketPolicyFunTest = funTestWithBucket "Bucket Policy tests" $
[ proto, [ proto,
getHostAddr connInfo, getHostAddr connInfo,
"/", "/",
toUtf8 bucket, encodeUtf8 bucket,
"/", "/",
toUtf8 obj encodeUtf8 obj
] ]
respE <- respE <-
liftIO $ liftIO $
(fmap (Right . toStrictBS) $ NC.simpleHttp $ toS $ decodeUtf8 url) fmap (Right . toStrictBS) (NC.simpleHttp $ decodeUtf8 url)
`catch` (\(e :: NC.HttpException) -> return $ Left (show e :: Text)) `catch` (\(e :: NC.HttpException) -> return $ Left (show e :: Text))
case respE of case respE of
Left err -> liftIO $ assertFailure $ show err Left err -> liftIO $ assertFailure $ show err
Right s -> liftIO $ s @?= (BS.concat $ replicate 100 "c") Right s -> liftIO $ s @?= BS.concat (replicate 100 "c")
deleteObject bucket obj deleteObject bucket obj
@ -803,7 +838,7 @@ multipartTest = funTestWithBucket "Multipart Tests" $
C.runConduit $ C.runConduit $
listIncompleteUploads bucket (Just object) False listIncompleteUploads bucket (Just object) False
C..| sinkList C..| sinkList
liftIO $ (null uploads) @? "removeIncompleteUploads didn't complete successfully" liftIO $ null uploads @? "removeIncompleteUploads didn't complete successfully"
putObjectContentTypeTest :: TestTree putObjectContentTypeTest :: TestTree
putObjectContentTypeTest = funTestWithBucket "putObject contentType tests" $ putObjectContentTypeTest = funTestWithBucket "putObject contentType tests" $
@ -910,8 +945,9 @@ putObjectUserMetadataTest = funTestWithBucket "putObject user-metadata test" $
let m = oiUserMetadata oi let m = oiUserMetadata oi
-- need to do a case-insensitive comparison -- need to do a case-insensitive comparison
sortedMeta = sortedMeta =
sort $ map (\(k, v) -> (T.toLower k, T.toLower v)) $ sort $
H.toList m map (bimap T.toLower T.toLower) $
H.toList m
ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")] ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")]
liftIO $ (sortedMeta == ref) @? "Metadata mismatch!" liftIO $ (sortedMeta == ref) @? "Metadata mismatch!"
@ -944,8 +980,9 @@ getObjectTest = funTestWithBucket "getObject test" $
let m = oiUserMetadata $ gorObjectInfo gor let m = oiUserMetadata $ gorObjectInfo gor
-- need to do a case-insensitive comparison -- need to do a case-insensitive comparison
sortedMeta = sortedMeta =
sort $ map (\(k, v) -> (T.toLower k, T.toLower v)) $ sort $
H.toList m map (bimap T.toLower T.toLower) $
H.toList m
ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")] ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")]
liftIO $ (sortedMeta == ref) @? "Metadata mismatch!" liftIO $ (sortedMeta == ref) @? "Metadata mismatch!"
@ -1073,7 +1110,7 @@ copyObjectTests = funTestWithBucket "copyObject related tests" $
copyObjectPart copyObjectPart
dstInfo' dstInfo'
srcInfo' srcInfo'
{ srcRange = Just $ (,) ((p -1) * mb5) ((p -1) * mb5 + (mb5 - 1)) { srcRange = Just $ (,) ((p - 1) * mb5) ((p - 1) * mb5 + (mb5 - 1))
} }
uid uid
(fromIntegral p) (fromIntegral p)
@ -1174,9 +1211,37 @@ getNPutSSECTest =
gotSize <- withNewHandle dstFile getFileSize gotSize <- withNewHandle dstFile getFileSize
liftIO $ liftIO $
gotSize == Right (Just mb1) gotSize
== Right (Just mb1)
@? "Wrong file size of object when getting" @? "Wrong file size of object when getting"
step "Cleanup" step "Cleanup"
deleteObject bucket obj deleteObject bucket obj
else step "Skipping encryption test as server is not using TLS" else step "Skipping encryption test as server is not using TLS"
assumeRoleRequestTest :: TestTree
assumeRoleRequestTest = testCaseSteps "Assume Role STS API" $ \step -> do
step "Load credentials"
val <- Env.lookupEnv "MINIO_LOCAL"
isSecure <- Env.lookupEnv "MINIO_SECURE"
let localMinioCred = Just $ CredentialValue "minio" "minio123" mempty
playCreds =
case connectCreds minioPlayCI of
CredsStatic c -> Just c
_ -> Nothing
(cvMay, loc) =
case (val, isSecure) of
(Just _, Just _) -> (localMinioCred, "https://localhost:9000")
(Just _, Nothing) -> (localMinioCred, "http://localhost:9000")
(Nothing, _) -> (playCreds, "https://play.min.io:9000")
cv <- maybe (assertFailure "bad creds") return cvMay
let assumeRole =
STSAssumeRole cv $
defaultSTSAssumeRoleOptions
{ saroLocation = Just "us-east-1",
saroEndpoint = Just loc
}
step "AssumeRole request"
res <- requestSTSCredential assumeRole
let v = credentialValueText $ fst res
print (v, snd res)

View File

@ -24,7 +24,6 @@ module Network.Minio.API.Test
where where
import Data.Aeson (eitherDecode) import Data.Aeson (eitherDecode)
import Lib.Prelude
import Network.Minio.API import Network.Minio.API
import Network.Minio.AdminAPI import Network.Minio.AdminAPI
import Test.Tasty import Test.Tasty
@ -63,8 +62,9 @@ parseServerInfoJSONTest =
testGroup "Parse MinIO Admin API ServerInfo JSON test" $ testGroup "Parse MinIO Admin API ServerInfo JSON test" $
map map
( \(tName, tDesc, tfn, tVal) -> ( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $ testCase tName $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo]) assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
) )
testCases testCases
where where
@ -82,8 +82,9 @@ parseHealStatusTest =
testGroup "Parse MinIO Admin API HealStatus JSON test" $ testGroup "Parse MinIO Admin API HealStatus JSON test" $
map map
( \(tName, tDesc, tfn, tVal) -> ( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $ testCase tName $
tfn (eitherDecode tVal :: Either [Char] HealStatus) assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStatus)
) )
testCases testCases
where where
@ -101,8 +102,9 @@ parseHealStartRespTest =
testGroup "Parse MinIO Admin API HealStartResp JSON test" $ testGroup "Parse MinIO Admin API HealStartResp JSON test" $
map map
( \(tName, tDesc, tfn, tVal) -> ( \(tName, tDesc, tfn, tVal) ->
testCase tName $ assertBool tDesc $ testCase tName $
tfn (eitherDecode tVal :: Either [Char] HealStartResp) assertBool tDesc $
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
) )
testCases testCases
where where

View File

@ -34,7 +34,7 @@ jsonParserTests =
] ]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a) tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act tryValidationErr = try
assertValidationErr :: MErrV -> Assertion assertValidationErr :: MErrV -> Assertion
assertValidationErr e = assertFailure $ "Failed due to validation error => " ++ show e assertValidationErr e = assertFailure $ "Failed due to validation error => " ++ show e
@ -43,9 +43,9 @@ testParseErrResponseJSON :: Assertion
testParseErrResponseJSON = do testParseErrResponseJSON = do
-- 1. Test parsing of an invalid error json. -- 1. Test parsing of an invalid error json.
parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON" parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
when (isRight parseResE) when (isRight parseResE) $
$ assertFailure assertFailure $
$ "Parsing should have failed => " ++ show parseResE "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(jsondata, sErr) -> do forM_ cases $ \(jsondata, sErr) -> do
parseErr <- tryValidationErr $ parseErrResponseJSON jsondata parseErr <- tryValidationErr $ parseErrResponseJSON jsondata

View File

@ -19,7 +19,6 @@ module Network.Minio.TestHelpers
) )
where where
import Lib.Prelude
import Network.Minio.Data import Network.Minio.Data
newtype TestNS = TestNS {testNamespace :: Text} newtype TestNS = TestNS {testNamespace :: Text}

View File

@ -19,7 +19,6 @@ module Network.Minio.Utils.Test
) )
where where
import Lib.Prelude
import Network.Minio.Utils import Network.Minio.Utils
import Test.Tasty import Test.Tasty
import Test.Tasty.HUnit import Test.Tasty.HUnit

View File

@ -20,6 +20,7 @@ module Network.Minio.XmlGenerator.Test
) )
where where
import qualified Data.ByteString.Lazy as LBS
import Lib.Prelude import Lib.Prelude
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.TestHelpers import Network.Minio.TestHelpers
@ -28,6 +29,7 @@ import Network.Minio.XmlParser (parseNotification)
import Test.Tasty import Test.Tasty
import Test.Tasty.HUnit import Test.Tasty.HUnit
import Text.RawString.QQ (r) import Text.RawString.QQ (r)
import Text.XML (def, parseLBS)
xmlGeneratorTests :: TestTree xmlGeneratorTests :: TestTree
xmlGeneratorTests = xmlGeneratorTests =
@ -90,11 +92,12 @@ testMkPutNotificationRequest =
"1" "1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut] [ObjectCreatedPut]
( Filter $ FilterKey $ ( Filter $
FilterRules FilterKey $
[ FilterRule "prefix" "images/", FilterRules
FilterRule "suffix" ".jpg" [ FilterRule "prefix" "images/",
] FilterRule "suffix" ".jpg"
]
), ),
NotificationConfig NotificationConfig
"" ""
@ -119,7 +122,13 @@ testMkPutNotificationRequest =
testMkSelectRequest :: Assertion testMkSelectRequest :: Assertion
testMkSelectRequest = mapM_ assertFn cases testMkSelectRequest = mapM_ assertFn cases
where where
assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a assertFn (a, b) =
let generatedReqDoc = parseLBS def $ LBS.fromStrict $ mkSelectRequest a
expectedReqDoc = parseLBS def $ LBS.fromStrict b
in case (generatedReqDoc, expectedReqDoc) of
(Right genDoc, Right expDoc) -> assertEqual "selectRequest XML should match: " expDoc genDoc
(Left err, _) -> assertFailure $ "Generated selectRequest failed to parse as XML" ++ show err
(_, Left err) -> assertFailure $ "Expected selectRequest failed to parse as XML" ++ show err
cases = cases =
[ ( SelectRequest [ ( SelectRequest
"Select * from S3Object" "Select * from S3Object"
@ -142,32 +151,32 @@ testMkSelectRequest = mapM_ assertFn cases
<> quoteEscapeCharacter "\"" <> quoteEscapeCharacter "\""
) )
(Just False), (Just False),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>&#34;</QuoteCharacter><RecordDelimiter> [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><FieldDelimiter>,</FieldDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><RecordDelimiter>
</RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter> </RecordDelimiter></CSV></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
), ),
( setRequestProgressEnabled False ( setRequestProgressEnabled False $
$ setInputCompressionType CompressionTypeGzip setInputCompressionType CompressionTypeGzip $
$ selectRequest selectRequest
"Select * from S3Object" "Select * from S3Object"
documentJsonInput documentJsonInput
(outputJSONFromRecordDelimiter "\n"), (outputJSONFromRecordDelimiter "\n"),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter> [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
), ),
( setRequestProgressEnabled False ( setRequestProgressEnabled False $
$ setInputCompressionType CompressionTypeNone setInputCompressionType CompressionTypeNone $
$ selectRequest selectRequest
"Select * from S3Object" "Select * from S3Object"
defaultParquetInput defaultParquetInput
( outputCSVFromProps $ ( outputCSVFromProps $
quoteFields QuoteFieldsAsNeeded quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n" <> recordDelimiter "\n"
<> fieldDelimiter "," <> fieldDelimiter ","
<> quoteCharacter "\"" <> quoteCharacter "\""
<> quoteEscapeCharacter "\"" <> quoteEscapeCharacter "\""
), ),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>&#34;</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter> [r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>&#34;</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|] </RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
) )
] ]

View File

@ -49,7 +49,7 @@ xmlParserTests =
] ]
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a) tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act tryValidationErr = try
assertValidtionErr :: MErrV -> Assertion assertValidtionErr :: MErrV -> Assertion
assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e
@ -62,9 +62,9 @@ testParseLocation :: Assertion
testParseLocation = do testParseLocation = do
-- 1. Test parsing of an invalid location constraint xml. -- 1. Test parsing of an invalid location constraint xml.
parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml" parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
when (isRight parseResE) when (isRight parseResE) $
$ assertFailure assertFailure $
$ "Parsing should have failed => " ++ show parseResE "Parsing should have failed => " ++ show parseResE
forM_ cases $ \(xmldata, expectedLocation) -> do forM_ cases $ \(xmldata, expectedLocation) -> do
parseLocE <- tryValidationErr $ parseLocation xmldata parseLocE <- tryValidationErr $ parseLocation xmldata
@ -344,11 +344,12 @@ testParseNotification = do
"1" "1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue" "arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut] [ObjectCreatedPut]
( Filter $ FilterKey $ ( Filter $
FilterRules FilterKey $
[ FilterRule "prefix" "images/", FilterRules
FilterRule "suffix" ".jpg" [ FilterRule "prefix" "images/",
] FilterRule "suffix" ".jpg"
]
), ),
NotificationConfig NotificationConfig
"" ""

View File

@ -1,5 +1,5 @@
-- --
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc. -- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
-- --
-- Licensed under the Apache License, Version 2.0 (the "License"); -- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License. -- you may not use this file except in compliance with the License.
@ -20,7 +20,6 @@ import Lib.Prelude
import Network.Minio.API.Test import Network.Minio.API.Test
import Network.Minio.CopyObject import Network.Minio.CopyObject
import Network.Minio.Data import Network.Minio.Data
import Network.Minio.PutObject
import Network.Minio.Utils.Test import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test import Network.Minio.XmlParser.Test
@ -55,31 +54,33 @@ qcProps =
\n -> \n ->
let (pns, offs, sizes) = L.unzip3 (selectPartSizes n) let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
-- check that pns increments from 1. -- check that pns increments from 1.
isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1 ..] isPNumsAscendingFrom1 = all (uncurry (==)) $ zip pns [1 ..]
consPairs [] = [] consPairs [] = []
consPairs [_] = [] consPairs [_] = []
consPairs (a : (b : c)) = (a, b) : (consPairs (b : c)) consPairs (a : (b : c)) = (a, b) : consPairs (b : c)
-- check `offs` is monotonically increasing. -- check `offs` is monotonically increasing.
isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs isOffsetsAsc = all (uncurry (<)) $ consPairs offs
-- check sizes sums to n. -- check sizes sums to n.
isSumSizeOk = sum sizes == n isSumSizeOk = sum sizes == n
-- check sizes are constant except last -- check sizes are constant except last
isSizesConstantExceptLast = isSizesConstantExceptLast =
all (\(a, b) -> a == b) (consPairs $ L.init sizes) all (uncurry (==)) (consPairs $ L.init sizes)
-- check each part except last is at least minPartSize; -- check each part except last is at least minPartSize;
-- last part may be 0 only if it is the only part. -- last part may be 0 only if it is the only part.
nparts = length sizes nparts = length sizes
isMinPartSizeOk = isMinPartSizeOk =
if if
| nparts > 1 -> -- last part can be smaller but > 0 | nparts > 1 -> -- last part can be smaller but > 0
all (>= minPartSize) (take (nparts - 1) sizes) all (>= minPartSize) (take (nparts - 1) sizes)
&& all (\s -> s > 0) (drop (nparts - 1) sizes) && all (> 0) (drop (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here. | nparts == 1 -> -- size may be 0 here.
maybe True (\x -> x >= 0 && x <= minPartSize) $ maybe True (\x -> x >= 0 && x <= minPartSize) $
headMay sizes listToMaybe sizes
| otherwise -> False | otherwise -> False
in n < 0 in n < 0
|| ( isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk || ( isPNumsAscendingFrom1
&& isOffsetsAsc
&& isSumSizeOk
&& isSizesConstantExceptLast && isSizesConstantExceptLast
&& isMinPartSizeOk && isMinPartSizeOk
), ),
@ -89,23 +90,24 @@ qcProps =
-- is last part's snd offset end? -- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start -- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs isFirstPartOk = maybe False ((start ==) . fst) $ listToMaybe pairs
-- each pair is >=64MiB except last, and all those parts -- each pair is >=64MiB except last, and all those parts
-- have same size. -- have same size.
initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs initSizes = maybe [] (map (\(a, b) -> b - a + 1) . init) (nonEmpty pairs)
isPartSizesOk = isPartSizesOk =
all (>= minPartSize) initSizes all (>= minPartSize) initSizes
&& maybe && maybe
True True
(\k -> all (== k) initSizes) (\k -> all (== k) initSizes)
(headMay initSizes) (listToMaybe initSizes)
-- returned offsets are contiguous. -- returned offsets are contiguous.
fsts = drop 1 $ map fst pairs fsts = drop 1 $ map fst pairs
snds = take (length pairs - 1) $ map snd pairs snds = take (length pairs - 1) $ map snd pairs
isContParts = isContParts =
length fsts == length snds length fsts == length snds
&& and (map (\(a, b) -> a == b + 1) $ zip fsts snds) && all (\(a, b) -> a == b + 1) (zip fsts snds)
in start < 0 || start > end in start < 0
|| start > end
|| (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts), || (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts),
QC.testProperty "mkSSECKey:" $ QC.testProperty "mkSSECKey:" $
\w8s -> \w8s ->