Merge branch 'master' into uni2work
commit 3dcb276521
230
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,230 @@
|
||||
name: CI
|
||||
|
||||
# Trigger the workflow on push or pull request, but only for the master branch
|
||||
on:
|
||||
pull_request:
|
||||
branches: [master]
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
# This ensures that previous jobs for the PR are canceled when the PR is
|
||||
# updated.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
# Env vars for tests
|
||||
env:
|
||||
MINIO_ACCESS_KEY: minio
|
||||
MINIO_SECRET_KEY: minio123
|
||||
MINIO_LOCAL: 1
|
||||
MINIO_SECURE: 1
|
||||
|
||||
jobs:
|
||||
ormolu:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: haskell-actions/run-ormolu@v12
|
||||
with:
|
||||
version: "0.5.0.1"
|
||||
|
||||
hlint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: 'Set up HLint'
|
||||
uses: haskell/actions/hlint-setup@v2
|
||||
with:
|
||||
version: '3.5'
|
||||
|
||||
- name: 'Run HLint'
|
||||
uses: haskell/actions/hlint-run@v2
|
||||
with:
|
||||
path: '["src/", "test/", "examples"]'
|
||||
fail-on: warning
|
||||
|
||||
cabal:
|
||||
name: ${{ matrix.os }} / ghc-${{ matrix.ghc }} / cabal-${{ matrix.cabal }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
needs: ormolu
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, windows-latest] # Removed macos-latest due to cert issues.
|
||||
cabal: ["3.6", "3.8", "latest"]
|
||||
ghc:
|
||||
- "9.4"
|
||||
- "9.2"
|
||||
- "9.0"
|
||||
- "8.10"
|
||||
- "8.8"
|
||||
- "8.6"
|
||||
exclude:
|
||||
- os: windows-latest
|
||||
ghc: "9.4"
|
||||
cabal: "3.6"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
|
||||
|
||||
- uses: haskell/actions/setup@v2
|
||||
id: setup-haskell-cabal
|
||||
name: Setup Haskell
|
||||
with:
|
||||
ghc-version: ${{ matrix.ghc }}
|
||||
cabal-version: ${{ matrix.cabal }}
|
||||
|
||||
- name: Configure
|
||||
run: |
|
||||
cabal configure --enable-tests --enable-benchmarks --test-show-details=direct -fexamples -fdev -flive-test
|
||||
|
||||
- name: Freeze
|
||||
run: |
|
||||
cabal freeze
|
||||
|
||||
- uses: actions/cache@v3
|
||||
name: Cache ~/.cabal/packages, ~/.cabal/store and dist-newstyle
|
||||
with:
|
||||
path: |
|
||||
~/.cabal/packages
|
||||
~/.cabal/store
|
||||
dist-newstyle
|
||||
key: ${{ runner.os }}-${{ matrix.ghc }}-${{ hashFiles('**/*.cabal', '**/cabal.project', '**/cabal.project.freeze') }}
|
||||
restore-keys: ${{ runner.os }}-${{ matrix.ghc }}-
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
cabal build --only-dependencies
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
cabal build
|
||||
|
||||
- name: Setup MinIO for testing (Linux)
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
|
||||
sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
|
||||
sudo update-ca-certificates
|
||||
|
||||
- name: Setup MinIO for testing (MacOS)
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
|
||||
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
|
||||
|
||||
- name: Setup MinIO for testing (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
|
||||
Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
|
||||
Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
|
||||
Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
|
||||
|
||||
- name: Test (Non-Windows)
|
||||
if: matrix.os != 'windows-latest'
|
||||
run: |
|
||||
/tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
|
||||
ghc --version
|
||||
cabal --version
|
||||
cabal test
|
||||
|
||||
- name: Test (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
|
||||
ghc --version
|
||||
cabal --version
|
||||
cabal test
|
||||
|
||||
stack:
|
||||
name: stack / ghc ${{ matrix.ghc }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
ghc:
|
||||
- "8.10.7"
|
||||
- "9.0.2"
|
||||
- "9.2.4"
|
||||
os: [ubuntu-latest]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
if: github.event.action == 'opened' || github.event.action == 'synchronize' || github.event.ref == 'refs/heads/master'
|
||||
|
||||
- uses: haskell/actions/setup@v2
|
||||
name: Setup Haskell Stack
|
||||
with:
|
||||
enable-stack: true
|
||||
ghc-version: ${{ matrix.ghc }}
|
||||
stack-version: 'latest'
|
||||
|
||||
- uses: actions/cache@v3
|
||||
name: Cache ~/.stack
|
||||
with:
|
||||
path: ~/.stack
|
||||
key: ${{ runner.os }}-stack-global-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-stack-global-
|
||||
- uses: actions/cache@v3
|
||||
name: Cache .stack-work
|
||||
with:
|
||||
path: .stack-work
|
||||
key: ${{ runner.os }}-stack-work-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}-${{ hashFiles('**/*.hs') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-stack-work-
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --only-dependencies
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples --flag minio-hs:live-test --flag minio-hs:dev
|
||||
|
||||
- name: Setup MinIO for testing (Linux)
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
|
||||
sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
|
||||
sudo update-ca-certificates
|
||||
|
||||
- name: Setup MinIO for testing (MacOS)
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
|
||||
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
|
||||
|
||||
- name: Setup MinIO for testing (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
|
||||
Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
|
||||
Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
|
||||
Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
|
||||
|
||||
- name: Test (Non-Windows)
|
||||
if: matrix.os != 'windows-latest'
|
||||
run: |
|
||||
/tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
|
||||
ghc --version
|
||||
stack --version
|
||||
stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev
|
||||
|
||||
- name: Test (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
|
||||
ghc --version
|
||||
cabal --version
|
||||
stack test --system-ghc --flag minio-hs:live-test --flag minio-hs:dev
|
||||
122
.github/workflows/haskell-cabal.yml
vendored
@ -1,122 +0,0 @@
|
||||
name: Haskell CI (Cabal)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run every weekday
|
||||
- cron: '0 0 * * 1-5'
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
cabal-build:
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
ghc: ['8.4', '8.6', '8.8', '8.10']
|
||||
cabal: ['3.2']
|
||||
os: [ubuntu-latest, macOS-latest]
|
||||
experimental: [false]
|
||||
include:
|
||||
- ghc: '8.6'
|
||||
cabal: '3.2'
|
||||
os: windows-latest
|
||||
experimental: false
|
||||
- ghc: '8.10'
|
||||
cabal: '3.2'
|
||||
os: windows-latest
|
||||
experimental: false
|
||||
|
||||
# Appears to be buggy to build in windows with ghc 8.4 and 8.8
|
||||
- ghc: '8.4'
|
||||
cabal: '3.2'
|
||||
os: windows-latest
|
||||
experimental: true
|
||||
- ghc: '8.8'
|
||||
cabal: '3.2'
|
||||
os: windows-latest
|
||||
experimental: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-haskell@v1.1
|
||||
with:
|
||||
ghc-version: ${{ matrix.ghc }}
|
||||
cabal-version: ${{ matrix.cabal }}
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v2
|
||||
env:
|
||||
cache-name: cabal-cache-${{ matrix.ghc }}-${{ matrix.cabal }}
|
||||
with:
|
||||
path: |
|
||||
~/.cabal
|
||||
~/.stack
|
||||
%appdata%\cabal
|
||||
%LOCALAPPDATA%\Programs\stack
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
|
||||
${{ runner.os }}-build-${{ env.cache-name }}-
|
||||
${{ runner.os }}-build-
|
||||
${{ runner.os }}-
|
||||
|
||||
- name: Before install (Linux)
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
|
||||
sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
|
||||
sudo update-ca-certificates
|
||||
|
||||
- name: Before install (MacOS)
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
|
||||
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
|
||||
|
||||
- name: Before install (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
|
||||
Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
|
||||
Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
|
||||
Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
|
||||
|
||||
- name: Install dependencies, build and test (Non-Windows)
|
||||
if: matrix.os != 'windows-latest'
|
||||
env:
|
||||
MINIO_ACCESS_KEY: minio
|
||||
MINIO_SECRET_KEY: minio123
|
||||
MINIO_LOCAL: 1
|
||||
MINIO_SECURE: 1
|
||||
continue-on-error: ${{ matrix.experimental }}
|
||||
run: |
|
||||
/tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
|
||||
ghc --version
|
||||
cabal --version
|
||||
cabal new-update
|
||||
cabal new-build --enable-tests --enable-benchmarks -fexamples
|
||||
cabal new-test --enable-tests -flive-test
|
||||
|
||||
- name: Install dependencies, build and test (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
env:
|
||||
MINIO_ACCESS_KEY: minio
|
||||
MINIO_SECRET_KEY: minio123
|
||||
MINIO_LOCAL: 1
|
||||
MINIO_SECURE: 1
|
||||
continue-on-error: ${{ matrix.experimental }}
|
||||
run: |
|
||||
Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
|
||||
ghc --version
|
||||
cabal --version
|
||||
cabal new-update
|
||||
cabal new-build --enable-tests --enable-benchmarks -fexamples
|
||||
cabal new-test --enable-tests -flive-test
|
||||
108
.github/workflows/haskell-stack.yml
vendored
@ -1,108 +0,0 @@
|
||||
name: Haskell CI (Stack)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run every weekday
|
||||
- cron: '0 0 * * 1-5'
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
stack-build:
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
ghc: ['8.8']
|
||||
cabal: ['3.2']
|
||||
os: [ubuntu-latest, macOS-latest]
|
||||
experimental: [false]
|
||||
include:
|
||||
# Appears to be buggy to build in windows with ghc 8.8
|
||||
- ghc: '8.8'
|
||||
cabal: '3.2'
|
||||
os: windows-latest
|
||||
experimental: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-haskell@v1.1
|
||||
with:
|
||||
ghc-version: ${{ matrix.ghc }}
|
||||
cabal-version: ${{ matrix.cabal }}
|
||||
enable-stack: true
|
||||
|
||||
- name: Cache
|
||||
uses: actions/cache@v2
|
||||
env:
|
||||
cache-name: stack-cache-${{ matrix.ghc }}-${{ matrix.cabal }}
|
||||
with:
|
||||
path: |
|
||||
~/.cabal
|
||||
~/.stack
|
||||
%appdata%\cabal
|
||||
%LOCALAPPDATA%\Programs\stack
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/*.cabal') }}-${{ hashFiles('**/stack.yaml') }}
|
||||
${{ runner.os }}-build-${{ env.cache-name }}-
|
||||
${{ runner.os }}-build-
|
||||
${{ runner.os }}-
|
||||
|
||||
- name: Before install (Linux)
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
|
||||
sudo cp /tmp/minio-config/certs/public.crt /usr/local/share/ca-certificates/
|
||||
sudo update-ca-certificates
|
||||
|
||||
- name: Before install (MacOS)
|
||||
if: matrix.os == 'macos-latest'
|
||||
run: |
|
||||
mkdir -p /tmp/minio /tmp/minio-config/certs
|
||||
cp test/cert/* /tmp/minio-config/certs/
|
||||
(cd /tmp/minio; wget -q https://dl.min.io/server/minio/release/darwin-amd64/minio; chmod +x ./minio)
|
||||
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/minio-config/certs/public.crt
|
||||
|
||||
- name: Before install (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
run: |
|
||||
New-Item -ItemType Directory -Path "$env:temp/minio-config/certs/"
|
||||
Copy-Item -Path test\cert\* -Destination "$env:temp/minio-config/certs/"
|
||||
Invoke-WebRequest -Uri https://dl.minio.io/server/minio/release/windows-amd64/minio.exe -OutFile $HOME/minio.exe
|
||||
Import-Certificate -FilePath "$env:temp/minio-config/certs/public.crt" -CertStoreLocation Cert:\LocalMachine\Root
|
||||
|
||||
- name: Install dependencies, build and test (Non-Windows)
|
||||
if: matrix.os != 'windows-latest'
|
||||
env:
|
||||
MINIO_ACCESS_KEY: minio
|
||||
MINIO_SECRET_KEY: minio123
|
||||
MINIO_LOCAL: 1
|
||||
MINIO_SECURE: 1
|
||||
continue-on-error: ${{ matrix.experimental }}
|
||||
run: |
|
||||
/tmp/minio/minio server --quiet --certs-dir /tmp/minio-config/certs data1 data2 data3 data4 2>&1 > minio.log &
|
||||
ghc --version
|
||||
stack --version
|
||||
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
|
||||
stack test --system-ghc --flag minio-hs:live-test
|
||||
|
||||
- name: Install dependencies, build and test (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
env:
|
||||
MINIO_ACCESS_KEY: minio
|
||||
MINIO_SECRET_KEY: minio123
|
||||
MINIO_LOCAL: 1
|
||||
MINIO_SECURE: 1
|
||||
continue-on-error: ${{ matrix.experimental }}
|
||||
run: |
|
||||
Start-Process -NoNewWindow -FilePath "$HOME/minio.exe" -ArgumentList "--certs-dir", "$env:temp/minio-config/certs", "server", "$env:temp/data1", "$env:temp/data2", "$env:temp/data3", "$env:temp/data4"
|
||||
ghc --version
|
||||
stack --version
|
||||
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
|
||||
stack test --system-ghc --flag minio-hs:live-test
|
||||
61
.travis.yml
@ -1,61 +0,0 @@
|
||||
sudo: true
|
||||
language: haskell
|
||||
|
||||
git:
|
||||
depth: 5
|
||||
|
||||
cabal: "3.0"
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- "$HOME/.cabal/store"
|
||||
- "$HOME/.stack"
|
||||
- "$TRAVIS_BUILD_DIR/.stack-work"
|
||||
|
||||
matrix:
|
||||
include:
|
||||
|
||||
# Cabal
|
||||
- ghc: 8.4.4
|
||||
- ghc: 8.6.5
|
||||
- ghc: 8.8.3
|
||||
|
||||
# Stack
|
||||
- ghc: 8.6.5
|
||||
env: STACK_YAML="$TRAVIS_BUILD_DIR/stack.yaml"
|
||||
|
||||
before_install:
|
||||
- sudo apt-get install devscripts
|
||||
- mkdir /tmp/minio /tmp/certs
|
||||
- (cd /tmp/minio; wget https://dl.min.io/server/minio/release/linux-amd64/minio; chmod +x ./minio)
|
||||
- (cd /tmp/certs; openssl req -newkey rsa:2048 -nodes -keyout private.key -x509 -days 36500 -out public.crt -subj "/C=US/ST=NRW/L=Earth/O=CompanyName/OU=IT/CN=localhost/emailAddress=email@example.com")
|
||||
- sudo cp /tmp/certs/public.crt /usr/local/share/ca-certificates/
|
||||
- sudo update-ca-certificates
|
||||
- MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 /tmp/minio/minio server --quiet --certs-dir /tmp/certs data 2>&1 > minio.log &
|
||||
|
||||
install:
|
||||
- |
|
||||
if [ -z "$STACK_YAML" ]; then
|
||||
ghc --version
|
||||
cabal --version
|
||||
cabal new-update
|
||||
cabal new-build --enable-tests --enable-benchmarks -fexamples
|
||||
else
|
||||
# install stack
|
||||
curl -sSL https://get.haskellstack.org/ | sh
|
||||
|
||||
# build project with stack
|
||||
stack --version
|
||||
stack build --system-ghc --test --bench --no-run-tests --no-run-benchmarks --flag minio-hs:examples
|
||||
fi
|
||||
|
||||
script:
|
||||
- |
|
||||
if [ -z "$STACK_YAML" ]; then
|
||||
MINIO_LOCAL=1 MINIO_SECURE=1 cabal new-test --enable-tests -flive-test
|
||||
else
|
||||
MINIO_LOCAL=1 MINIO_SECURE=1 stack test --system-ghc --flag minio-hs:live-test
|
||||
fi
|
||||
|
||||
notifications:
|
||||
email: false
|
||||
31
CHANGELOG.md
@ -1,6 +1,37 @@
Changelog
==========

## Version 1.7.0 -- Unreleased

* Fix data type `EventMessage` to not export partial fields (#179)
* Bump up min bound on time dep and fix deprecation warnings (#181)
* Add `dev` flag to cabal for building with warnings as errors (#182)
* Fix AWS region map (#185)
* Fix XML generator tests (#187)
* Add support for STS Assume Role API (#188)

### Breaking changes in 1.7.0

* `Credentials` type has been removed. Use `CredentialValue` instead.
* `Provider` type has been replaced with `CredentialLoader`.
* `EventMessage` data type is updated.
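
A minimal migration sketch for the first of these changes, assuming the constructor order used in `examples/AssumeRole.hs` (access key, secret key, optional session token) and the public play.min.io keys shown there; the helper name `playCredential` is illustrative:

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Network.Minio
import Prelude

-- A static credential, previously expressed with the removed 'Credentials'
-- type, is now a 'CredentialValue': access key, secret key and an optional
-- session token. The key values below are the public play.min.io
-- credentials already used in this repository's examples.
playCredential :: CredentialValue
playCredential =
  CredentialValue
    "Q3AM3UQ867SPQQA43P2F"
    "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
    Nothing

main :: IO ()
main =
  -- Render the credential, as done in examples/AssumeRole.hs.
  print (credentialValueText playCredential)
```

For temporary credentials, the STS Assume Role support added in this release (`STSAssumeRole`, `requestSTSCredential`, `setSTSCredential`) can be used instead, as shown in `examples/AssumeRole.hs`.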

## Version 1.6.0

* HLint fixes - some types were changed to newtype (#173)
* Fix XML generation test for S3 SELECT (#161)
* Use region-specific endpoints for AWS S3 in presigned URLs (#164)
* Replace protolude with relude and build with GHC 9.0.2 (#168)
* Support aeson 2 (#169)
* CI updates and code formatting changes with ormolu 0.5.0.0

## Version 1.5.3

* Fix Windows build
* Fix support for Yandex Storage (#147)
* Fix for HEAD requests to S3/Minio (#155)
* Bump up some dependencies, new code formatting, GitHub CI, example fixes and other minor improvements.

## Version 1.5.2

* Fix region `us-west-2` for AWS S3 (#139)
87
README.md
@ -1,10 +1,8 @@
# MinIO Client SDK for Haskell [](https://travis-ci.org/minio/minio-hs)[](https://hackage.haskell.org/package/minio-hs)[](https://slack.min.io)
# MinIO Haskell Client SDK for Amazon S3 Compatible Cloud Storage [](https://github.com/minio/minio-hs/actions/workflows/ci.yml)[](https://hackage.haskell.org/package/minio-hs)[](https://slack.min.io)

The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and Amazon S3 compatible object storage server.
The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.io) and any Amazon S3 compatible object storage.

## Minimum Requirements

- The Haskell [stack](https://docs.haskellstack.org/en/stable/README/)
This guide assumes that you have a working [Haskell development environment](https://www.haskell.org/downloads/).

## Installation

@ -12,20 +10,35 @@ The MinIO Haskell Client SDK provides simple APIs to access [MinIO](https://min.

Simply add `minio-hs` to your project's `.cabal` dependencies section or if you are using hpack, to your `package.yaml` file as usual.

### Try it out directly with `ghci`
### Try it out in a [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop)

#### For a cabal based environment

Download the library source and change to the extracted directory:

``` sh
$ cabal get minio-hs
$ cd minio-hs-1.6.0/ # directory name could be different
```

Then load the `ghci` REPL environment with the library and browse the available APIs:

``` sh
$ cabal repl
ghci> :browse Network.Minio
```

#### For a stack based environment

From your home folder or any non-Haskell project directory, just run:

```sh
stack install minio-hs
```

Then start an interpreter session and browse the available APIs with:

```sh
$ stack ghci
> :browse Network.Minio
```
@ -134,44 +147,52 @@ main = do

### Development

To setup:
#### Download the source

```sh
git clone https://github.com/minio/minio-hs.git
$ git clone https://github.com/minio/minio-hs.git
$ cd minio-hs/
```

cd minio-hs/
#### Build the package:

stack install
```

Tests can be run with:
With `cabal`:

```sh
stack test
$ # Configure cabal for development enabling all optional flags defined by the package.
$ cabal configure --enable-tests --test-show-details=direct -fexamples -fdev -flive-test
$ cabal build
```

A section of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server may be slow. To run the tests against a locally running MinIO live server at `http://localhost:9000`, just set the environment `MINIO_LOCAL` to any value (and unset it to switch back to Play).
With `stack`:

To run the live server tests, set a build flag as shown below:
``` sh
$ stack build --test --no-run-tests --flag minio-hs:live-test --flag minio-hs:dev --flag minio-hs:examples
```
#### Running tests:

A section of the tests use the remote MinIO Play server at `https://play.min.io` by default. For library development, using this remote server may be slow. To run the tests against a locally running MinIO live server at `http://localhost:9000` with the credentials `access_key=minio` and `secret_key=minio123`, just set the environment `MINIO_LOCAL` to any value (and unset it to switch back to Play).

With `cabal`:

```sh
stack test --flag minio-hs:live-test
# OR against a local MinIO server with:
MINIO_LOCAL=1 stack test --flag minio-hs:live-test
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
$ cabal test
```

The configured CI system always runs both test-suites for every change.
With `stack`:

Documentation can be locally built with:
``` sh
$ export MINIO_LOCAL=1 # to run live tests against local MinIO server
stack test --flag minio-hs:live-test --flag minio-hs:dev
```

This will run all the test suites.

#### Building documentation:

```sh
stack haddock
$ cabal haddock
$ # OR
$ stack haddock
```
19
Setup.hs
@ -1,19 +0,0 @@
--
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

import Distribution.Simple

main = defaultMain
47
examples/AssumeRole.hs
Normal file
@ -0,0 +1,47 @@
--
-- MinIO Haskell SDK, (C) 2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
{-# LANGUAGE OverloadedStrings #-}

import Control.Monad.IO.Class (liftIO)
import Network.Minio
import Prelude

main :: IO ()
main = do
  -- Use play credentials for example.
  let assumeRole =
        STSAssumeRole
          ( CredentialValue
              "Q3AM3UQ867SPQQA43P2F"
              "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
              Nothing
          )
          $ defaultSTSAssumeRoleOptions
            { saroLocation = Just "us-east-1",
              saroEndpoint = Just "https://play.min.io:9000"
            }

  -- Retrieve temporary credentials and print them.
  cv <- requestSTSCredential assumeRole
  print $ "Temporary credentials" ++ show (credentialValueText $ fst cv)
  print $ "Expiry" ++ show (snd cv)

  -- Configure 'ConnectInfo' to request temporary credentials on demand.
  ci <- setSTSCredential assumeRole "https://play.min.io"
  res <- runMinio ci $ do
    buckets <- listBuckets
    liftIO $ print $ "Top 5 buckets: " ++ show (take 5 buckets)
  print res
@ -19,7 +19,6 @@
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
|
||||
import Data.Monoid ((<>))
|
||||
import Data.Text (pack)
|
||||
import Network.Minio
|
||||
import Options.Applicative
|
||||
@ -71,5 +70,5 @@ main = do
|
||||
fPutObject bucket object filepath defaultPutObjectOptions
|
||||
|
||||
case res of
|
||||
Left e -> putStrLn $ "file upload failed due to " ++ (show e)
|
||||
Left e -> putStrLn $ "file upload failed due to " ++ show e
|
||||
Right () -> putStrLn "file upload succeeded."
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import Network.Minio
|
||||
import Network.Minio.AdminAPI
|
||||
@ -25,6 +24,7 @@ import Prelude
|
||||
main :: IO ()
|
||||
main = do
|
||||
res <-
|
||||
runMinio minioPlayCI $
|
||||
runMinio
|
||||
minioPlayCI
|
||||
getConfig
|
||||
print res
|
||||
|
||||
@ -37,5 +37,5 @@ main = do
|
||||
C.connect (gorObjectStream src) $ CB.sinkFileCautious "/tmp/my-object"
|
||||
|
||||
case res of
|
||||
Left e -> putStrLn $ "getObject failed." ++ (show e)
|
||||
Left e -> putStrLn $ "getObject failed." ++ show e
|
||||
Right _ -> putStrLn "getObject succeeded."
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import Network.Minio
|
||||
import Network.Minio.AdminAPI
|
||||
|
||||
@ -34,9 +34,9 @@ main = do
|
||||
-- Performs a recursive listing of incomplete uploads under bucket "test"
|
||||
-- on a local minio server.
|
||||
res <-
|
||||
runMinio minioPlayCI
|
||||
$ runConduit
|
||||
$ listIncompleteUploads bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
|
||||
runMinio minioPlayCI $
|
||||
runConduit $
|
||||
listIncompleteUploads bucket Nothing True .| mapM_C (liftIO . print)
|
||||
print res
|
||||
|
||||
{-
|
||||
|
||||
@ -34,9 +34,9 @@ main = do
|
||||
-- Performs a recursive listing of all objects under bucket "test"
|
||||
-- on play.min.io.
|
||||
res <-
|
||||
runMinio minioPlayCI
|
||||
$ runConduit
|
||||
$ listObjects bucket Nothing True .| mapM_C (\v -> (liftIO $ print v))
|
||||
runMinio minioPlayCI $
|
||||
runConduit $
|
||||
listObjects bucket Nothing True .| mapM_C (liftIO . print)
|
||||
print res
|
||||
|
||||
{-
|
||||
|
||||
@ -46,7 +46,7 @@ main = do
|
||||
res <- runMinio minioPlayCI $ do
|
||||
liftIO $ B.putStrLn "Upload a file that we will fetch with a presigned URL..."
|
||||
putObject bucket object (CC.repeat "a") (Just kb15) defaultPutObjectOptions
|
||||
liftIO $ putStrLn $ "Done. Object created at: my-bucket/my-object"
|
||||
liftIO $ putStrLn "Done. Object created at: my-bucket/my-object"
|
||||
|
||||
-- Extract Etag of uploaded object
|
||||
oi <- statObject bucket object defaultGetObjectOptions
|
||||
@ -77,7 +77,8 @@ main = do
|
||||
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
|
||||
curlCmd =
|
||||
B.intercalate " " $
|
||||
["curl --fail"] ++ map hdrOpt headers
|
||||
["curl --fail"]
|
||||
++ map hdrOpt headers
|
||||
++ ["-o /tmp/myfile", B.concat ["'", url, "'"]]
|
||||
|
||||
putStrLn $
|
||||
|
||||
@ -55,7 +55,7 @@ main = do
|
||||
]
|
||||
|
||||
case policyE of
|
||||
Left err -> putStrLn $ show err
|
||||
Left err -> print err
|
||||
Right policy -> do
|
||||
res <- runMinio minioPlayCI $ do
|
||||
(url, formData) <- presignedPostPolicy policy
|
||||
@ -73,13 +73,15 @@ main = do
|
||||
]
|
||||
formOptions = B.intercalate " " $ map formFn $ H.toList formData
|
||||
|
||||
return $ B.intercalate " " $
|
||||
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
|
||||
return $
|
||||
B.intercalate
|
||||
" "
|
||||
["curl", formOptions, "-F file=@/tmp/photo.jpg", url]
|
||||
|
||||
case res of
|
||||
Left e -> putStrLn $ "post-policy error: " ++ (show e)
|
||||
Left e -> putStrLn $ "post-policy error: " ++ show e
|
||||
Right cmd -> do
|
||||
putStrLn $ "Put a photo at /tmp/photo.jpg and run command:\n"
|
||||
putStrLn "Put a photo at /tmp/photo.jpg and run command:\n"
|
||||
|
||||
-- print the generated curl command
|
||||
Char8.putStrLn cmd
|
||||
|
||||
@ -48,7 +48,8 @@ main = do
|
||||
let hdrOpt (k, v) = B.concat ["-H '", original k, ": ", v, "'"]
|
||||
curlCmd =
|
||||
B.intercalate " " $
|
||||
["curl "] ++ map hdrOpt headers
|
||||
["curl "]
|
||||
++ map hdrOpt headers
|
||||
++ ["-T /tmp/myfile", B.concat ["'", url, "'"]]
|
||||
|
||||
putStrLn $
|
||||
|
||||
@ -19,7 +19,7 @@
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import qualified Conduit as C
|
||||
import Control.Monad (when)
|
||||
import Control.Monad (unless)
|
||||
import Network.Minio
|
||||
import Prelude
|
||||
|
||||
@ -35,7 +35,7 @@ main = do
|
||||
|
||||
res <- runMinio minioPlayCI $ do
|
||||
exists <- bucketExists bucket
|
||||
when (not exists) $
|
||||
unless exists $
|
||||
makeBucket bucket Nothing
|
||||
|
||||
C.liftIO $ putStrLn "Uploading csv object"
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import Network.Minio
|
||||
import Network.Minio.AdminAPI
|
||||
@ -25,6 +24,7 @@ import Prelude
|
||||
main :: IO ()
|
||||
main = do
|
||||
res <-
|
||||
runMinio minioPlayCI $
|
||||
runMinio
|
||||
minioPlayCI
|
||||
getServerInfo
|
||||
print res
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import Network.Minio
|
||||
import Network.Minio.AdminAPI
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import Network.Minio
|
||||
import Network.Minio.AdminAPI
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
import Network.Minio
|
||||
import Network.Minio.AdminAPI
|
||||
@ -25,6 +24,7 @@ import Prelude
|
||||
main :: IO ()
|
||||
main = do
|
||||
res <-
|
||||
runMinio minioPlayCI $
|
||||
runMinio
|
||||
minioPlayCI
|
||||
serviceStatus
|
||||
print res
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
cabal-version: 2.2
|
||||
cabal-version: 2.4
|
||||
name: minio-hs
|
||||
version: 1.5.2
|
||||
version: 1.7.0
|
||||
synopsis: A MinIO Haskell Library for Amazon S3 compatible cloud
|
||||
storage.
|
||||
description: The MinIO Haskell client library provides simple APIs to
|
||||
@ -14,29 +14,70 @@ maintainer: dev@min.io
|
||||
category: Network, AWS, Object Storage
|
||||
build-type: Simple
|
||||
stability: Experimental
|
||||
extra-source-files:
|
||||
extra-doc-files:
|
||||
CHANGELOG.md
|
||||
CONTRIBUTING.md
|
||||
docs/API.md
|
||||
examples/*.hs
|
||||
README.md
|
||||
extra-source-files:
|
||||
examples/*.hs
|
||||
stack.yaml
|
||||
tested-with: GHC == 8.6.5
|
||||
, GHC == 8.8.4
|
||||
, GHC == 8.10.7
|
||||
, GHC == 9.0.2
|
||||
, GHC == 9.2.7
|
||||
, GHC == 9.4.5
|
||||
|
||||
source-repository head
|
||||
type: git
|
||||
location: https://github.com/minio/minio-hs.git
|
||||
|
||||
Flag dev
|
||||
Description: Build package in development mode
|
||||
Default: False
|
||||
Manual: True
|
||||
|
||||
common base-settings
|
||||
ghc-options: -Wall
|
||||
-Wcompat
|
||||
-Widentities
|
||||
-Wincomplete-uni-patterns
|
||||
-Wincomplete-record-updates
|
||||
-haddock
|
||||
if impl(ghc >= 8.0)
|
||||
ghc-options: -Wredundant-constraints
|
||||
if impl(ghc >= 8.2)
|
||||
ghc-options: -fhide-source-paths
|
||||
if impl(ghc >= 8.4)
|
||||
ghc-options: -Wpartial-fields
|
||||
-- -Wmissing-export-lists
|
||||
if impl(ghc >= 8.8)
|
||||
ghc-options: -Wmissing-deriving-strategies
|
||||
-Werror=missing-deriving-strategies
|
||||
-- if impl(ghc >= 8.10)
|
||||
-- ghc-options: -Wunused-packages -- disabled due to bug related to mixin config
|
||||
if impl(ghc >= 9.0)
|
||||
ghc-options: -Winvalid-haddock
|
||||
if impl(ghc >= 9.2)
|
||||
ghc-options: -Wredundant-bang-patterns
|
||||
if flag(dev)
|
||||
ghc-options: -Werror
|
||||
|
||||
default-language: Haskell2010
|
||||
|
||||
default-extensions: BangPatterns
|
||||
, DerivingStrategies
|
||||
, FlexibleContexts
|
||||
, FlexibleInstances
|
||||
, LambdaCase
|
||||
, MultiParamTypeClasses
|
||||
, MultiWayIf
|
||||
, NoImplicitPrelude
|
||||
, OverloadedStrings
|
||||
, RankNTypes
|
||||
, ScopedTypeVariables
|
||||
, TypeFamilies
|
||||
, TupleSections
|
||||
|
||||
other-modules: Lib.Prelude
|
||||
, Network.Minio.API
|
||||
, Network.Minio.APICommon
|
||||
@ -54,10 +95,19 @@ common base-settings
|
||||
, Network.Minio.Utils
|
||||
, Network.Minio.XmlGenerator
|
||||
, Network.Minio.XmlParser
|
||||
, Network.Minio.XmlCommon
|
||||
, Network.Minio.JsonParser
|
||||
, Network.Minio.Credentials.Types
|
||||
, Network.Minio.Credentials.AssumeRole
|
||||
, Network.Minio.Credentials
|
||||
|
||||
mixins: base hiding (Prelude)
|
||||
, relude (Relude as Prelude)
|
||||
, relude
|
||||
|
||||
build-depends: base >= 4.7 && < 5
|
||||
, protolude >= 0.3 && < 0.4
|
||||
, aeson >= 1.2
|
||||
, relude >= 0.7 && < 2
|
||||
, aeson >= 1.2 && < 3
|
||||
, base64-bytestring >= 1.0
|
||||
, binary >= 0.8.5.0
|
||||
, bytestring >= 0.10
|
||||
@ -69,7 +119,6 @@ common base-settings
|
||||
, cryptonite-conduit >= 0.2
|
||||
, digest >= 0.0.1
|
||||
, directory
|
||||
, exceptions
|
||||
, filepath >= 1.4
|
||||
, http-client >= 0.5
|
||||
, http-client-tls
|
||||
@ -77,11 +126,12 @@ common base-settings
|
||||
, http-types >= 0.12
|
||||
, ini
|
||||
, memory >= 0.14
|
||||
, raw-strings-qq >= 1
|
||||
, network-uri
|
||||
, resourcet >= 1.2
|
||||
, retry
|
||||
, text >= 1.2
|
||||
, time >= 1.8
|
||||
, time >= 1.9
|
||||
, time-units ^>= 1.0.0
|
||||
, transformers >= 0.5
|
||||
, unliftio >= 0.2 && < 0.3
|
||||
, unliftio-core >= 0.2 && < 0.3
|
||||
@ -115,7 +165,9 @@ test-suite minio-hs-live-server-test
|
||||
, Network.Minio.Utils.Test
|
||||
, Network.Minio.XmlGenerator.Test
|
||||
, Network.Minio.XmlParser.Test
|
||||
, Network.Minio.Credentials
|
||||
build-depends: minio-hs
|
||||
, raw-strings-qq
|
||||
, tasty
|
||||
, tasty-hunit
|
||||
, tasty-quickcheck
|
||||
@ -130,6 +182,7 @@ test-suite minio-hs-test
|
||||
hs-source-dirs: test, src
|
||||
main-is: Spec.hs
|
||||
build-depends: minio-hs
|
||||
, raw-strings-qq
|
||||
, QuickCheck
|
||||
, tasty
|
||||
, tasty-hunit
|
||||
@ -146,6 +199,7 @@ test-suite minio-hs-test
|
||||
, Network.Minio.Utils.Test
|
||||
, Network.Minio.XmlGenerator.Test
|
||||
, Network.Minio.XmlParser.Test
|
||||
, Network.Minio.Credentials
|
||||
|
||||
Flag examples
|
||||
Description: Build the examples
|
||||
@ -292,6 +346,7 @@ executable SetConfig
|
||||
scope: private
|
||||
main-is: SetConfig.hs
|
||||
|
||||
source-repository head
|
||||
type: git
|
||||
location: https://github.com/minio/minio-hs
|
||||
executable AssumeRole
|
||||
import: examples-settings
|
||||
scope: private
|
||||
main-is: AssumeRole.hs
|
||||
|
||||
@ -20,6 +20,7 @@ module Lib.Prelude
|
||||
showBS,
|
||||
toStrictBS,
|
||||
fromStrictBS,
|
||||
lastMay,
|
||||
)
|
||||
where
|
||||
|
||||
@ -29,14 +30,6 @@ import Data.Time as Exports
|
||||
( UTCTime (..),
|
||||
diffUTCTime,
|
||||
)
|
||||
import Protolude as Exports hiding
|
||||
( Handler,
|
||||
catch,
|
||||
catches,
|
||||
throwIO,
|
||||
try,
|
||||
yield,
|
||||
)
|
||||
import UnliftIO as Exports
|
||||
( Handler,
|
||||
catch,
|
||||
@ -58,3 +51,6 @@ toStrictBS = LB.toStrict
|
||||
|
||||
fromStrictBS :: ByteString -> LByteString
|
||||
fromStrictBS = LB.fromStrict
|
||||
|
||||
lastMay :: [a] -> Maybe a
|
||||
lastMay a = last <$> nonEmpty a
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -16,7 +16,7 @@
|
||||
|
||||
-- |
|
||||
-- Module: Network.Minio
|
||||
-- Copyright: (c) 2017-2019 MinIO Dev Team
|
||||
-- Copyright: (c) 2017-2023 MinIO Dev Team
|
||||
-- License: Apache 2.0
|
||||
-- Maintainer: MinIO Dev Team <dev@min.io>
|
||||
--
|
||||
@ -24,13 +24,17 @@
|
||||
-- storage servers like MinIO.
|
||||
module Network.Minio
|
||||
( -- * Credentials
|
||||
Credentials (..),
|
||||
CredentialValue (..),
|
||||
credentialValueText,
|
||||
AccessKey (..),
|
||||
SecretKey (..),
|
||||
SessionToken (..),
|
||||
|
||||
-- ** Credential providers
|
||||
-- ** Credential Loaders
|
||||
|
||||
-- | Run actions that retrieve 'Credentials' from the environment or
|
||||
-- | Run actions that retrieve 'CredentialValue's from the environment or
|
||||
-- files or other custom sources.
|
||||
Provider,
|
||||
CredentialLoader,
|
||||
fromAWSConfigFile,
|
||||
fromAWSEnv,
|
||||
fromMinioEnv,
|
||||
@ -55,7 +59,17 @@ module Network.Minio
|
||||
awsCI,
|
||||
gcsCI,
|
||||
|
||||
-- ** STS Credential types
|
||||
STSAssumeRole (..),
|
||||
STSAssumeRoleOptions (..),
|
||||
defaultSTSAssumeRoleOptions,
|
||||
requestSTSCredential,
|
||||
setSTSCredential,
|
||||
ExpiryTime (..),
|
||||
STSCredentialProvider,
|
||||
|
||||
-- * Minio Monad
|
||||
|
||||
----------------
|
||||
|
||||
-- | The Minio Monad provides connection-reuse, bucket-location
|
||||
@ -225,15 +239,15 @@ This module exports the high-level MinIO API for object storage.
|
||||
import qualified Data.Conduit as C
|
||||
import qualified Data.Conduit.Binary as CB
|
||||
import qualified Data.Conduit.Combinators as CC
|
||||
import Lib.Prelude
|
||||
import Network.Minio.API
|
||||
import Network.Minio.CopyObject
|
||||
import Network.Minio.Credentials
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Errors
|
||||
import Network.Minio.ListOps
|
||||
import Network.Minio.PutObject
|
||||
import Network.Minio.S3API
|
||||
import Network.Minio.SelectAPI
|
||||
import Network.Minio.Utils
|
||||
|
||||
-- | Lists buckets.
|
||||
listBuckets :: Minio [BucketInfo]
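-- A minimal usage sketch, assuming the 'minioPlayCI' connection preset and
-- the 'Show' instances used in this repository's examples: 'runMinio'
-- interprets a 'Minio' action against a 'ConnectInfo' and returns either an
-- error or the result.
--
-- > main :: IO ()
-- > main = do
-- >   res <- runMinio minioPlayCI listBuckets
-- >   either print (mapM_ print) res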
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -19,12 +19,14 @@ module Network.Minio.API
|
||||
S3ReqInfo (..),
|
||||
runMinio,
|
||||
executeRequest,
|
||||
buildRequest,
|
||||
mkStreamRequest,
|
||||
getLocation,
|
||||
isValidBucketName,
|
||||
checkBucketNameValidity,
|
||||
isValidObjectName,
|
||||
checkObjectNameValidity,
|
||||
requestSTSCredential,
|
||||
)
|
||||
where
|
||||
|
||||
@ -40,11 +42,15 @@ import qualified Data.HashMap.Strict as H
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Time.Clock as Time
|
||||
import Lib.Prelude
|
||||
import Network.HTTP.Client (defaultManagerSettings)
|
||||
import qualified Network.HTTP.Client as NClient
|
||||
import Network.HTTP.Conduit (Response)
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
import Network.HTTP.Types (simpleQueryToQuery)
|
||||
import qualified Network.HTTP.Types as HT
|
||||
import Network.HTTP.Types.Header (hHost)
|
||||
import Network.Minio.APICommon
|
||||
import Network.Minio.Credentials
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Errors
|
||||
import Network.Minio.Sign.V4
|
||||
@ -78,6 +84,7 @@ discoverRegion ri = runMaybeT $ do
|
||||
return
|
||||
regionMay
|
||||
|
||||
-- | Returns the region to be used for the request.
|
||||
getRegion :: S3ReqInfo -> Minio (Maybe Region)
|
||||
getRegion ri = do
|
||||
ci <- asks mcConnInfo
|
||||
@ -85,10 +92,10 @@ getRegion ri = do
|
||||
-- getService/makeBucket/getLocation -- don't need location
|
||||
if
|
||||
| not $ riNeedsLocation ri ->
|
||||
return $ Just $ connectRegion ci
|
||||
return $ Just $ connectRegion ci
|
||||
-- if autodiscovery of location is disabled by user
|
||||
| not $ connectAutoDiscoverRegion ci ->
|
||||
return $ Just $ connectRegion ci
|
||||
return $ Just $ connectRegion ci
|
||||
-- discover the region for the request
|
||||
| otherwise -> discoverRegion ri
|
||||
|
||||
@ -104,6 +111,56 @@ getRegionHost r = do
|
||||
(H.lookup r awsRegionMap)
|
||||
else return $ connectHost ci
|
||||
|
||||
-- | Computes the appropriate host, path and region for the request.
|
||||
--
|
||||
-- For AWS, always use virtual bucket style, unless bucket has periods. For
|
||||
-- MinIO and other non-AWS, default to path style.
|
||||
getHostPathRegion :: S3ReqInfo -> Minio (Text, ByteString, Maybe Region)
|
||||
getHostPathRegion ri = do
|
||||
ci <- asks mcConnInfo
|
||||
regionMay <- getRegion ri
|
||||
case riBucket ri of
|
||||
Nothing ->
|
||||
-- Implies a ListBuckets request.
|
||||
return (connectHost ci, "/", regionMay)
|
||||
Just bucket -> do
|
||||
regionHost <- case regionMay of
|
||||
Nothing -> return $ connectHost ci
|
||||
Just "" -> return $ connectHost ci
|
||||
Just r -> getRegionHost r
|
||||
let pathStyle =
|
||||
( regionHost,
|
||||
getS3Path (riBucket ri) (riObject ri),
|
||||
regionMay
|
||||
)
|
||||
virtualStyle =
|
||||
( bucket <> "." <> regionHost,
|
||||
encodeUtf8 $ "/" <> fromMaybe "" (riObject ri),
|
||||
regionMay
|
||||
)
|
||||
( if isAWSConnectInfo ci
|
||||
then
|
||||
return $
|
||||
if bucketHasPeriods bucket
|
||||
then pathStyle
|
||||
else virtualStyle
|
||||
else return pathStyle
|
||||
)
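-- A brief illustration of the host/path selection above (hostnames are
-- illustrative): with an AWS endpoint such as "s3.amazonaws.com", bucket
-- "mybucket" and object "a/b.txt", the virtual-hosted style yields host
-- "mybucket.s3.amazonaws.com" and path "/a/b.txt"; a bucket containing
-- periods (e.g. "my.bucket"), or any non-AWS endpoint, falls back to path
-- style, keeping the host unchanged and building the path with 'getS3Path'
-- (i.e. "/my.bucket/a/b.txt").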
|
||||
|
||||
-- | requestSTSCredential requests temporary credentials using the Security Token
|
||||
-- Service API. The returned credential will include a populated 'SessionToken'
|
||||
-- and an 'ExpiryTime'.
|
||||
requestSTSCredential :: (STSCredentialProvider p) => p -> IO (CredentialValue, ExpiryTime)
|
||||
requestSTSCredential p = do
|
||||
endpoint <- maybe (throwIO $ MErrValidation MErrVSTSEndpointNotFound) return $ getSTSEndpoint p
|
||||
let endPt = NC.parseRequest_ $ toString endpoint
|
||||
settings
|
||||
| NC.secure endPt = NC.tlsManagerSettings
|
||||
| otherwise = defaultManagerSettings
|
||||
|
||||
mgr <- NC.newManager settings
|
||||
liftIO $ retrieveSTSCredentials p ("", 0, False) mgr
|
||||
|
||||
buildRequest :: S3ReqInfo -> Minio NC.Request
|
||||
buildRequest ri = do
|
||||
maybe (return ()) checkBucketNameValidity $ riBucket ri
|
||||
@ -111,17 +168,15 @@ buildRequest ri = do
|
||||
|
||||
ci <- asks mcConnInfo
|
||||
|
||||
regionMay <- getRegion ri
|
||||
(host, path, regionMay) <- getHostPathRegion ri
|
||||
|
||||
regionHost <- maybe (return $ connectHost ci) getRegionHost regionMay
|
||||
|
||||
let ri' =
|
||||
let ci' = ci {connectHost = host}
|
||||
hostHeader = (hHost, getHostAddr ci')
|
||||
ri' =
|
||||
ri
|
||||
{ riHeaders = hostHeader : riHeaders ri,
|
||||
riRegion = regionMay
|
||||
}
|
||||
ci' = ci {connectHost = regionHost}
|
||||
hostHeader = (hHost, getHostAddr ci')
|
||||
-- Does not contain body and auth info.
|
||||
baseRequest =
|
||||
NC.defaultRequest
|
||||
@ -129,24 +184,31 @@ buildRequest ri = do
|
||||
NC.secure = connectIsSecure ci',
|
||||
NC.host = encodeUtf8 $ connectHost ci',
|
||||
NC.port = connectPort ci',
|
||||
NC.path = getS3Path (riBucket ri') (riObject ri'),
|
||||
NC.path = path,
|
||||
NC.requestHeaders = riHeaders ri',
|
||||
NC.queryString = HT.renderQuery False $ riQueryParams ri'
|
||||
}
|
||||
|
||||
timeStamp <- liftIO Time.getCurrentTime
|
||||
|
||||
mgr <- asks mcConnManager
|
||||
cv <- liftIO $ getCredential (connectCreds ci') (getEndpoint ci') mgr
|
||||
|
||||
let sp =
|
||||
SignParams
|
||||
(connectAccessKey ci')
|
||||
(connectSecretKey ci')
|
||||
(coerce $ cvAccessKey cv)
|
||||
(coerce $ cvSecretKey cv)
|
||||
(coerce $ cvSessionToken cv)
|
||||
ServiceS3
|
||||
timeStamp
|
||||
(riRegion ri')
|
||||
Nothing
|
||||
(riPresignExpirySecs ri')
|
||||
Nothing
|
||||
|
||||
-- Cases to handle:
|
||||
--
|
||||
-- 0. Handle presign URL case.
|
||||
--
|
||||
-- 1. Connection is secure: use unsigned payload
|
||||
--
|
||||
-- 2. Insecure connection, streaming signature is enabled via use of
|
||||
@ -155,40 +217,51 @@ buildRequest ri = do
|
||||
-- 3. Insecure connection, non-conduit payload: compute payload
|
||||
-- sha256hash, buffer request in memory and perform request.
|
||||
|
||||
-- case 2 from above.
|
||||
if
|
||||
| isStreamingPayload (riPayload ri')
|
||||
&& (not $ connectIsSecure ci') -> do
|
||||
(pLen, pSrc) <- case riPayload ri of
|
||||
PayloadC l src -> return (l, src)
|
||||
_ -> throwIO MErrVUnexpectedPayload
|
||||
let reqFn = signV4Stream pLen sp baseRequest
|
||||
return $ reqFn pSrc
|
||||
| otherwise -> do
|
||||
-- case 1 described above.
|
||||
sp' <-
|
||||
if
|
||||
| connectIsSecure ci' -> return sp
|
||||
-- case 3 described above.
|
||||
| otherwise -> do
|
||||
pHash <- getPayloadSHA256Hash $ riPayload ri'
|
||||
return $ sp {spPayloadHash = Just pHash}
|
||||
| isJust (riPresignExpirySecs ri') ->
|
||||
-- case 0 from above.
|
||||
do
|
||||
let signPairs = signV4QueryParams sp baseRequest
|
||||
qpToAdd = simpleQueryToQuery signPairs
|
||||
existingQueryParams = HT.parseQuery (NC.queryString baseRequest)
|
||||
updatedQueryParams = existingQueryParams ++ qpToAdd
|
||||
return $ NClient.setQueryString updatedQueryParams baseRequest
|
||||
| isStreamingPayload (riPayload ri') && not (connectIsSecure ci') ->
|
||||
-- case 2 from above.
|
||||
do
|
||||
(pLen, pSrc) <- case riPayload ri of
|
||||
PayloadC l src -> return (l, src)
|
||||
_ -> throwIO MErrVUnexpectedPayload
|
||||
let reqFn = signV4Stream pLen sp baseRequest
|
||||
return $ reqFn pSrc
|
||||
| otherwise ->
|
||||
do
|
||||
sp' <-
|
||||
( if connectIsSecure ci'
|
||||
then -- case 1 described above.
|
||||
return sp
|
||||
else
|
||||
( -- case 3 described above.
|
||||
do
|
||||
pHash <- getPayloadSHA256Hash $ riPayload ri'
|
||||
return $ sp {spPayloadHash = Just pHash}
|
||||
)
|
||||
)
|
||||
|
||||
let signHeaders = signV4 sp' baseRequest
|
||||
return $
|
||||
baseRequest
|
||||
{ NC.requestHeaders =
|
||||
NC.requestHeaders baseRequest
|
||||
++ mkHeaderFromPairs signHeaders,
|
||||
NC.requestBody = getRequestBody (riPayload ri')
|
||||
}
|
||||
let signHeaders = signV4 sp' baseRequest
|
||||
return $
|
||||
baseRequest
|
||||
{ NC.requestHeaders =
|
||||
NC.requestHeaders baseRequest ++ signHeaders,
|
||||
NC.requestBody = getRequestBody (riPayload ri')
|
||||
}
|
||||
|
||||
retryAPIRequest :: Minio a -> Minio a
|
||||
retryAPIRequest apiCall = do
|
||||
resE <-
|
||||
retrying retryPolicy (const shouldRetry)
|
||||
$ const
|
||||
$ try apiCall
|
||||
retrying retryPolicy (const shouldRetry) $
|
||||
const $
|
||||
try apiCall
|
||||
either throwIO return resE
|
||||
where
|
||||
-- Retry using the full-jitter backoff method for up to 10 mins
|
||||
@ -235,8 +308,8 @@ isValidBucketName bucket =
|
||||
not
|
||||
( or
|
||||
[ len < 3 || len > 63,
|
||||
or (map labelCheck labels),
|
||||
or (map labelCharsCheck labels),
|
||||
any labelCheck labels,
|
||||
any labelCharsCheck labels,
|
||||
isIPCheck
|
||||
]
|
||||
)
|
||||
@ -264,18 +337,18 @@ isValidBucketName bucket =
|
||||
isIPCheck = and labelAsNums && length labelAsNums == 4
|
||||
|
||||
-- Throws exception iff bucket name is invalid according to AWS rules.
|
||||
checkBucketNameValidity :: MonadIO m => Bucket -> m ()
|
||||
checkBucketNameValidity :: (MonadIO m) => Bucket -> m ()
|
||||
checkBucketNameValidity bucket =
|
||||
when (not $ isValidBucketName bucket)
|
||||
$ throwIO
|
||||
$ MErrVInvalidBucketName bucket
|
||||
unless (isValidBucketName bucket) $
|
||||
throwIO $
|
||||
MErrVInvalidBucketName bucket
|
||||
|
||||
isValidObjectName :: Object -> Bool
|
||||
isValidObjectName object =
|
||||
T.length object > 0 && B.length (encodeUtf8 object) <= 1024
|
||||
|
||||
checkObjectNameValidity :: MonadIO m => Object -> m ()
|
||||
checkObjectNameValidity :: (MonadIO m) => Object -> m ()
|
||||
checkObjectNameValidity object =
|
||||
when (not $ isValidObjectName object)
|
||||
$ throwIO
|
||||
$ MErrVInvalidObjectName object
|
||||
unless (isValidObjectName object) $
|
||||
throwIO $
|
||||
MErrVInvalidObjectName object
|
||||
|
||||
@ -20,6 +20,7 @@ import qualified Conduit as C
|
||||
import qualified Data.ByteString as BS
|
||||
import qualified Data.ByteString.Lazy as LB
|
||||
import Data.Conduit.Binary (sourceHandleRange)
|
||||
import qualified Data.Text as T
|
||||
import Lib.Prelude
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
import qualified Network.HTTP.Types as HT
|
||||
@ -45,7 +46,7 @@ getPayloadSHA256Hash (PayloadC _ _) = throwIO MErrVUnexpectedPayload
|
||||
getRequestBody :: Payload -> NC.RequestBody
|
||||
getRequestBody (PayloadBS bs) = NC.RequestBodyBS bs
|
||||
getRequestBody (PayloadH h off size) =
|
||||
NC.requestBodySource (fromIntegral size) $
|
||||
NC.requestBodySource size $
|
||||
sourceHandleRange
|
||||
h
|
||||
(return . fromIntegral $ off)
|
||||
@ -70,3 +71,10 @@ mkStreamingPayload payload =
|
||||
isStreamingPayload :: Payload -> Bool
|
||||
isStreamingPayload (PayloadC _ _) = True
|
||||
isStreamingPayload _ = False
|
||||
|
||||
-- | Checks if the connect info is for Amazon S3.
|
||||
isAWSConnectInfo :: ConnectInfo -> Bool
|
||||
isAWSConnectInfo ci = ".amazonaws.com" `T.isSuffixOf` connectHost ci
|
||||
|
||||
bucketHasPeriods :: Bucket -> Bool
|
||||
bucketHasPeriods b = isJust $ T.find (== '.') b
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2018 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2018-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -16,7 +16,8 @@
|
||||
|
||||
module Network.Minio.AdminAPI
|
||||
( -- * MinIO Admin API
|
||||
--------------------
|
||||
|
||||
--------------------
|
||||
|
||||
-- | Provides MinIO admin API and related types. It is in
|
||||
-- experimental state.
|
||||
@ -52,10 +53,7 @@ module Network.Minio.AdminAPI
|
||||
where
|
||||
|
||||
import Data.Aeson
|
||||
( (.:),
|
||||
(.:?),
|
||||
(.=),
|
||||
FromJSON,
|
||||
( FromJSON,
|
||||
ToJSON,
|
||||
Value (Object),
|
||||
eitherDecode,
|
||||
@ -66,6 +64,9 @@ import Data.Aeson
|
||||
toJSON,
|
||||
withObject,
|
||||
withText,
|
||||
(.:),
|
||||
(.:?),
|
||||
(.=),
|
||||
)
|
||||
import qualified Data.Aeson as A
|
||||
import Data.Aeson.Types (typeMismatch)
|
||||
@ -79,6 +80,7 @@ import qualified Network.HTTP.Conduit as NC
|
||||
import qualified Network.HTTP.Types as HT
|
||||
import Network.HTTP.Types.Header (hHost)
|
||||
import Network.Minio.APICommon
|
||||
import Network.Minio.Credentials
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Errors
|
||||
import Network.Minio.Sign.V4
|
||||
@ -89,20 +91,23 @@ data DriveInfo = DriveInfo
|
||||
diEndpoint :: Text,
|
||||
diState :: Text
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON DriveInfo where
|
||||
parseJSON = withObject "DriveInfo" $ \v ->
|
||||
DriveInfo
|
||||
<$> v .: "uuid"
|
||||
<*> v .: "endpoint"
|
||||
<*> v .: "state"
|
||||
<$> v
|
||||
.: "uuid"
|
||||
<*> v
|
||||
.: "endpoint"
|
||||
<*> v
|
||||
.: "state"
|
||||
|
||||
data StorageClass = StorageClass
|
||||
{ scParity :: Int,
|
||||
scData :: Int
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
data ErasureInfo = ErasureInfo
|
||||
{ eiOnlineDisks :: Int,
|
||||
@ -111,7 +116,7 @@ data ErasureInfo = ErasureInfo
|
||||
eiReducedRedundancy :: StorageClass,
|
||||
eiSets :: [[DriveInfo]]
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON ErasureInfo where
|
||||
parseJSON = withObject "ErasureInfo" $ \v -> do
|
||||
@ -119,19 +124,23 @@ instance FromJSON ErasureInfo where
|
||||
offlineDisks <- v .: "OfflineDisks"
|
||||
stdClass <-
|
||||
StorageClass
|
||||
<$> v .: "StandardSCData"
|
||||
<*> v .: "StandardSCParity"
|
||||
<$> v
|
||||
.: "StandardSCData"
|
||||
<*> v
|
||||
.: "StandardSCParity"
|
||||
rrClass <-
|
||||
StorageClass
|
||||
<$> v .: "RRSCData"
|
||||
<*> v .: "RRSCParity"
|
||||
<$> v
|
||||
.: "RRSCData"
|
||||
<*> v
|
||||
.: "RRSCParity"
|
||||
sets <- v .: "Sets"
|
||||
return $ ErasureInfo onlineDisks offlineDisks stdClass rrClass sets
|
||||
|
||||
data Backend
|
||||
= BackendFS
|
||||
| BackendErasure ErasureInfo
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON Backend where
|
||||
parseJSON = withObject "Backend" $ \v -> do
|
||||
@ -145,13 +154,15 @@ data ConnStats = ConnStats
|
||||
{ csTransferred :: Int64,
|
||||
csReceived :: Int64
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON ConnStats where
|
||||
parseJSON = withObject "ConnStats" $ \v ->
|
||||
ConnStats
|
||||
<$> v .: "transferred"
|
||||
<*> v .: "received"
|
||||
<$> v
|
||||
.: "transferred"
|
||||
<*> v
|
||||
.: "received"
|
||||
|
||||
data ServerProps = ServerProps
|
||||
{ spUptime :: NominalDiffTime,
|
||||
@ -160,7 +171,7 @@ data ServerProps = ServerProps
|
||||
spRegion :: Text,
|
||||
spSqsArns :: [Text]
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON ServerProps where
|
||||
parseJSON = withObject "SIServer" $ \v -> do
|
||||
@ -176,25 +187,29 @@ data StorageInfo = StorageInfo
|
||||
{ siUsed :: Int64,
|
||||
siBackend :: Backend
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON StorageInfo where
|
||||
parseJSON = withObject "StorageInfo" $ \v ->
|
||||
StorageInfo
|
||||
<$> v .: "Used"
|
||||
<*> v .: "Backend"
|
||||
<$> v
|
||||
.: "Used"
|
||||
<*> v
|
||||
.: "Backend"
|
||||
|
||||
data CountNAvgTime = CountNAvgTime
|
||||
{ caCount :: Int64,
|
||||
caAvgDuration :: Text
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON CountNAvgTime where
|
||||
parseJSON = withObject "CountNAvgTime" $ \v ->
|
||||
CountNAvgTime
|
||||
<$> v .: "count"
|
||||
<*> v .: "avgDuration"
|
||||
<$> v
|
||||
.: "count"
|
||||
<*> v
|
||||
.: "avgDuration"
|
||||
|
||||
data HttpStats = HttpStats
|
||||
{ hsTotalHeads :: CountNAvgTime,
|
||||
@ -208,21 +223,31 @@ data HttpStats = HttpStats
|
||||
hsTotalDeletes :: CountNAvgTime,
|
||||
hsSuccessDeletes :: CountNAvgTime
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON HttpStats where
|
||||
parseJSON = withObject "HttpStats" $ \v ->
|
||||
HttpStats
|
||||
<$> v .: "totalHEADs"
|
||||
<*> v .: "successHEADs"
|
||||
<*> v .: "totalGETs"
|
||||
<*> v .: "successGETs"
|
||||
<*> v .: "totalPUTs"
|
||||
<*> v .: "successPUTs"
|
||||
<*> v .: "totalPOSTs"
|
||||
<*> v .: "successPOSTs"
|
||||
<*> v .: "totalDELETEs"
|
||||
<*> v .: "successDELETEs"
|
||||
<$> v
|
||||
.: "totalHEADs"
|
||||
<*> v
|
||||
.: "successHEADs"
|
||||
<*> v
|
||||
.: "totalGETs"
|
||||
<*> v
|
||||
.: "successGETs"
|
||||
<*> v
|
||||
.: "totalPUTs"
|
||||
<*> v
|
||||
.: "successPUTs"
|
||||
<*> v
|
||||
.: "totalPOSTs"
|
||||
<*> v
|
||||
.: "successPOSTs"
|
||||
<*> v
|
||||
.: "totalDELETEs"
|
||||
<*> v
|
||||
.: "successDELETEs"
|
||||
|
||||
data SIData = SIData
|
||||
{ sdStorage :: StorageInfo,
|
||||
@ -230,47 +255,56 @@ data SIData = SIData
|
||||
sdHttpStats :: HttpStats,
|
||||
sdProps :: ServerProps
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON SIData where
|
||||
parseJSON = withObject "SIData" $ \v ->
|
||||
SIData
|
||||
<$> v .: "storage"
|
||||
<*> v .: "network"
|
||||
<*> v .: "http"
|
||||
<*> v .: "server"
|
||||
<$> v
|
||||
.: "storage"
|
||||
<*> v
|
||||
.: "network"
|
||||
<*> v
|
||||
.: "http"
|
||||
<*> v
|
||||
.: "server"
|
||||
|
||||
data ServerInfo = ServerInfo
|
||||
{ siError :: Text,
|
||||
siAddr :: Text,
|
||||
siData :: SIData
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON ServerInfo where
|
||||
parseJSON = withObject "ServerInfo" $ \v ->
|
||||
ServerInfo
|
||||
<$> v .: "error"
|
||||
<*> v .: "addr"
|
||||
<*> v .: "data"
|
||||
<$> v
|
||||
.: "error"
|
||||
<*> v
|
||||
.: "addr"
|
||||
<*> v
|
||||
.: "data"
|
||||
|
||||
data ServerVersion = ServerVersion
|
||||
{ svVersion :: Text,
|
||||
svCommitId :: Text
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON ServerVersion where
|
||||
parseJSON = withObject "ServerVersion" $ \v ->
|
||||
ServerVersion
|
||||
<$> v .: "version"
|
||||
<*> v .: "commitID"
|
||||
<$> v
|
||||
.: "version"
|
||||
<*> v
|
||||
.: "commitID"
|
||||
|
||||
data ServiceStatus = ServiceStatus
|
||||
{ ssVersion :: ServerVersion,
|
||||
ssUptime :: NominalDiffTime
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON ServiceStatus where
|
||||
parseJSON = withObject "ServiceStatus" $ \v -> do
|
||||
@ -282,7 +316,7 @@ instance FromJSON ServiceStatus where
|
||||
data ServiceAction
|
||||
= ServiceActionRestart
|
||||
| ServiceActionStop
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance ToJSON ServiceAction where
|
||||
toJSON a = object ["action" .= serviceActionToText a]
|
||||
@ -300,20 +334,23 @@ data HealStartResp = HealStartResp
|
||||
hsrClientAddr :: Text,
|
||||
hsrStartTime :: UTCTime
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON HealStartResp where
|
||||
parseJSON = withObject "HealStartResp" $ \v ->
|
||||
HealStartResp
|
||||
<$> v .: "clientToken"
|
||||
<*> v .: "clientAddress"
|
||||
<*> v .: "startTime"
|
||||
<$> v
|
||||
.: "clientToken"
|
||||
<*> v
|
||||
.: "clientAddress"
|
||||
<*> v
|
||||
.: "startTime"
|
||||
|
||||
data HealOpts = HealOpts
|
||||
{ hoRecursive :: Bool,
|
||||
hoDryRun :: Bool
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance ToJSON HealOpts where
|
||||
toJSON (HealOpts r d) =
|
||||
@ -324,15 +361,17 @@ instance ToJSON HealOpts where
|
||||
instance FromJSON HealOpts where
|
||||
parseJSON = withObject "HealOpts" $ \v ->
|
||||
HealOpts
|
||||
<$> v .: "recursive"
|
||||
<*> v .: "dryRun"
|
||||
<$> v
|
||||
.: "recursive"
|
||||
<*> v
|
||||
.: "dryRun"
|
||||
|
||||
data HealItemType
|
||||
= HealItemMetadata
|
||||
| HealItemBucket
|
||||
| HealItemBucketMetadata
|
||||
| HealItemObject
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON HealItemType where
|
||||
parseJSON = withText "HealItemType" $ \v -> case v of
|
||||
@ -347,26 +386,31 @@ data NodeSummary = NodeSummary
|
||||
nsErrSet :: Bool,
|
||||
nsErrMessage :: Text
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON NodeSummary where
|
||||
parseJSON = withObject "NodeSummary" $ \v ->
|
||||
NodeSummary
|
||||
<$> v .: "name"
|
||||
<*> v .: "errSet"
|
||||
<*> v .: "errMsg"
|
||||
<$> v
|
||||
.: "name"
|
||||
<*> v
|
||||
.: "errSet"
|
||||
<*> v
|
||||
.: "errMsg"
|
||||
|
||||
data SetConfigResult = SetConfigResult
|
||||
{ scrStatus :: Bool,
|
||||
scrNodeSummary :: [NodeSummary]
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON SetConfigResult where
|
||||
parseJSON = withObject "SetConfigResult" $ \v ->
|
||||
SetConfigResult
|
||||
<$> v .: "status"
|
||||
<*> v .: "nodeResults"
|
||||
<$> v
|
||||
.: "status"
|
||||
<*> v
|
||||
.: "nodeResults"
|
||||
|
||||
data HealResultItem = HealResultItem
|
||||
{ hriResultIdx :: Int,
|
||||
@ -382,21 +426,31 @@ data HealResultItem = HealResultItem
|
||||
hriBefore :: [DriveInfo],
|
||||
hriAfter :: [DriveInfo]
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON HealResultItem where
|
||||
parseJSON = withObject "HealResultItem" $ \v ->
|
||||
HealResultItem
|
||||
<$> v .: "resultId"
|
||||
<*> v .: "type"
|
||||
<*> v .: "bucket"
|
||||
<*> v .: "object"
|
||||
<*> v .: "detail"
|
||||
<*> v .:? "parityBlocks"
|
||||
<*> v .:? "dataBlocks"
|
||||
<*> v .: "diskCount"
|
||||
<*> v .: "setCount"
|
||||
<*> v .: "objectSize"
|
||||
<$> v
|
||||
.: "resultId"
|
||||
<*> v
|
||||
.: "type"
|
||||
<*> v
|
||||
.: "bucket"
|
||||
<*> v
|
||||
.: "object"
|
||||
<*> v
|
||||
.: "detail"
|
||||
<*> v
|
||||
.:? "parityBlocks"
|
||||
<*> v
|
||||
.:? "dataBlocks"
|
||||
<*> v
|
||||
.: "diskCount"
|
||||
<*> v
|
||||
.: "setCount"
|
||||
<*> v
|
||||
.: "objectSize"
|
||||
<*> ( do
|
||||
before <- v .: "before"
|
||||
before .: "drives"
|
||||
@ -414,26 +468,34 @@ data HealStatus = HealStatus
|
||||
hsFailureDetail :: Maybe Text,
|
||||
hsItems :: Maybe [HealResultItem]
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance FromJSON HealStatus where
|
||||
parseJSON = withObject "HealStatus" $ \v ->
|
||||
HealStatus
|
||||
<$> v .: "Summary"
|
||||
<*> v .: "StartTime"
|
||||
<*> v .: "Settings"
|
||||
<*> v .: "NumDisks"
|
||||
<*> v .:? "Detail"
|
||||
<*> v .: "Items"
|
||||
<$> v
|
||||
.: "Summary"
|
||||
<*> v
|
||||
.: "StartTime"
|
||||
<*> v
|
||||
.: "Settings"
|
||||
<*> v
|
||||
.: "NumDisks"
|
||||
<*> v
|
||||
.:? "Detail"
|
||||
<*> v
|
||||
.: "Items"
|
||||
|
||||
healPath :: Maybe Bucket -> Maybe Text -> ByteString
|
||||
healPath bucket prefix = do
|
||||
if (isJust bucket)
|
||||
if isJust bucket
|
||||
then
|
||||
encodeUtf8 $
|
||||
"v1/heal/" <> fromMaybe "" bucket <> "/"
|
||||
"v1/heal/"
|
||||
<> fromMaybe "" bucket
|
||||
<> "/"
|
||||
<> fromMaybe "" prefix
|
||||
else encodeUtf8 $ "v1/heal/"
|
||||
else encodeUtf8 ("v1/heal/" :: Text)
|
||||
|
||||
-- | Get server version and uptime.
|
||||
serviceStatus :: Minio ServiceStatus
|
||||
@ -596,15 +658,17 @@ buildAdminRequest :: AdminReqInfo -> Minio NC.Request
|
||||
buildAdminRequest areq = do
|
||||
ci <- asks mcConnInfo
|
||||
sha256Hash <-
|
||||
if
|
||||
| connectIsSecure ci ->
|
||||
-- if secure connection
|
||||
return "UNSIGNED-PAYLOAD"
|
||||
-- otherwise compute sha256
|
||||
| otherwise -> getPayloadSHA256Hash (ariPayload areq)
|
||||
if connectIsSecure ci
|
||||
then -- if secure connection
|
||||
return "UNSIGNED-PAYLOAD"
|
||||
else -- otherwise compute sha256
|
||||
getPayloadSHA256Hash (ariPayload areq)
|
||||
|
||||
timeStamp <- liftIO getCurrentTime
|
||||
|
||||
mgr <- asks mcConnManager
|
||||
cv <- liftIO $ getCredential (connectCreds ci) (getEndpoint ci) mgr
|
||||
|
||||
let hostHeader = (hHost, getHostAddr ci)
|
||||
newAreq =
|
||||
areq
|
||||
@ -617,8 +681,10 @@ buildAdminRequest areq = do
|
||||
signReq = toRequest ci newAreq
|
||||
sp =
|
||||
SignParams
|
||||
(connectAccessKey ci)
|
||||
(connectSecretKey ci)
|
||||
(coerce $ cvAccessKey cv)
|
||||
(coerce $ cvSecretKey cv)
|
||||
(coerce $ cvSessionToken cv)
|
||||
ServiceS3
|
||||
timeStamp
|
||||
Nothing
|
||||
Nothing
|
||||
@ -628,7 +694,7 @@ buildAdminRequest areq = do
|
||||
-- Update signReq with Authorization header containing v4 signature
|
||||
return
|
||||
signReq
|
||||
{ NC.requestHeaders = ariHeaders newAreq ++ mkHeaderFromPairs signHeaders
|
||||
{ NC.requestHeaders = ariHeaders newAreq ++ signHeaders
|
||||
}
|
||||
where
|
||||
toRequest :: ConnectInfo -> AdminReqInfo -> NC.Request
|
||||
|
||||
@ -45,11 +45,10 @@ copyObjectInternal b' o srcInfo = do
|
||||
|
||||
when
|
||||
( isJust rangeMay
|
||||
&& or
|
||||
[ startOffset < 0,
|
||||
endOffset < startOffset,
|
||||
endOffset >= fromIntegral srcSize
|
||||
]
|
||||
&& ( (startOffset < 0)
|
||||
|| (endOffset < startOffset)
|
||||
|| (endOffset >= srcSize)
|
||||
)
|
||||
)
|
||||
$ throwIO
|
||||
$ MErrVInvalidSrcObjByteRange range
|
||||
@ -69,9 +68,8 @@ copyObjectInternal b' o srcInfo = do
|
||||
-- used is minPartSize.
|
||||
selectCopyRanges :: (Int64, Int64) -> [(PartNumber, (Int64, Int64))]
|
||||
selectCopyRanges (st, end) =
|
||||
zip pns
|
||||
$ map (\(x, y) -> (st + x, st + x + y - 1))
|
||||
$ zip startOffsets partSizes
|
||||
zip pns $
|
||||
zipWith (\x y -> (st + x, st + x + y - 1)) startOffsets partSizes
|
||||
where
|
||||
size = end - st + 1
|
||||
(pns, startOffsets, partSizes) = List.unzip3 $ selectPartSizes size
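A worked sketch of the split (assuming the 64 MiB minimum part size that selectPartSizes documents):

-- Copying a 128 MiB range:
-- selectCopyRanges (0, 134217727)
--   ~> [(1, (0, 67108863)), (2, (67108864, 134217727))]
-- i.e. two 64 MiB parts that cover the range end to end.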
|
||||
@ -88,7 +86,7 @@ multiPartCopyObject ::
|
||||
multiPartCopyObject b o cps srcSize = do
|
||||
uid <- newMultipartUpload b o []
|
||||
|
||||
let byteRange = maybe (0, fromIntegral $ srcSize - 1) identity $ srcRange cps
|
||||
let byteRange = maybe (0, srcSize - 1) identity $ srcRange cps
|
||||
partRanges = selectCopyRanges byteRange
|
||||
partSources =
|
||||
map
|
||||
|
||||
77
src/Network/Minio/Credentials.hs
Normal file
@ -0,0 +1,77 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
module Network.Minio.Credentials
|
||||
( CredentialValue (..),
|
||||
credentialValueText,
|
||||
STSCredentialProvider (..),
|
||||
AccessKey (..),
|
||||
SecretKey (..),
|
||||
SessionToken (..),
|
||||
ExpiryTime (..),
|
||||
STSCredentialStore,
|
||||
initSTSCredential,
|
||||
getSTSCredential,
|
||||
Creds (..),
|
||||
getCredential,
|
||||
Endpoint,
|
||||
|
||||
-- * STS Assume Role
|
||||
defaultSTSAssumeRoleOptions,
|
||||
STSAssumeRole (..),
|
||||
STSAssumeRoleOptions (..),
|
||||
)
|
||||
where
|
||||
|
||||
import Data.Time (diffUTCTime, getCurrentTime)
|
||||
import qualified Network.HTTP.Client as NC
|
||||
import Network.Minio.Credentials.AssumeRole
|
||||
import Network.Minio.Credentials.Types
|
||||
import qualified UnliftIO.MVar as M
|
||||
|
||||
data STSCredentialStore = STSCredentialStore
|
||||
{ cachedCredentials :: M.MVar (CredentialValue, ExpiryTime),
|
||||
refreshAction :: Endpoint -> NC.Manager -> IO (CredentialValue, ExpiryTime)
|
||||
}
|
||||
|
||||
initSTSCredential :: (STSCredentialProvider p) => p -> IO STSCredentialStore
|
||||
initSTSCredential p = do
|
||||
let action = retrieveSTSCredentials p
|
||||
-- start with dummy credential, so that refresh happens for first request.
|
||||
now <- getCurrentTime
|
||||
mvar <- M.newMVar (CredentialValue mempty mempty mempty, coerce now)
|
||||
return $
|
||||
STSCredentialStore
|
||||
{ cachedCredentials = mvar,
|
||||
refreshAction = action
|
||||
}
|
||||
|
||||
getSTSCredential :: STSCredentialStore -> Endpoint -> NC.Manager -> IO (CredentialValue, Bool)
|
||||
getSTSCredential store ep mgr = M.modifyMVar (cachedCredentials store) $ \cc@(v, expiry) -> do
|
||||
now <- getCurrentTime
|
||||
if diffUTCTime now (coerce expiry) > 0
|
||||
then do
|
||||
res <- refreshAction store ep mgr
|
||||
return (res, (fst res, True))
|
||||
else return (cc, (v, False))
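The store refreshes lazily inside modifyMVar: calls made before expiry reuse the cached value, and the first call after expiry performs exactly one refresh. A usage sketch, where someProvider and mgr are placeholders for an STSCredentialProvider instance and an NC.Manager:

-- store <- initSTSCredential someProvider
-- (creds, refreshed) <- getSTSCredential store ("play.min.io", 443, True) mgr
-- refreshed is True on this first call, because the store is seeded with an
-- already-expired dummy credential.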
|
||||
|
||||
data Creds
|
||||
= CredsStatic CredentialValue
|
||||
| CredsSTS STSCredentialStore
|
||||
|
||||
getCredential :: Creds -> Endpoint -> NC.Manager -> IO CredentialValue
|
||||
getCredential (CredsStatic v) _ _ = return v
|
||||
getCredential (CredsSTS s) ep mgr = fst <$> getSTSCredential s ep mgr
|
||||
266
src/Network/Minio/Credentials/AssumeRole.hs
Normal file
@ -0,0 +1,266 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
module Network.Minio.Credentials.AssumeRole where
|
||||
|
||||
import qualified Data.ByteArray as BA
|
||||
import qualified Data.ByteString.Lazy as LB
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Time as Time
|
||||
import Data.Time.Units (Second)
|
||||
import Lib.Prelude (UTCTime, throwIO)
|
||||
import Network.HTTP.Client (RequestBody (RequestBodyBS))
|
||||
import qualified Network.HTTP.Client as NC
|
||||
import Network.HTTP.Types (hContentType, methodPost, renderSimpleQuery)
|
||||
import Network.HTTP.Types.Header (hHost)
|
||||
import Network.Minio.Credentials.Types
|
||||
import Network.Minio.Data.Crypto (hashSHA256)
|
||||
import Network.Minio.Errors (MErrV (..))
|
||||
import Network.Minio.Sign.V4
|
||||
import Network.Minio.Utils (getHostHeader, httpLbs)
|
||||
import Network.Minio.XmlCommon
|
||||
import Text.XML.Cursor hiding (bool)
|
||||
|
||||
stsVersion :: ByteString
|
||||
stsVersion = "2011-06-15"
|
||||
|
||||
defaultDurationSeconds :: Second
|
||||
defaultDurationSeconds = 3600
|
||||
|
||||
-- | Assume Role API argument.
|
||||
--
|
||||
-- @since 1.7.0
|
||||
data STSAssumeRole = STSAssumeRole
|
||||
{ -- | Credentials to use in the AssumeRole STS API.
|
||||
sarCredentials :: CredentialValue,
|
||||
-- | Optional settings.
|
||||
sarOptions :: STSAssumeRoleOptions
|
||||
}
|
||||
|
||||
-- | Options for STS Assume Role API.
|
||||
data STSAssumeRoleOptions = STSAssumeRoleOptions
|
||||
{ -- | STS endpoint to which the request will be made. For MinIO, this is the
|
||||
-- same as the server endpoint. For AWS, this has to be the Security Token
|
||||
-- Service endpoint. If using with 'setSTSCredential', this option can be
|
||||
-- left as 'Nothing' and the endpoint in 'ConnectInfo' will be used.
|
||||
saroEndpoint :: Maybe Text,
|
||||
-- | Desired validity for the generated credentials.
|
||||
saroDurationSeconds :: Maybe Second,
|
||||
-- | IAM policy to apply for the generated credentials.
|
||||
saroPolicyJSON :: Maybe ByteString,
|
||||
-- | Location is usually required for AWS.
|
||||
saroLocation :: Maybe Text,
|
||||
saroRoleARN :: Maybe Text,
|
||||
saroRoleSessionName :: Maybe Text
|
||||
}
|
||||
|
||||
-- | Default STS Assume Role options - all options are Nothing, except for
|
||||
-- duration which is set to 1 hour.
|
||||
defaultSTSAssumeRoleOptions :: STSAssumeRoleOptions
|
||||
defaultSTSAssumeRoleOptions =
|
||||
STSAssumeRoleOptions
|
||||
{ saroEndpoint = Nothing,
|
||||
saroDurationSeconds = Just 3600,
|
||||
saroPolicyJSON = Nothing,
|
||||
saroLocation = Nothing,
|
||||
saroRoleARN = Nothing,
|
||||
saroRoleSessionName = Nothing
|
||||
}
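Putting the pieces together, a usage sketch (assuming OverloadedStrings; the endpoint and keys are placeholders, and setSTSCredential is defined in Network.Minio.Data further down in this change):

  let opts =
        defaultSTSAssumeRoleOptions
          { saroEndpoint = Just "http://localhost:9000"
          }
      provider = STSAssumeRole (CredentialValue "ACCESS-KEY" "SECRET-KEY" Nothing) opts
  ci <- setSTSCredential provider "http://localhost:9000"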
|
||||
|
||||
data AssumeRoleCredentials = AssumeRoleCredentials
|
||||
{ arcCredentials :: CredentialValue,
|
||||
arcExpiration :: UTCTime
|
||||
}
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
data AssumeRoleResult = AssumeRoleResult
|
||||
{ arrSourceIdentity :: Text,
|
||||
arrAssumedRoleArn :: Text,
|
||||
arrAssumedRoleId :: Text,
|
||||
arrRoleCredentials :: AssumeRoleCredentials
|
||||
}
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | parseSTSAssumeRoleResult parses an XML response of the following form:
|
||||
--
|
||||
-- <AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
|
||||
-- <AssumeRoleResult>
|
||||
-- <SourceIdentity>Alice</SourceIdentity>
|
||||
-- <AssumedRoleUser>
|
||||
-- <Arn>arn:aws:sts::123456789012:assumed-role/demo/TestAR</Arn>
|
||||
-- <AssumedRoleId>ARO123EXAMPLE123:TestAR</AssumedRoleId>
|
||||
-- </AssumedRoleUser>
|
||||
-- <Credentials>
|
||||
-- <AccessKeyId>ASIAIOSFODNN7EXAMPLE</AccessKeyId>
|
||||
-- <SecretAccessKey>wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY</SecretAccessKey>
|
||||
-- <SessionToken>
|
||||
-- AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQW
|
||||
-- LWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGd
|
||||
-- QrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU
|
||||
-- 9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz
|
||||
-- +scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==
|
||||
-- </SessionToken>
|
||||
-- <Expiration>2019-11-09T13:34:41Z</Expiration>
|
||||
-- </Credentials>
|
||||
-- <PackedPolicySize>6</PackedPolicySize>
|
||||
-- </AssumeRoleResult>
|
||||
-- <ResponseMetadata>
|
||||
-- <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>
|
||||
-- </ResponseMetadata>
|
||||
-- </AssumeRoleResponse>
|
||||
parseSTSAssumeRoleResult :: (MonadIO m) => ByteString -> Text -> m AssumeRoleResult
|
||||
parseSTSAssumeRoleResult xmldata namespace = do
|
||||
r <- parseRoot $ LB.fromStrict xmldata
|
||||
let s3Elem' = s3Elem namespace
|
||||
sourceIdentity =
|
||||
T.concat $
|
||||
r
|
||||
$/ s3Elem' "AssumeRoleResult"
|
||||
&/ s3Elem' "SourceIdentity"
|
||||
&/ content
|
||||
roleArn =
|
||||
T.concat $
|
||||
r
|
||||
$/ s3Elem' "AssumeRoleResult"
|
||||
&/ s3Elem' "AssumedRoleUser"
|
||||
&/ s3Elem' "Arn"
|
||||
&/ content
|
||||
roleId =
|
||||
T.concat $
|
||||
r
|
||||
$/ s3Elem' "AssumeRoleResult"
|
||||
&/ s3Elem' "AssumedRoleUser"
|
||||
&/ s3Elem' "AssumedRoleId"
|
||||
&/ content
|
||||
|
||||
convSB :: Text -> BA.ScrubbedBytes
|
||||
convSB = BA.convert . (encodeUtf8 :: Text -> ByteString)
|
||||
|
||||
credsInfo = do
|
||||
cr <-
|
||||
maybe (Left $ MErrVXmlParse "No Credentials Element found") Right $
|
||||
listToMaybe $
|
||||
r $/ s3Elem' "AssumeRoleResult" &/ s3Elem' "Credentials"
|
||||
let cur = fromNode $ node cr
|
||||
return
|
||||
( CredentialValue
|
||||
{ cvAccessKey =
|
||||
coerce $
|
||||
T.concat $
|
||||
cur $/ s3Elem' "AccessKeyId" &/ content,
|
||||
cvSecretKey =
|
||||
coerce $
|
||||
convSB $
|
||||
T.concat $
|
||||
cur
|
||||
$/ s3Elem' "SecretAccessKey"
|
||||
&/ content,
|
||||
cvSessionToken =
|
||||
Just $
|
||||
coerce $
|
||||
convSB $
|
||||
T.concat $
|
||||
cur
|
||||
$/ s3Elem' "SessionToken"
|
||||
&/ content
|
||||
},
|
||||
T.concat $ cur $/ s3Elem' "Expiration" &/ content
|
||||
)
|
||||
creds <- either throwIO pure credsInfo
|
||||
expiry <- parseS3XMLTime $ snd creds
|
||||
let roleCredentials =
|
||||
AssumeRoleCredentials
|
||||
{ arcCredentials = fst creds,
|
||||
arcExpiration = expiry
|
||||
}
|
||||
return
|
||||
AssumeRoleResult
|
||||
{ arrSourceIdentity = sourceIdentity,
|
||||
arrAssumedRoleArn = roleArn,
|
||||
arrAssumedRoleId = roleId,
|
||||
arrRoleCredentials = roleCredentials
|
||||
}
|
||||
|
||||
instance STSCredentialProvider STSAssumeRole where
|
||||
getSTSEndpoint = saroEndpoint . sarOptions
|
||||
retrieveSTSCredentials sar (host', port', isSecure') mgr = do
|
||||
-- Assemble STS request
|
||||
let requiredParams =
|
||||
[ ("Action", "AssumeRole"),
|
||||
("Version", stsVersion)
|
||||
]
|
||||
opts = sarOptions sar
|
||||
|
||||
durSecs :: Int =
|
||||
fromIntegral $
|
||||
fromMaybe defaultDurationSeconds $
|
||||
saroDurationSeconds opts
|
||||
otherParams =
|
||||
[ ("RoleArn",) . encodeUtf8 <$> saroRoleARN opts,
|
||||
("RoleSessionName",) . encodeUtf8 <$> saroRoleSessionName opts,
|
||||
Just ("DurationSeconds", show durSecs),
|
||||
("Policy",) <$> saroPolicyJSON opts
|
||||
]
|
||||
parameters = requiredParams ++ catMaybes otherParams
|
||||
(host, port, isSecure) =
|
||||
case getSTSEndpoint sar of
|
||||
Just ep ->
|
||||
let endPt = NC.parseRequest_ $ toString ep
|
||||
in (NC.host endPt, NC.port endPt, NC.secure endPt)
|
||||
Nothing -> (host', port', isSecure')
|
||||
reqBody = renderSimpleQuery False parameters
|
||||
req =
|
||||
NC.defaultRequest
|
||||
{ NC.host = host,
|
||||
NC.port = port,
|
||||
NC.secure = isSecure,
|
||||
NC.method = methodPost,
|
||||
NC.requestHeaders =
|
||||
[ (hHost, getHostHeader (host, port)),
|
||||
(hContentType, "application/x-www-form-urlencoded")
|
||||
],
|
||||
NC.requestBody = RequestBodyBS reqBody
|
||||
}
|
||||
|
||||
-- Sign the STS request.
|
||||
timeStamp <- liftIO Time.getCurrentTime
|
||||
let sp =
|
||||
SignParams
|
||||
{ spAccessKey = coerce $ cvAccessKey $ sarCredentials sar,
|
||||
spSecretKey = coerce $ cvSecretKey $ sarCredentials sar,
|
||||
spSessionToken = coerce $ cvSessionToken $ sarCredentials sar,
|
||||
spService = ServiceSTS,
|
||||
spTimeStamp = timeStamp,
|
||||
spRegion = saroLocation opts,
|
||||
spExpirySecs = Nothing,
|
||||
spPayloadHash = Just $ hashSHA256 reqBody
|
||||
}
|
||||
signHeaders = signV4 sp req
|
||||
signedReq =
|
||||
req
|
||||
{ NC.requestHeaders = NC.requestHeaders req ++ signHeaders
|
||||
}
|
||||
|
||||
-- Make the STS request
|
||||
resp <- httpLbs signedReq mgr
|
||||
result <-
|
||||
parseSTSAssumeRoleResult
|
||||
(toStrict $ NC.responseBody resp)
|
||||
"https://sts.amazonaws.com/doc/2011-06-15/"
|
||||
return
|
||||
( arcCredentials $ arrRoleCredentials result,
|
||||
coerce $ arcExpiration $ arrRoleCredentials result
|
||||
)
|
||||
90
src/Network/Minio/Credentials/Types.hs
Normal file
@ -0,0 +1,90 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
|
||||
{-# LANGUAGE StrictData #-}
|
||||
|
||||
module Network.Minio.Credentials.Types where
|
||||
|
||||
import qualified Data.ByteArray as BA
|
||||
import Lib.Prelude (UTCTime)
|
||||
import qualified Network.HTTP.Client as NC
|
||||
|
||||
-- | Access Key type.
|
||||
newtype AccessKey = AccessKey {unAccessKey :: Text}
|
||||
deriving stock (Show)
|
||||
deriving newtype (Eq, IsString, Semigroup, Monoid)
|
||||
|
||||
-- | Secret Key type - has a show instance that does not print the value.
|
||||
newtype SecretKey = SecretKey {unSecretKey :: BA.ScrubbedBytes}
|
||||
deriving stock (Show)
|
||||
deriving newtype (Eq, IsString, Semigroup, Monoid)
|
||||
|
||||
-- | Session Token type - has a show instance that does not print the value.
|
||||
newtype SessionToken = SessionToken {unSessionToken :: BA.ScrubbedBytes}
|
||||
deriving stock (Show)
|
||||
deriving newtype (Eq, IsString, Semigroup, Monoid)
|
||||
|
||||
-- | Object storage credential data type. It has support for the optional
|
||||
-- [SessionToken](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
|
||||
-- for using temporary credentials requested via STS.
|
||||
--
|
||||
-- The show instance for this type does not print the value of secrets for
|
||||
-- security.
|
||||
--
|
||||
-- @since 1.7.0
|
||||
data CredentialValue = CredentialValue
|
||||
{ cvAccessKey :: AccessKey,
|
||||
cvSecretKey :: SecretKey,
|
||||
cvSessionToken :: Maybe SessionToken
|
||||
}
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
scrubbedToText :: BA.ScrubbedBytes -> Text
|
||||
scrubbedToText =
|
||||
let b2t :: ByteString -> Text
|
||||
b2t = decodeUtf8
|
||||
s2b :: BA.ScrubbedBytes -> ByteString
|
||||
s2b = BA.convert
|
||||
in b2t . s2b
|
||||
|
||||
-- | Convert a 'CredentialValue' to a text tuple. Use this to output the
|
||||
-- credential to files or other programs.
|
||||
credentialValueText :: CredentialValue -> (Text, Text, Maybe Text)
|
||||
credentialValueText cv =
|
||||
( coerce $ cvAccessKey cv,
|
||||
(scrubbedToText . coerce) $ cvSecretKey cv,
|
||||
scrubbedToText . coerce <$> cvSessionToken cv
|
||||
)
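For illustration, a sketch (assuming OverloadedStrings for the key literals):

-- credentialValueText (CredentialValue "minio" "minio123" Nothing)
--   ~> ("minio", "minio123", Nothing)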
|
||||
|
||||
-- | Endpoint represented by host, port and TLS enabled flag.
|
||||
type Endpoint = (ByteString, Int, Bool)
|
||||
|
||||
-- | Typeclass for STS credential providers.
|
||||
--
|
||||
-- @since 1.7.0
|
||||
class STSCredentialProvider p where
|
||||
retrieveSTSCredentials ::
|
||||
p ->
|
||||
-- | STS Endpoint (host, port, isSecure)
|
||||
Endpoint ->
|
||||
NC.Manager ->
|
||||
IO (CredentialValue, ExpiryTime)
|
||||
getSTSEndpoint :: p -> Maybe Text
|
||||
|
||||
-- | 'ExpiryTime' represents a time at which a credential expires.
|
||||
newtype ExpiryTime = ExpiryTime {unExpiryTime :: UTCTime}
|
||||
deriving stock (Show)
|
||||
deriving newtype (Eq)
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -16,26 +16,32 @@
|
||||
{-# LANGUAGE CPP #-}
|
||||
{-# LANGUAGE GADTs #-}
|
||||
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
|
||||
{-# LANGUAGE StrictData #-}
|
||||
{-# LANGUAGE TypeFamilies #-}
|
||||
|
||||
module Network.Minio.Data where
|
||||
|
||||
import qualified Conduit as C
|
||||
import qualified Control.Concurrent.MVar as M
|
||||
import Control.Monad.Trans.Except (throwE)
|
||||
import Control.Monad.Trans.Resource
|
||||
( MonadResource,
|
||||
MonadThrow (..),
|
||||
MonadUnliftIO,
|
||||
ResourceT,
|
||||
runResourceT,
|
||||
)
|
||||
import qualified Data.Aeson as A
|
||||
import qualified Data.ByteArray as BA
|
||||
import qualified Data.ByteString as B
|
||||
import qualified Data.ByteString.Lazy as LB
|
||||
import Data.CaseInsensitive (mk)
|
||||
import qualified Data.HashMap.Strict as H
|
||||
import qualified Data.Ini as Ini
|
||||
import Data.String (IsString (..))
|
||||
import qualified Data.List as List
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Text.Encoding as TE
|
||||
import Data.Time (defaultTimeLocale, formatTime)
|
||||
import GHC.Show (Show (show))
|
||||
import Lib.Prelude
|
||||
import Lib.Prelude (UTCTime, throwIO)
|
||||
import qualified Network.Connection as Conn
|
||||
import Network.HTTP.Client (defaultManagerSettings)
|
||||
import qualified Network.HTTP.Client.TLS as TLS
|
||||
@ -48,13 +54,22 @@ import Network.HTTP.Types
|
||||
hRange,
|
||||
)
|
||||
import qualified Network.HTTP.Types as HT
|
||||
import Network.Minio.Credentials
|
||||
import Network.Minio.Data.Crypto
|
||||
( encodeToBase64,
|
||||
hashMD5ToBase64,
|
||||
)
|
||||
import Network.Minio.Data.Time (UrlExpiry)
|
||||
import Network.Minio.Errors
|
||||
( MErrV (MErrVInvalidEncryptionKeyLength, MErrVMissingCredentials),
|
||||
MinioErr (..),
|
||||
)
|
||||
import Network.Minio.Utils
|
||||
import System.Directory (doesFileExist, getHomeDirectory)
|
||||
import qualified System.Environment as Env
|
||||
import System.FilePath.Posix (combine)
|
||||
import Text.XML
|
||||
import qualified UnliftIO as U
|
||||
import qualified UnliftIO.MVar as UM
|
||||
|
||||
-- | max obj size is 5TiB
|
||||
maxObjectSize :: Int64
|
||||
@ -79,20 +94,36 @@ maxMultipartParts = 10000
|
||||
awsRegionMap :: H.HashMap Text Text
|
||||
awsRegionMap =
|
||||
H.fromList
|
||||
[ ("us-east-1", "s3.amazonaws.com"),
|
||||
("us-east-2", "s3-us-east-2.amazonaws.com"),
|
||||
("us-west-1", "s3-us-west-1.amazonaws.com"),
|
||||
("us-west-2", "s3-us-west-2.amazonaws.com"),
|
||||
("ca-central-1", "s3-ca-central-1.amazonaws.com"),
|
||||
("ap-south-1", "s3-ap-south-1.amazonaws.com"),
|
||||
("ap-northeast-1", "s3-ap-northeast-1.amazonaws.com"),
|
||||
("ap-northeast-2", "s3-ap-northeast-2.amazonaws.com"),
|
||||
("ap-southeast-1", "s3-ap-southeast-1.amazonaws.com"),
|
||||
("ap-southeast-2", "s3-ap-southeast-2.amazonaws.com"),
|
||||
("eu-west-1", "s3-eu-west-1.amazonaws.com"),
|
||||
("eu-west-2", "s3-eu-west-2.amazonaws.com"),
|
||||
("eu-central-1", "s3-eu-central-1.amazonaws.com"),
|
||||
("sa-east-1", "s3-sa-east-1.amazonaws.com")
|
||||
[ ("us-east-1", "s3.us-east-1.amazonaws.com"),
|
||||
("us-east-2", "s3.us-east-2.amazonaws.com"),
|
||||
("us-west-1", "s3.us-west-1.amazonaws.com"),
|
||||
("us-west-2", "s3.us-west-2.amazonaws.com"),
|
||||
("ca-central-1", "s3.ca-central-1.amazonaws.com"),
|
||||
("ap-south-1", "s3.ap-south-1.amazonaws.com"),
|
||||
("ap-south-2", "s3.ap-south-2.amazonaws.com"),
|
||||
("ap-northeast-1", "s3.ap-northeast-1.amazonaws.com"),
|
||||
("ap-northeast-2", "s3.ap-northeast-2.amazonaws.com"),
|
||||
("ap-northeast-3", "s3.ap-northeast-3.amazonaws.com"),
|
||||
("ap-southeast-1", "s3.ap-southeast-1.amazonaws.com"),
|
||||
("ap-southeast-2", "s3.ap-southeast-2.amazonaws.com"),
|
||||
("ap-southeast-3", "s3.ap-southeast-3.amazonaws.com"),
|
||||
("eu-west-1", "s3.eu-west-1.amazonaws.com"),
|
||||
("eu-west-2", "s3.eu-west-2.amazonaws.com"),
|
||||
("eu-west-3", "s3.eu-west-3.amazonaws.com"),
|
||||
("eu-central-1", "s3.eu-central-1.amazonaws.com"),
|
||||
("eu-central-2", "s3.eu-central-2.amazonaws.com"),
|
||||
("eu-south-1", "s3.eu-south-1.amazonaws.com"),
|
||||
("eu-south-2", "s3.eu-south-2.amazonaws.com"),
|
||||
("af-south-1", "s3.af-south-1.amazonaws.com"),
|
||||
("ap-east-1", "s3.ap-east-1.amazonaws.com"),
|
||||
("cn-north-1", "s3.cn-north-1.amazonaws.com.cn"),
|
||||
("cn-northwest-1", "s3.cn-northwest-1.amazonaws.com.cn"),
|
||||
("eu-north-1", "s3.eu-north-1.amazonaws.com"),
|
||||
("me-south-1", "s3.me-south-1.amazonaws.com"),
|
||||
("me-central-1", "s3.me-central-1.amazonaws.com"),
|
||||
("us-gov-east-1", "s3.us-gov-east-1.amazonaws.com"),
|
||||
("us-gov-west-1", "s3.us-gov-west-1.amazonaws.com"),
|
||||
("sa-east-1", "s3.sa-east-1.amazonaws.com")
|
||||
]
|
||||
|
||||
-- | Connection Info data type. To create a 'ConnectInfo' value,
|
||||
@ -103,14 +134,15 @@ awsRegionMap =
|
||||
data ConnectInfo = ConnectInfo
|
||||
{ connectHost :: Text,
|
||||
connectPort :: Int,
|
||||
connectAccessKey :: Text,
|
||||
connectSecretKey :: Text,
|
||||
connectCreds :: Creds,
|
||||
connectIsSecure :: Bool,
|
||||
connectRegion :: Region,
|
||||
connectAutoDiscoverRegion :: Bool,
|
||||
connectDisableTLSCertValidation :: Bool
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
|
||||
getEndpoint :: ConnectInfo -> Endpoint
|
||||
getEndpoint ci = (encodeUtf8 $ connectHost ci, connectPort ci, connectIsSecure ci)
|
||||
|
||||
instance IsString ConnectInfo where
|
||||
fromString str =
|
||||
@ -118,86 +150,89 @@ instance IsString ConnectInfo where
|
||||
in ConnectInfo
|
||||
{ connectHost = TE.decodeUtf8 $ NC.host req,
|
||||
connectPort = NC.port req,
|
||||
connectAccessKey = "",
|
||||
connectSecretKey = "",
|
||||
connectCreds = CredsStatic $ CredentialValue mempty mempty mempty,
|
||||
connectIsSecure = NC.secure req,
|
||||
connectRegion = "",
|
||||
connectAutoDiscoverRegion = True,
|
||||
connectDisableTLSCertValidation = False
|
||||
}
|
||||
|
||||
-- | Contains access key and secret key to access object storage.
|
||||
data Credentials = Credentials
|
||||
{ cAccessKey :: Text,
|
||||
cSecretKey :: Text
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
-- | A 'CredentialLoader' is an action that may return a 'CredentialValue'.
|
||||
-- Loaders may be chained together using 'findFirst'.
|
||||
--
|
||||
-- @since 1.7.0
|
||||
type CredentialLoader = IO (Maybe CredentialValue)
|
||||
|
||||
-- | A Provider is an action that may return Credentials. Providers
|
||||
-- may be chained together using 'findFirst'.
|
||||
type Provider = IO (Maybe Credentials)
|
||||
|
||||
-- | Combines the given list of providers, by calling each one in
|
||||
-- order until Credentials are found.
|
||||
findFirst :: [Provider] -> Provider
|
||||
-- | Combines the given list of loaders, by calling each one in
|
||||
-- order until a 'CredentialValue' is returned.
|
||||
findFirst :: [CredentialLoader] -> IO (Maybe CredentialValue)
|
||||
findFirst [] = return Nothing
|
||||
findFirst (f : fs) = do
|
||||
c <- f
|
||||
maybe (findFirst fs) (return . Just) c
|
||||
|
||||
-- | This Provider loads `Credentials` from @~\/.aws\/credentials@
|
||||
fromAWSConfigFile :: Provider
|
||||
-- | This action returns a 'CredentialValue' populated from
|
||||
-- @~\/.aws\/credentials@
|
||||
fromAWSConfigFile :: CredentialLoader
|
||||
fromAWSConfigFile = do
|
||||
credsE <- runExceptT $ do
|
||||
homeDir <- lift $ getHomeDirectory
|
||||
homeDir <- lift getHomeDirectory
|
||||
let awsCredsFile = homeDir `combine` ".aws" `combine` "credentials"
|
||||
fileExists <- lift $ doesFileExist awsCredsFile
|
||||
bool (throwE "FileNotFound") (return ()) fileExists
|
||||
ini <- ExceptT $ Ini.readIniFile awsCredsFile
|
||||
akey <-
|
||||
ExceptT $ return $
|
||||
Ini.lookupValue "default" "aws_access_key_id" ini
|
||||
ExceptT $
|
||||
return $
|
||||
Ini.lookupValue "default" "aws_access_key_id" ini
|
||||
skey <-
|
||||
ExceptT $ return $
|
||||
Ini.lookupValue "default" "aws_secret_access_key" ini
|
||||
return $ Credentials akey skey
|
||||
return $ hush credsE
|
||||
ExceptT $
|
||||
return $
|
||||
Ini.lookupValue "default" "aws_secret_access_key" ini
|
||||
return $ CredentialValue (coerce akey) (fromString $ T.unpack skey) Nothing
|
||||
return $ either (const Nothing) Just credsE
|
||||
|
||||
-- | This Provider loads `Credentials` from @AWS_ACCESS_KEY_ID@ and
|
||||
-- @AWS_SECRET_ACCESS_KEY@ environment variables.
|
||||
fromAWSEnv :: Provider
|
||||
-- | This action returns a 'CredentialValue' populated from @AWS_ACCESS_KEY_ID@
|
||||
-- and @AWS_SECRET_ACCESS_KEY@ environment variables.
|
||||
fromAWSEnv :: CredentialLoader
|
||||
fromAWSEnv = runMaybeT $ do
|
||||
akey <- MaybeT $ Env.lookupEnv "AWS_ACCESS_KEY_ID"
|
||||
skey <- MaybeT $ Env.lookupEnv "AWS_SECRET_ACCESS_KEY"
|
||||
return $ Credentials (T.pack akey) (T.pack skey)
|
||||
return $ CredentialValue (fromString akey) (fromString skey) Nothing
|
||||
|
||||
-- | This Provider loads `Credentials` from @MINIO_ACCESS_KEY@ and
|
||||
-- @MINIO_SECRET_KEY@ environment variables.
|
||||
fromMinioEnv :: Provider
|
||||
-- | This action returns a 'CredentialValue' populated from @MINIO_ACCESS_KEY@
|
||||
-- and @MINIO_SECRET_KEY@ environment variables.
|
||||
fromMinioEnv :: CredentialLoader
|
||||
fromMinioEnv = runMaybeT $ do
|
||||
akey <- MaybeT $ Env.lookupEnv "MINIO_ACCESS_KEY"
|
||||
skey <- MaybeT $ Env.lookupEnv "MINIO_SECRET_KEY"
|
||||
return $ Credentials (T.pack akey) (T.pack skey)
|
||||
return $ CredentialValue (fromString akey) (fromString skey) Nothing
|
||||
|
||||
-- | setCredsFrom retrieves access credentials from the first
|
||||
-- `Provider` form the given list that succeeds and sets it in the
|
||||
-- `ConnectInfo`.
|
||||
setCredsFrom :: [Provider] -> ConnectInfo -> IO ConnectInfo
|
||||
-- | setCredsFrom retrieves access credentials from the first action in the
|
||||
-- given list that succeeds and sets it in the 'ConnectInfo'.
|
||||
setCredsFrom :: [CredentialLoader] -> ConnectInfo -> IO ConnectInfo
|
||||
setCredsFrom ps ci = do
|
||||
pMay <- findFirst ps
|
||||
maybe
|
||||
(throwIO MErrVMissingCredentials)
|
||||
(return . (flip setCreds ci))
|
||||
(return . (`setCreds` ci))
|
||||
pMay
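A usage sketch chaining the loaders defined above with the awsCI default from this module:

-- ci <- setCredsFrom [fromAWSEnv, fromMinioEnv, fromAWSConfigFile] awsCI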
|
||||
|
||||
-- | setCreds sets the given `Credentials` in the `ConnectInfo`.
|
||||
setCreds :: Credentials -> ConnectInfo -> ConnectInfo
|
||||
setCreds (Credentials accessKey secretKey) connInfo =
|
||||
-- | setCreds sets the given `CredentialValue` in the `ConnectInfo`.
|
||||
setCreds :: CredentialValue -> ConnectInfo -> ConnectInfo
|
||||
setCreds cv connInfo =
|
||||
connInfo
|
||||
{ connectAccessKey = accessKey,
|
||||
connectSecretKey = secretKey
|
||||
{ connectCreds = CredsStatic cv
|
||||
}
|
||||
|
||||
-- | 'setSTSCredential' configures `ConnectInfo` to retrieve temporary
|
||||
-- credentials via the STS API on demand. It is automatically refreshed on
|
||||
-- expiry.
|
||||
setSTSCredential :: (STSCredentialProvider p) => p -> ConnectInfo -> IO ConnectInfo
|
||||
setSTSCredential p ci = do
|
||||
store <- initSTSCredential p
|
||||
return ci {connectCreds = CredsSTS store}
|
||||
|
||||
-- | Set the S3 region parameter in the `ConnectInfo`
|
||||
setRegion :: Region -> ConnectInfo -> ConnectInfo
|
||||
setRegion r connInfo =
|
||||
@ -219,15 +254,7 @@ disableTLSCertValidation :: ConnectInfo -> ConnectInfo
|
||||
disableTLSCertValidation c = c {connectDisableTLSCertValidation = True}
|
||||
|
||||
getHostAddr :: ConnectInfo -> ByteString
|
||||
getHostAddr ci =
|
||||
if
|
||||
| port == 80 || port == 443 -> TE.encodeUtf8 host
|
||||
| otherwise ->
|
||||
TE.encodeUtf8 $
|
||||
T.concat [host, ":", Lib.Prelude.show port]
|
||||
where
|
||||
port = connectPort ci
|
||||
host = connectHost ci
|
||||
getHostAddr ci = getHostHeader (encodeUtf8 $ connectHost ci, connectPort ci)
|
||||
|
||||
-- | Default Google Compute Storage ConnectInfo. Works only for
|
||||
-- "Simple Migration" use-case with interoperability mode enabled on
|
||||
@ -250,7 +277,7 @@ awsCI = "https://s3.amazonaws.com"
|
||||
-- ConnectInfo. Credentials are already filled in.
|
||||
minioPlayCI :: ConnectInfo
|
||||
minioPlayCI =
|
||||
let playCreds = Credentials "Q3AM3UQ867SPQQA43P2F" "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
let playCreds = CredentialValue "Q3AM3UQ867SPQQA43P2F" "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" Nothing
|
||||
in setCreds playCreds $
|
||||
setRegion
|
||||
"us-east-1"
|
||||
@ -273,16 +300,16 @@ type ETag = Text
|
||||
-- | Data type to represent an object encryption key. Create one using
|
||||
-- the `mkSSECKey` function.
|
||||
newtype SSECKey = SSECKey BA.ScrubbedBytes
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
-- | Validates that the given ByteString is 32 bytes long and creates
|
||||
-- an encryption key.
|
||||
mkSSECKey :: MonadThrow m => ByteString -> m SSECKey
|
||||
mkSSECKey :: (MonadThrow m) => ByteString -> m SSECKey
|
||||
mkSSECKey keyBytes
|
||||
| B.length keyBytes /= 32 =
|
||||
throwM MErrVInvalidEncryptionKeyLength
|
||||
throwM MErrVInvalidEncryptionKeyLength
|
||||
| otherwise =
|
||||
return $ SSECKey $ BA.convert keyBytes
|
||||
return $ SSECKey $ BA.convert keyBytes
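A usage sketch (assuming OverloadedStrings; the key literal is a placeholder that is exactly 32 bytes long):

-- sseKey <- mkSSECKey "01234567890123456789012345678901"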
|
||||
|
||||
-- | Data type to represent Server-Side-Encryption settings
|
||||
data SSE where
|
||||
@ -294,7 +321,7 @@ data SSE where
|
||||
-- argument is the optional KMS context that must have a
|
||||
-- `A.ToJSON` instance - please refer to the AWS S3 documentation
|
||||
-- for detailed information.
|
||||
SSEKMS :: A.ToJSON a => Maybe ByteString -> Maybe a -> SSE
|
||||
SSEKMS :: (A.ToJSON a) => Maybe ByteString -> Maybe a -> SSE
|
||||
-- | Specifies server-side encryption with customer provided
|
||||
-- key. The argument is the encryption key to be used.
|
||||
SSEC :: SSECKey -> SSE
|
||||
@ -352,28 +379,10 @@ data PutObjectOptions = PutObjectOptions
|
||||
defaultPutObjectOptions :: PutObjectOptions
|
||||
defaultPutObjectOptions = PutObjectOptions Nothing Nothing Nothing Nothing Nothing Nothing [] Nothing Nothing
|
||||
|
||||
-- | If the given header name has the @X-Amz-Meta-@ prefix, it is
|
||||
-- stripped and a Just is returned.
|
||||
userMetadataHeaderNameMaybe :: Text -> Maybe Text
|
||||
userMetadataHeaderNameMaybe k =
|
||||
let prefix = T.toCaseFold "X-Amz-Meta-"
|
||||
n = T.length prefix
|
||||
in if T.toCaseFold (T.take n k) == prefix
|
||||
then Just (T.drop n k)
|
||||
else Nothing
|
||||
|
||||
addXAmzMetaPrefix :: Text -> Text
|
||||
addXAmzMetaPrefix s
|
||||
| isJust (userMetadataHeaderNameMaybe s) = s
|
||||
| otherwise = "X-Amz-Meta-" <> s
|
||||
|
||||
mkHeaderFromMetadata :: [(Text, Text)] -> [HT.Header]
|
||||
mkHeaderFromMetadata = map (\(x, y) -> (mk $ encodeUtf8 $ addXAmzMetaPrefix $ x, encodeUtf8 y))
|
||||
|
||||
pooToHeaders :: PutObjectOptions -> [HT.Header]
|
||||
pooToHeaders poo =
|
||||
userMetadata
|
||||
++ (catMaybes $ map tupToMaybe (zipWith (,) names values))
|
||||
++ mapMaybe tupToMaybe (zip names values)
|
||||
++ maybe [] toPutObjectHeaders (pooSSE poo)
|
||||
where
|
||||
tupToMaybe (k, Just v) = Just (k, v)
|
||||
@ -404,11 +413,34 @@ data BucketInfo = BucketInfo
|
||||
{ biName :: Bucket,
|
||||
biCreationDate :: UTCTime
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | A type alias to represent a part-number for multipart upload
|
||||
type PartNumber = Int16
|
||||
|
||||
-- | Select part sizes - the logic is that the minimum part-size will
|
||||
-- be 64MiB.
|
||||
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
|
||||
selectPartSizes size =
|
||||
uncurry (List.zip3 [1 ..]) $
|
||||
List.unzip $
|
||||
loop 0 size
|
||||
where
|
||||
ceil :: Double -> Int64
|
||||
ceil = ceiling
|
||||
partSize =
|
||||
max
|
||||
minPartSize
|
||||
( ceil $
|
||||
fromIntegral size
|
||||
/ fromIntegral maxMultipartParts
|
||||
)
|
||||
m = partSize
|
||||
loop st sz
|
||||
| st > sz = []
|
||||
| st + m >= sz = [(st, sz - st)]
|
||||
| otherwise = (st, m) : loop (st + m) sz
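A worked sketch of the sizing arithmetic, assuming minPartSize is 64 MiB and maxMultipartParts is 10000 as defined earlier in this module:

-- 10 GiB object: ceil(10 GiB / 10000) is about 1 MiB, below the 64 MiB floor,
--                so selectPartSizes yields 160 parts of exactly 64 MiB.
-- 1 TiB object:  ceil(1 TiB / 10000) is about 104.9 MiB, above the floor, so
--                all 10000 part slots are used with parts of about 104.9 MiB.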
|
||||
|
||||
-- | A type alias to represent an upload-id for multipart upload
|
||||
type UploadId = Text
|
||||
|
||||
@ -422,7 +454,7 @@ data ListPartsResult = ListPartsResult
|
||||
lprNextPart :: Maybe Int,
|
||||
lprParts :: [ObjectPartInfo]
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents information about an object part in an ongoing
|
||||
-- multipart upload.
|
||||
@ -432,7 +464,7 @@ data ObjectPartInfo = ObjectPartInfo
|
||||
opiSize :: Int64,
|
||||
opiModTime :: UTCTime
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents result from a listing of incomplete uploads to a
|
||||
-- bucket.
|
||||
@ -443,7 +475,7 @@ data ListUploadsResult = ListUploadsResult
|
||||
lurUploads :: [(Object, UploadId, UTCTime)],
|
||||
lurCPrefixes :: [Text]
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents information about a multipart upload.
|
||||
data UploadInfo = UploadInfo
|
||||
@ -452,7 +484,7 @@ data UploadInfo = UploadInfo
|
||||
uiInitTime :: UTCTime,
|
||||
uiSize :: Int64
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents result from a listing of objects in a bucket.
|
||||
data ListObjectsResult = ListObjectsResult
|
||||
@ -461,7 +493,7 @@ data ListObjectsResult = ListObjectsResult
|
||||
lorObjects :: [ObjectInfo],
|
||||
lorCPrefixes :: [Text]
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents result from a listing of objects version 1 in a bucket.
|
||||
data ListObjectsV1Result = ListObjectsV1Result
|
||||
@ -470,7 +502,7 @@ data ListObjectsV1Result = ListObjectsV1Result
|
||||
lorObjects' :: [ObjectInfo],
|
||||
lorCPrefixes' :: [Text]
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents information about an object.
|
||||
data ObjectInfo = ObjectInfo
|
||||
@ -494,7 +526,7 @@ data ObjectInfo = ObjectInfo
|
||||
-- user-metadata pairs)
|
||||
oiMetadata :: H.HashMap Text Text
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represents source object in server-side copy object
|
||||
data SourceInfo = SourceInfo
|
||||
@ -526,7 +558,7 @@ data SourceInfo = SourceInfo
|
||||
-- given time.
|
||||
srcIfUnmodifiedSince :: Maybe UTCTime
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Provide a default for `SourceInfo`
|
||||
defaultSourceInfo :: SourceInfo
|
||||
@ -539,7 +571,7 @@ data DestinationInfo = DestinationInfo
|
||||
-- | Destination object key
|
||||
dstObject :: Text
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Provide a default for `DestinationInfo`
|
||||
defaultDestinationInfo :: DestinationInfo
|
||||
@ -573,7 +605,8 @@ defaultGetObjectOptions =
|
||||
|
||||
gooToHeaders :: GetObjectOptions -> [HT.Header]
|
||||
gooToHeaders goo =
|
||||
rangeHdr ++ zip names values
|
||||
rangeHdr
|
||||
++ zip names values
|
||||
++ maybe [] (toPutObjectHeaders . SSEC) (gooSSECKey goo)
|
||||
where
|
||||
names =
|
||||
@ -616,18 +649,18 @@ data Event
|
||||
| ObjectRemovedDelete
|
||||
| ObjectRemovedDeleteMarkerCreated
|
||||
| ReducedRedundancyLostObject
|
||||
deriving (Eq)
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
instance Show Event where
|
||||
show ObjectCreated = "s3:ObjectCreated:*"
|
||||
show ObjectCreatedPut = "s3:ObjectCreated:Put"
|
||||
show ObjectCreatedPost = "s3:ObjectCreated:Post"
|
||||
show ObjectCreatedCopy = "s3:ObjectCreated:Copy"
|
||||
show ObjectCreatedMultipartUpload = "s3:ObjectCreated:MultipartUpload"
|
||||
show ObjectRemoved = "s3:ObjectRemoved:*"
|
||||
show ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
|
||||
show ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
|
||||
show ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
|
||||
instance ToText Event where
|
||||
toText ObjectCreated = "s3:ObjectCreated:*"
|
||||
toText ObjectCreatedPut = "s3:ObjectCreated:Put"
|
||||
toText ObjectCreatedPost = "s3:ObjectCreated:Post"
|
||||
toText ObjectCreatedCopy = "s3:ObjectCreated:Copy"
|
||||
toText ObjectCreatedMultipartUpload = "s3:ObjectCreated:MultipartUpload"
|
||||
toText ObjectRemoved = "s3:ObjectRemoved:*"
|
||||
toText ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
|
||||
toText ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
|
||||
toText ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
|
||||
|
||||
textToEvent :: Text -> Maybe Event
|
||||
textToEvent t = case t of
|
||||
@ -643,10 +676,10 @@ textToEvent t = case t of
|
||||
_ -> Nothing
|
||||
|
||||
-- | Filter data type - part of notification configuration
|
||||
data Filter = Filter
|
||||
newtype Filter = Filter
|
||||
{ fFilter :: FilterKey
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | defaultFilter is empty, used to create a notification
|
||||
-- configuration.
|
||||
@ -654,10 +687,10 @@ defaultFilter :: Filter
|
||||
defaultFilter = Filter defaultFilterKey
|
||||
|
||||
-- | FilterKey contains FilterRules, and is part of a Filter.
|
||||
data FilterKey = FilterKey
|
||||
newtype FilterKey = FilterKey
|
||||
{ fkKey :: FilterRules
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | defaultFilterKey is empty, used to create notification
|
||||
-- configuration.
|
||||
@ -665,10 +698,10 @@ defaultFilterKey :: FilterKey
|
||||
defaultFilterKey = FilterKey defaultFilterRules
|
||||
|
||||
-- | FilterRules represents a collection of `FilterRule`s.
|
||||
data FilterRules = FilterRules
|
||||
newtype FilterRules = FilterRules
|
||||
{ frFilterRules :: [FilterRule]
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | defaultFilterRules is empty, used to create notification
|
||||
-- configuration.
|
||||
@ -688,7 +721,7 @@ data FilterRule = FilterRule
|
||||
{ frName :: Text,
|
||||
frValue :: Text
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Arn is an alias of Text
|
||||
type Arn = Text
|
||||
@ -702,7 +735,7 @@ data NotificationConfig = NotificationConfig
|
||||
ncEvents :: [Event],
|
||||
ncFilter :: Filter
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | A data-type to represent bucket notification configuration. It is
|
||||
-- a collection of queue, topic or lambda function configurations. The
|
||||
@ -714,7 +747,7 @@ data Notification = Notification
|
||||
nTopicConfigurations :: [NotificationConfig],
|
||||
nCloudFunctionConfigurations :: [NotificationConfig]
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | The default notification configuration is empty.
|
||||
defaultNotification :: Notification
|
||||
@ -733,10 +766,10 @@ data SelectRequest = SelectRequest
|
||||
srOutputSerialization :: OutputSerialization,
|
||||
srRequestProgressEnabled :: Maybe Bool
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
data ExpressionType = SQL
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | InputSerialization represents format information of the input
|
||||
-- object being queried. Use one of the smart constructors such as
|
||||
@ -746,7 +779,7 @@ data InputSerialization = InputSerialization
|
||||
{ isCompressionType :: Maybe CompressionType,
|
||||
isFormatInfo :: InputFormatInfo
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Data type representing the compression setting in a Select
|
||||
-- request.
|
||||
@ -754,7 +787,7 @@ data CompressionType
|
||||
= CompressionTypeNone
|
||||
| CompressionTypeGzip
|
||||
| CompressionTypeBzip2
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Data type representing input object format information in a
|
||||
-- Select request.
|
||||
@ -762,7 +795,7 @@ data InputFormatInfo
|
||||
= InputFormatCSV CSVInputProp
|
||||
| InputFormatJSON JSONInputProp
|
||||
| InputFormatParquet
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | defaultCsvInput returns InputSerialization with default CSV
|
||||
-- format, and without any compression setting.
|
||||
@ -841,20 +874,17 @@ type CSVInputProp = CSVProp
|
||||
|
||||
-- | CSVProp represents CSV format properties. It is built up using
|
||||
-- the Monoid instance.
|
||||
data CSVProp = CSVProp (H.HashMap Text Text)
|
||||
deriving (Eq, Show)
|
||||
newtype CSVProp = CSVProp (H.HashMap Text Text)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
#if (__GLASGOW_HASKELL__ >= 804)
|
||||
instance Semigroup CSVProp where
|
||||
(CSVProp a) <> (CSVProp b) = CSVProp (b <> a)
|
||||
#endif
|
||||
(CSVProp a) <> (CSVProp b) = CSVProp (b <> a)
|
||||
|
||||
instance Monoid CSVProp where
|
||||
mempty = CSVProp mempty
|
||||
|
||||
#if (__GLASGOW_HASKELL__ < 804)
|
||||
mappend (CSVProp a) (CSVProp b) = CSVProp (b <> a)
|
||||
#endif
|
||||
csvPropsList :: CSVProp -> [(Text, Text)]
|
||||
csvPropsList (CSVProp h) = sort $ H.toList h
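Note that the Semigroup instance flips its operands before the left-biased HashMap union, so a property set later overrides an earlier setting of the same key. For instance (a sketch):

-- fileHeaderInfo FileHeaderUse <> fileHeaderInfo FileHeaderIgnore
--   ~> the "IGNORE" setting wins, because the right operand takes precedence.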
|
||||
|
||||
defaultCSVProp :: CSVProp
|
||||
defaultCSVProp = mempty
|
||||
@ -884,15 +914,15 @@ data FileHeaderInfo
|
||||
FileHeaderUse
|
||||
| -- | Header are present, but should be ignored
|
||||
FileHeaderIgnore
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Specify the CSV file header info property.
|
||||
fileHeaderInfo :: FileHeaderInfo -> CSVProp
|
||||
fileHeaderInfo = CSVProp . H.singleton "FileHeaderInfo" . toString
|
||||
fileHeaderInfo = CSVProp . H.singleton "FileHeaderInfo" . toStr
|
||||
where
|
||||
toString FileHeaderNone = "NONE"
|
||||
toString FileHeaderUse = "USE"
|
||||
toString FileHeaderIgnore = "IGNORE"
|
||||
toStr FileHeaderNone = "NONE"
|
||||
toStr FileHeaderUse = "USE"
|
||||
toStr FileHeaderIgnore = "IGNORE"
|
||||
|
||||
-- | Specify the CSV comment character property. Lines starting with
|
||||
-- this character are ignored by the server.
|
||||
@ -909,13 +939,13 @@ setInputCSVProps p is = is {isFormatInfo = InputFormatCSV p}
|
||||
|
||||
-- | Set the CSV format properties in the OutputSerialization.
|
||||
outputCSVFromProps :: CSVProp -> OutputSerialization
|
||||
outputCSVFromProps p = OutputSerializationCSV p
|
||||
outputCSVFromProps = OutputSerializationCSV
|
||||
|
||||
data JSONInputProp = JSONInputProp {jsonipType :: JSONType}
|
||||
deriving (Eq, Show)
|
||||
newtype JSONInputProp = JSONInputProp {jsonipType :: JSONType}
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
data JSONType = JSONTypeDocument | JSONTypeLines
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | OutputSerialization represents output serialization settings for
|
||||
-- the SelectRequest. Use `defaultCsvOutput` or `defaultJsonOutput` as
|
||||
@ -923,23 +953,24 @@ data JSONType = JSONTypeDocument | JSONTypeLines
|
||||
data OutputSerialization
|
||||
= OutputSerializationJSON JSONOutputProp
|
||||
| OutputSerializationCSV CSVOutputProp
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
type CSVOutputProp = CSVProp
|
||||
|
||||
-- | quoteFields is an output serialization parameter
|
||||
quoteFields :: QuoteFields -> CSVProp
|
||||
quoteFields q = CSVProp $ H.singleton "QuoteFields" $
|
||||
case q of
|
||||
QuoteFieldsAsNeeded -> "ASNEEDED"
|
||||
QuoteFieldsAlways -> "ALWAYS"
|
||||
quoteFields q = CSVProp $
|
||||
H.singleton "QuoteFields" $
|
||||
case q of
|
||||
QuoteFieldsAsNeeded -> "ASNEEDED"
|
||||
QuoteFieldsAlways -> "ALWAYS"
|
||||
|
||||
-- | Represent the QuoteField setting.
|
||||
data QuoteFields = QuoteFieldsAsNeeded | QuoteFieldsAlways
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
data JSONOutputProp = JSONOutputProp {jsonopRecordDelimiter :: Maybe Text}
|
||||
deriving (Eq, Show)
|
||||
newtype JSONOutputProp = JSONOutputProp {jsonopRecordDelimiter :: Maybe Text}
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Set the output record delimiter for JSON format
|
||||
outputJSONFromRecordDelimiter :: Text -> OutputSerialization
|
||||
@ -950,14 +981,15 @@ outputJSONFromRecordDelimiter t =
|
||||
|
||||
-- | An EventMessage represents each kind of message received from the server.
|
||||
data EventMessage
|
||||
= ProgressEventMessage {emProgress :: Progress}
|
||||
| StatsEventMessage {emStats :: Stats}
|
||||
= ProgressEventMessage Progress
|
||||
| StatsEventMessage Stats
|
||||
| RequestLevelErrorMessage
|
||||
{ emErrorCode :: Text,
|
||||
emErrorMessage :: Text
|
||||
}
|
||||
| RecordPayloadEventMessage {emPayloadBytes :: ByteString}
|
||||
deriving (Eq, Show)
|
||||
Text
|
||||
-- ^ Error code
|
||||
Text
|
||||
-- ^ Error message
|
||||
| RecordPayloadEventMessage ByteString
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
data MsgHeaderName
|
||||
= MessageType
|
||||
@ -965,7 +997,7 @@ data MsgHeaderName
|
||||
| ContentType
|
||||
| ErrorCode
|
||||
| ErrorMessage
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
msgHeaderValueType :: Word8
|
||||
msgHeaderValueType = 7
|
||||
@ -978,7 +1010,7 @@ data Progress = Progress
|
||||
pBytesProcessed :: Int64,
|
||||
pBytesReturned :: Int64
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Represent the stats event returned at the end of the Select
|
||||
-- response.
|
||||
@ -1016,7 +1048,8 @@ data S3ReqInfo = S3ReqInfo
|
||||
riPayload :: Payload,
|
||||
riPayloadHash :: Maybe ByteString,
|
||||
riRegion :: Maybe Region,
|
||||
riNeedsLocation :: Bool
|
||||
riNeedsLocation :: Bool,
|
||||
riPresignExpirySecs :: Maybe UrlExpiry
|
||||
}
|
||||
|
||||
defaultS3ReqInfo :: S3ReqInfo
|
||||
@ -1031,16 +1064,13 @@ defaultS3ReqInfo =
|
||||
Nothing
|
||||
Nothing
|
||||
True
|
||||
Nothing
|
||||
|
||||
getS3Path :: Maybe Bucket -> Maybe Object -> ByteString
getS3Path b o =
  let segments = map TE.encodeUtf8 $ catMaybes $ b : bool [] [o] (isJust b)
   in B.concat ["/", B.intercalate "/" segments]

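A few sample evaluations of getS3Path, for reference (illustrative only):

-- getS3Path (Just "mybucket") (Just "a/b.txt") == "/mybucket/a/b.txt"
-- getS3Path (Just "mybucket") Nothing          == "/mybucket"
-- getS3Path Nothing (Just "ignored")           == "/"   -- object is dropped when no bucket is given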
-- | Time to expire for a presigned URL. It is interpreted as a number of
-- seconds. The maximum duration that can be specified is 7 days.
type UrlExpiry = Int

type RegionMap = H.HashMap Bucket Region
|
||||
|
||||
-- | The Minio Monad - all computations accessing object storage
|
||||
@ -1048,7 +1078,7 @@ type RegionMap = H.HashMap Bucket Region
|
||||
newtype Minio a = Minio
|
||||
{ unMinio :: ReaderT MinioConn (ResourceT IO) a
|
||||
}
|
||||
deriving
|
||||
deriving newtype
|
||||
( Functor,
|
||||
Applicative,
|
||||
Monad,
|
||||
@ -1074,11 +1104,10 @@ class HasSvcNamespace env where
|
||||
instance HasSvcNamespace MinioConn where
|
||||
getSvcNamespace env =
|
||||
let host = connectHost $ mcConnInfo env
|
||||
in if
|
||||
| host == "storage.googleapis.com" ->
|
||||
"http://doc.s3.amazonaws.com/2006-03-01"
|
||||
| otherwise ->
|
||||
"http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
in ( if host == "storage.googleapis.com"
|
||||
then "http://doc.s3.amazonaws.com/2006-03-01"
|
||||
else "http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
)
|
||||
|
||||
-- | Takes connection information and returns a connection object to
|
||||
-- be passed to 'runMinio'. The returned value can be kept in the
|
||||
@ -1088,8 +1117,8 @@ connect :: ConnectInfo -> IO MinioConn
|
||||
connect ci = do
|
||||
let settings
|
||||
| connectIsSecure ci && connectDisableTLSCertValidation ci =
|
||||
let badTlsSettings = Conn.TLSSettingsSimple True False False
|
||||
in TLS.mkManagerSettings badTlsSettings Nothing
|
||||
let badTlsSettings = Conn.TLSSettingsSimple True False False
|
||||
in TLS.mkManagerSettings badTlsSettings Nothing
|
||||
| connectIsSecure ci = NC.tlsManagerSettings
|
||||
| otherwise = defaultManagerSettings
|
||||
mgr <- NC.newManager settings
|
||||
@ -1138,9 +1167,22 @@ runMinioRes ci m = do
|
||||
conn <- liftIO $ connect ci
|
||||
runMinioResWith conn m
|
||||
|
||||
s3Name :: Text -> Text -> Name
|
||||
s3Name ns s = Name s (Just ns) Nothing
|
||||
|
||||
-- | Format as per RFC 1123.
|
||||
formatRFC1123 :: UTCTime -> T.Text
|
||||
formatRFC1123 = T.pack . formatTime defaultTimeLocale "%a, %d %b %Y %X %Z"
|
||||
|
||||
lookupRegionCache :: Bucket -> Minio (Maybe Region)
|
||||
lookupRegionCache b = do
|
||||
rMVar <- asks mcRegionMap
|
||||
rMap <- UM.readMVar rMVar
|
||||
return $ H.lookup b rMap
|
||||
|
||||
addToRegionCache :: Bucket -> Region -> Minio ()
|
||||
addToRegionCache b region = do
|
||||
rMVar <- asks mcRegionMap
|
||||
UM.modifyMVar_ rMVar $ return . H.insert b region
|
||||
|
||||
deleteFromRegionCache :: Bucket -> Minio ()
|
||||
deleteFromRegionCache b = do
|
||||
rMVar <- asks mcRegionMap
|
||||
UM.modifyMVar_ rMVar $ return . H.delete b
|
||||
|
||||
@ -25,9 +25,8 @@ import qualified Data.ByteString as B
|
||||
import qualified Data.ByteString.Builder as BB
|
||||
import qualified Data.ByteString.Char8 as BC8
|
||||
import qualified Data.ByteString.Lazy as LB
|
||||
import Data.Char (isAsciiLower, isAsciiUpper, isSpace, isDigit, toUpper)
|
||||
import Data.Char (isAsciiLower, isAsciiUpper, isDigit, isSpace, toUpper)
|
||||
import qualified Data.Text as T
|
||||
import Lib.Prelude
|
||||
import Numeric (showHex)
|
||||
|
||||
stripBS :: ByteString -> ByteString
|
||||
@ -38,8 +37,10 @@ class UriEncodable s where
|
||||
|
||||
instance UriEncodable [Char] where
|
||||
uriEncode encodeSlash payload =
|
||||
LB.toStrict $ BB.toLazyByteString $ mconcat $
|
||||
map (`uriEncodeChar` encodeSlash) payload
|
||||
LB.toStrict $
|
||||
BB.toLazyByteString $
|
||||
mconcat $
|
||||
map (`uriEncodeChar` encodeSlash) payload
|
||||
|
||||
instance UriEncodable ByteString where
|
||||
-- assumes that uriEncode is passed ASCII encoded strings.
|
||||
@ -64,11 +65,11 @@ uriEncodeChar ch _
    || (ch == '-')
    || (ch == '.')
    || (ch == '~') =
      BB.char7 ch
    BB.char7 ch
  | otherwise = mconcat $ map f $ B.unpack $ encodeUtf8 $ T.singleton ch
  where
    f :: Word8 -> BB.Builder
    f n = BB.char7 '%' <> BB.string7 hexStr
      where
        hexStr = map toUpper $ showHex q $ showHex r ""
        (q, r) = divMod (fromIntegral n) (16 :: Word8)
        (q, r) = divMod n (16 :: Word8)
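A worked example of the percent-encoding above (the slash handling lives in an earlier, elided clause of uriEncodeChar and depends on the encodeSlash flag):

-- ' '  is byte 0x20: divMod 0x20 16 == (2, 0), showHex gives "20"  -> "%20"
-- 'é'  is UTF-8 encoded as the bytes 0xC3 0xA9                     -> "%C3%A9"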
|
||||
@ -39,31 +39,30 @@ import Crypto.MAC.HMAC (HMAC, hmac)
|
||||
import Data.ByteArray (ByteArrayAccess, convert)
|
||||
import Data.ByteArray.Encoding (Base (Base16, Base64), convertToBase)
|
||||
import qualified Data.Conduit as C
|
||||
import Lib.Prelude
|
||||
|
||||
hashSHA256 :: ByteString -> ByteString
hashSHA256 = digestToBase16 . hashWith SHA256

hashSHA256FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString
hashSHA256FromSource src = do
  digest <- C.connect src sinkSHA256Hash
  return $ digestToBase16 digest
  where
    -- To help with type inference
    sinkSHA256Hash :: Monad m => C.ConduitM ByteString Void m (Digest SHA256)
    sinkSHA256Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest SHA256)
    sinkSHA256Hash = sinkHash
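The digest is returned hex-encoded (Base16), i.e. 64 ASCII characters for SHA-256. For instance:

-- ghci> hashSHA256 ""
-- "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"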
|
||||
-- Returns MD5 hash hex encoded.
|
||||
hashMD5 :: ByteString -> ByteString
|
||||
hashMD5 = digestToBase16 . hashWith MD5
|
||||
|
||||
hashMD5FromSource :: Monad m => C.ConduitM () ByteString m () -> m ByteString
|
||||
hashMD5FromSource :: (Monad m) => C.ConduitM () ByteString m () -> m ByteString
|
||||
hashMD5FromSource src = do
|
||||
digest <- C.connect src sinkMD5Hash
|
||||
return $ digestToBase16 digest
|
||||
where
|
||||
-- To help with type inference
|
||||
sinkMD5Hash :: Monad m => C.ConduitM ByteString Void m (Digest MD5)
|
||||
sinkMD5Hash :: (Monad m) => C.ConduitM ByteString Void m (Digest MD5)
|
||||
sinkMD5Hash = sinkHash
|
||||
|
||||
hmacSHA256 :: ByteString -> ByteString -> HMAC SHA256
|
||||
@ -72,15 +71,15 @@ hmacSHA256 message key = hmac key message
|
||||
hmacSHA256RawBS :: ByteString -> ByteString -> ByteString
|
||||
hmacSHA256RawBS message key = convert $ hmacSHA256 message key
|
||||
|
||||
digestToBS :: ByteArrayAccess a => a -> ByteString
|
||||
digestToBS :: (ByteArrayAccess a) => a -> ByteString
|
||||
digestToBS = convert
|
||||
|
||||
digestToBase16 :: ByteArrayAccess a => a -> ByteString
|
||||
digestToBase16 :: (ByteArrayAccess a) => a -> ByteString
|
||||
digestToBase16 = convertToBase Base16
|
||||
|
||||
-- Returns MD5 hash base 64 encoded.
|
||||
hashMD5ToBase64 :: ByteArrayAccess a => a -> ByteString
|
||||
hashMD5ToBase64 :: (ByteArrayAccess a) => a -> ByteString
|
||||
hashMD5ToBase64 = convertToBase Base64 . hashWith MD5
|
||||
|
||||
encodeToBase64 :: ByteArrayAccess a => a -> ByteString
|
||||
encodeToBase64 :: (ByteArrayAccess a) => a -> ByteString
|
||||
encodeToBase64 = convertToBase Base64
|
||||
|
||||
@ -21,13 +21,19 @@ module Network.Minio.Data.Time
|
||||
awsDateFormatBS,
|
||||
awsParseTime,
|
||||
iso8601TimeFormat,
|
||||
UrlExpiry,
|
||||
)
|
||||
where
|
||||
|
||||
import Data.ByteString.Char8 (pack)
|
||||
import qualified Data.Time as Time
|
||||
import Data.Time.Format.ISO8601 (iso8601Show)
|
||||
import Lib.Prelude
|
||||
|
||||
-- | Time to expire for a presigned URL. It is interpreted as a number of
-- seconds. The maximum duration that can be specified is 7 days.
type UrlExpiry = Int

awsTimeFormat :: UTCTime -> [Char]
awsTimeFormat = Time.formatTime Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"

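For example (illustrative): a timestamp of 2023-01-15 09:30:42 UTC formats as follows.

-- awsTimeFormat t == "20230115T093042Z"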
@ -44,4 +50,4 @@ awsParseTime :: [Char] -> Maybe UTCTime
|
||||
awsParseTime = Time.parseTimeM False Time.defaultTimeLocale "%Y%m%dT%H%M%SZ"
|
||||
|
||||
iso8601TimeFormat :: UTCTime -> [Char]
|
||||
iso8601TimeFormat = Time.formatTime Time.defaultTimeLocale (Time.iso8601DateFormat $ Just "%T%QZ")
|
||||
iso8601TimeFormat = iso8601Show
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -14,10 +14,15 @@
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
module Network.Minio.Errors where
|
||||
module Network.Minio.Errors
|
||||
( MErrV (..),
|
||||
ServiceErr (..),
|
||||
MinioErr (..),
|
||||
toServiceErr,
|
||||
)
|
||||
where
|
||||
|
||||
import Control.Exception
|
||||
import Lib.Prelude
|
||||
import Control.Exception (IOException)
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
|
||||
---------------------------------
|
||||
@ -44,7 +49,8 @@ data MErrV
|
||||
| MErrVInvalidEncryptionKeyLength
|
||||
| MErrVStreamingBodyUnexpectedEOF
|
||||
| MErrVUnexpectedPayload
|
||||
deriving (Show, Eq)
|
||||
| MErrVSTSEndpointNotFound
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance Exception MErrV
|
||||
|
||||
@ -57,7 +63,7 @@ data ServiceErr
|
||||
| NoSuchKey
|
||||
| SelectErr Text Text
|
||||
| ServiceErr Text Text
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance Exception ServiceErr
|
||||
|
||||
@ -75,7 +81,7 @@ data MinioErr
|
||||
| MErrIO IOException
|
||||
| MErrService ServiceErr
|
||||
| MErrValidation MErrV
|
||||
deriving (Show)
|
||||
deriving stock (Show)
|
||||
|
||||
instance Eq MinioErr where
|
||||
MErrHTTP _ == MErrHTTP _ = True
|
||||
|
||||
@ -20,11 +20,11 @@ module Network.Minio.JsonParser
|
||||
where
|
||||
|
||||
import Data.Aeson
|
||||
( (.:),
|
||||
FromJSON,
|
||||
( FromJSON,
|
||||
eitherDecode,
|
||||
parseJSON,
|
||||
withObject,
|
||||
(.:),
|
||||
)
|
||||
import qualified Data.Text as T
|
||||
import Lib.Prelude
|
||||
@ -34,7 +34,7 @@ data AdminErrJSON = AdminErrJSON
|
||||
{ aeCode :: Text,
|
||||
aeMessage :: Text
|
||||
}
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
instance FromJSON AdminErrJSON where
|
||||
parseJSON = withObject "AdminErrJSON" $ \v ->
|
||||
|
||||
@ -19,16 +19,47 @@ module Network.Minio.ListOps where
|
||||
import qualified Data.Conduit as C
|
||||
import qualified Data.Conduit.Combinators as CC
|
||||
import qualified Data.Conduit.List as CL
|
||||
import Lib.Prelude
|
||||
import Network.Minio.Data
|
||||
( Bucket,
|
||||
ListObjectsResult
|
||||
( lorCPrefixes,
|
||||
lorHasMore,
|
||||
lorNextToken,
|
||||
lorObjects
|
||||
),
|
||||
ListObjectsV1Result
|
||||
( lorCPrefixes',
|
||||
lorHasMore',
|
||||
lorNextMarker,
|
||||
lorObjects'
|
||||
),
|
||||
ListPartsResult (lprHasMore, lprNextPart, lprParts),
|
||||
ListUploadsResult
|
||||
( lurHasMore,
|
||||
lurNextKey,
|
||||
lurNextUpload,
|
||||
lurUploads
|
||||
),
|
||||
Minio,
|
||||
Object,
|
||||
ObjectInfo,
|
||||
ObjectPartInfo (opiSize),
|
||||
UploadId,
|
||||
UploadInfo (UploadInfo),
|
||||
)
|
||||
import Network.Minio.S3API
|
||||
( listIncompleteParts',
|
||||
listIncompleteUploads',
|
||||
listObjects',
|
||||
listObjectsV1',
|
||||
)
|
||||
|
||||
-- | Represents a list output item - either an object or an object
-- prefix (i.e. a directory).
data ListItem
  = ListItemObject ObjectInfo
  | ListItemPrefix Text
  deriving (Show, Eq)
  deriving stock (Show, Eq)

-- | @'listObjects' bucket prefix recurse@ lists objects in a bucket
-- similar to a file system tree traversal.
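A sketch of consuming the stream from user code; the bucket name is made up, and runMinio/ConnectInfo are assumed to come from the library's top-level Network.Minio module:

printAllObjects :: ConnectInfo -> IO ()
printAllObjects ci = do
  res <-
    runMinio ci $
      C.runConduit $
        listObjects "my-bucket" Nothing True C..| CC.mapM_ (liftIO . print)
  -- print the error, if any; otherwise the items were already printed above
  either print (const $ pure ()) res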
@ -51,10 +82,10 @@ listObjects bucket prefix recurse = loop Nothing
|
||||
|
||||
res <- lift $ listObjects' bucket prefix nextToken delimiter Nothing
|
||||
CL.sourceList $ map ListItemObject $ lorObjects res
|
||||
unless recurse
|
||||
$ CL.sourceList
|
||||
$ map ListItemPrefix
|
||||
$ lorCPrefixes res
|
||||
unless recurse $
|
||||
CL.sourceList $
|
||||
map ListItemPrefix $
|
||||
lorCPrefixes res
|
||||
when (lorHasMore res) $
|
||||
loop (lorNextToken res)
|
||||
|
||||
@ -73,10 +104,10 @@ listObjectsV1 bucket prefix recurse = loop Nothing
|
||||
|
||||
res <- lift $ listObjectsV1' bucket prefix nextMarker delimiter Nothing
|
||||
CL.sourceList $ map ListItemObject $ lorObjects' res
|
||||
unless recurse
|
||||
$ CL.sourceList
|
||||
$ map ListItemPrefix
|
||||
$ lorCPrefixes' res
|
||||
unless recurse $
|
||||
CL.sourceList $
|
||||
map ListItemPrefix $
|
||||
lorCPrefixes' res
|
||||
when (lorHasMore' res) $
|
||||
loop (lorNextMarker res)
|
||||
|
||||
@ -104,19 +135,23 @@ listIncompleteUploads bucket prefix recurse = loop Nothing Nothing
|
||||
nextUploadIdMarker
|
||||
Nothing
|
||||
|
||||
aggrSizes <- lift $ forM (lurUploads res) $ \(uKey, uId, _) -> do
|
||||
partInfos <-
|
||||
C.runConduit $
|
||||
listIncompleteParts bucket uKey uId
|
||||
C..| CC.sinkList
|
||||
return $ foldl (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
|
||||
aggrSizes <- lift $
|
||||
forM (lurUploads res) $ \(uKey, uId, _) -> do
|
||||
partInfos <-
|
||||
C.runConduit $
|
||||
listIncompleteParts bucket uKey uId
|
||||
C..| CC.sinkList
|
||||
return $ foldl' (\sizeSofar p -> opiSize p + sizeSofar) 0 partInfos
|
||||
|
||||
CL.sourceList
|
||||
$ map
|
||||
( \((uKey, uId, uInitTime), size) ->
|
||||
UploadInfo uKey uId uInitTime size
|
||||
CL.sourceList $
|
||||
zipWith
|
||||
( curry
|
||||
( \((uKey, uId, uInitTime), size) ->
|
||||
UploadInfo uKey uId uInitTime size
|
||||
)
|
||||
)
|
||||
$ zip (lurUploads res) aggrSizes
|
||||
(lurUploads res)
|
||||
aggrSizes
|
||||
|
||||
when (lurHasMore res) $
|
||||
loop (lurNextKey res) (lurNextUpload res)
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -13,6 +13,7 @@
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Network.Minio.PresignedOperations
|
||||
( UrlExpiry,
|
||||
@ -43,13 +44,21 @@ import qualified Data.Text as T
|
||||
import qualified Data.Text.Encoding as TE
|
||||
import qualified Data.Time as Time
|
||||
import Lib.Prelude
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
import qualified Network.HTTP.Client as NClient
|
||||
import qualified Network.HTTP.Types as HT
|
||||
import Network.HTTP.Types.Header (hHost)
|
||||
import Network.Minio.API (buildRequest)
|
||||
import Network.Minio.Credentials
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Data.Time
|
||||
import Network.Minio.Errors
|
||||
import Network.Minio.Sign.V4
|
||||
import Network.URI (uriToString)
|
||||
|
||||
{- ORMOLU_DISABLE -}
|
||||
#if MIN_VERSION_aeson(2,0,0)
|
||||
import qualified Data.Aeson.Key as A
|
||||
#endif
|
||||
{- ORMOLU_ENABLE -}
|
||||
|
||||
-- | Generate a presigned URL. This function allows for advanced usage
|
||||
-- - for simple cases prefer the `presigned*Url` functions.
|
||||
@ -69,46 +78,26 @@ makePresignedUrl ::
|
||||
HT.RequestHeaders ->
|
||||
Minio ByteString
|
||||
makePresignedUrl expiry method bucket object region extraQuery extraHeaders = do
|
||||
when (expiry > 7 * 24 * 3600 || expiry < 0)
|
||||
$ throwIO
|
||||
$ MErrVInvalidUrlExpiry expiry
|
||||
when (expiry > 7 * 24 * 3600 || expiry < 0) $
|
||||
throwIO $
|
||||
MErrVInvalidUrlExpiry expiry
|
||||
|
||||
ci <- asks mcConnInfo
|
||||
|
||||
let hostHeader = (hHost, getHostAddr ci)
|
||||
req =
|
||||
NC.defaultRequest
|
||||
{ NC.method = method,
|
||||
NC.secure = connectIsSecure ci,
|
||||
NC.host = encodeUtf8 $ connectHost ci,
|
||||
NC.port = connectPort ci,
|
||||
NC.path = getS3Path bucket object,
|
||||
NC.requestHeaders = hostHeader : extraHeaders,
|
||||
NC.queryString = HT.renderQuery True extraQuery
|
||||
let s3ri =
|
||||
defaultS3ReqInfo
|
||||
{ riPresignExpirySecs = Just expiry,
|
||||
riMethod = method,
|
||||
riBucket = bucket,
|
||||
riObject = object,
|
||||
riRegion = region,
|
||||
riQueryParams = extraQuery,
|
||||
riHeaders = extraHeaders
|
||||
}
|
||||
ts <- liftIO Time.getCurrentTime
|
||||
|
||||
let sp =
|
||||
SignParams
|
||||
(connectAccessKey ci)
|
||||
(connectSecretKey ci)
|
||||
ts
|
||||
region
|
||||
(Just expiry)
|
||||
Nothing
|
||||
signPairs = signV4 sp req
|
||||
qpToAdd = (fmap . fmap) Just signPairs
|
||||
queryStr =
|
||||
HT.renderQueryBuilder
|
||||
True
|
||||
((HT.parseQuery $ NC.queryString req) ++ qpToAdd)
|
||||
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
|
||||
  req <- buildRequest s3ri
  let uri = NClient.getUri req
      uriString = uriToString identity uri ""

  return $ toStrictBS $ toLazyByteString $
    scheme
      <> byteString (getHostAddr ci)
      <> byteString (getS3Path bucket object)
      <> queryStr
  return $ encodeUtf8 uriString

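With the rewrite above, generating a URL reduces to one call inside the Minio monad. A hedged sketch (bucket and object names are made up; the argument order follows the equation for makePresignedUrl shown above, with the elided earlier parameters being expiry, method, bucket, object and region):

getUrlFor24h :: Minio ByteString
getUrlFor24h =
  makePresignedUrl (24 * 3600) HT.methodGet (Just "my-bucket") (Just "photos/cat.jpg") Nothing [] []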
-- | Generate a URL with authentication signature to PUT (upload) an
|
||||
-- object. Any extra headers if passed, are signed, and so they are
|
||||
@ -190,29 +179,39 @@ data PostPolicyCondition
|
||||
= PPCStartsWith Text Text
|
||||
| PPCEquals Text Text
|
||||
| PPCRange Text Int64 Int64
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
{- ORMOLU_DISABLE -}
|
||||
instance Json.ToJSON PostPolicyCondition where
|
||||
toJSON (PPCStartsWith k v) = Json.toJSON ["starts-with", k, v]
|
||||
#if MIN_VERSION_aeson(2,0,0)
|
||||
toJSON (PPCEquals k v) = Json.object [(A.fromText k) .= v]
|
||||
#else
|
||||
toJSON (PPCEquals k v) = Json.object [k .= v]
|
||||
#endif
|
||||
toJSON (PPCRange k minVal maxVal) =
|
||||
Json.toJSON [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
|
||||
|
||||
toEncoding (PPCStartsWith k v) = Json.foldable ["starts-with", k, v]
|
||||
#if MIN_VERSION_aeson(2,0,0)
|
||||
toEncoding (PPCEquals k v) = Json.pairs ((A.fromText k) .= v)
|
||||
#else
|
||||
toEncoding (PPCEquals k v) = Json.pairs (k .= v)
|
||||
#endif
|
||||
toEncoding (PPCRange k minVal maxVal) =
|
||||
Json.foldable [Json.toJSON k, Json.toJSON minVal, Json.toJSON maxVal]
|
||||
{- ORMOLU_ENABLE -}
|
||||
|
||||
-- | A PostPolicy is required to perform uploads via browser forms.
|
||||
data PostPolicy = PostPolicy
|
||||
{ expiration :: UTCTime,
|
||||
conditions :: [PostPolicyCondition]
|
||||
}
|
||||
deriving (Show, Eq)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
instance Json.ToJSON PostPolicy where
|
||||
toJSON (PostPolicy e c) =
|
||||
Json.object $
|
||||
Json.object
|
||||
[ "expiration" .= iso8601TimeFormat e,
|
||||
"conditions" .= c
|
||||
]
|
||||
@ -225,7 +224,7 @@ data PostPolicyError
|
||||
| PPEBucketNotSpecified
|
||||
| PPEConditionKeyEmpty
|
||||
| PPERangeInvalid
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Show, Eq)
|
||||
|
||||
-- | Set the bucket name that the upload should use.
|
||||
ppCondBucket :: Bucket -> PostPolicyCondition
|
||||
@ -266,19 +265,19 @@ newPostPolicy ::
|
||||
newPostPolicy expirationTime conds
|
||||
-- object name condition must be present
|
||||
| not $ any (keyEquals "key") conds =
|
||||
Left PPEKeyNotSpecified
|
||||
Left PPEKeyNotSpecified
|
||||
-- bucket name condition must be present
|
||||
| not $ any (keyEquals "bucket") conds =
|
||||
Left PPEBucketNotSpecified
|
||||
Left PPEBucketNotSpecified
|
||||
-- a condition with an empty key is invalid
|
||||
| any (keyEquals "") conds || any isEmptyRangeKey conds =
|
||||
Left PPEConditionKeyEmpty
|
||||
Left PPEConditionKeyEmpty
|
||||
-- invalid range check
|
||||
| any isInvalidRange conds =
|
||||
Left PPERangeInvalid
|
||||
Left PPERangeInvalid
|
||||
-- all good!
|
||||
| otherwise =
|
||||
return $ PostPolicy expirationTime conds
|
||||
return $ PostPolicy expirationTime conds
|
||||
where
|
||||
keyEquals k' (PPCStartsWith k _) = k == k'
|
||||
keyEquals k' (PPCEquals k _) = k == k'
|
||||
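For reference, a policy that passes all of the checks above needs at least the "key" and "bucket" conditions. A sketch, assuming the elided signature is UTCTime -> [PostPolicyCondition] -> Either PostPolicyError PostPolicy and with made-up names:

examplePolicy :: UTCTime -> Either PostPolicyError PostPolicy
examplePolicy expiresAt =
  newPostPolicy
    expiresAt
    [ ppCondBucket "my-bucket",
      PPCEquals "key" "uploads/photo.jpg"
    ]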
@ -300,50 +299,58 @@ presignedPostPolicy ::
|
||||
Minio (ByteString, H.HashMap Text ByteString)
|
||||
presignedPostPolicy p = do
|
||||
ci <- asks mcConnInfo
|
||||
signTime <- liftIO $ Time.getCurrentTime
|
||||
signTime <- liftIO Time.getCurrentTime
|
||||
mgr <- asks mcConnManager
|
||||
cv <- liftIO $ getCredential (connectCreds ci) (getEndpoint ci) mgr
|
||||
|
||||
let extraConditions =
|
||||
[ PPCEquals "x-amz-date" (toS $ awsTimeFormat signTime),
|
||||
let extraConditions signParams =
|
||||
[ PPCEquals "x-amz-date" (toText $ awsTimeFormat signTime),
|
||||
PPCEquals "x-amz-algorithm" "AWS4-HMAC-SHA256",
|
||||
PPCEquals
|
||||
"x-amz-credential"
|
||||
( T.intercalate
|
||||
"/"
|
||||
[ connectAccessKey ci,
|
||||
decodeUtf8 $ mkScope signTime region
|
||||
[ coerce $ cvAccessKey cv,
|
||||
decodeUtf8 $ credentialScope signParams
|
||||
]
|
||||
)
|
||||
]
|
||||
ppWithCreds =
|
||||
ppWithCreds signParams =
|
||||
p
|
||||
{ conditions = conditions p ++ extraConditions
|
||||
{ conditions = conditions p ++ extraConditions signParams
|
||||
}
|
||||
sp =
|
||||
SignParams
|
||||
(connectAccessKey ci)
|
||||
(connectSecretKey ci)
|
||||
(coerce $ cvAccessKey cv)
|
||||
(coerce $ cvSecretKey cv)
|
||||
(coerce $ cvSessionToken cv)
|
||||
ServiceS3
|
||||
signTime
|
||||
(Just $ connectRegion ci)
|
||||
Nothing
|
||||
Nothing
|
||||
signData = signV4PostPolicy (showPostPolicy ppWithCreds) sp
|
||||
signData = signV4PostPolicy (showPostPolicy $ ppWithCreds sp) sp
|
||||
-- compute form-data
|
||||
mkPair (PPCStartsWith k v) = Just (k, v)
|
||||
mkPair (PPCEquals k v) = Just (k, v)
|
||||
mkPair _ = Nothing
|
||||
formFromPolicy =
|
||||
H.map TE.encodeUtf8 $ H.fromList $ catMaybes $
|
||||
mkPair <$> conditions ppWithCreds
|
||||
H.map TE.encodeUtf8 $
|
||||
H.fromList $
|
||||
mapMaybe
|
||||
mkPair
|
||||
(conditions $ ppWithCreds sp)
|
||||
formData = formFromPolicy `H.union` signData
|
||||
-- compute POST upload URL
|
||||
bucket = H.lookupDefault "" "bucket" formData
|
||||
scheme = byteString $ bool "http://" "https://" $ connectIsSecure ci
|
||||
region = connectRegion ci
|
||||
url =
|
||||
toStrictBS $ toLazyByteString $
|
||||
scheme <> byteString (getHostAddr ci)
|
||||
<> byteString "/"
|
||||
<> byteString bucket
|
||||
<> byteString "/"
|
||||
toStrictBS $
|
||||
toLazyByteString $
|
||||
scheme
|
||||
<> byteString (getHostAddr ci)
|
||||
<> byteString "/"
|
||||
<> byteString bucket
|
||||
<> byteString "/"
|
||||
|
||||
return (url, formData)
|
||||
|
||||
@ -71,13 +71,13 @@ putObjectInternal b o opts (ODStream src sizeMay) = do
|
||||
Just size ->
|
||||
if
|
||||
| size <= 64 * oneMiB -> do
|
||||
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
|
||||
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
|
||||
bs <- C.runConduit $ src C..| takeC (fromIntegral size) C..| CB.sinkLbs
|
||||
putObjectSingle' b o (pooToHeaders opts) $ LBS.toStrict bs
|
||||
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
|
||||
| otherwise -> sequentialMultipartUpload b o opts (Just size) src
|
||||
putObjectInternal b o opts (ODFile fp sizeMay) = do
|
||||
hResE <- withNewHandle fp $ \h ->
|
||||
liftM2 (,) (isHandleSeekable h) (getFileSize h)
|
||||
liftA2 (,) (isHandleSeekable h) (getFileSize h)
|
||||
|
||||
(isSeekable, handleSizeMay) <-
|
||||
either
|
||||
@ -95,13 +95,13 @@ putObjectInternal b o opts (ODFile fp sizeMay) = do
|
||||
Just size ->
|
||||
if
|
||||
| size <= 64 * oneMiB ->
|
||||
either throwIO return
|
||||
=<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
|
||||
either throwIO return
|
||||
=<< withNewHandle fp (\h -> putObjectSingle b o (pooToHeaders opts) h 0 size)
|
||||
| size > maxObjectSize -> throwIO $ MErrVPutSizeExceeded size
|
||||
| isSeekable -> parallelMultipartUpload b o opts fp size
|
||||
| otherwise ->
|
||||
sequentialMultipartUpload b o opts (Just size) $
|
||||
CB.sourceFile fp
|
||||
sequentialMultipartUpload b o opts (Just size) $
|
||||
CB.sourceFile fp
|
||||
|
||||
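To summarize the dispatch above: payloads of at most 64 MiB go through a single PUT, anything larger than maxObjectSize is rejected, seekable files take the parallel multipart path, and everything else streams through the sequential multipart upload. A hedged usage sketch of the public entry point that ends up here (fPutObject and defaultPutObjectOptions are assumptions about the library's top-level API; adjust to your version):

backupUpload :: Minio ()
backupUpload = fPutObject "my-bucket" "backups/db.dump" "/tmp/db.dump" defaultPutObjectOptions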
parallelMultipartUpload ::
|
||||
Bucket ->
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -14,15 +14,25 @@
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
-- |
|
||||
-- Module: Network.Minio.S3API
|
||||
-- Copyright: (c) 2017-2023 MinIO Dev Team
|
||||
-- License: Apache 2.0
|
||||
-- Maintainer: MinIO Dev Team <dev@min.io>
|
||||
--
|
||||
-- Lower-level API for S3 compatible object stores. Start with @Network.Minio@
|
||||
-- and use this only if needed.
|
||||
module Network.Minio.S3API
|
||||
( Region,
|
||||
getLocation,
|
||||
|
||||
-- * Listing buckets
|
||||
|
||||
--------------------
|
||||
getService,
|
||||
|
||||
-- * Listing objects
|
||||
|
||||
--------------------
|
||||
ListObjectsResult (..),
|
||||
ListObjectsV1Result (..),
|
||||
@ -33,11 +43,13 @@ module Network.Minio.S3API
|
||||
headBucket,
|
||||
|
||||
-- * Retrieving objects
|
||||
|
||||
-----------------------
|
||||
getObject',
|
||||
headObject,
|
||||
|
||||
-- * Creating buckets and objects
|
||||
|
||||
---------------------------------
|
||||
putBucket,
|
||||
ETag,
|
||||
@ -47,6 +59,7 @@ module Network.Minio.S3API
|
||||
copyObjectSingle,
|
||||
|
||||
-- * Multipart Upload APIs
|
||||
|
||||
--------------------------
|
||||
UploadId,
|
||||
PartTuple,
|
||||
@ -63,11 +76,13 @@ module Network.Minio.S3API
|
||||
listIncompleteParts',
|
||||
|
||||
-- * Deletion APIs
|
||||
|
||||
--------------------------
|
||||
deleteBucket,
|
||||
deleteObject,
|
||||
|
||||
-- * Presigned Operations
|
||||
|
||||
-----------------------------
|
||||
module Network.Minio.PresignedOperations,
|
||||
|
||||
@ -76,6 +91,7 @@ module Network.Minio.S3API
|
||||
setBucketPolicy,
|
||||
|
||||
-- * Bucket Notifications
|
||||
|
||||
-------------------------
|
||||
Notification (..),
|
||||
NotificationConfig (..),
|
||||
@ -124,7 +140,8 @@ parseGetObjectHeaders object headers =
|
||||
let metadataPairs = getMetadata headers
|
||||
userMetadata = getUserMetadataMap metadataPairs
|
||||
metadata = getNonUserMetadataMap metadataPairs
|
||||
in ObjectInfo <$> Just object
|
||||
in ObjectInfo
|
||||
<$> Just object
|
||||
<*> getLastModifiedHeader headers
|
||||
<*> getETagHeader headers
|
||||
<*> getContentLength headers
|
||||
@ -158,24 +175,26 @@ getObject' bucket object queryParams headers = do
|
||||
{ riBucket = Just bucket,
|
||||
riObject = Just object,
|
||||
riQueryParams = queryParams,
|
||||
riHeaders = headers
|
||||
-- This header is required for safety as otherwise http-client,
|
||||
-- sends Accept-Encoding: gzip, and the server may actually gzip
|
||||
-- body. In that case Content-Length header will be missing.
|
||||
<> [("Accept-Encoding", "identity")]
|
||||
riHeaders =
|
||||
headers
|
||||
-- This header is required for safety as otherwise http-client,
|
||||
-- sends Accept-Encoding: gzip, and the server may actually gzip
|
||||
-- body. In that case Content-Length header will be missing.
|
||||
<> [("Accept-Encoding", "identity")]
|
||||
}
|
||||
|
||||
-- | Creates a bucket via a PUT bucket call.
|
||||
putBucket :: Bucket -> Region -> Minio ()
|
||||
putBucket bucket location = do
|
||||
ns <- asks getSvcNamespace
|
||||
void $ executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodPut,
|
||||
riBucket = Just bucket,
|
||||
riPayload = PayloadBS $ mkCreateBucketConfig ns location,
|
||||
riNeedsLocation = False
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodPut,
|
||||
riBucket = Just bucket,
|
||||
riPayload = PayloadBS $ mkCreateBucketConfig ns location,
|
||||
riNeedsLocation = False
|
||||
}
|
||||
|
||||
-- | Single PUT object size.
|
||||
maxSinglePutObjectSizeBytes :: Int64
|
||||
@ -189,9 +208,9 @@ putObjectSingle' :: Bucket -> Object -> [HT.Header] -> ByteString -> Minio ETag
|
||||
putObjectSingle' bucket object headers bs = do
|
||||
let size = fromIntegral (BS.length bs)
|
||||
-- check length is within single PUT object size.
|
||||
when (size > maxSinglePutObjectSizeBytes)
|
||||
$ throwIO
|
||||
$ MErrVSinglePUTSizeExceeded size
|
||||
when (size > maxSinglePutObjectSizeBytes) $
|
||||
throwIO $
|
||||
MErrVSinglePUTSizeExceeded size
|
||||
|
||||
let payload = mkStreamingPayload $ PayloadBS bs
|
||||
resp <-
|
||||
@ -223,9 +242,9 @@ putObjectSingle ::
|
||||
Minio ETag
|
||||
putObjectSingle bucket object headers h offset size = do
|
||||
-- check length is within single PUT object size.
|
||||
when (size > maxSinglePutObjectSizeBytes)
|
||||
$ throwIO
|
||||
$ MErrVSinglePUTSizeExceeded size
|
||||
when (size > maxSinglePutObjectSizeBytes) $
|
||||
throwIO $
|
||||
MErrVSinglePUTSizeExceeded size
|
||||
|
||||
-- content-length header is automatically set by library.
|
||||
let payload = mkStreamingPayload $ PayloadH h offset size
|
||||
@ -302,23 +321,23 @@ listObjects' bucket prefix nextToken delimiter maxKeys = do
|
||||
-- | DELETE a bucket from the service.
|
||||
deleteBucket :: Bucket -> Minio ()
|
||||
deleteBucket bucket =
|
||||
void
|
||||
$ executeRequest
|
||||
$ defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket
|
||||
}
|
||||
|
||||
-- | DELETE an object from the service.
|
||||
deleteObject :: Bucket -> Object -> Minio ()
|
||||
deleteObject bucket object =
|
||||
void
|
||||
$ executeRequest
|
||||
$ defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket,
|
||||
riObject = Just object
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket,
|
||||
riObject = Just object
|
||||
}
|
||||
|
||||
-- | Create a new multipart upload.
|
||||
newMultipartUpload :: Bucket -> Object -> [HT.Header] -> Minio UploadId
|
||||
@ -397,8 +416,7 @@ srcInfoToHeaders srcInfo =
|
||||
fmap formatRFC1123 . srcIfModifiedSince
|
||||
]
|
||||
rangeHdr =
|
||||
maybe [] (\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) $
|
||||
toByteRange <$> srcRange srcInfo
|
||||
maybe [] ((\a -> [("x-amz-copy-source-range", HT.renderByteRanges [a])]) . toByteRange) (srcRange srcInfo)
|
||||
toByteRange :: (Int64, Int64) -> HT.ByteRange
|
||||
toByteRange (x, y) = HT.ByteRangeFromTo (fromIntegral x) (fromIntegral y)
|
||||
|
||||
@ -478,14 +496,14 @@ completeMultipartUpload bucket object uploadId partTuple = do
|
||||
-- | Abort a multipart upload.
|
||||
abortMultipartUpload :: Bucket -> Object -> UploadId -> Minio ()
|
||||
abortMultipartUpload bucket object uploadId =
|
||||
void
|
||||
$ executeRequest
|
||||
$ defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket,
|
||||
riObject = Just object,
|
||||
riQueryParams = mkOptionalParams params
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket,
|
||||
riObject = Just object,
|
||||
riQueryParams = mkOptionalParams params
|
||||
}
|
||||
where
|
||||
params = [("uploadId", Just uploadId)]
|
||||
|
||||
@ -554,15 +572,16 @@ headObject bucket object reqHeaders = do
|
||||
{ riMethod = HT.methodHead,
|
||||
riBucket = Just bucket,
|
||||
riObject = Just object,
|
||||
riHeaders = reqHeaders
|
||||
-- This header is required for safety as otherwise http-client,
|
||||
-- sends Accept-Encoding: gzip, and the server may actually gzip
|
||||
-- body. In that case Content-Length header will be missing.
|
||||
<> [("Accept-Encoding", "identity")]
|
||||
riHeaders =
|
||||
reqHeaders
|
||||
-- This header is required for safety as otherwise http-client,
|
||||
-- sends Accept-Encoding: gzip, and the server may actually gzip
|
||||
-- body. In that case Content-Length header will be missing.
|
||||
<> [("Accept-Encoding", "identity")]
|
||||
}
|
||||
maybe (throwIO MErrVInvalidObjectInfoResponse) return
|
||||
$ parseGetObjectHeaders object
|
||||
$ NC.responseHeaders resp
|
||||
maybe (throwIO MErrVInvalidObjectInfoResponse) return $
|
||||
parseGetObjectHeaders object $
|
||||
NC.responseHeaders resp
|
||||
|
||||
-- | Query the object store if a given bucket exists.
|
||||
headBucket :: Bucket -> Minio Bool
|
||||
@ -595,15 +614,16 @@ headBucket bucket =
|
||||
putBucketNotification :: Bucket -> Notification -> Minio ()
|
||||
putBucketNotification bucket ncfg = do
|
||||
ns <- asks getSvcNamespace
|
||||
void $ executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodPut,
|
||||
riBucket = Just bucket,
|
||||
riQueryParams = [("notification", Nothing)],
|
||||
riPayload =
|
||||
PayloadBS $
|
||||
mkPutNotificationRequest ns ncfg
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodPut,
|
||||
riBucket = Just bucket,
|
||||
riQueryParams = [("notification", Nothing)],
|
||||
riPayload =
|
||||
PayloadBS $
|
||||
mkPutNotificationRequest ns ncfg
|
||||
}
|
||||
|
||||
-- | Retrieve the notification configuration on a bucket.
|
||||
getBucketNotification :: Bucket -> Minio Notification
|
||||
@ -645,20 +665,22 @@ setBucketPolicy bucket policy = do
|
||||
-- | Save a new policy on a bucket.
|
||||
putBucketPolicy :: Bucket -> Text -> Minio ()
|
||||
putBucketPolicy bucket policy = do
|
||||
void $ executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodPut,
|
||||
riBucket = Just bucket,
|
||||
riQueryParams = [("policy", Nothing)],
|
||||
riPayload = PayloadBS $ encodeUtf8 policy
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodPut,
|
||||
riBucket = Just bucket,
|
||||
riQueryParams = [("policy", Nothing)],
|
||||
riPayload = PayloadBS $ encodeUtf8 policy
|
||||
}
|
||||
|
||||
-- | Delete any policy set on a bucket.
|
||||
deleteBucketPolicy :: Bucket -> Minio ()
|
||||
deleteBucketPolicy bucket = do
|
||||
void $ executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket,
|
||||
riQueryParams = [("policy", Nothing)]
|
||||
}
|
||||
void $
|
||||
executeRequest $
|
||||
defaultS3ReqInfo
|
||||
{ riMethod = HT.methodDelete,
|
||||
riBucket = Just bucket,
|
||||
riQueryParams = [("policy", Nothing)]
|
||||
}
|
||||
|
||||
@ -111,7 +111,7 @@ data EventStreamException
|
||||
| ESEInvalidHeaderType
|
||||
| ESEInvalidHeaderValueType
|
||||
| ESEInvalidMessageType
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
instance Exception EventStreamException
|
||||
|
||||
@ -119,7 +119,7 @@ instance Exception EventStreamException
|
||||
chunkSize :: Int
|
||||
chunkSize = 32 * 1024
|
||||
|
||||
parseBinary :: Bin.Binary a => ByteString -> IO a
|
||||
parseBinary :: (Bin.Binary a) => ByteString -> IO a
|
||||
parseBinary b = do
|
||||
case Bin.decodeOrFail $ LB.fromStrict b of
|
||||
Left (_, _, msg) -> throwIO $ ESEDecodeFail msg
|
||||
@ -135,7 +135,7 @@ bytesToHeaderName t = case t of
|
||||
_ -> throwIO ESEInvalidHeaderType
|
||||
|
||||
parseHeaders ::
|
||||
MonadUnliftIO m =>
|
||||
(MonadUnliftIO m) =>
|
||||
Word32 ->
|
||||
C.ConduitM ByteString a m [MessageHeader]
|
||||
parseHeaders 0 = return []
|
||||
@ -163,7 +163,7 @@ parseHeaders hdrLen = do
|
||||
|
||||
-- readNBytes returns N bytes read from the string and throws an
|
||||
-- exception if N bytes are not present on the stream.
|
||||
readNBytes :: MonadUnliftIO m => Int -> C.ConduitM ByteString a m ByteString
|
||||
readNBytes :: (MonadUnliftIO m) => Int -> C.ConduitM ByteString a m ByteString
|
||||
readNBytes n = do
|
||||
b <- LB.toStrict <$> (C.takeCE n .| C.sinkLazy)
|
||||
if B.length b /= n
|
||||
@ -171,7 +171,7 @@ readNBytes n = do
|
||||
else return b
|
||||
|
||||
crcCheck ::
|
||||
MonadUnliftIO m =>
|
||||
(MonadUnliftIO m) =>
|
||||
C.ConduitM ByteString ByteString m ()
|
||||
crcCheck = do
|
||||
b <- readNBytes 12
|
||||
@ -186,7 +186,7 @@ crcCheck = do
|
||||
-- 12 bytes have been read off the current message. Now read the
|
||||
-- next (n-12)-4 bytes and accumulate the checksum, and yield it.
|
||||
let startCrc = crc32 b
|
||||
finalCrc <- accumulateYield (fromIntegral n -16) startCrc
|
||||
finalCrc <- accumulateYield (fromIntegral n - 16) startCrc
|
||||
|
||||
bs <- readNBytes 4
|
||||
expectedCrc :: Word32 <- liftIO $ parseBinary bs
|
||||
@ -208,7 +208,7 @@ crcCheck = do
|
||||
then accumulateYield n' c'
|
||||
else return c'
|
||||
|
||||
handleMessage :: MonadUnliftIO m => C.ConduitT ByteString EventMessage m ()
handleMessage :: (MonadUnliftIO m) => C.ConduitT ByteString EventMessage m ()
handleMessage = do
  b1 <- readNBytes 4
  msgLen :: Word32 <- liftIO $ parseBinary b1
@ -219,7 +219,7 @@ handleMessage = do
  hs <- parseHeaders hdrLen

  let payloadLen = msgLen - hdrLen - 16
      getHdrVal h = fmap snd . headMay . filter ((h ==) . fst)
      getHdrVal h = fmap snd . find ((h ==) . fst)
      eventHdrValue = getHdrVal EventType hs
      msgHdrValue = getHdrVal MessageType hs
      errCode = getHdrVal ErrorCode hs
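The constant 16 in payloadLen reflects the event-stream framing used by S3 Select: a 12-byte prelude (4-byte total length, 4-byte header-section length, 4-byte prelude CRC) plus the trailing 4-byte message CRC. A worked example (numbers are illustrative):

-- msgLen = 120, hdrLen = 60  ==>  payloadLen = 120 - 60 - 16 = 44 bytes
-- (crcCheck above reads the 12-byte prelude, then n - 16 more bytes, then the 4-byte message CRC)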
@ -254,7 +254,7 @@ handleMessage = do
|
||||
passThrough $ n - B.length b
|
||||
|
||||
selectProtoConduit ::
|
||||
MonadUnliftIO m =>
|
||||
(MonadUnliftIO m) =>
|
||||
C.ConduitT ByteString EventMessage m ()
|
||||
selectProtoConduit = crcCheck .| handleMessage
|
||||
|
||||
@ -276,12 +276,12 @@ selectObjectContent b o r = do
|
||||
riNeedsLocation = False,
|
||||
riQueryParams = [("select", Nothing), ("select-type", Just "2")]
|
||||
}
|
||||
--print $ mkSelectRequest r
|
||||
-- print $ mkSelectRequest r
|
||||
resp <- mkStreamRequest reqInfo
|
||||
return $ NC.responseBody resp .| selectProtoConduit
|
||||
|
||||
-- | A helper conduit that returns only the record payload bytes.
|
||||
getPayloadBytes :: MonadIO m => C.ConduitT EventMessage ByteString m ()
|
||||
getPayloadBytes :: (MonadIO m) => C.ConduitT EventMessage ByteString m ()
|
||||
getPayloadBytes = do
|
||||
evM <- C.await
|
||||
case evM of
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -15,9 +15,19 @@
|
||||
--
|
||||
{-# LANGUAGE BangPatterns #-}
|
||||
|
||||
module Network.Minio.Sign.V4 where
|
||||
module Network.Minio.Sign.V4
|
||||
( SignParams (..),
|
||||
signV4QueryParams,
|
||||
signV4,
|
||||
signV4PostPolicy,
|
||||
signV4Stream,
|
||||
Service (..),
|
||||
credentialScope,
|
||||
)
|
||||
where
|
||||
|
||||
import qualified Conduit as C
|
||||
import qualified Data.ByteArray as BA
|
||||
import qualified Data.ByteString as B
|
||||
import qualified Data.ByteString.Base64 as Base64
|
||||
import qualified Data.ByteString.Char8 as B8
|
||||
@ -26,11 +36,14 @@ import Data.CaseInsensitive (mk)
|
||||
import qualified Data.CaseInsensitive as CI
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.HashSet as Set
|
||||
import Data.List (partition)
|
||||
import qualified Data.List.NonEmpty as NE
|
||||
import qualified Data.Time as Time
|
||||
import Lib.Prelude
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
import Network.HTTP.Types (Header, parseQuery)
|
||||
import Network.HTTP.Types (Header, SimpleQuery, hContentEncoding, parseQuery)
|
||||
import qualified Network.HTTP.Types as H
|
||||
import Network.HTTP.Types.Header (RequestHeaders)
|
||||
import Network.Minio.Data.ByteString
|
||||
import Network.Minio.Data.Crypto
|
||||
import Network.Minio.Data.Time
|
||||
@ -51,43 +64,24 @@ ignoredHeaders =
|
||||
H.hUserAgent
|
||||
]
|
||||
|
||||
data SignV4Data = SignV4Data
|
||||
{ sv4SignTime :: UTCTime,
|
||||
sv4Scope :: ByteString,
|
||||
sv4CanonicalRequest :: ByteString,
|
||||
sv4HeadersToSign :: [(ByteString, ByteString)],
|
||||
sv4Output :: [(ByteString, ByteString)],
|
||||
sv4StringToSign :: ByteString,
|
||||
sv4SigningKey :: ByteString
|
||||
}
|
||||
deriving (Show)
|
||||
data Service = ServiceS3 | ServiceSTS
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
toByteString :: Service -> ByteString
|
||||
toByteString ServiceS3 = "s3"
|
||||
toByteString ServiceSTS = "sts"
|
||||
|
||||
data SignParams = SignParams
|
||||
{ spAccessKey :: Text,
|
||||
spSecretKey :: Text,
|
||||
spSecretKey :: BA.ScrubbedBytes,
|
||||
spSessionToken :: Maybe BA.ScrubbedBytes,
|
||||
spService :: Service,
|
||||
spTimeStamp :: UTCTime,
|
||||
spRegion :: Maybe Text,
|
||||
spExpirySecs :: Maybe Int,
|
||||
spExpirySecs :: Maybe UrlExpiry,
|
||||
spPayloadHash :: Maybe ByteString
|
||||
}
|
||||
deriving (Show)
|
||||
|
||||
debugPrintSignV4Data :: SignV4Data -> IO ()
|
||||
debugPrintSignV4Data (SignV4Data t s cr h2s o sts sk) = do
|
||||
B8.putStrLn "SignV4Data:"
|
||||
B8.putStr "Timestamp: " >> print t
|
||||
B8.putStr "Scope: " >> B8.putStrLn s
|
||||
B8.putStrLn "Canonical Request:"
|
||||
B8.putStrLn cr
|
||||
B8.putStr "Headers to Sign: " >> print h2s
|
||||
B8.putStr "Output: " >> print o
|
||||
B8.putStr "StringToSign: " >> B8.putStrLn sts
|
||||
B8.putStr "SigningKey: " >> printBytes sk
|
||||
B8.putStrLn "END of SignV4Data ========="
|
||||
where
|
||||
printBytes b = do
|
||||
mapM_ (\x -> B.putStr $ B.singleton x <> " ") $ B.unpack b
|
||||
B8.putStrLn ""
|
||||
deriving stock (Show)
|
||||
|
||||
mkAuthHeader :: Text -> ByteString -> ByteString -> ByteString -> H.Header
|
||||
mkAuthHeader accessKey scope signedHeaderKeys sign =
|
||||
@ -104,6 +98,12 @@ mkAuthHeader accessKey scope signedHeaderKeys sign =
|
||||
]
|
||||
in (H.hAuthorization, authValue)
|
||||
|
||||
data IsStreaming = IsStreamingLength Int64 | NotStreaming
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
amzSecurityToken :: ByteString
|
||||
amzSecurityToken = "X-Amz-Security-Token"
|
||||
|
||||
-- | Given SignParams and request details, including request method,
|
||||
-- request path, headers, query params and payload hash, generates an
|
||||
-- updated set of headers, including the x-amz-date header and the
|
||||
@ -116,36 +116,23 @@ mkAuthHeader accessKey scope signedHeaderKeys sign =
|
||||
-- is being created. The expiry is interpreted as an integer number of
|
||||
-- seconds. The output will be the list of query-parameters to add to
|
||||
-- the request.
|
||||
signV4 :: SignParams -> NC.Request -> [(ByteString, ByteString)]
|
||||
signV4 !sp !req =
|
||||
let region = fromMaybe "" $ spRegion sp
|
||||
ts = spTimeStamp sp
|
||||
scope = mkScope ts region
|
||||
accessKey = TE.encodeUtf8 $ spAccessKey sp
|
||||
secretKey = TE.encodeUtf8 $ spSecretKey sp
|
||||
signV4QueryParams :: SignParams -> NC.Request -> SimpleQuery
|
||||
signV4QueryParams !sp !req =
|
||||
let scope = credentialScope sp
|
||||
expiry = spExpirySecs sp
|
||||
sha256Hdr =
|
||||
( "x-amz-content-sha256",
|
||||
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
|
||||
)
|
||||
-- headers to be added to the request
|
||||
datePair = ("X-Amz-Date", awsTimeFormatBS ts)
|
||||
computedHeaders =
|
||||
NC.requestHeaders req
|
||||
++ if isJust $ expiry
|
||||
then []
|
||||
else map (\(x, y) -> (mk x, y)) [datePair, sha256Hdr]
|
||||
headersToSign = getHeadersToSign computedHeaders
|
||||
|
||||
headersToSign = getHeadersToSign $ NC.requestHeaders req
|
||||
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
|
||||
-- query-parameters to be added before signing for presigned URLs
|
||||
-- (i.e. when `isJust expiry`)
|
||||
authQP =
|
||||
[ ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
|
||||
("X-Amz-Credential", B.concat [accessKey, "/", scope]),
|
||||
datePair,
|
||||
("X-Amz-Credential", B.concat [encodeUtf8 $ spAccessKey sp, "/", scope]),
|
||||
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
|
||||
("X-Amz-Expires", maybe "" showBS expiry),
|
||||
("X-Amz-SignedHeaders", signedHeaderKeys)
|
||||
]
|
||||
++ maybeToList ((amzSecurityToken,) . BA.convert <$> spSessionToken sp)
|
||||
finalQP =
|
||||
parseQuery (NC.queryString req)
|
||||
++ if isJust expiry
|
||||
@ -158,39 +145,129 @@ signV4 !sp !req =
|
||||
sp
|
||||
(NC.setQueryString finalQP req)
|
||||
headersToSign
|
||||
|
||||
-- 2. compute string to sign
|
||||
stringToSign = mkStringToSign ts scope canonicalRequest
|
||||
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
|
||||
-- 3.1 compute signing key
|
||||
signingKey = mkSigningKey ts region secretKey
|
||||
signingKey = getSigningKey sp
|
||||
-- 3.2 compute signature
|
||||
signature = computeSignature stringToSign signingKey
|
||||
in ("X-Amz-Signature", signature) : authQP
|
||||
|
||||
-- | Given SignParams and request details, including request method, request
|
||||
-- path, headers, query params and payload hash, generates an updated set of
|
||||
-- headers, including the x-amz-date header and the Authorization header, which
|
||||
-- includes the signature.
|
||||
--
|
||||
-- The output is the list of headers to be added to authenticate the request.
|
||||
signV4 :: SignParams -> NC.Request -> [Header]
|
||||
signV4 !sp !req =
|
||||
let scope = credentialScope sp
|
||||
|
||||
-- extra headers to be added for signing purposes.
|
||||
extraHeaders =
|
||||
("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp)
|
||||
: ( -- payload hash is only used for S3 (not STS)
|
||||
[ ( "x-amz-content-sha256",
|
||||
fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
|
||||
)
|
||||
| spService sp == ServiceS3
|
||||
]
|
||||
)
|
||||
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
|
||||
|
||||
-- 1. compute canonical request
|
||||
reqHeaders = NC.requestHeaders req ++ extraHeaders
|
||||
(canonicalRequest, signedHeaderKeys) =
|
||||
getCanonicalRequestAndSignedHeaders
|
||||
NotStreaming
|
||||
sp
|
||||
req
|
||||
reqHeaders
|
||||
|
||||
-- 2. compute string to sign
|
||||
stringToSign = mkStringToSign (spTimeStamp sp) scope canonicalRequest
|
||||
-- 3.1 compute signing key
|
||||
signingKey = getSigningKey sp
|
||||
-- 3.2 compute signature
|
||||
signature = computeSignature stringToSign signingKey
|
||||
-- 4. compute auth header
|
||||
authHeader = mkAuthHeader (spAccessKey sp) scope signedHeaderKeys signature
|
||||
-- finally compute output pairs
|
||||
output =
|
||||
if isJust expiry
|
||||
then ("X-Amz-Signature", signature) : authQP
|
||||
else
|
||||
[ (\(x, y) -> (CI.foldedCase x, y)) authHeader,
|
||||
datePair,
|
||||
sha256Hdr
|
||||
]
|
||||
in output
|
||||
in authHeader : extraHeaders
|
||||
|
||||
mkScope :: UTCTime -> Text -> ByteString
|
||||
mkScope ts region =
|
||||
B.intercalate
|
||||
"/"
|
||||
[ TE.encodeUtf8 . T.pack $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" ts,
|
||||
TE.encodeUtf8 region,
|
||||
"s3",
|
||||
"aws4_request"
|
||||
]
|
||||
credentialScope :: SignParams -> ByteString
credentialScope sp =
  let region = fromMaybe "" $ spRegion sp
   in B.intercalate
        "/"
        [ TE.encodeUtf8 $ Time.formatTime Time.defaultTimeLocale "%Y%m%d" $ spTimeStamp sp,
          TE.encodeUtf8 region,
          toByteString $ spService sp,
          "aws4_request"
        ]

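For a request signed at 2023-01-15 09:30:42 UTC in region "us-east-1" against the S3 service, the scope comes out as:

-- credentialScope sp == "20230115/us-east-1/s3/aws4_request"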
-- Folds header name, trims whitespace in header values, skips ignored headers
|
||||
-- and sorts headers.
|
||||
getHeadersToSign :: [Header] -> [(ByteString, ByteString)]
|
||||
getHeadersToSign !h =
|
||||
filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
|
||||
map (\(x, y) -> (CI.foldedCase x, stripBS y)) h
|
||||
map (bimap CI.foldedCase stripBS) h
|
||||
|
||||
-- | Given the list of headers in the request, computes the canonical headers
|
||||
-- and the signed headers strings.
|
||||
getCanonicalHeaders :: NonEmpty Header -> (ByteString, ByteString)
|
||||
getCanonicalHeaders h =
|
||||
let -- Folds header name, trims spaces in header values, skips ignored
|
||||
-- headers and sorts headers by name (we must not re-order multi-valued
|
||||
-- headers).
|
||||
headersToSign =
|
||||
NE.toList $
|
||||
NE.sortBy (\a b -> compare (fst a) (fst b)) $
|
||||
NE.fromList $
|
||||
NE.filter ((\hdr -> not $ Set.member hdr ignoredHeaders) . fst) $
|
||||
NE.map (bimap CI.foldedCase stripBS) h
|
||||
|
||||
canonicalHeaders = mconcat $ map (\(a, b) -> a <> ":" <> b <> "\n") headersToSign
|
||||
signedHeaderKeys = B.intercalate ";" $ map fst headersToSign
|
||||
in (canonicalHeaders, signedHeaderKeys)
|
||||
|
||||
getCanonicalRequestAndSignedHeaders ::
|
||||
IsStreaming ->
|
||||
SignParams ->
|
||||
NC.Request ->
|
||||
[Header] ->
|
||||
(ByteString, ByteString)
|
||||
getCanonicalRequestAndSignedHeaders isStreaming sp req requestHeaders =
|
||||
let httpMethod = NC.method req
|
||||
|
||||
canonicalUri = uriEncode False $ NC.path req
|
||||
|
||||
canonicalQueryString =
|
||||
B.intercalate "&" $
|
||||
map (\(x, y) -> B.concat [x, "=", y]) $
|
||||
sort $
|
||||
map
|
||||
( bimap (uriEncode True) (maybe "" (uriEncode True))
|
||||
)
|
||||
(parseQuery $ NC.queryString req)
|
||||
|
||||
(canonicalHeaders, signedHeaderKeys) = getCanonicalHeaders $ NE.fromList requestHeaders
|
||||
payloadHashStr =
|
||||
case isStreaming of
|
||||
IsStreamingLength _ -> "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
|
||||
NotStreaming -> fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
|
||||
|
||||
canonicalRequest =
|
||||
B.intercalate
|
||||
"\n"
|
||||
[ httpMethod,
|
||||
canonicalUri,
|
||||
canonicalQueryString,
|
||||
canonicalHeaders,
|
||||
signedHeaderKeys,
|
||||
payloadHashStr
|
||||
]
|
||||
in (canonicalRequest, signedHeaderKeys)
|
||||
|
||||
mkCanonicalRequest ::
|
||||
Bool ->
|
||||
@ -199,15 +276,16 @@ mkCanonicalRequest ::
|
||||
[(ByteString, ByteString)] ->
|
||||
ByteString
|
||||
mkCanonicalRequest !isStreaming !sp !req !headersForSign =
|
||||
let canonicalQueryString =
|
||||
B.intercalate "&"
|
||||
$ map (\(x, y) -> B.concat [x, "=", y])
|
||||
$ sort
|
||||
$ map
|
||||
( \(x, y) ->
|
||||
(uriEncode True x, maybe "" (uriEncode True) y)
|
||||
)
|
||||
$ (parseQuery $ NC.queryString req)
|
||||
let httpMethod = NC.method req
|
||||
canonicalUri = uriEncode False $ NC.path req
|
||||
canonicalQueryString =
|
||||
B.intercalate "&" $
|
||||
map (\(x, y) -> B.concat [x, "=", y]) $
|
||||
sortBy (\a b -> compare (fst a) (fst b)) $
|
||||
map
|
||||
( bimap (uriEncode True) (maybe "" (uriEncode True))
|
||||
)
|
||||
(parseQuery $ NC.queryString req)
|
||||
sortedHeaders = sort headersForSign
|
||||
canonicalHeaders =
|
||||
B.concat $
|
||||
@ -219,8 +297,8 @@ mkCanonicalRequest !isStreaming !sp !req !headersForSign =
|
||||
else fromMaybe "UNSIGNED-PAYLOAD" $ spPayloadHash sp
|
||||
in B.intercalate
|
||||
"\n"
|
||||
[ NC.method req,
|
||||
uriEncode False $ NC.path req,
|
||||
[ httpMethod,
|
||||
canonicalUri,
|
||||
canonicalQueryString,
|
||||
canonicalHeaders,
|
||||
signedHeaders,
|
||||
@ -237,13 +315,13 @@ mkStringToSign ts !scope !canonicalRequest =
|
||||
hashSHA256 canonicalRequest
|
||||
]
|
||||
|
||||
mkSigningKey :: UTCTime -> Text -> ByteString -> ByteString
mkSigningKey ts region !secretKey =
getSigningKey :: SignParams -> ByteString
getSigningKey sp =
  hmacSHA256RawBS "aws4_request"
    . hmacSHA256RawBS "s3"
    . hmacSHA256RawBS (TE.encodeUtf8 region)
    . hmacSHA256RawBS (awsDateFormatBS ts)
    $ B.concat ["AWS4", secretKey]
    . hmacSHA256RawBS (toByteString $ spService sp)
    . hmacSHA256RawBS (TE.encodeUtf8 $ fromMaybe "" $ spRegion sp)
    . hmacSHA256RawBS (awsDateFormatBS $ spTimeStamp sp)
    $ B.concat ["AWS4", BA.convert $ spSecretKey sp]
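Expanded step by step, the composition above is the standard SigV4 key-derivation chain (recall that hmacSHA256RawBS takes the message first and the key second). A sketch with illustrative local names, not part of the module:

signingKeySteps :: SignParams -> ByteString
signingKeySteps sp =
  let secret   = BA.convert (spSecretKey sp)
      region   = TE.encodeUtf8 $ fromMaybe "" $ spRegion sp
      -- kDate = HMAC("AWS4" <> secret, dateStamp), and so on down the chain
      kDate    = hmacSHA256RawBS (awsDateFormatBS $ spTimeStamp sp) ("AWS4" <> secret)
      kRegion  = hmacSHA256RawBS region kDate
      kService = hmacSHA256RawBS (toByteString $ spService sp) kRegion
   in hmacSHA256RawBS "aws4_request" kService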
|
||||
computeSignature :: ByteString -> ByteString -> ByteString
|
||||
computeSignature !toSign !key = digestToBase16 $ hmacSHA256 toSign key
|
||||
@ -257,20 +335,20 @@ signV4PostPolicy ::
|
||||
Map.HashMap Text ByteString
|
||||
signV4PostPolicy !postPolicyJSON !sp =
|
||||
let stringToSign = Base64.encode postPolicyJSON
|
||||
region = fromMaybe "" $ spRegion sp
|
||||
signingKey = mkSigningKey (spTimeStamp sp) region $ TE.encodeUtf8 $ spSecretKey sp
|
||||
signingKey = getSigningKey sp
|
||||
signature = computeSignature stringToSign signingKey
|
||||
in Map.fromList
|
||||
in Map.fromList $
|
||||
[ ("x-amz-signature", signature),
|
||||
("policy", stringToSign)
|
||||
]
|
||||
++ maybeToList ((decodeUtf8 amzSecurityToken,) . BA.convert <$> spSessionToken sp)
|
||||
|
||||
chunkSizeConstant :: Int
|
||||
chunkSizeConstant = 64 * 1024
|
||||
|
||||
-- base16Len computes the number of bytes required to represent @n (> 0)@ in
-- hexadecimal.
base16Len :: Integral a => a -> Int
base16Len :: (Integral a) => a -> Int
base16Len n
  | n == 0 = 0
  | otherwise = 1 + base16Len (n `div` 16)
@ -287,60 +365,60 @@ signedStreamLength dataLen =
    finalChunkSize = 1 + 17 + 64 + 2 + 2
 in numChunks * fullChunkSize + lastChunkSize + finalChunkSize
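Each aws-chunked frame on the wire looks like "<size in hex>;chunk-signature=<64 hex chars>\r\n<data>\r\n", so the per-chunk overhead is base16Len size + 17 + 64 + 4 bytes. Worked numbers (illustrative arithmetic only):

-- chunkSizeConstant = 64 * 1024 = 65536, and base16Len 65536 == 5 ("10000"), so a
-- full chunk occupies 5 + 17 + 64 + 2 + 65536 + 2 = 65626 bytes on the wire.
-- The terminating empty chunk ("0;chunk-signature=...\r\n\r\n") occupies
-- 1 + 17 + 64 + 2 + 2 = 86 bytes, i.e. the finalChunkSize above.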
|
||||
-- For streaming S3, we need to update the content-encoding header.
|
||||
addContentEncoding :: [Header] -> [Header]
|
||||
addContentEncoding hs =
|
||||
-- assume there is at most one content-encoding header.
|
||||
let (ceHdrs, others) = partition ((== hContentEncoding) . fst) hs
|
||||
in maybe
|
||||
(hContentEncoding, "aws-chunked")
|
||||
(\(k, v) -> (k, v <> ",aws-chunked"))
|
||||
(listToMaybe ceHdrs)
|
||||
: others
|
||||
|
||||
signV4Stream ::
|
||||
Int64 ->
|
||||
SignParams ->
|
||||
NC.Request ->
|
||||
(C.ConduitT () ByteString (C.ResourceT IO) () -> NC.Request)
|
||||
-- -> ([Header], C.ConduitT () ByteString (C.ResourceT IO) () -> NC.RequestBody)
|
||||
signV4Stream !payloadLength !sp !req =
|
||||
let ts = spTimeStamp sp
|
||||
addContentEncoding hs =
|
||||
let ceMay = headMay $ filter (\(x, _) -> x == "content-encoding") hs
|
||||
in case ceMay of
|
||||
Nothing -> ("content-encoding", "aws-chunked") : hs
|
||||
Just (_, ce) ->
|
||||
("content-encoding", ce <> ",aws-chunked")
|
||||
: filter (\(x, _) -> x /= "content-encoding") hs
|
||||
-- headers to be added to the request
|
||||
datePair = ("X-Amz-Date", awsTimeFormatBS ts)
|
||||
computedHeaders =
|
||||
addContentEncoding $
|
||||
datePair : NC.requestHeaders req
|
||||
-- headers specific to streaming signature
|
||||
|
||||
-- compute the updated list of headers to be added for signing purposes.
|
||||
signedContentLength = signedStreamLength payloadLength
|
||||
streamingHeaders :: [Header]
|
||||
streamingHeaders =
|
||||
[ ("x-amz-decoded-content-length", showBS payloadLength),
|
||||
extraHeaders =
|
||||
[ ("X-Amz-Date", awsTimeFormatBS $ spTimeStamp sp),
|
||||
("x-amz-decoded-content-length", showBS payloadLength),
|
||||
("content-length", showBS signedContentLength),
|
||||
("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
|
||||
]
|
||||
headersToSign = getHeadersToSign $ computedHeaders ++ streamingHeaders
|
||||
signedHeaderKeys = B.intercalate ";" $ sort $ map fst headersToSign
|
||||
finalQP = parseQuery (NC.queryString req)
|
||||
++ maybeToList ((mk amzSecurityToken,) . BA.convert <$> spSessionToken sp)
|
||||
requestHeaders =
|
||||
addContentEncoding $
|
||||
foldr setHeader (NC.requestHeaders req) extraHeaders
|
||||
|
||||
-- 1. Compute Seed Signature
|
||||
-- 1.1 Canonical Request
|
||||
canonicalReq =
|
||||
mkCanonicalRequest
|
||||
True
|
||||
(canonicalReq, signedHeaderKeys) =
|
||||
getCanonicalRequestAndSignedHeaders
|
||||
(IsStreamingLength payloadLength)
|
||||
sp
|
||||
(NC.setQueryString finalQP req)
|
||||
headersToSign
|
||||
region = fromMaybe "" $ spRegion sp
|
||||
scope = mkScope ts region
|
||||
req
|
||||
requestHeaders
|
||||
|
||||
scope = credentialScope sp
|
||||
accessKey = spAccessKey sp
|
||||
secretKey = spSecretKey sp
|
||||
-- 1.2 String toSign
|
||||
stringToSign = mkStringToSign ts scope canonicalReq
|
||||
-- 1.3 Compute signature
|
||||
-- 1.3.1 compute signing key
|
||||
signingKey = mkSigningKey ts region $ TE.encodeUtf8 secretKey
|
||||
signingKey = getSigningKey sp
|
||||
-- 1.3.2 Compute signature
|
||||
seedSignature = computeSignature stringToSign signingKey
|
||||
-- 1.3.3 Compute Auth Header
|
||||
authHeader = mkAuthHeader accessKey scope signedHeaderKeys seedSignature
|
||||
-- 1.4 Updated headers for the request
|
||||
finalReqHeaders = authHeader : (computedHeaders ++ streamingHeaders)
|
||||
finalReqHeaders = authHeader : requestHeaders
|
||||
-- headersToAdd = authHeader : datePair : streamingHeaders
|
||||
|
||||
toHexStr n = B8.pack $ printf "%x" n
|
||||
@ -367,41 +445,42 @@ signV4Stream !payloadLength !sp !req =
|
||||
-- 'chunkSizeConstant'.
|
||||
if
|
||||
| n > 0 -> do
|
||||
bs <- mustTakeN chunkSizeConstant
|
||||
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
|
||||
nextSign = computeSignature strToSign signingKey
|
||||
chunkBS =
|
||||
toHexStr chunkSizeConstant
|
||||
<> ";chunk-signature="
|
||||
<> nextSign
|
||||
<> "\r\n"
|
||||
<> bs
|
||||
<> "\r\n"
|
||||
C.yield chunkBS
|
||||
signerConduit (n -1) lps nextSign
|
||||
bs <- mustTakeN chunkSizeConstant
|
||||
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
|
||||
nextSign = computeSignature strToSign signingKey
|
||||
chunkBS =
|
||||
toHexStr chunkSizeConstant
|
||||
<> ";chunk-signature="
|
||||
<> nextSign
|
||||
<> "\r\n"
|
||||
<> bs
|
||||
<> "\r\n"
|
||||
C.yield chunkBS
|
||||
signerConduit (n - 1) lps nextSign
|
||||
|
||||
-- Second case encodes the last chunk which is smaller than
|
||||
-- 'chunkSizeConstant'
|
||||
| lps > 0 -> do
|
||||
bs <- mustTakeN $ fromIntegral lps
|
||||
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
|
||||
nextSign = computeSignature strToSign signingKey
|
||||
chunkBS =
|
||||
toHexStr lps <> ";chunk-signature="
|
||||
<> nextSign
|
||||
<> "\r\n"
|
||||
<> bs
|
||||
<> "\r\n"
|
||||
C.yield chunkBS
|
||||
signerConduit 0 0 nextSign
|
||||
bs <- mustTakeN $ fromIntegral lps
|
||||
let strToSign = chunkStrToSign prevSign (hashSHA256 bs)
|
||||
nextSign = computeSignature strToSign signingKey
|
||||
chunkBS =
|
||||
toHexStr lps
|
||||
<> ";chunk-signature="
|
||||
<> nextSign
|
||||
<> "\r\n"
|
||||
<> bs
|
||||
<> "\r\n"
|
||||
C.yield chunkBS
|
||||
signerConduit 0 0 nextSign
|
||||
|
||||
-- Last case encodes the final signature chunk that has no
|
||||
-- data.
|
||||
| otherwise -> do
|
||||
let strToSign = chunkStrToSign prevSign (hashSHA256 "")
|
||||
nextSign = computeSignature strToSign signingKey
|
||||
lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n"
|
||||
C.yield lastChunkBS
|
||||
let strToSign = chunkStrToSign prevSign (hashSHA256 "")
|
||||
nextSign = computeSignature strToSign signingKey
|
||||
lastChunkBS = "0;chunk-signature=" <> nextSign <> "\r\n\r\n"
|
||||
C.yield lastChunkBS
|
||||
in \src ->
|
||||
req
|
||||
{ NC.requestHeaders = finalReqHeaders,
|
||||
@@ -409,3 +488,9 @@ signV4Stream !payloadLength !sp !req =
          NC.requestBodySource signedContentLength $
            src C..| signerConduit numParts lastPSize seedSignature
      }

-- "setHeader hdr r" adds hdr to r, replacing it in r if it already exists.
setHeader :: Header -> RequestHeaders -> RequestHeaders
setHeader hdr r =
  let r' = filter (\(name, _) -> name /= fst hdr) r
   in hdr : r'
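A small illustration of the replace-or-add behaviour of setHeader (header values here are hypothetical):

-- >>> setHeader ("content-encoding", "aws-chunked")
--               [("content-encoding", "gzip"), ("host", "play.min.io")]
-- [("content-encoding","aws-chunked"),("host","play.min.io")]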
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -24,7 +24,6 @@ import qualified Data.ByteString.Lazy as LB
|
||||
import Data.CaseInsensitive (mk, original)
|
||||
import qualified Data.Conduit.Binary as CB
|
||||
import qualified Data.HashMap.Strict as H
|
||||
import qualified Data.List as List
|
||||
import qualified Data.Text as T
|
||||
import Data.Text.Read (decimal)
|
||||
import Data.Time
|
||||
@ -37,14 +36,12 @@ import Network.HTTP.Conduit (Response)
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
import qualified Network.HTTP.Types as HT
|
||||
import qualified Network.HTTP.Types.Header as Hdr
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Data.ByteString
|
||||
import Network.Minio.JsonParser (parseErrResponseJSON)
|
||||
import Network.Minio.XmlParser (parseErrResponse)
|
||||
import Network.Minio.XmlCommon (parseErrResponse)
|
||||
import qualified System.IO as IO
|
||||
import qualified UnliftIO as U
|
||||
import qualified UnliftIO.Async as A
|
||||
import qualified UnliftIO.MVar as UM
|
||||
|
||||
allocateReadFile ::
|
||||
(MonadUnliftIO m, R.MonadResource m) =>
|
||||
@ -52,7 +49,7 @@ allocateReadFile ::
|
||||
m (R.ReleaseKey, Handle)
|
||||
allocateReadFile fp = do
|
||||
(rk, hdlE) <- R.allocate (openReadFile fp) cleanup
|
||||
either (\(e :: IOException) -> throwIO e) (return . (rk,)) hdlE
|
||||
either (\(e :: U.IOException) -> throwIO e) (return . (rk,)) hdlE
|
||||
where
|
||||
openReadFile f = U.try $ IO.openBinaryFile f IO.ReadMode
|
||||
cleanup = either (const $ return ()) IO.hClose
|
||||
@ -60,25 +57,25 @@ allocateReadFile fp = do
|
||||
-- | Queries the file size from the handle. Catches any file operation
|
||||
-- exceptions and returns Nothing instead.
|
||||
getFileSize ::
|
||||
(MonadUnliftIO m, R.MonadResource m) =>
|
||||
(MonadUnliftIO m) =>
|
||||
Handle ->
|
||||
m (Maybe Int64)
|
||||
getFileSize h = do
|
||||
resE <- liftIO $ try $ fromIntegral <$> IO.hFileSize h
|
||||
case resE of
|
||||
Left (_ :: IOException) -> return Nothing
|
||||
Left (_ :: U.IOException) -> return Nothing
|
||||
Right s -> return $ Just s
|
||||
|
||||
-- | Queries if handle is seekable. Catches any file operation
|
||||
-- exceptions and return False instead.
|
||||
isHandleSeekable ::
|
||||
(R.MonadResource m, MonadUnliftIO m) =>
|
||||
(R.MonadResource m) =>
|
||||
Handle ->
|
||||
m Bool
|
||||
isHandleSeekable h = do
|
||||
resE <- liftIO $ try $ IO.hIsSeekable h
|
||||
case resE of
|
||||
Left (_ :: IOException) -> return False
|
||||
Left (_ :: U.IOException) -> return False
|
||||
Right v -> return v
|
||||
|
||||
-- | Helper function that opens a handle to the filepath and performs
|
||||
@ -89,7 +86,7 @@ withNewHandle ::
|
||||
(MonadUnliftIO m, R.MonadResource m) =>
|
||||
FilePath ->
|
||||
(Handle -> m a) ->
|
||||
m (Either IOException a)
|
||||
m (Either U.IOException a)
|
||||
withNewHandle fp fileAction = do
|
||||
-- opening a handle can throw MError exception.
|
||||
handleE <- try $ allocateReadFile fp
|
||||
@@ -103,17 +100,27 @@ withNewHandle fp fileAction = do
  return resE

mkHeaderFromPairs :: [(ByteString, ByteString)] -> [HT.Header]
mkHeaderFromPairs = map ((\(x, y) -> (mk x, y)))
mkHeaderFromPairs = map (first mk)

lookupHeader :: HT.HeaderName -> [HT.Header] -> Maybe ByteString
lookupHeader hdr = headMay . map snd . filter (\(h, _) -> h == hdr)
lookupHeader hdr = listToMaybe . map snd . filter (\(h, _) -> h == hdr)

getETagHeader :: [HT.Header] -> Maybe Text
getETagHeader hs = decodeUtf8Lenient <$> lookupHeader Hdr.hETag hs

getMetadata :: [HT.Header] -> [(Text, Text)]
getMetadata =
  map ((\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y)))
  map (\(x, y) -> (decodeUtf8Lenient $ original x, decodeUtf8Lenient $ stripBS y))

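Illustration of the lookup helpers with hypothetical header lists:

-- >>> lookupHeader Hdr.hETag [(Hdr.hContentType, "text/plain"), (Hdr.hETag, "\"abc123\"")]
-- Just "\"abc123\""
-- >>> getETagHeader [(Hdr.hContentType, "text/plain")]
-- Nothing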
-- | If the given header name has the @X-Amz-Meta-@ prefix, it is
|
||||
-- stripped and a Just is returned.
|
||||
userMetadataHeaderNameMaybe :: Text -> Maybe Text
|
||||
userMetadataHeaderNameMaybe k =
|
||||
let prefix = T.toCaseFold "X-Amz-Meta-"
|
||||
n = T.length prefix
|
||||
in if T.toCaseFold (T.take n k) == prefix
|
||||
then Just (T.drop n k)
|
||||
else Nothing
|
||||
|
||||
toMaybeMetadataHeader :: (Text, Text) -> Maybe (Text, Text)
|
||||
toMaybeMetadataHeader (k, v) =
|
||||
@ -128,6 +135,14 @@ getNonUserMetadataMap =
|
||||
. fst
|
||||
)
|
||||
|
||||
addXAmzMetaPrefix :: Text -> Text
|
||||
addXAmzMetaPrefix s
|
||||
| isJust (userMetadataHeaderNameMaybe s) = s
|
||||
| otherwise = "X-Amz-Meta-" <> s
|
||||
|
||||
mkHeaderFromMetadata :: [(Text, Text)] -> [HT.Header]
|
||||
mkHeaderFromMetadata = map (\(x, y) -> (mk $ encodeUtf8 $ addXAmzMetaPrefix x, encodeUtf8 y))
|
||||
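The prefix helpers round-trip as expected; for instance (hypothetical values):

-- >>> userMetadataHeaderNameMaybe "X-Amz-Meta-Author"
-- Just "Author"
-- >>> addXAmzMetaPrefix "Author"
-- "X-Amz-Meta-Author"
-- >>> addXAmzMetaPrefix "X-Amz-Meta-Author"  -- already prefixed, left as-is
-- "X-Amz-Meta-Author"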
|
||||
-- | This function collects all headers starting with `x-amz-meta-`
|
||||
-- and strips off this prefix, and returns a map.
|
||||
getUserMetadataMap :: [(Text, Text)] -> H.HashMap Text Text
|
||||
@ -135,6 +150,12 @@ getUserMetadataMap =
|
||||
H.fromList
|
||||
. mapMaybe toMaybeMetadataHeader
|
||||
|
||||
getHostHeader :: (ByteString, Int) -> ByteString
getHostHeader (host_, port_) =
  if port_ == 80 || port_ == 443
    then host_
    else host_ <> ":" <> show port_
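The standard ports 80 and 443 are omitted from the Host header; any other port is appended (hypothetical values):

-- >>> getHostHeader ("play.min.io", 443)
-- "play.min.io"
-- >>> getHostHeader ("localhost", 9000)
-- "localhost:9000"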
|
||||
getLastModifiedHeader :: [HT.Header] -> Maybe UTCTime
|
||||
getLastModifiedHeader hs = do
|
||||
modTimebs <- decodeUtf8Lenient <$> lookupHeader Hdr.hLastModified hs
|
||||
@ -143,7 +164,7 @@ getLastModifiedHeader hs = do
|
||||
getContentLength :: [HT.Header] -> Maybe Int64
|
||||
getContentLength hs = do
|
||||
nbs <- decodeUtf8Lenient <$> lookupHeader Hdr.hContentLength hs
|
||||
fst <$> hush (decimal nbs)
|
||||
fst <$> either (const Nothing) Just (decimal nbs)
|
||||
|
||||
decodeUtf8Lenient :: ByteString -> Text
|
||||
decodeUtf8Lenient = decodeUtf8With lenientDecode
|
||||
@ -154,7 +175,7 @@ isSuccessStatus sts =
|
||||
in (s >= 200 && s < 300)
|
||||
|
||||
httpLbs ::
|
||||
MonadIO m =>
|
||||
(MonadIO m) =>
|
||||
NC.Request ->
|
||||
NC.Manager ->
|
||||
m (NC.Response LByteString)
|
||||
@ -170,8 +191,9 @@ httpLbs req mgr = do
|
||||
sErr <- parseErrResponseJSON $ NC.responseBody resp
|
||||
throwIO sErr
|
||||
_ ->
|
||||
throwIO $ NC.HttpExceptionRequest req $
|
||||
NC.StatusCodeException (void resp) (showBS resp)
|
||||
throwIO $
|
||||
NC.HttpExceptionRequest req $
|
||||
NC.StatusCodeException (void resp) (showBS resp)
|
||||
|
||||
return resp
|
||||
where
|
||||
@ -199,8 +221,9 @@ http req mgr = do
|
||||
throwIO sErr
|
||||
_ -> do
|
||||
content <- LB.toStrict . NC.responseBody <$> NC.lbsResponse resp
|
||||
throwIO $ NC.HttpExceptionRequest req $
|
||||
NC.StatusCodeException (void resp) content
|
||||
throwIO $
|
||||
NC.HttpExceptionRequest req $
|
||||
NC.StatusCodeException (void resp) content
|
||||
|
||||
return resp
|
||||
where
|
||||
@ -216,7 +239,7 @@ http req mgr = do
|
||||
-- Similar to mapConcurrently but limits the number of threads that
|
||||
-- can run using a quantity semaphore.
|
||||
limitedMapConcurrently ::
|
||||
MonadUnliftIO m =>
|
||||
(MonadUnliftIO m) =>
|
||||
Int ->
|
||||
(t -> m a) ->
|
||||
[t] ->
|
||||
@ -233,7 +256,7 @@ limitedMapConcurrently count act args = do
|
||||
waitSem t = U.atomically $ do
|
||||
v <- U.readTVar t
|
||||
if v > 0
|
||||
then U.writeTVar t (v -1)
|
||||
then U.writeTVar t (v - 1)
|
||||
else U.retrySTM
|
||||
signalSem t = U.atomically $ do
|
||||
v <- U.readTVar t
|
||||
@ -260,42 +283,3 @@ chunkBSConduit (s : ss) = do
|
||||
| B.length bs == s -> C.yield bs >> chunkBSConduit ss
|
||||
| B.length bs > 0 -> C.yield bs
|
||||
| otherwise -> return ()
|
||||
|
||||
-- | Select part sizes - the logic is that the minimum part-size will
-- be 64MiB.
selectPartSizes :: Int64 -> [(PartNumber, Int64, Int64)]
selectPartSizes size =
  uncurry (List.zip3 [1 ..])
    $ List.unzip
    $ loop 0 size
  where
    ceil :: Double -> Int64
    ceil = ceiling
    partSize =
      max
        minPartSize
        ( ceil $
            fromIntegral size
              / fromIntegral maxMultipartParts
        )
    m = fromIntegral partSize
    loop st sz
      | st > sz = []
      | st + m >= sz = [(st, sz - st)]
      | otherwise = (st, m) : loop (st + m) sz
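For example, with the 64 MiB minimum part size noted above (and assuming the part-count limit does not force larger parts), a 100 MiB object is split into one full 64 MiB part followed by the 36 MiB remainder (hypothetical GHCi session):

-- >>> selectPartSizes (100 * 1024 * 1024)
-- [(1,0,67108864),(2,67108864,37748736)]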
|
||||
lookupRegionCache :: Bucket -> Minio (Maybe Region)
|
||||
lookupRegionCache b = do
|
||||
rMVar <- asks mcRegionMap
|
||||
rMap <- UM.readMVar rMVar
|
||||
return $ H.lookup b rMap
|
||||
|
||||
addToRegionCache :: Bucket -> Region -> Minio ()
|
||||
addToRegionCache b region = do
|
||||
rMVar <- asks mcRegionMap
|
||||
UM.modifyMVar_ rMVar $ return . H.insert b region
|
||||
|
||||
deleteFromRegionCache :: Bucket -> Minio ()
|
||||
deleteFromRegionCache b = do
|
||||
rMVar <- asks mcRegionMap
|
||||
UM.modifyMVar_ rMVar $ return . H.delete b
|
||||
|
||||
65
src/Network/Minio/XmlCommon.hs
Normal file
@@ -0,0 +1,65 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
module Network.Minio.XmlCommon where
|
||||
|
||||
import qualified Data.Text as T
|
||||
import Data.Text.Read (decimal)
|
||||
import Data.Time (UTCTime)
|
||||
import Data.Time.Format.ISO8601 (iso8601ParseM)
|
||||
import Lib.Prelude (throwIO)
|
||||
import Network.Minio.Errors
|
||||
import Text.XML (Name (Name), def, parseLBS)
|
||||
import Text.XML.Cursor (Axis, Cursor, content, element, fromDocument, laxElement, ($/), (&/))
|
||||
|
||||
s3Name :: Text -> Text -> Name
|
||||
s3Name ns s = Name s (Just ns) Nothing
|
||||
|
||||
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
|
||||
uncurry4 f (a, b, c, d) = f a b c d
|
||||
|
||||
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
|
||||
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
|
||||
|
||||
-- | Parse time strings from XML
parseS3XMLTime :: (MonadIO m) => Text -> m UTCTime
parseS3XMLTime t =
  maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return $
    iso8601ParseM $
      toString t
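For instance, the timestamps S3 returns, such as "2023-06-01T12:30:45.000Z", are accepted by iso8601ParseM (hypothetical GHCi session):

-- >>> parseS3XMLTime "2023-06-01T12:30:45.000Z"
-- 2023-06-01 12:30:45 UTC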
|
||||
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
|
||||
parseDecimal numStr =
|
||||
either (throwIO . MErrVXmlParse . show) return $
|
||||
fst <$> decimal numStr
|
||||
|
||||
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
|
||||
parseDecimals numStr = forM numStr parseDecimal
|
||||
|
||||
s3Elem :: Text -> Text -> Axis
|
||||
s3Elem ns = element . s3Name ns
|
||||
|
||||
parseRoot :: (MonadIO m) => LByteString -> m Cursor
|
||||
parseRoot =
|
||||
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
|
||||
. parseLBS def
|
||||
|
||||
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
|
||||
parseErrResponse xmldata = do
|
||||
r <- parseRoot xmldata
|
||||
let code = T.concat $ r $/ laxElement "Code" &/ content
|
||||
message = T.concat $ r $/ laxElement "Message" &/ content
|
||||
return $ toServiceErr code message
|
||||
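For example, a minimal S3 error body of this shape (values hypothetical) yields the generic ServiceErr value:

-- >>> parseErrResponse "<Error><Code>PreconditionFailed</Code><Message>Bad precondition</Message></Error>"
-- ServiceErr "PreconditionFailed" "Bad precondition"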
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -23,10 +23,9 @@ module Network.Minio.XmlGenerator
|
||||
where
|
||||
|
||||
import qualified Data.ByteString.Lazy as LBS
|
||||
import qualified Data.HashMap.Strict as H
|
||||
import qualified Data.Text as T
|
||||
import Lib.Prelude
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.XmlCommon
|
||||
import Text.XML
|
||||
|
||||
-- | Create a bucketConfig request body XML
|
||||
@ -73,12 +72,13 @@ mkCompleteMultipartUploadRequest partInfo =
|
||||
data XNode
|
||||
= XNode Text [XNode]
|
||||
| XLeaf Text Text
|
||||
deriving (Eq, Show)
|
||||
deriving stock (Eq, Show)
|
||||
|
||||
toXML :: Text -> XNode -> ByteString
|
||||
toXML ns node =
|
||||
LBS.toStrict $ renderLBS def $
|
||||
Document (Prologue [] Nothing []) (xmlNode node) []
|
||||
LBS.toStrict $
|
||||
renderLBS def $
|
||||
Document (Prologue [] Nothing []) (xmlNode node) []
|
||||
where
|
||||
xmlNode :: XNode -> Element
|
||||
xmlNode (XNode name nodes) =
|
||||
@ -94,7 +94,7 @@ class ToXNode a where
|
||||
toXNode :: a -> XNode
|
||||
|
||||
instance ToXNode Event where
|
||||
toXNode = XLeaf "Event" . show
|
||||
toXNode = XLeaf "Event" . toText
|
||||
|
||||
instance ToXNode Notification where
|
||||
toXNode (Notification qc tc lc) =
|
||||
@ -104,9 +104,10 @@ instance ToXNode Notification where
|
||||
++ map (toXNodesWithArnName "CloudFunctionConfiguration" "CloudFunction") lc
|
||||
|
||||
toXNodesWithArnName :: Text -> Text -> NotificationConfig -> XNode
|
||||
toXNodesWithArnName eltName arnName (NotificationConfig id arn events fRule) =
|
||||
toXNodesWithArnName eltName arnName (NotificationConfig itemId arn events fRule) =
|
||||
XNode eltName $
|
||||
[XLeaf "Id" id, XLeaf arnName arn] ++ map toXNode events
|
||||
[XLeaf "Id" itemId, XLeaf arnName arn]
|
||||
++ map toXNode events
|
||||
++ [toXNode fRule]
|
||||
|
||||
instance ToXNode Filter where
|
||||
@ -143,14 +144,14 @@ mkSelectRequest r = LBS.toStrict $ renderLBS def sr
|
||||
[NodeContent $ show $ srExpressionType r]
|
||||
),
|
||||
NodeElement
|
||||
( Element "InputSerialization" mempty
|
||||
$ inputSerializationNodes
|
||||
$ srInputSerialization r
|
||||
( Element "InputSerialization" mempty $
|
||||
inputSerializationNodes $
|
||||
srInputSerialization r
|
||||
),
|
||||
NodeElement
|
||||
( Element "OutputSerialization" mempty
|
||||
$ outputSerializationNodes
|
||||
$ srOutputSerialization r
|
||||
( Element "OutputSerialization" mempty $
|
||||
outputSerializationNodes $
|
||||
srOutputSerialization r
|
||||
)
|
||||
]
|
||||
++ maybe [] reqProgElem (srRequestProgressEnabled r)
|
||||
@ -186,11 +187,11 @@ mkSelectRequest r = LBS.toStrict $ renderLBS def sr
|
||||
]
|
||||
comprTypeNode Nothing = []
|
||||
kvElement (k, v) = Element (Name k Nothing Nothing) mempty [NodeContent v]
|
||||
formatNode (InputFormatCSV (CSVProp h)) =
|
||||
formatNode (InputFormatCSV c) =
|
||||
Element
|
||||
"CSV"
|
||||
mempty
|
||||
(map NodeElement $ map kvElement $ H.toList h)
|
||||
(map (NodeElement . kvElement) (csvPropsList c))
|
||||
formatNode (InputFormatJSON p) =
|
||||
Element
|
||||
"JSON"
|
||||
@ -208,17 +209,17 @@ mkSelectRequest r = LBS.toStrict $ renderLBS def sr
|
||||
formatNode InputFormatParquet = Element "Parquet" mempty []
|
||||
outputSerializationNodes (OutputSerializationJSON j) =
|
||||
[ NodeElement
|
||||
( Element "JSON" mempty
|
||||
$ rdElem
|
||||
$ jsonopRecordDelimiter j
|
||||
( Element "JSON" mempty $
|
||||
rdElem $
|
||||
jsonopRecordDelimiter j
|
||||
)
|
||||
]
|
||||
outputSerializationNodes (OutputSerializationCSV (CSVProp h)) =
|
||||
outputSerializationNodes (OutputSerializationCSV c) =
|
||||
[ NodeElement $
|
||||
Element
|
||||
"CSV"
|
||||
mempty
|
||||
(map NodeElement $ map kvElement $ H.toList h)
|
||||
(map (NodeElement . kvElement) (csvPropsList c))
|
||||
]
|
||||
rdElem Nothing = []
|
||||
rdElem (Just t) =
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -32,50 +32,13 @@ where
|
||||
|
||||
import qualified Data.ByteString.Lazy as LB
|
||||
import qualified Data.HashMap.Strict as H
|
||||
import Data.List (zip3, zip4, zip6)
|
||||
import Data.List (zip4, zip6)
|
||||
import qualified Data.Text as T
|
||||
import Data.Text.Read (decimal)
|
||||
import Data.Time
|
||||
import Lib.Prelude
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Errors
|
||||
import Text.XML
|
||||
import Network.Minio.XmlCommon
|
||||
import Text.XML.Cursor hiding (bool)
|
||||
|
||||
-- | Represent the time format string returned by S3 API calls.
|
||||
s3TimeFormat :: [Char]
|
||||
s3TimeFormat = iso8601DateFormat $ Just "%T%QZ"
|
||||
|
||||
-- | Helper functions.
|
||||
uncurry4 :: (a -> b -> c -> d -> e) -> (a, b, c, d) -> e
|
||||
uncurry4 f (a, b, c, d) = f a b c d
|
||||
|
||||
uncurry6 :: (a -> b -> c -> d -> e -> f -> g) -> (a, b, c, d, e, f) -> g
|
||||
uncurry6 f (a, b, c, d, e, g) = f a b c d e g
|
||||
|
||||
-- | Parse time strings from XML
|
||||
parseS3XMLTime :: MonadIO m => Text -> m UTCTime
|
||||
parseS3XMLTime t =
|
||||
maybe (throwIO $ MErrVXmlParse $ "timestamp parse failure: " <> t) return
|
||||
$ parseTimeM True defaultTimeLocale s3TimeFormat
|
||||
$ T.unpack t
|
||||
|
||||
parseDecimal :: (MonadIO m, Integral a) => Text -> m a
|
||||
parseDecimal numStr =
|
||||
either (throwIO . MErrVXmlParse . show) return $
|
||||
fst <$> decimal numStr
|
||||
|
||||
parseDecimals :: (MonadIO m, Integral a) => [Text] -> m [a]
|
||||
parseDecimals numStr = forM numStr parseDecimal
|
||||
|
||||
s3Elem :: Text -> Text -> Axis
|
||||
s3Elem ns = element . s3Name ns
|
||||
|
||||
parseRoot :: (MonadIO m) => LByteString -> m Cursor
|
||||
parseRoot =
|
||||
either (throwIO . MErrVXmlParse . show) (return . fromDocument)
|
||||
. parseLBS def
|
||||
|
||||
-- | Parse the response XML of a list buckets call.
|
||||
parseListBuckets :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m [BucketInfo]
|
||||
parseListBuckets xmldata = do
|
||||
@ -132,7 +95,7 @@ parseListObjectsV1Response xmldata = do
|
||||
ns <- asks getSvcNamespace
|
||||
let s3Elem' = s3Elem ns
|
||||
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
|
||||
nextMarker = headMay $ r $/ s3Elem' "NextMarker" &/ content
|
||||
nextMarker = listToMaybe $ r $/ s3Elem' "NextMarker" &/ content
|
||||
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
|
||||
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
|
||||
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
|
||||
@ -158,7 +121,7 @@ parseListObjectsResponse xmldata = do
|
||||
ns <- asks getSvcNamespace
|
||||
let s3Elem' = s3Elem ns
|
||||
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
|
||||
nextToken = headMay $ r $/ s3Elem' "NextContinuationToken" &/ content
|
||||
nextToken = listToMaybe $ r $/ s3Elem' "NextContinuationToken" &/ content
|
||||
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
|
||||
keys = r $/ s3Elem' "Contents" &/ s3Elem' "Key" &/ content
|
||||
modTimeStr = r $/ s3Elem' "Contents" &/ s3Elem' "LastModified" &/ content
|
||||
@ -185,8 +148,8 @@ parseListUploadsResponse xmldata = do
|
||||
let s3Elem' = s3Elem ns
|
||||
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
|
||||
prefixes = r $/ s3Elem' "CommonPrefixes" &/ s3Elem' "Prefix" &/ content
|
||||
nextKey = headMay $ r $/ s3Elem' "NextKeyMarker" &/ content
|
||||
nextUpload = headMay $ r $/ s3Elem' "NextUploadIdMarker" &/ content
|
||||
nextKey = listToMaybe $ r $/ s3Elem' "NextKeyMarker" &/ content
|
||||
nextUpload = listToMaybe $ r $/ s3Elem' "NextUploadIdMarker" &/ content
|
||||
uploadKeys = r $/ s3Elem' "Upload" &/ s3Elem' "Key" &/ content
|
||||
uploadIds = r $/ s3Elem' "Upload" &/ s3Elem' "UploadId" &/ content
|
||||
uploadInitTimeStr = r $/ s3Elem' "Upload" &/ s3Elem' "Initiated" &/ content
|
||||
@ -203,7 +166,7 @@ parseListPartsResponse xmldata = do
|
||||
ns <- asks getSvcNamespace
|
||||
let s3Elem' = s3Elem ns
|
||||
hasMore = ["true"] == (r $/ s3Elem' "IsTruncated" &/ content)
|
||||
nextPartNumStr = headMay $ r $/ s3Elem' "NextPartNumberMarker" &/ content
|
||||
nextPartNumStr = listToMaybe $ r $/ s3Elem' "NextPartNumberMarker" &/ content
|
||||
partNumberStr = r $/ s3Elem' "Part" &/ s3Elem' "PartNumber" &/ content
|
||||
partModTimeStr = r $/ s3Elem' "Part" &/ s3Elem' "LastModified" &/ content
|
||||
partETags = r $/ s3Elem' "Part" &/ s3Elem' "ETag" &/ content
|
||||
@ -220,13 +183,6 @@ parseListPartsResponse xmldata = do
|
||||
|
||||
return $ ListPartsResult hasMore (listToMaybe nextPartNum) partInfos
|
||||
|
||||
parseErrResponse :: (MonadIO m) => LByteString -> m ServiceErr
|
||||
parseErrResponse xmldata = do
|
||||
r <- parseRoot xmldata
|
||||
let code = T.concat $ r $/ element "Code" &/ content
|
||||
message = T.concat $ r $/ element "Message" &/ content
|
||||
return $ toServiceErr code message
|
||||
|
||||
parseNotification :: (MonadReader env m, HasSvcNamespace env, MonadIO m) => LByteString -> m Notification
|
||||
parseNotification xmldata = do
|
||||
r <- parseRoot xmldata
|
||||
@ -235,9 +191,10 @@ parseNotification xmldata = do
|
||||
qcfg = map node $ r $/ s3Elem' "QueueConfiguration"
|
||||
tcfg = map node $ r $/ s3Elem' "TopicConfiguration"
|
||||
lcfg = map node $ r $/ s3Elem' "CloudFunctionConfiguration"
|
||||
Notification <$> (mapM (parseNode ns "Queue") qcfg)
|
||||
<*> (mapM (parseNode ns "Topic") tcfg)
|
||||
<*> (mapM (parseNode ns "CloudFunction") lcfg)
|
||||
Notification
|
||||
<$> mapM (parseNode ns "Queue") qcfg
|
||||
<*> mapM (parseNode ns "Topic") tcfg
|
||||
<*> mapM (parseNode ns "CloudFunction") lcfg
|
||||
where
|
||||
getFilterRule ns c =
|
||||
let name = T.concat $ c $/ s3Elem ns "Name" &/ content
|
||||
@ -245,25 +202,29 @@ parseNotification xmldata = do
|
||||
in FilterRule name value
|
||||
parseNode ns arnName nodeData = do
|
||||
let c = fromNode nodeData
|
||||
id = T.concat $ c $/ s3Elem ns "Id" &/ content
|
||||
itemId = T.concat $ c $/ s3Elem ns "Id" &/ content
|
||||
arn = T.concat $ c $/ s3Elem ns arnName &/ content
|
||||
events = catMaybes $ map textToEvent $ c $/ s3Elem ns "Event" &/ content
|
||||
events = mapMaybe textToEvent (c $/ s3Elem ns "Event" &/ content)
|
||||
rules =
|
||||
c $/ s3Elem ns "Filter" &/ s3Elem ns "S3Key"
|
||||
&/ s3Elem ns "FilterRule" &| getFilterRule ns
|
||||
c
|
||||
$/ s3Elem ns "Filter"
|
||||
&/ s3Elem ns "S3Key"
|
||||
&/ s3Elem ns "FilterRule"
|
||||
&| getFilterRule ns
|
||||
return $
|
||||
NotificationConfig
|
||||
id
|
||||
itemId
|
||||
arn
|
||||
events
|
||||
(Filter $ FilterKey $ FilterRules rules)
|
||||
|
||||
parseSelectProgress :: MonadIO m => ByteString -> m Progress
|
||||
parseSelectProgress :: (MonadIO m) => ByteString -> m Progress
|
||||
parseSelectProgress xmldata = do
|
||||
r <- parseRoot $ LB.fromStrict xmldata
|
||||
let bScanned = T.concat $ r $/ element "BytesScanned" &/ content
|
||||
bProcessed = T.concat $ r $/ element "BytesProcessed" &/ content
|
||||
bReturned = T.concat $ r $/ element "BytesReturned" &/ content
|
||||
Progress <$> parseDecimal bScanned
|
||||
Progress
|
||||
<$> parseDecimal bScanned
|
||||
<*> parseDecimal bProcessed
|
||||
<*> parseDecimal bReturned
|
||||
|
||||
@@ -15,7 +15,7 @@
# resolver:
#  name: custom-snapshot
#  location: "./custom-snapshot.yaml"
resolver: lts-16.0
resolver: lts-19.7

# User packages to be built.
# Various formats can be used as shown in the example below.
@@ -39,9 +39,7 @@ packages:
- '.'
# Dependency packages to be pulled from upstream that are not in the resolver
# (e.g., acme-missiles-0.3)
extra-deps:
- unliftio-core-0.2.0.1
- protolude-0.3.0
extra-deps: []

# Override default flag values for local packages and extra-deps
flags: {}

@ -3,24 +3,10 @@
|
||||
# For more information, please see the documentation at:
|
||||
# https://docs.haskellstack.org/en/stable/lock_files
|
||||
|
||||
packages:
|
||||
- completed:
|
||||
hackage: unliftio-core-0.2.0.1@sha256:9b3e44ea9aacacbfc35b3b54015af450091916ac3618a41868ebf6546977659a,1082
|
||||
pantry-tree:
|
||||
size: 328
|
||||
sha256: e81c5a1e82ec2cd68cbbbec9cd60567363abe02257fa1370a906f6754b6818b8
|
||||
original:
|
||||
hackage: unliftio-core-0.2.0.1
|
||||
- completed:
|
||||
hackage: protolude-0.3.0@sha256:8361b811b420585b122a7ba715aa5923834db6e8c36309bf267df2dbf66b95ef,2693
|
||||
pantry-tree:
|
||||
size: 1644
|
||||
sha256: babf32b414f25f790b7a4ce6bae5c960bc51a11a289e7c47335b222e6762560c
|
||||
original:
|
||||
hackage: protolude-0.3.0
|
||||
packages: []
|
||||
snapshots:
|
||||
- completed:
|
||||
size: 531237
|
||||
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/16/0.yaml
|
||||
sha256: 210e15b7043e2783115afe16b0d54914b1611cdaa73f3ca3ca7f8e0847ff54e5
|
||||
original: lts-16.0
|
||||
size: 618884
|
||||
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/19/7.yaml
|
||||
sha256: 57d4ce67cc097fea2058446927987bc1f7408890e3a6df0da74e5e318f051c20
|
||||
original: lts-19.7
|
||||
|
||||
@ -1,7 +1,5 @@
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
--
|
||||
-- MinIO Haskell SDK, (C) 2017-2019 MinIO, Inc.
|
||||
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
@ -32,13 +30,13 @@ import qualified Network.HTTP.Client.MultipartFormData as Form
|
||||
import qualified Network.HTTP.Conduit as NC
|
||||
import qualified Network.HTTP.Types as HT
|
||||
import Network.Minio
|
||||
import Network.Minio.Credentials (Creds (CredsStatic))
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.Data.Crypto
|
||||
import Network.Minio.PutObject
|
||||
import Network.Minio.S3API
|
||||
import Network.Minio.Utils
|
||||
import System.Directory (getTemporaryDirectory)
|
||||
import System.Environment (lookupEnv)
|
||||
import qualified System.Environment as Env
|
||||
import qualified Test.QuickCheck as Q
|
||||
import Test.Tasty
|
||||
import Test.Tasty.HUnit
|
||||
@ -52,8 +50,8 @@ tests :: TestTree
|
||||
tests = testGroup "Tests" [liveServerUnitTests]
|
||||
|
||||
-- conduit that generates random binary stream of given length
|
||||
randomDataSrc :: MonadIO m => Int64 -> C.ConduitM () ByteString m ()
|
||||
randomDataSrc s' = genBS s'
|
||||
randomDataSrc :: (MonadIO m) => Int64 -> C.ConduitM () ByteString m ()
|
||||
randomDataSrc = genBS
|
||||
where
|
||||
concatIt bs n =
|
||||
BS.concat $
|
||||
@ -70,7 +68,7 @@ randomDataSrc s' = genBS s'
|
||||
yield $ concatIt byteArr64 oneMiB
|
||||
genBS (s - oneMiB)
|
||||
|
||||
mkRandFile :: R.MonadResource m => Int64 -> m FilePath
|
||||
mkRandFile :: (R.MonadResource m) => Int64 -> m FilePath
|
||||
mkRandFile size = do
|
||||
dir <- liftIO getTemporaryDirectory
|
||||
C.runConduit $ randomDataSrc size C..| CB.sinkTempFile dir "miniohstest.random"
|
||||
@ -78,15 +76,35 @@ mkRandFile size = do
|
||||
funTestBucketPrefix :: Text
|
||||
funTestBucketPrefix = "miniohstest-"
|
||||
|
||||
loadTestServer :: IO ConnectInfo
|
||||
loadTestServer = do
|
||||
val <- lookupEnv "MINIO_LOCAL"
|
||||
isSecure <- lookupEnv "MINIO_SECURE"
|
||||
loadTestServerConnInfo :: IO ConnectInfo
|
||||
loadTestServerConnInfo = do
|
||||
val <- Env.lookupEnv "MINIO_LOCAL"
|
||||
isSecure <- Env.lookupEnv "MINIO_SECURE"
|
||||
return $ case (val, isSecure) of
|
||||
(Just _, Just _) -> setCreds (Credentials "minio" "minio123") "https://localhost:9000"
|
||||
(Just _, Nothing) -> setCreds (Credentials "minio" "minio123") "http://localhost:9000"
|
||||
(Just _, Just _) -> setCreds (CredentialValue "minio" "minio123" mempty) "https://localhost:9000"
|
||||
(Just _, Nothing) -> setCreds (CredentialValue "minio" "minio123" mempty) "http://localhost:9000"
|
||||
(Nothing, _) -> minioPlayCI
|
||||
|
||||
loadTestServerConnInfoSTS :: IO ConnectInfo
|
||||
loadTestServerConnInfoSTS = do
|
||||
val <- Env.lookupEnv "MINIO_LOCAL"
|
||||
isSecure <- Env.lookupEnv "MINIO_SECURE"
|
||||
let cv = CredentialValue "minio" "minio123" mempty
|
||||
assumeRole =
|
||||
STSAssumeRole
|
||||
{ sarCredentials = cv,
|
||||
sarOptions = defaultSTSAssumeRoleOptions
|
||||
}
|
||||
case (val, isSecure) of
|
||||
(Just _, Just _) -> setSTSCredential assumeRole "https://localhost:9000"
|
||||
(Just _, Nothing) -> setSTSCredential assumeRole "http://localhost:9000"
|
||||
(Nothing, _) -> do
|
||||
cv' <- case connectCreds minioPlayCI of
|
||||
CredsStatic c -> return c
|
||||
_ -> error "unexpected play creds"
|
||||
let assumeRole' = assumeRole {sarCredentials = cv'}
|
||||
setSTSCredential assumeRole' minioPlayCI
|
||||
|
||||
funTestWithBucket ::
|
||||
TestName ->
|
||||
(([Char] -> Minio ()) -> Bucket -> Minio ()) ->
|
||||
@ -96,7 +114,7 @@ funTestWithBucket t minioTest = testCaseSteps t $ \step -> do
|
||||
bktSuffix <- liftIO $ generate $ Q.vectorOf 10 (Q.choose ('a', 'z'))
|
||||
let b = T.concat [funTestBucketPrefix, T.pack bktSuffix]
|
||||
liftStep = liftIO . step
|
||||
connInfo <- loadTestServer
|
||||
connInfo <- loadTestServerConnInfo
|
||||
ret <- runMinio connInfo $ do
|
||||
liftStep $ "Creating bucket for test - " ++ t
|
||||
foundBucket <- bucketExists b
|
||||
@ -106,6 +124,17 @@ funTestWithBucket t minioTest = testCaseSteps t $ \step -> do
|
||||
deleteBucket b
|
||||
isRight ret @? ("Functional test " ++ t ++ " failed => " ++ show ret)
|
||||
|
||||
connInfoSTS <- loadTestServerConnInfoSTS
|
||||
let t' = t ++ " (with AssumeRole Credentials)"
|
||||
ret' <- runMinio connInfoSTS $ do
|
||||
liftStep $ "Creating bucket for test - " ++ t'
|
||||
foundBucket <- bucketExists b
|
||||
liftIO $ foundBucket @?= False
|
||||
makeBucket b Nothing
|
||||
minioTest liftStep b
|
||||
deleteBucket b
|
||||
isRight ret' @? ("Functional test " ++ t' ++ " failed => " ++ show ret')
|
||||
|
||||
liveServerUnitTests :: TestTree
|
||||
liveServerUnitTests =
|
||||
testGroup
|
||||
@ -126,7 +155,8 @@ liveServerUnitTests =
|
||||
presignedUrlFunTest,
|
||||
presignedPostPolicyFunTest,
|
||||
bucketPolicyFunTest,
|
||||
getNPutSSECTest
|
||||
getNPutSSECTest,
|
||||
assumeRoleRequestTest
|
||||
]
|
||||
|
||||
basicTests :: TestTree
|
||||
@ -134,12 +164,13 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
\step bucket -> do
|
||||
step "getService works and contains the test bucket."
|
||||
buckets <- getService
|
||||
unless (length (filter (== bucket) $ map biName buckets) == 1)
|
||||
$ liftIO
|
||||
$ assertFailure
|
||||
( "The bucket " ++ show bucket
|
||||
++ " was expected to exist."
|
||||
)
|
||||
unless (length (filter (== bucket) $ map biName buckets) == 1) $
|
||||
liftIO $
|
||||
assertFailure
|
||||
( "The bucket "
|
||||
++ show bucket
|
||||
++ " was expected to exist."
|
||||
)
|
||||
|
||||
step "makeBucket again to check if BucketAlreadyOwnedByYou exception is raised."
|
||||
mbE <- try $ makeBucket bucket Nothing
|
||||
@ -180,7 +211,7 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
"test-file"
|
||||
outFile
|
||||
defaultGetObjectOptions
|
||||
{ gooIfUnmodifiedSince = (Just unmodifiedTime)
|
||||
{ gooIfUnmodifiedSince = Just unmodifiedTime
|
||||
}
|
||||
case resE of
|
||||
Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold"
|
||||
@ -194,7 +225,7 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
"test-file"
|
||||
outFile
|
||||
defaultGetObjectOptions
|
||||
{ gooIfMatch = (Just "invalid-etag")
|
||||
{ gooIfMatch = Just "invalid-etag"
|
||||
}
|
||||
case resE1 of
|
||||
Left exn -> liftIO $ exn @?= ServiceErr "PreconditionFailed" "At least one of the pre-conditions you specified did not hold"
|
||||
@ -208,7 +239,7 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
"test-file"
|
||||
outFile
|
||||
defaultGetObjectOptions
|
||||
{ gooRange = (Just $ HT.ByteRangeFromTo 100 300)
|
||||
{ gooRange = Just $ HT.ByteRangeFromTo 100 300
|
||||
}
|
||||
case resE2 of
|
||||
Left exn -> liftIO $ exn @?= ServiceErr "InvalidRange" "The requested range is not satisfiable"
|
||||
@ -220,7 +251,7 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
"test-file"
|
||||
outFile
|
||||
defaultGetObjectOptions
|
||||
{ gooRange = (Just $ HT.ByteRangeFrom 1)
|
||||
{ gooRange = Just $ HT.ByteRangeFrom 1
|
||||
}
|
||||
|
||||
step "fGetObject a non-existent object and check for NoSuchKey exception"
|
||||
@ -231,7 +262,7 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
|
||||
step "create new multipart upload works"
|
||||
uid <- newMultipartUpload bucket "newmpupload" []
|
||||
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.")
|
||||
liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
|
||||
|
||||
step "abort a new multipart upload works"
|
||||
abortMultipartUpload bucket "newmpupload" uid
|
||||
@ -247,7 +278,7 @@ basicTests = funTestWithBucket "Basic tests" $
|
||||
|
||||
step "get metadata of the object"
|
||||
res <- statObject bucket object defaultGetObjectOptions
|
||||
liftIO $ (oiSize res) @?= 0
|
||||
liftIO $ oiSize res @?= 0
|
||||
|
||||
step "delete object"
|
||||
deleteObject bucket object
|
||||
@ -262,7 +293,7 @@ lowLevelMultipartTest = funTestWithBucket "Low-level Multipart Test" $
|
||||
step "Prepare for low-level multipart tests."
|
||||
step "create new multipart upload"
|
||||
uid <- newMultipartUpload bucket object []
|
||||
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.")
|
||||
liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
|
||||
|
||||
randFile <- mkRandFile mb15
|
||||
|
||||
@ -279,7 +310,8 @@ lowLevelMultipartTest = funTestWithBucket "Low-level Multipart Test" $
|
||||
fGetObject bucket object destFile defaultGetObjectOptions
|
||||
gotSize <- withNewHandle destFile getFileSize
|
||||
liftIO $
|
||||
gotSize == Right (Just mb15)
|
||||
gotSize
|
||||
== Right (Just mb15)
|
||||
@? "Wrong file size of put file after getting"
|
||||
|
||||
step "Cleanup actions"
|
||||
@ -303,7 +335,8 @@ putObjectSizeTest = funTestWithBucket "PutObject of conduit source with size" $
|
||||
fGetObject bucket obj destFile defaultGetObjectOptions
|
||||
gotSize <- withNewHandle destFile getFileSize
|
||||
liftIO $
|
||||
gotSize == Right (Just mb1)
|
||||
gotSize
|
||||
== Right (Just mb1)
|
||||
@? "Wrong file size of put file after getting"
|
||||
|
||||
step "Cleanup actions"
|
||||
@ -327,7 +360,8 @@ putObjectNoSizeTest = funTestWithBucket "PutObject of conduit source with no siz
|
||||
fGetObject bucket obj destFile defaultGetObjectOptions
|
||||
gotSize <- withNewHandle destFile getFileSize
|
||||
liftIO $
|
||||
gotSize == Right (Just mb70)
|
||||
gotSize
|
||||
== Right (Just mb70)
|
||||
@? "Wrong file size of put file after getting"
|
||||
|
||||
step "Cleanup actions"
|
||||
@ -338,22 +372,20 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
|
||||
\step bucket -> do
|
||||
step "High-level listObjects Test"
|
||||
step "put 3 objects"
|
||||
let expectedObjects = ["dir/o1", "dir/dir1/o2", "dir/dir2/o3", "o4"]
|
||||
extractObjectsFromList os =
|
||||
let extractObjectsFromList =
|
||||
mapM
|
||||
( \t -> case t of
|
||||
( \case
|
||||
ListItemObject o -> Just $ oiObject o
|
||||
_ -> Nothing
|
||||
)
|
||||
os
|
||||
expectedNonRecList = ["o4", "dir/"]
|
||||
extractObjectsAndDirsFromList os =
|
||||
extractObjectsAndDirsFromList =
|
||||
map
|
||||
( \t -> case t of
|
||||
( \case
|
||||
ListItemObject o -> oiObject o
|
||||
ListItemPrefix d -> d
|
||||
)
|
||||
os
|
||||
expectedObjects = ["dir/o1", "dir/dir1/o2", "dir/dir2/o3", "o4"]
|
||||
expectedNonRecList = ["o4", "dir/"]
|
||||
|
||||
testFilepath <- mkRandFile 200
|
||||
forM_ expectedObjects $
|
||||
@ -361,8 +393,9 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
|
||||
|
||||
step "High-level listing of objects"
|
||||
items <- C.runConduit $ listObjects bucket Nothing False C..| sinkList
|
||||
liftIO $ assertEqual "Objects/Dirs match failed!" expectedNonRecList $
|
||||
extractObjectsAndDirsFromList items
|
||||
liftIO $
|
||||
assertEqual "Objects/Dirs match failed!" expectedNonRecList $
|
||||
extractObjectsAndDirsFromList items
|
||||
|
||||
step "High-level recursive listing of objects"
|
||||
objects <- C.runConduit $ listObjects bucket Nothing True C..| sinkList
|
||||
@ -375,8 +408,9 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
|
||||
|
||||
step "High-level listing of objects (version 1)"
|
||||
itemsV1 <- C.runConduit $ listObjectsV1 bucket Nothing False C..| sinkList
|
||||
liftIO $ assertEqual "Objects/Dirs match failed!" expectedNonRecList $
|
||||
extractObjectsAndDirsFromList itemsV1
|
||||
liftIO $
|
||||
assertEqual "Objects/Dirs match failed!" expectedNonRecList $
|
||||
extractObjectsAndDirsFromList itemsV1
|
||||
|
||||
step "High-level recursive listing of objects (version 1)"
|
||||
objectsV1 <-
|
||||
@ -433,7 +467,7 @@ highLevelListingTest = funTestWithBucket "High-level listObjects Test" $
|
||||
step "create 10 multipart uploads"
|
||||
forM_ [1 .. 10 :: Int] $ \_ -> do
|
||||
uid <- newMultipartUpload bucket object []
|
||||
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.")
|
||||
liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
|
||||
|
||||
step "High-level listing of incomplete multipart uploads"
|
||||
uploads <-
|
||||
@ -495,7 +529,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
|
||||
map
|
||||
( T.concat
|
||||
. ("test-file-" :)
|
||||
. (\x -> [x])
|
||||
. (: [])
|
||||
. T.pack
|
||||
. show
|
||||
)
|
||||
@ -514,7 +548,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
|
||||
let object = "newmpupload"
|
||||
forM_ [1 .. 10 :: Int] $ \_ -> do
|
||||
uid <- newMultipartUpload bucket object []
|
||||
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.")
|
||||
liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
|
||||
|
||||
step "list incomplete multipart uploads"
|
||||
incompleteUploads <-
|
||||
@ -525,7 +559,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
|
||||
Nothing
|
||||
Nothing
|
||||
Nothing
|
||||
liftIO $ (length $ lurUploads incompleteUploads) @?= 10
|
||||
liftIO $ length (lurUploads incompleteUploads) @?= 10
|
||||
|
||||
step "cleanup"
|
||||
forM_ (lurUploads incompleteUploads) $
|
||||
@ -536,7 +570,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
|
||||
|
||||
step "create a multipart upload"
|
||||
uid <- newMultipartUpload bucket object []
|
||||
liftIO $ (T.length uid > 0) @? ("Got an empty multipartUpload Id.")
|
||||
liftIO $ (T.length uid > 0) @? "Got an empty multipartUpload Id."
|
||||
|
||||
step "put object parts 1..10"
|
||||
inputFile <- mkRandFile mb5
|
||||
@ -546,7 +580,7 @@ listingTest = funTestWithBucket "Listing Test" $ \step bucket -> do
|
||||
|
||||
step "fetch list parts"
|
||||
listPartsResult <- listIncompleteParts' bucket object uid Nothing Nothing
|
||||
liftIO $ (length $ lprParts listPartsResult) @?= 10
|
||||
liftIO $ length (lprParts listPartsResult) @?= 10
|
||||
abortMultipartUpload bucket object uid
|
||||
|
||||
presignedUrlFunTest :: TestTree
|
||||
@ -569,6 +603,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
|
||||
[]
|
||||
[]
|
||||
|
||||
print putUrl
|
||||
let size1 = 1000 :: Int64
|
||||
inputFile <- mkRandFile size1
|
||||
|
||||
@ -615,7 +650,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
|
||||
headUrl <- presignedHeadObjectUrl bucket obj2 3600 []
|
||||
|
||||
headResp <- do
|
||||
let req = NC.parseRequest_ $ toS $ decodeUtf8 headUrl
|
||||
let req = NC.parseRequest_ $ decodeUtf8 headUrl
|
||||
NC.httpLbs (req {NC.method = HT.methodHead}) mgr
|
||||
liftIO $
|
||||
(NC.responseStatus headResp == HT.status200)
|
||||
@ -643,7 +678,7 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
|
||||
mapM_ (removeObject bucket) [obj, obj2]
|
||||
where
|
||||
putR size filePath mgr url = do
|
||||
let req = NC.parseRequest_ $ toS $ decodeUtf8 url
|
||||
let req = NC.parseRequest_ $ decodeUtf8 url
|
||||
let req' =
|
||||
req
|
||||
{ NC.method = HT.methodPut,
|
||||
@ -653,14 +688,14 @@ presignedUrlFunTest = funTestWithBucket "presigned Url tests" $
|
||||
}
|
||||
NC.httpLbs req' mgr
|
||||
getR mgr url = do
|
||||
let req = NC.parseRequest_ $ toS $ decodeUtf8 url
|
||||
let req = NC.parseRequest_ $ decodeUtf8 url
|
||||
NC.httpLbs req mgr
|
||||
|
||||
presignedPostPolicyFunTest :: TestTree
|
||||
presignedPostPolicyFunTest = funTestWithBucket "Presigned Post Policy tests" $
|
||||
\step bucket -> do
|
||||
step "presignedPostPolicy basic test"
|
||||
now <- liftIO $ Time.getCurrentTime
|
||||
now <- liftIO Time.getCurrentTime
|
||||
|
||||
let key = "presignedPostPolicyTest/myfile"
|
||||
policyConds =
|
||||
@ -689,9 +724,9 @@ presignedPostPolicyFunTest = funTestWithBucket "Presigned Post Policy tests" $
|
||||
mapM_ (removeObject bucket) [key]
|
||||
where
|
||||
postForm url formData inputFile = do
|
||||
req <- NC.parseRequest $ toS $ decodeUtf8 url
|
||||
req <- NC.parseRequest $ decodeUtf8 url
|
||||
let parts =
|
||||
map (\(x, y) -> Form.partBS x y) $
|
||||
map (uncurry Form.partBS) $
|
||||
H.toList formData
|
||||
parts' = parts ++ [Form.partFile "file" inputFile]
|
||||
req' <- Form.formDataBody parts' req
|
||||
@ -738,17 +773,17 @@ bucketPolicyFunTest = funTestWithBucket "Bucket Policy tests" $
|
||||
[ proto,
|
||||
getHostAddr connInfo,
|
||||
"/",
|
||||
toUtf8 bucket,
|
||||
encodeUtf8 bucket,
|
||||
"/",
|
||||
toUtf8 obj
|
||||
encodeUtf8 obj
|
||||
]
|
||||
respE <-
|
||||
liftIO $
|
||||
(fmap (Right . toStrictBS) $ NC.simpleHttp $ toS $ decodeUtf8 url)
|
||||
fmap (Right . toStrictBS) (NC.simpleHttp $ decodeUtf8 url)
|
||||
`catch` (\(e :: NC.HttpException) -> return $ Left (show e :: Text))
|
||||
case respE of
|
||||
Left err -> liftIO $ assertFailure $ show err
|
||||
Right s -> liftIO $ s @?= (BS.concat $ replicate 100 "c")
|
||||
Right s -> liftIO $ s @?= BS.concat (replicate 100 "c")
|
||||
|
||||
deleteObject bucket obj
|
||||
|
||||
@ -803,7 +838,7 @@ multipartTest = funTestWithBucket "Multipart Tests" $
|
||||
C.runConduit $
|
||||
listIncompleteUploads bucket (Just object) False
|
||||
C..| sinkList
|
||||
liftIO $ (null uploads) @? "removeIncompleteUploads didn't complete successfully"
|
||||
liftIO $ null uploads @? "removeIncompleteUploads didn't complete successfully"
|
||||
|
||||
putObjectContentTypeTest :: TestTree
|
||||
putObjectContentTypeTest = funTestWithBucket "putObject contentType tests" $
|
||||
@ -910,8 +945,9 @@ putObjectUserMetadataTest = funTestWithBucket "putObject user-metadata test" $
|
||||
let m = oiUserMetadata oi
|
||||
-- need to do a case-insensitive comparison
|
||||
sortedMeta =
|
||||
sort $ map (\(k, v) -> (T.toLower k, T.toLower v)) $
|
||||
H.toList m
|
||||
sort $
|
||||
map (bimap T.toLower T.toLower) $
|
||||
H.toList m
|
||||
ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")]
|
||||
|
||||
liftIO $ (sortedMeta == ref) @? "Metadata mismatch!"
|
||||
@ -944,8 +980,9 @@ getObjectTest = funTestWithBucket "getObject test" $
|
||||
let m = oiUserMetadata $ gorObjectInfo gor
|
||||
-- need to do a case-insensitive comparison
|
||||
sortedMeta =
|
||||
sort $ map (\(k, v) -> (T.toLower k, T.toLower v)) $
|
||||
H.toList m
|
||||
sort $
|
||||
map (bimap T.toLower T.toLower) $
|
||||
H.toList m
|
||||
ref = sort [("mykey1", "myval1"), ("mykey2", "myval2")]
|
||||
|
||||
liftIO $ (sortedMeta == ref) @? "Metadata mismatch!"
|
||||
@ -1073,7 +1110,7 @@ copyObjectTests = funTestWithBucket "copyObject related tests" $
|
||||
copyObjectPart
|
||||
dstInfo'
|
||||
srcInfo'
|
||||
{ srcRange = Just $ (,) ((p -1) * mb5) ((p -1) * mb5 + (mb5 - 1))
|
||||
{ srcRange = Just $ (,) ((p - 1) * mb5) ((p - 1) * mb5 + (mb5 - 1))
|
||||
}
|
||||
uid
|
||||
(fromIntegral p)
|
||||
@ -1174,9 +1211,37 @@ getNPutSSECTest =
|
||||
|
||||
gotSize <- withNewHandle dstFile getFileSize
|
||||
liftIO $
|
||||
gotSize == Right (Just mb1)
|
||||
gotSize
|
||||
== Right (Just mb1)
|
||||
@? "Wrong file size of object when getting"
|
||||
|
||||
step "Cleanup"
|
||||
deleteObject bucket obj
|
||||
else step "Skipping encryption test as server is not using TLS"
|
||||
|
||||
assumeRoleRequestTest :: TestTree
|
||||
assumeRoleRequestTest = testCaseSteps "Assume Role STS API" $ \step -> do
|
||||
step "Load credentials"
|
||||
val <- Env.lookupEnv "MINIO_LOCAL"
|
||||
isSecure <- Env.lookupEnv "MINIO_SECURE"
|
||||
let localMinioCred = Just $ CredentialValue "minio" "minio123" mempty
|
||||
playCreds =
|
||||
case connectCreds minioPlayCI of
|
||||
CredsStatic c -> Just c
|
||||
_ -> Nothing
|
||||
(cvMay, loc) =
|
||||
case (val, isSecure) of
|
||||
(Just _, Just _) -> (localMinioCred, "https://localhost:9000")
|
||||
(Just _, Nothing) -> (localMinioCred, "http://localhost:9000")
|
||||
(Nothing, _) -> (playCreds, "https://play.min.io:9000")
|
||||
cv <- maybe (assertFailure "bad creds") return cvMay
|
||||
let assumeRole =
|
||||
STSAssumeRole cv $
|
||||
defaultSTSAssumeRoleOptions
|
||||
{ saroLocation = Just "us-east-1",
|
||||
saroEndpoint = Just loc
|
||||
}
|
||||
step "AssumeRole request"
|
||||
res <- requestSTSCredential assumeRole
|
||||
let v = credentialValueText $ fst res
|
||||
print (v, snd res)
|
||||
|
||||
@ -24,7 +24,6 @@ module Network.Minio.API.Test
|
||||
where
|
||||
|
||||
import Data.Aeson (eitherDecode)
|
||||
import Lib.Prelude
|
||||
import Network.Minio.API
|
||||
import Network.Minio.AdminAPI
|
||||
import Test.Tasty
|
||||
@ -63,8 +62,9 @@ parseServerInfoJSONTest =
|
||||
testGroup "Parse MinIO Admin API ServerInfo JSON test" $
|
||||
map
|
||||
( \(tName, tDesc, tfn, tVal) ->
|
||||
testCase tName $ assertBool tDesc $
|
||||
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
|
||||
testCase tName $
|
||||
assertBool tDesc $
|
||||
tfn (eitherDecode tVal :: Either [Char] [ServerInfo])
|
||||
)
|
||||
testCases
|
||||
where
|
||||
@ -82,8 +82,9 @@ parseHealStatusTest =
|
||||
testGroup "Parse MinIO Admin API HealStatus JSON test" $
|
||||
map
|
||||
( \(tName, tDesc, tfn, tVal) ->
|
||||
testCase tName $ assertBool tDesc $
|
||||
tfn (eitherDecode tVal :: Either [Char] HealStatus)
|
||||
testCase tName $
|
||||
assertBool tDesc $
|
||||
tfn (eitherDecode tVal :: Either [Char] HealStatus)
|
||||
)
|
||||
testCases
|
||||
where
|
||||
@ -101,8 +102,9 @@ parseHealStartRespTest =
|
||||
testGroup "Parse MinIO Admin API HealStartResp JSON test" $
|
||||
map
|
||||
( \(tName, tDesc, tfn, tVal) ->
|
||||
testCase tName $ assertBool tDesc $
|
||||
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
|
||||
testCase tName $
|
||||
assertBool tDesc $
|
||||
tfn (eitherDecode tVal :: Either [Char] HealStartResp)
|
||||
)
|
||||
testCases
|
||||
where
|
||||
|
||||
@ -34,7 +34,7 @@ jsonParserTests =
|
||||
]
|
||||
|
||||
tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
|
||||
tryValidationErr act = try act
|
||||
tryValidationErr = try
|
||||
|
||||
assertValidationErr :: MErrV -> Assertion
|
||||
assertValidationErr e = assertFailure $ "Failed due to validation error => " ++ show e
|
||||
@ -43,9 +43,9 @@ testParseErrResponseJSON :: Assertion
|
||||
testParseErrResponseJSON = do
|
||||
-- 1. Test parsing of an invalid error json.
|
||||
parseResE <- tryValidationErr $ parseErrResponseJSON "ClearlyInvalidJSON"
|
||||
when (isRight parseResE)
|
||||
$ assertFailure
|
||||
$ "Parsing should have failed => " ++ show parseResE
|
||||
when (isRight parseResE) $
|
||||
assertFailure $
|
||||
"Parsing should have failed => " ++ show parseResE
|
||||
|
||||
forM_ cases $ \(jsondata, sErr) -> do
|
||||
parseErr <- tryValidationErr $ parseErrResponseJSON jsondata
|
||||
|
||||
@ -19,7 +19,6 @@ module Network.Minio.TestHelpers
|
||||
)
|
||||
where
|
||||
|
||||
import Lib.Prelude
|
||||
import Network.Minio.Data
|
||||
|
||||
newtype TestNS = TestNS {testNamespace :: Text}
|
||||
|
||||
@ -19,7 +19,6 @@ module Network.Minio.Utils.Test
|
||||
)
|
||||
where
|
||||
|
||||
import Lib.Prelude
|
||||
import Network.Minio.Utils
|
||||
import Test.Tasty
|
||||
import Test.Tasty.HUnit
|
||||
|
||||
@ -20,6 +20,7 @@ module Network.Minio.XmlGenerator.Test
|
||||
)
|
||||
where
|
||||
|
||||
import qualified Data.ByteString.Lazy as LBS
|
||||
import Lib.Prelude
|
||||
import Network.Minio.Data
|
||||
import Network.Minio.TestHelpers
|
||||
@ -28,6 +29,7 @@ import Network.Minio.XmlParser (parseNotification)
|
||||
import Test.Tasty
|
||||
import Test.Tasty.HUnit
|
||||
import Text.RawString.QQ (r)
|
||||
import Text.XML (def, parseLBS)
|
||||
|
||||
xmlGeneratorTests :: TestTree
|
||||
xmlGeneratorTests =
|
||||
@ -90,11 +92,12 @@ testMkPutNotificationRequest =
|
||||
"1"
|
||||
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
|
||||
[ObjectCreatedPut]
|
||||
( Filter $ FilterKey $
|
||||
FilterRules
|
||||
[ FilterRule "prefix" "images/",
|
||||
FilterRule "suffix" ".jpg"
|
||||
]
|
||||
( Filter $
|
||||
FilterKey $
|
||||
FilterRules
|
||||
[ FilterRule "prefix" "images/",
|
||||
FilterRule "suffix" ".jpg"
|
||||
]
|
||||
),
|
||||
NotificationConfig
|
||||
""
|
||||
@ -119,7 +122,13 @@ testMkPutNotificationRequest =
|
||||
testMkSelectRequest :: Assertion
|
||||
testMkSelectRequest = mapM_ assertFn cases
|
||||
where
|
||||
assertFn (a, b) = assertEqual "selectRequest XML should match: " b $ mkSelectRequest a
|
||||
assertFn (a, b) =
|
||||
let generatedReqDoc = parseLBS def $ LBS.fromStrict $ mkSelectRequest a
|
||||
expectedReqDoc = parseLBS def $ LBS.fromStrict b
|
||||
in case (generatedReqDoc, expectedReqDoc) of
|
||||
(Right genDoc, Right expDoc) -> assertEqual "selectRequest XML should match: " expDoc genDoc
|
||||
(Left err, _) -> assertFailure $ "Generated selectRequest failed to parse as XML" ++ show err
|
||||
(_, Left err) -> assertFailure $ "Expected selectRequest failed to parse as XML" ++ show err
|
||||
cases =
|
||||
[ ( SelectRequest
|
||||
"Select * from S3Object"
|
||||
@ -142,32 +151,32 @@ testMkSelectRequest = mapM_ assertFn cases
|
||||
<> quoteEscapeCharacter "\""
|
||||
)
|
||||
(Just False),
|
||||
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><QuoteCharacter>"</QuoteCharacter><RecordDelimiter>
</RecordDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteEscapeCharacter>"</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></InputSerialization><OutputSerialization><CSV><QuoteCharacter>"</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><CSV><FieldDelimiter>,</FieldDelimiter><FileHeaderInfo>IGNORE</FileHeaderInfo><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><RecordDelimiter>
</RecordDelimiter></CSV></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
),
( setRequestProgressEnabled False
$ setInputCompressionType CompressionTypeGzip
$ selectRequest
"Select * from S3Object"
documentJsonInput
(outputJSONFromRecordDelimiter "\n"),
( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeGzip $
selectRequest
"Select * from S3Object"
documentJsonInput
(outputJSONFromRecordDelimiter "\n"),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>GZIP</CompressionType><JSON><Type>DOCUMENT</Type></JSON></InputSerialization><OutputSerialization><JSON><RecordDelimiter>
</RecordDelimiter></JSON></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
),
( setRequestProgressEnabled False
$ setInputCompressionType CompressionTypeNone
$ selectRequest
"Select * from S3Object"
defaultParquetInput
( outputCSVFromProps $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><QuoteCharacter>"</QuoteCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><FieldDelimiter>,</FieldDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
( setRequestProgressEnabled False $
setInputCompressionType CompressionTypeNone $
selectRequest
"Select * from S3Object"
defaultParquetInput
( outputCSVFromProps $
quoteFields QuoteFieldsAsNeeded
<> recordDelimiter "\n"
<> fieldDelimiter ","
<> quoteCharacter "\""
<> quoteEscapeCharacter "\""
),
[r|<?xml version="1.0" encoding="UTF-8"?><SelectRequest><Expression>Select * from S3Object</Expression><ExpressionType>SQL</ExpressionType><InputSerialization><CompressionType>NONE</CompressionType><Parquet/></InputSerialization><OutputSerialization><CSV><FieldDelimiter>,</FieldDelimiter><QuoteCharacter>"</QuoteCharacter><QuoteEscapeCharacter>"</QuoteEscapeCharacter><QuoteFields>ASNEEDED</QuoteFields><RecordDelimiter>
</RecordDelimiter></CSV></OutputSerialization><RequestProgress><Enabled>FALSE</Enabled></RequestProgress></SelectRequest>|]
)
]

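The testMkSelectRequest refactor above no longer compares the generated request bytes directly: both the generated and the expected XML are parsed with Text.XML's parseLBS, the resulting Documents are compared, and a parse failure on either side is reported separately. A minimal standalone sketch of that pattern, assuming the xml-conduit package; the literal strings and main are illustrative only:

{-# LANGUAGE OverloadedStrings #-}

import Text.XML (def, parseLBS)

-- Parse two XML byte strings and compare the resulting Documents.
main :: IO ()
main = do
  let gen = "<SelectRequest><Expression>Select * from S3Object</Expression></SelectRequest>"
      expected = "<SelectRequest><Expression>Select * from S3Object</Expression></SelectRequest>"
  case (parseLBS def gen, parseLBS def expected) of
    (Right g, Right e) -> print (g == e)                               -- True when structurally equal
    (Left err, _) -> putStrLn ("generated XML failed to parse: " ++ show err)
    (_, Left err) -> putStrLn ("expected XML failed to parse: " ++ show err)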
@ -49,7 +49,7 @@ xmlParserTests =
]

tryValidationErr :: (MonadUnliftIO m) => m a -> m (Either MErrV a)
tryValidationErr act = try act
tryValidationErr = try

assertValidtionErr :: MErrV -> Assertion
assertValidtionErr e = assertFailure $ "Failed due to validation error => " ++ show e
@ -62,9 +62,9 @@ testParseLocation :: Assertion
testParseLocation = do
-- 1. Test parsing of an invalid location constraint xml.
parseResE <- tryValidationErr $ parseLocation "ClearlyInvalidXml"
when (isRight parseResE)
$ assertFailure
$ "Parsing should have failed => " ++ show parseResE
when (isRight parseResE) $
assertFailure $
"Parsing should have failed => " ++ show parseResE

forM_ cases $ \(xmldata, expectedLocation) -> do
parseLocE <- tryValidationErr $ parseLocation xmldata
@ -344,11 +344,12 @@ testParseNotification = do
"1"
"arn:aws:sqs:us-west-2:444455556666:s3notificationqueue"
[ObjectCreatedPut]
( Filter $ FilterKey $
FilterRules
[ FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"
]
( Filter $
FilterKey $
FilterRules
[ FilterRule "prefix" "images/",
FilterRule "suffix" ".jpg"
]
),
NotificationConfig
""

34
test/Spec.hs
@ -1,5 +1,5 @@
--
-- MinIO Haskell SDK, (C) 2017, 2018 MinIO, Inc.
-- MinIO Haskell SDK, (C) 2017-2023 MinIO, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@ -20,7 +20,6 @@ import Lib.Prelude
import Network.Minio.API.Test
import Network.Minio.CopyObject
import Network.Minio.Data
import Network.Minio.PutObject
import Network.Minio.Utils.Test
import Network.Minio.XmlGenerator.Test
import Network.Minio.XmlParser.Test
@ -55,31 +54,33 @@ qcProps =
\n ->
let (pns, offs, sizes) = L.unzip3 (selectPartSizes n)
-- check that pns increments from 1.
isPNumsAscendingFrom1 = all (\(a, b) -> a == b) $ zip pns [1 ..]
isPNumsAscendingFrom1 = all (uncurry (==)) $ zip pns [1 ..]
consPairs [] = []
consPairs [_] = []
consPairs (a : (b : c)) = (a, b) : (consPairs (b : c))
consPairs (a : (b : c)) = (a, b) : consPairs (b : c)
-- check `offs` is monotonically increasing.
isOffsetsAsc = all (\(a, b) -> a < b) $ consPairs offs
isOffsetsAsc = all (uncurry (<)) $ consPairs offs
-- check sizes sums to n.
isSumSizeOk = sum sizes == n
-- check sizes are constant except last
isSizesConstantExceptLast =
all (\(a, b) -> a == b) (consPairs $ L.init sizes)
all (uncurry (==)) (consPairs $ L.init sizes)
-- check each part except last is at least minPartSize;
-- last part may be 0 only if it is the only part.
nparts = length sizes
isMinPartSizeOk =
if
| nparts > 1 -> -- last part can be smaller but > 0
all (>= minPartSize) (take (nparts - 1) sizes)
&& all (\s -> s > 0) (drop (nparts - 1) sizes)
all (>= minPartSize) (take (nparts - 1) sizes)
&& all (> 0) (drop (nparts - 1) sizes)
| nparts == 1 -> -- size may be 0 here.
maybe True (\x -> x >= 0 && x <= minPartSize) $
headMay sizes
maybe True (\x -> x >= 0 && x <= minPartSize) $
listToMaybe sizes
| otherwise -> False
in n < 0
|| ( isPNumsAscendingFrom1 && isOffsetsAsc && isSumSizeOk
|| ( isPNumsAscendingFrom1
&& isOffsetsAsc
&& isSumSizeOk
&& isSizesConstantExceptLast
&& isMinPartSizeOk
),
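For context on the adjacent-pairs checks above (consPairs combined with all (uncurry (<)) and all (uncurry (==))), a minimal standalone sketch: consPairs is copied from the diff, while main and the sample list are illustrative only.

-- Adjacent-pairs helper as defined in the property test above.
consPairs :: [a] -> [(a, a)]
consPairs [] = []
consPairs [_] = []
consPairs (a : (b : c)) = (a, b) : consPairs (b : c)

main :: IO ()
main = do
  let offs = [0, 10, 20, 35] :: [Int]
  print (consPairs offs)                     -- [(0,10),(10,20),(20,35)]
  print (all (uncurry (<)) (consPairs offs)) -- True: offsets strictly increasing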
@ -89,23 +90,24 @@ qcProps =
-- is last part's snd offset end?
isLastPartOk = maybe False ((end ==) . snd) $ lastMay pairs
-- is first part's fst offset start
isFirstPartOk = maybe False ((start ==) . fst) $ headMay pairs
isFirstPartOk = maybe False ((start ==) . fst) $ listToMaybe pairs
-- each pair is >=64MiB except last, and all those parts
-- have same size.
initSizes = maybe [] (map (\(a, b) -> b - a + 1)) $ initMay pairs
initSizes = maybe [] (map (\(a, b) -> b - a + 1) . init) (nonEmpty pairs)
isPartSizesOk =
all (>= minPartSize) initSizes
&& maybe
True
(\k -> all (== k) initSizes)
(headMay initSizes)
(listToMaybe initSizes)
-- returned offsets are contiguous.
fsts = drop 1 $ map fst pairs
snds = take (length pairs - 1) $ map snd pairs
isContParts =
length fsts == length snds
&& and (map (\(a, b) -> a == b + 1) $ zip fsts snds)
in start < 0 || start > end
&& all (\(a, b) -> a == b + 1) (zip fsts snds)
in start < 0
|| start > end
|| (isLastPartOk && isFirstPartOk && isPartSizesOk && isContParts),
QC.testProperty "mkSSECKey:" $
\w8s ->

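The contiguity check above asserts that each part's start offset is exactly one past the previous part's end offset. A minimal standalone sketch of that check on hypothetical part ranges (the pairs and main are illustrative only):

main :: IO ()
main = do
  let pairs = [(0, 9), (10, 19), (20, 29)] :: [(Int, Int)]
      fsts = drop 1 (map fst pairs)                  -- [10,20]
      snds = take (length pairs - 1) (map snd pairs) -- [9,19]
  print (all (\(a, b) -> a == b + 1) (zip fsts snds)) -- True: ranges are contiguous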