Merge pull request #207 from fpco/master

Update prod from master
Michael Snoyman 2016-10-16 10:38:59 +03:00 committed by GitHub
commit 89f8650151
5 changed files with 87 additions and 112 deletions

.travis.yml View File

@@ -1,3 +1,10 @@
+# This is the simple Travis configuration, which is intended for use
+# on applications which do not require cross-platform and
+# multiple-GHC-version support. For more information and other
+# options, see:
+#
+# https://docs.haskellstack.org/en/stable/travis_ci/
+#
 # Copy these contents into the root directory of your Github project in a file
 # named .travis.yml
 
@@ -10,100 +17,26 @@ language: c
 # Caching so the next build will be fast too.
 cache:
   directories:
-  - $HOME/.ghc
-  - $HOME/.cabal
   - $HOME/.stack
-
-# The different configurations we want to test. We have BUILD=cabal which uses
-# cabal-install, and BUILD=stack which uses Stack. More documentation on each
-# of those below.
-#
-# We set the compiler values here to tell Travis to use a different
-# cache file per set of arguments.
-#
-# If you need to have different apt packages for each combination in the
-# matrix, you can use a line such as:
-# addons: {apt: {packages: [libfcgi-dev,libgmp-dev]}}
-matrix:
-  include:
-  # The Stack builds. We can pass in arbitrary Stack arguments via the ARGS
-  # variable, such as using --stack-yaml to point to a different file.
-  - env: BUILD=stack ARGS=""
-    compiler: ": #stack default"
-    addons: {apt: {packages: [ghc-7.10.3], sources: [hvr-ghc]}}
+# Ensure necessary system libraries are present
+addons:
+  apt:
+    packages:
+      - libgmp-dev
 
 before_install:
-# Using compiler above sets CC to an invalid value, so unset it
-- unset CC
-
-# We want to always allow newer versions of packages when building on GHC HEAD
-- CABALARGS=""
-- if [ "x$GHCVER" = "xhead" ]; then CABALARGS=--allow-newer; fi
-
 # Download and unpack the stack executable
-- export PATH=/opt/ghc/$GHCVER/bin:/opt/cabal/$CABALVER/bin:$HOME/.local/bin:/opt/alex/$ALEXVER/bin:/opt/happy/$HAPPYVER/bin:$HOME/.cabal/bin:$PATH
 - mkdir -p ~/.local/bin
-- |
-  if [ `uname` = "Darwin" ]
-  then
-    travis_retry curl --insecure -L https://www.stackage.org/stack/osx-x86_64 | tar xz --strip-components=1 --include '*/stack' -C ~/.local/bin
-  else
-    travis_retry curl -L https://www.stackage.org/stack/linux-x86_64 | tar xz --wildcards --strip-components=1 -C ~/.local/bin '*/stack'
-  fi
-
-  # Use the more reliable S3 mirror of Hackage
-  mkdir -p $HOME/.cabal
-  echo 'remote-repo: hackage.haskell.org:http://hackage.fpcomplete.com/' > $HOME/.cabal/config
-  echo 'remote-repo-cache: $HOME/.cabal/packages' >> $HOME/.cabal/config
-
-  if [ "$CABALVER" != "1.16" ]
-  then
-    echo 'jobs: $ncpus' >> $HOME/.cabal/config
-  fi
-
-# Get the list of packages from the stack.yaml file
-- PACKAGES=$(stack --install-ghc query locals | grep '^ *path' | sed 's@^ *path:@@')
+- export PATH=$HOME/.local/bin:$PATH
+- travis_retry curl -L https://www.stackage.org/stack/linux-x86_64 | tar xz --wildcards --strip-components=1 -C ~/.local/bin '*/stack'
 
 install:
-- echo "$(ghc --version) [$(ghc --print-project-git-commit-id 2> /dev/null || echo '?')]"
-- if [ -f configure.ac ]; then autoreconf -i; fi
-- |
-  set -ex
-  case "$BUILD" in
-    stack)
-      # Avoid an out-of-memory condition
-      stack --no-terminal --install-ghc $ARGS build haskell-src-exts
-      stack --no-terminal --install-ghc $ARGS test --bench --only-dependencies
-      ;;
-    cabal)
-      cabal --version
-      travis_retry cabal update
-      cabal install --only-dependencies --enable-tests --enable-benchmarks --force-reinstalls --ghc-options=-O0 --reorder-goals --max-backjumps=-1 $CABALARGS $PACKAGES
-      ;;
-  esac
-  set +ex
+# Build dependencies. Start with just haskell-src-exts since it requires a lot
+# of memory and we want it to build by itself.
+- stack --no-terminal --install-ghc build haskell-src-exts
+- stack --no-terminal --install-ghc test --only-dependencies
 
 script:
-- |
-  set -ex
-  case "$BUILD" in
-    stack)
-      stack --no-terminal $ARGS test --bench --no-run-benchmarks --haddock --no-haddock-deps
-      ;;
-    cabal)
-      cabal install --enable-tests --enable-benchmarks --force-reinstalls --ghc-options=-O0 --reorder-goals --max-backjumps=-1 $CABALARGS $PACKAGES
-      ORIGDIR=$(pwd)
-      for dir in $PACKAGES
-      do
-        cd $dir
-        cabal check || [ "$CABALVER" == "1.16" ]
-        cabal sdist
-        SRC_TGZ=$(cabal info . | awk '{print $2;exit}').tar.gz && \
-          (cd dist && cabal install --force-reinstalls "$SRC_TGZ")
-        cd $ORIGDIR
-      done
-      ;;
-  esac
-  set +ex
+# Build the package, its tests, and its docs and run the tests
+- stack --no-terminal test --haddock --no-haddock-deps

Application.hs View File

@@ -33,7 +33,7 @@ import Yesod.Default.Config2
 import Yesod.Default.Handlers
 import Yesod.GitRepo
 import System.Process (rawSystem)
-import Stackage.Database.Cron (loadFromS3)
+import Stackage.Database.Cron (loadFromS3, newHoogleLocker)
 import Control.AutoUpdate
 
 -- Import all relevant handler modules here.
@@ -136,6 +136,7 @@ makeFoundation appSettings = do
     appHoogleLock <- newMVar ()
     appMirrorStatus <- mkUpdateMirrorStatus
+    appHoogleLocker <- newHoogleLocker
     return App {..}
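
The `return App {..}` line works because this module relies on the RecordWildCards extension: every binding made above it whose name matches an App field (appHoogleLock, appMirrorStatus, and now appHoogleLocker) is collected into the constructed record. A minimal, self-contained sketch of the same pattern, using a made-up two-field record rather than the real App:

    {-# LANGUAGE RecordWildCards #-}

    -- Made-up record standing in for App; these field names are hypothetical.
    data Cfg = Cfg
        { cfgPort :: Int
        , cfgHost :: String
        }

    makeCfg :: IO Cfg
    makeCfg = do
        cfgPort <- return 8080          -- bindings named exactly like the fields
        cfgHost <- return "localhost"
        return Cfg {..}                 -- RecordWildCards fills in both fields

    main :: IO ()
    main = do
        Cfg {..} <- makeCfg             -- the extension also binds on pattern match
        putStrLn (cfgHost ++ ":" ++ show cfgPort)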

Foundation.hs View File

@@ -11,6 +11,7 @@ import Yesod.Core.Types (Logger)
 import Yesod.AtomFeed
 import Yesod.GitRepo
 import Stackage.Database
+import Stackage.Database.Cron (HoogleLocker)
 import qualified Yesod.Core.Unsafe as Unsafe
 
 -- | The site argument for your application. This can be a good place to
@@ -30,6 +31,7 @@ data App = App
     -- ^ Avoid concurrent Hoogle queries, see
     -- https://github.com/fpco/stackage-server/issues/172
     , appMirrorStatus :: IO (Status, WidgetT App IO ())
+    , appHoogleLocker :: HoogleLocker
     }
 
 instance HasHttpManager App where
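
The HoogleLocker field added here is defined in Stackage/Database/Cron.hs below: a TVar holding a Map from database file path to an MVar owned by whichever thread is currently fetching that file. A minimal sketch of just that bookkeeping, with our own illustrative names (Locker, newLocker, and claim are not part of the project):

    import Control.Concurrent.MVar (MVar)
    import Control.Concurrent.STM
    import qualified Data.Map as Map

    -- Same shape as the HoogleLocker newtype below, minus the wrapper.
    type Locker = TVar (Map.Map FilePath (MVar ()))

    newLocker :: IO Locker
    newLocker = newTVarIO Map.empty

    -- Either the baton of the thread already fetching fp (callers block on
    -- it with readMVar, then retry), or a release action to run once done.
    claim :: Locker -> FilePath -> MVar () -> STM (Either (MVar ()) (STM ()))
    claim locker fp baton = do
        m <- readTVar locker
        case Map.lookup fp m of
            Just other -> return (Left other)
            Nothing -> do
                writeTVar locker (Map.insert fp baton m)
                return (Right (modifyTVar locker (Map.delete fp)))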

Handler/Hoogle.hs View File

@@ -15,7 +15,7 @@ import qualified Data.Text as T
 getHoogleDB :: SnapName -> Handler (Maybe FilePath)
 getHoogleDB name = track "Handler.Hoogle.getHoogleDB" $ do
     app <- getYesod
-    liftIO $ Cron.getHoogleDB True (appHttpManager app) name
+    liftIO $ Cron.getHoogleDB (appHoogleLocker app) True (appHttpManager app) name
 
 getHoogleR :: SnapName -> Handler Html
 getHoogleR name = track "Handler.Hoogle.getHoogleR" $ do

Stackage/Database/Cron.hs View File

@@ -2,6 +2,8 @@ module Stackage.Database.Cron
     ( stackageServerCron
     , loadFromS3
     , getHoogleDB
+    , HoogleLocker
+    , newHoogleLocker
     ) where
 
 import ClassyPrelude.Conduit
@@ -23,7 +25,7 @@ import Network.AWS (Credentials (Discover),
 import Control.Monad.Trans.AWS (trying, _Error)
 import Network.AWS.Data.Body (toBody)
 import Network.AWS.S3 (ObjectCannedACL (OPublicRead),
-                       poACL, putObject,
+                       poACL, poContentType, putObject,
                        BucketName(BucketName),
                        ObjectKey(ObjectKey))
 import Control.Lens (set, view)
@@ -122,31 +124,65 @@ hoogleUrl n = concat
     , hoogleKey n
     ]
 
-getHoogleDB :: Bool -- ^ print exceptions?
+newtype HoogleLocker = HoogleLocker (TVar (Map FilePath (MVar ())))
+
+newHoogleLocker :: IO HoogleLocker
+newHoogleLocker = HoogleLocker <$> newTVarIO mempty
+
+data Finished a = Finished a | TryAgain
+
+getHoogleDB :: HoogleLocker
+            -> Bool -- ^ print exceptions?
             -> Manager -> SnapName -> IO (Maybe FilePath)
-getHoogleDB toPrint man name = do
+getHoogleDB (HoogleLocker locker) toPrint man name = do
     let fp = fromText $ hoogleKey name
         fptmp = encodeString fp <.> "tmp"
-    exists <- isFile fp
-    if exists
-        then return $ Just (encodeString fp)
-        else do
-            req' <- parseUrl $ unpack $ hoogleUrl name
-            let req = req'
-                    { checkStatus = \_ _ _ -> Nothing
-                    , decompress = const False
-                    }
-            withResponse req man $ \res -> if responseStatus res == status200
-                then do
-                    createTree $ parent (fromString fptmp)
-                    runResourceT $ bodyReaderSource (responseBody res)
-                        $= ungzip
-                        $$ sinkFile fptmp
-                    rename (fromString fptmp) fp
-                    return $ Just $ encodeString fp
+    baton <- newMVar ()
+    let go :: IO (Finished (Maybe FilePath))
+        go = withMVar baton $ \() -> bracket acquire fst snd
+        acquire :: IO (IO (), IO (Finished (Maybe FilePath)))
+        acquire = atomically $ do
+            m <- readTVar locker
+            case lookup (encodeString fp) m of
+                Just baton' -> return (return (), readMVar baton' $> TryAgain)
+                Nothing -> do
+                    modifyTVar locker $ insertMap (encodeString fp) baton
+                    let cleanup = modifyTVar locker $ deleteMap (encodeString fp)
+                    return (atomically $ cleanup, Finished <$> inner)
+        inner = do
+            exists <- isFile fp
+            if exists
+                then return $ Just (encodeString fp)
                 else do
-                    when toPrint $ mapM brRead res >>= print
-                    return Nothing
+                    req' <- parseUrl $ unpack $ hoogleUrl name
+                    let req = req'
+                            { checkStatus = \_ _ _ -> Nothing
+                            , decompress = const False
+                            }
+                    withResponse req man $ \res -> if responseStatus res == status200
+                        then do
+                            createTree $ parent (fromString fptmp)
+                            runResourceT $ bodyReaderSource (responseBody res)
+                                $= ungzip
+                                $$ sinkFile fptmp
+                            rename (fromString fptmp) fp
+                            return $ Just $ encodeString fp
+                        else do
+                            when toPrint $ mapM brRead res >>= print
+                            return Nothing
+        loop :: IO (Maybe FilePath)
+        loop = do
+            mres <- go
+            case mres of
+                TryAgain -> loop
+                Finished res -> return res
+    loop
 
 stackageServerCron :: IO ()
 stackageServerCron = do
@@ -154,6 +190,8 @@ stackageServerCron = do
     void $ catchIO (bindPortTCP 17834 "127.0.0.1") $ \_ ->
         error $ "cabal loader process already running, exiting"
 
+    locker <- newHoogleLocker
+
     env <- newEnv NorthVirginia Discover
     let upload :: FilePath -> ObjectKey -> IO ()
         upload fp key = do
@@ -182,6 +220,7 @@ stackageServerCron = do
         let key = ObjectKey "snapshots.json"
             po =
                   set poACL (Just OPublicRead)
+                $ set poContentType (Just "application/json")
                 $ putObject (BucketName "haddock.stackage.org") key (toBody snapshots)
         putStrLn $ "Uploading: " ++ tshow key
         eres <- runResourceT $ runAWS env $ trying _Error $ send po
@@ -192,7 +231,7 @@ stackageServerCron = do
     names <- runReaderT last5Lts5Nightly db
     let manager = view envManager env
     forM_ names $ \name -> do
-        mfp <- getHoogleDB False manager name
+        mfp <- getHoogleDB locker False manager name
         case mfp of
             Just _ -> putStrLn $ "Hoogle database exists for: " ++ toPathPiece name
             Nothing -> do
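
Taken together, getHoogleDB above implements a single-flight download: the first caller for a given file registers its baton in the locker and fetches; concurrent callers find the baton, block on readMVar until the holder finishes, and then loop, this time hitting the already-downloaded file. Below is a runnable, cut-down demonstration of that retry loop, simulating the download with a delay and an in-memory cache; all names are ours, and Maybe stands in for the Finished/TryAgain type, so this is a sketch of the idea rather than the project's code:

    {-# LANGUAGE LambdaCase #-}
    import Control.Concurrent
    import Control.Concurrent.STM
    import Control.Exception (bracket)
    import qualified Data.Map as Map

    main :: IO ()
    main = do
        locker <- newTVarIO Map.empty          -- the HoogleLocker analogue
        cache  <- newTVarIO (Map.empty :: Map.Map FilePath String)
        let fetch fp = do
                baton <- newMVar ()
                let go = withMVar baton $ \() -> bracket acquire fst snd
                    -- acquire returns (release, action); bracket guarantees the
                    -- locker entry is removed even if the action throws.
                    acquire = atomically $ do
                        m <- readTVar locker
                        case Map.lookup fp m of
                            -- someone else is fetching: wait on their baton,
                            -- then signal "try again" with Nothing
                            Just other -> return (return (), readMVar other >> return Nothing)
                            Nothing -> do
                                modifyTVar locker (Map.insert fp baton)
                                return ( atomically (modifyTVar locker (Map.delete fp))
                                       , Just <$> download )
                    download = atomically (Map.lookup fp <$> readTVar cache) >>= \case
                        Just s  -> return s                 -- "already on disk"
                        Nothing -> do
                            threadDelay 100000              -- simulate the HTTP fetch
                            let s = "contents of " ++ fp
                            atomically (modifyTVar cache (Map.insert fp s))
                            return s
                    loop = go >>= maybe loop return         -- Nothing plays TryAgain
                loop
        done <- newEmptyMVar
        _ <- forkIO (fetch "lts-7.0.hoo" >>= putStrLn >> putMVar done ())
        fetch "lts-7.0.hoo" >>= putStrLn
        takeMVar done

Both threads print the same contents, but the simulated fetch runs only once; the real code applies the same shape to the Hoogle database files, with the MVar-per-path map preventing two workers from downloading the same snapshot's database concurrently.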