{-# LANGUAGE FlexibleContexts
           , GeneralizedNewtypeDeriving
           , TypeSynonymInstances
           , MultiParamTypeClasses
           , FlexibleInstances
           , IncoherentInstances #-}

{-
Copyright (C) 2006-2016 John MacFarlane <jgm@berkeley.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}

{- |
   Module      : Text.Pandoc.Parsing
   Copyright   : Copyright (C) 2006-2016 John MacFarlane
   License     : GNU GPL, version 2 or above

   Maintainer  : John MacFarlane <jgm@berkeley.edu>
   Stability   : alpha
   Portability : portable

A utility library with parsers used in pandoc readers.
-}

module Text.Pandoc.Parsing ( anyLine, many1Till, notFollowedBy',
                             oneOfStrings, oneOfStringsCI,
                             spaceChar, nonspaceChar, skipSpaces,
                             blankline, blanklines, enclosed,
                             stringAnyCase, parseFromString, lineClump,
                             charsInBalanced, romanNumeral, emailAddress,
                             uri, mathInline, mathDisplay,
                             withHorizDisplacement, withRaw, escaped,
                             characterReference, anyOrderedListMarker,
                             orderedListMarker, charRef, lineBlockLines,
                             tableWith, widthsFromIndices, gridTableWith,
                             readWith, readWithM, testStringWith,
                             guardEnabled, guardDisabled,
                             updateLastStrPos, notAfterString,
                             logMessage, reportLogMessages,
                             ParserState (..),
                             HasReaderOptions (..),
                             HasHeaderMap (..),
                             HasIdentifierList (..),
                             HasMacros (..),
                             HasLogMessages (..),
                             HasLastStrPosition (..),
                             defaultParserState,
                             HeaderType (..),
                             ParserContext (..),
                             QuoteContext (..),
                             HasQuoteContext (..),
                             NoteTable, NoteTable', KeyTable, SubstTable,
                             Key (..), toKey, registerHeader,
                             smartPunctuation,
                             singleQuoteStart, singleQuoteEnd,
                             doubleQuoteStart, doubleQuoteEnd,
                             ellipses, apostrophe, dash, nested,
                             citeKey, macro, applyMacros',
                             Parser, ParserT,
                             F, Future (..), runF, askF, asksF,
                             returnF, trimInlinesF,
                             token, (<+?>),
                             extractIdClass, insertIncludedFile,
                             -- * Re-exports from Text.Pandoc.Parsec
                             Stream, runParser, runParserT, parse,
                             anyToken, getInput, setInput, unexpected,
                             char, letter, digit, alphaNum,
                             skipMany, skipMany1, spaces, space,
                             anyChar, satisfy, newline, string,
                             count, eof, noneOf, oneOf,
                             lookAhead, notFollowedBy,
                             many, many1, manyTill,
                             (<|>), (<?>), choice, try,
                             sepBy, sepBy1, sepEndBy, sepEndBy1,
                             endBy, endBy1,
                             option, optional, optionMaybe,
                             getState, setState, updateState,
                             SourcePos, getPosition, setPosition,
                             sourceColumn, sourceLine,
                             setSourceColumn, setSourceLine,
                             newPos
                           )
where

import Text.Pandoc.Definition
import Text.Pandoc.Options
import Text.Pandoc.Builder (Blocks, Inlines, rawBlock, HasMeta(..), trimInlines)
import qualified Text.Pandoc.Builder as B
import Text.Pandoc.XML (fromEntities)
import qualified Text.Pandoc.UTF8 as UTF8 (putStrLn)
import Text.Parsec hiding (token)
import Text.Parsec.Pos (newPos)
import Data.Char ( toLower, toUpper, ord, chr, isAscii, isAlphaNum,
                   isHexDigit, isSpace, isPunctuation )
import Data.List ( intercalate, transpose, isSuffixOf )
import Text.Pandoc.Shared
import qualified Data.Map as M
import Text.TeXMath.Readers.TeX.Macros (applyMacros, Macro, pMacroDefinition)
import Text.HTML.TagSoup.Entity ( lookupEntity )
import Text.Pandoc.Asciify (toAsciiChar)
import Data.Monoid ((<>))
import Text.Pandoc.Class (PandocMonad, readFileFromDirs, report)
import Text.Pandoc.Logging
import Data.Default
import qualified Data.Set as Set
import Control.Monad.Reader
import Control.Monad.Identity
import Data.Maybe (catMaybes)
import Text.Pandoc.Error
import Control.Monad.Except

type Parser t s = Parsec t s

type ParserT = ParsecT

-- | Reader monad wrapping the parser state.  This is used to possibly delay
-- evaluation until all relevant information has been parsed and made available
-- in the parser state.
newtype Future s a = Future { runDelayed :: Reader s a }
  deriving (Monad, Applicative, Functor)

type F = Future ParserState

runF :: Future s a -> s -> a
runF = runReader . runDelayed

askF :: Future s s
askF = Future ask

asksF :: (s -> a) -> Future s a
asksF f = Future $ asks f

returnF :: Monad m => a -> m (Future s a)
returnF = return . return

trimInlinesF :: Future s Inlines -> Future s Inlines
trimInlinesF = liftM trimInlines

instance Monoid a => Monoid (Future s a) where
  mempty = return mempty
  mappend = liftM2 mappend
  mconcat = liftM mconcat . sequence
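
-- An illustrative sketch (not part of this module's API; names here are
-- hypothetical): a reader can build a delayed computation over the final
-- parser state and force it only once parsing has finished.
--
-- > delayedNotes :: F NoteTable'
-- > delayedNotes = asksF stateNotes'
-- >
-- > -- after parsing: runF delayedNotes finalState :: NoteTable'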

-- | Parse any line of text
anyLine :: Stream [Char] m Char => ParserT [Char] st m [Char]
anyLine = do
  -- This is much faster than:
  -- manyTill anyChar newline
  inp <- getInput
  pos <- getPosition
  case break (=='\n') inp of
       (this, '\n':rest) -> do
         -- needed to persuade parsec that this won't match an empty string:
         anyChar
         setInput rest
         setPosition $ incSourceLine (setSourceColumn pos 1) 1
         return this
       _ -> mzero

-- | Like @manyTill@, but reads at least one item.
many1Till :: Stream s m t
          => ParserT s st m a
          -> ParserT s st m end
          -> ParserT s st m [a]
many1Till p end = do
  first <- p
  rest <- manyTill p end
  return (first:rest)

-- | A more general form of @notFollowedBy@.  This one allows any
-- type of parser to be specified, and succeeds only if that parser fails.
-- It does not consume any input.
notFollowedBy' :: (Show b, Stream s m a) => ParserT s st m b -> ParserT s st m ()
notFollowedBy' p = try $ join $
  do a <- try p
     return (unexpected (show a))
  <|>
  return (return ())
-- (This version due to Andrew Pimlott on the Haskell mailing list.)

oneOfStrings' :: Stream s m Char => (Char -> Char -> Bool) -> [String] -> ParserT s st m String
oneOfStrings' _ [] = fail "no strings"
oneOfStrings' matches strs = try $ do
  c <- anyChar
  let strs' = [xs | (x:xs) <- strs, x `matches` c]
  case strs' of
       []  -> fail "not found"
       _   -> (c:) <$> oneOfStrings' matches strs'
               <|> if "" `elem` strs'
                      then return [c]
                      else fail "not found"

-- | Parses one of a list of strings.  If the list contains
-- two strings one of which is a prefix of the other, the longer
-- string will be matched if possible.
oneOfStrings :: Stream s m Char => [String] -> ParserT s st m String
oneOfStrings = oneOfStrings' (==)
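
-- For example (illustrative; the result shown in the comment is not
-- machine-checked):
--
-- > parse (oneOfStrings ["http", "https"]) "" "https://x"   -- Right "https"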

-- | Parses one of a list of strings (tried in order), case insensitive.
oneOfStringsCI :: Stream s m Char => [String] -> ParserT s st m String
oneOfStringsCI = oneOfStrings' ciMatch
  where ciMatch x y = toLower' x == toLower' y
        -- this optimizes toLower by checking common ASCII case
        -- first, before calling the expensive unicode-aware
        -- function:
        toLower' c | c >= 'A' && c <= 'Z' = chr (ord c + 32)
                   | isAscii c = c
                   | otherwise = toLower c

-- | Parses a space or tab.
spaceChar :: Stream s m Char => ParserT s st m Char
spaceChar = satisfy $ \c -> c == ' ' || c == '\t'

-- | Parses a nonspace, nonnewline character.
nonspaceChar :: Stream s m Char => ParserT s st m Char
nonspaceChar = satisfy $ flip notElem ['\t', '\n', ' ', '\r']

-- | Skips zero or more spaces or tabs.
skipSpaces :: Stream s m Char => ParserT s st m ()
skipSpaces = skipMany spaceChar

-- | Skips zero or more spaces or tabs, then reads a newline.
blankline :: Stream s m Char => ParserT s st m Char
blankline = try $ skipSpaces >> newline

-- | Parses one or more blank lines and returns a string of newlines.
blanklines :: Stream s m Char => ParserT s st m [Char]
blanklines = many1 blankline

-- | Parses material enclosed between start and end parsers.
enclosed :: Stream s m Char => ParserT s st m t   -- ^ start parser
         -> ParserT s st m end                    -- ^ end parser
         -> ParserT s st m a                      -- ^ content parser (to be used repeatedly)
         -> ParserT s st m [a]
enclosed start end parser = try $
  start >> notFollowedBy space >> many1Till parser end
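
-- Illustration (result shown in the comment; not checked):
--
-- > parse (enclosed (char '*') (char '*') anyChar) "" "*hi*"   -- Right "hi"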

-- | Parse string, case insensitive.
stringAnyCase :: Stream s m Char => [Char] -> ParserT s st m String
stringAnyCase [] = string ""
stringAnyCase (x:xs) = do
  firstChar <- char (toUpper x) <|> char (toLower x)
  rest <- stringAnyCase xs
  return (firstChar:rest)

-- | Parse contents of 'str' using 'parser' and return result.
parseFromString :: Monad m => ParserT String st m a -> String -> ParserT String st m a
parseFromString parser str = do
  oldPos <- getPosition
  oldInput <- getInput
  setInput str
  result <- parser
  spaces
  eof
  setInput oldInput
  setPosition oldPos
  return result
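
-- A rough illustration (not checked): the supplied string is parsed in place
-- of the real input, which is restored afterwards.
--
-- > parse (parseFromString (many1 letter) "abc") "" "unrelated"   -- Right "abc"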

-- | Parse raw line block up to and including blank lines.
lineClump :: Stream [Char] m Char => ParserT [Char] st m String
lineClump = blanklines
          <|> (many1 (notFollowedBy blankline >> anyLine) >>= return . unlines)

-- | Parse a string of characters between an open character
-- and a close character, including text between balanced
-- pairs of open and close, which must be different. For example,
-- @charsInBalanced '(' ')' anyChar@ will parse "(hello (there))"
-- and return "hello (there)".
charsInBalanced :: Stream s m Char => Char -> Char -> ParserT s st m Char
                -> ParserT s st m String
charsInBalanced open close parser = try $ do
  char open
  let isDelim c = c == open || c == close
  raw <- many $
           many1 (notFollowedBy (satisfy isDelim) >> parser)
           <|> (do
                  res <- charsInBalanced open close parser
                  return $ [open] ++ res ++ [close])
  char close
  return $ concat raw

-- old charsInBalanced would be:
-- charsInBalanced open close (noneOf "\n" <|> char '\n' >> notFollowedBy blankline)
-- old charsInBalanced' would be:
-- charsInBalanced open close anyChar

-- Auxiliary functions for romanNumeral:

lowercaseRomanDigits :: [Char]
lowercaseRomanDigits = ['i','v','x','l','c','d','m']

uppercaseRomanDigits :: [Char]
uppercaseRomanDigits = map toUpper lowercaseRomanDigits

-- | Parses a roman numeral (uppercase or lowercase), returns number.
romanNumeral :: Stream s m Char => Bool              -- ^ Uppercase if true
             -> ParserT s st m Int
romanNumeral upperCase = do
    let romanDigits = if upperCase
                         then uppercaseRomanDigits
                         else lowercaseRomanDigits
    lookAhead $ oneOf romanDigits
    let [one, five, ten, fifty, hundred, fivehundred, thousand] =
          map char romanDigits
    thousands <- many thousand >>= (return . (1000 *) . length)
    ninehundreds <- option 0 $ try $ hundred >> thousand >> return 900
    fivehundreds <- many fivehundred >>= (return . (500 *) . length)
    fourhundreds <- option 0 $ try $ hundred >> fivehundred >> return 400
    hundreds <- many hundred >>= (return . (100 *) . length)
    nineties <- option 0 $ try $ ten >> hundred >> return 90
    fifties <- many fifty >>= (return . (50 *) . length)
    forties <- option 0 $ try $ ten >> fifty >> return 40
    tens <- many ten >>= (return . (10 *) . length)
    nines <- option 0 $ try $ one >> ten >> return 9
    fives <- many five >>= (return . (5 *) . length)
    fours <- option 0 $ try $ one >> five >> return 4
    ones <- many one >>= (return . length)
    let total = thousands + ninehundreds + fivehundreds + fourhundreds +
                hundreds + nineties + fifties + forties + tens + nines +
                fives + fours + ones
    if total == 0
       then fail "not a roman numeral"
       else return total
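
-- Illustration (result shown in the comment; not checked):
--
-- > parse (romanNumeral True) "" "MMXIV"   -- Right 2014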

-- Parsers for email addresses and URIs

-- | Parses an email address; returns original and corresponding
-- escaped mailto: URI.
emailAddress :: Stream s m Char => ParserT s st m (String, String)
emailAddress = try $ toResult <$> mailbox <*> (char '@' *> domain)
 where
  toResult mbox dom = let full = fromEntities $ mbox ++ '@':dom
                      in  (full, escapeURI $ "mailto:" ++ full)
  mailbox           = intercalate "." <$> (emailWord `sepby1` dot)
  domain            = intercalate "." <$> (subdomain `sepby1` dot)
  dot               = char '.'
  subdomain         = many1 $ alphaNum <|> innerPunct
  -- this excludes some valid email addresses, since an
  -- email could contain e.g. '__', but gives better results
  -- for our purposes, when combined with markdown parsing:
  innerPunct        = try (satisfy (\c -> isEmailPunct c || c == '@')
                            <* notFollowedBy space
                            <* notFollowedBy (satisfy isPunctuation))
  -- technically an email address could begin with a symbol,
  -- but allowing this creates too many problems.
  -- See e.g. https://github.com/jgm/pandoc/issues/2940
  emailWord         = do
    x <- satisfy isAlphaNum
    xs <- many (satisfy isEmailChar)
    return (x:xs)
  isEmailChar c     = isAlphaNum c || isEmailPunct c
  isEmailPunct c    = c `elem` "!\"#$%&'*+-/=?^_{|}~;"
  -- note: sepBy1 from parsec consumes input when sep
  -- succeeds and p fails, so we use this variant here.
  sepby1 p sep      = (:) <$> p <*> (many (try $ sep >> p))
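
-- Illustration (result shown in the comment; not checked):
--
-- > fst <$> parse emailAddress "" "user@example.org"   -- Right "user@example.org"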

-- Schemes from http://www.iana.org/assignments/uri-schemes.html plus
-- the unofficial schemes coap, doi, javascript, isbn, pmid
schemes :: [String]
schemes = ["coap","doi","javascript","aaa","aaas","about","acap","cap","cid",
           "crid","data","dav","dict","dns","file","ftp","geo","go","gopher",
           "h323","http","https","iax","icap","im","imap","info","ipp","iris",
           "iris.beep","iris.xpc","iris.xpcs","iris.lwz","ldap","mailto","mid",
           "msrp","msrps","mtqp","mupdate","news","nfs","ni","nih","nntp",
           "opaquelocktoken","pop","pres","rtsp","service","session","shttp","sieve",
           "sip","sips","sms","snmp","soap.beep","soap.beeps","tag","tel","telnet",
           "tftp","thismessage","tn3270","tip","tv","urn","vemmi","ws","wss","xcon",
           "xcon-userid","xmlrpc.beep","xmlrpc.beeps","xmpp","z39.50r","z39.50s",
           "adiumxtra","afp","afs","aim","apt","attachment","aw","beshare","bitcoin",
           "bolo","callto","chrome","chrome-extension","com-eventbrite-attendee",
           "content", "cvs","dlna-playsingle","dlna-playcontainer","dtn","dvb",
           "ed2k","facetime","feed","finger","fish","gg","git","gizmoproject",
           "gtalk","hcp","icon","ipn","irc","irc6","ircs","itms","jar","jms",
           "keyparc","lastfm","ldaps","magnet","maps","market","message","mms",
           "ms-help","msnim","mumble","mvn","notes","oid","palm","paparazzi",
           "platform","proxy","psyc","query","res","resource","rmi","rsync",
           "rtmp","secondlife","sftp","sgn","skype","smb","soldat","spotify",
           "ssh","steam","svn","teamspeak","things","udp","unreal","ut2004",
           "ventrilo","view-source","webcal","wtai","wyciwyg","xfire","xri",
           "ymsgr", "isbn", "pmid"]

uriScheme :: Stream s m Char => ParserT s st m String
uriScheme = oneOfStringsCI schemes

-- | Parses a URI. Returns pair of original and URI-escaped version.
uri :: Stream [Char] m Char => ParserT [Char] st m (String, String)
uri = try $ do
  scheme <- uriScheme
  char ':'
  -- Avoid parsing e.g. "**Notes:**" as a raw URI:
  notFollowedBy (oneOf "*_]")
  -- We allow sentence punctuation except at the end, since
  -- we don't want the trailing '.' in 'http://google.com.'  We want to allow
  -- http://en.wikipedia.org/wiki/State_of_emergency_(disambiguation)
  -- as a URL, while NOT picking up the closing paren in
  -- (http://wikipedia.org).  So we include balanced parens in the URL.
  let isWordChar c = isAlphaNum c || c `elem` "#$%*+/@\\_-"
  let wordChar = satisfy isWordChar
  let percentEscaped = try $ char '%' >> skipMany1 (satisfy isHexDigit)
  let entity = () <$ characterReference
  let punct = skipMany1 (char ',')
          <|> () <$ (satisfy (\c -> not (isSpace c) && c /= '<' && c /= '>'))
  let uriChunk = skipMany1 wordChar
             <|> percentEscaped
             <|> entity
             <|> (try $ punct >>
                   lookAhead (void (satisfy isWordChar) <|> percentEscaped))
  str <- snd <$> withRaw (skipMany1 ( () <$
                            (enclosed (char '(') (char ')') uriChunk
                             <|> enclosed (char '{') (char '}') uriChunk
                             <|> enclosed (char '[') (char ']') uriChunk)
                            <|> uriChunk))
  str' <- option str $ char '/' >> return (str ++ "/")
  let uri' = scheme ++ ":" ++ fromEntities str'
  return (uri', escapeURI uri')
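
-- Illustration of the trailing-punctuation handling (result shown in the
-- comment; not checked):
--
-- > fst <$> parse uri "" "http://pandoc.org."   -- Right "http://pandoc.org"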

mathInlineWith :: Stream s m Char => String -> String -> ParserT s st m String
mathInlineWith op cl = try $ do
  string op
  notFollowedBy space
  words' <- many1Till
              (    count 1 (noneOf " \t\n\\")
               <|> (char '\\' >>
                     -- This next clause is needed because \text{..} can
                     -- contain $, \(\), etc.
                     (try (string "text" >>
                            (("\\text" ++) <$> inBalancedBraces 0 ""))
                      <|> (\c -> ['\\',c]) <$> anyChar))
               <|> (do
                      (blankline <* notFollowedBy' blankline) <|>
                        (oneOf " \t" <* skipMany (oneOf " \t"))
                      notFollowedBy (char '$')
                      return " ")
              )
              (try $ string cl)
  notFollowedBy digit  -- to prevent capture of $5
  return $ concat words'
 where
  inBalancedBraces :: Stream s m Char => Int -> String -> ParserT s st m String
  inBalancedBraces 0 "" = do
    c <- anyChar
    if c == '{'
       then inBalancedBraces 1 "{"
       else mzero
  inBalancedBraces 0 s = return $ reverse s
  inBalancedBraces numOpen ('\\':xs) = do
    c <- anyChar
    inBalancedBraces numOpen (c:'\\':xs)
  inBalancedBraces numOpen xs = do
    c <- anyChar
    case c of
         '}' -> inBalancedBraces (numOpen - 1) (c:xs)
         '{' -> inBalancedBraces (numOpen + 1) (c:xs)
         _   -> inBalancedBraces numOpen (c:xs)

mathDisplayWith :: Stream s m Char => String -> String -> ParserT s st m String
mathDisplayWith op cl = try $ do
  string op
  many1Till (noneOf "\n" <|> (newline <* notFollowedBy' blankline)) (try $ string cl)
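
-- Illustration (result shown in the comment; not checked):
--
-- > parse (mathDisplayWith "$$" "$$") "" "$$x^2$$"   -- Right "x^2"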

mathDisplay :: (HasReaderOptions st, Stream s m Char)
            => ParserT s st m String
mathDisplay =
      (guardEnabled Ext_tex_math_dollars >> mathDisplayWith "$$" "$$")
  <|> (guardEnabled Ext_tex_math_single_backslash >>
       mathDisplayWith "\\[" "\\]")
  <|> (guardEnabled Ext_tex_math_double_backslash >>
       mathDisplayWith "\\\\[" "\\\\]")

mathInline :: (HasReaderOptions st, Stream s m Char)
           => ParserT s st m String
mathInline =
      (guardEnabled Ext_tex_math_dollars >> mathInlineWith "$" "$")
  <|> (guardEnabled Ext_tex_math_single_backslash >>
       mathInlineWith "\\(" "\\)")
  <|> (guardEnabled Ext_tex_math_double_backslash >>
       mathInlineWith "\\\\(" "\\\\)")

-- | Applies a parser, returns tuple of its results and its horizontal
-- displacement (the difference between the source column at the end
-- and the source column at the beginning). Vertical displacement
-- (source row) is ignored.
withHorizDisplacement :: Stream s m Char
                      => ParserT s st m a        -- ^ Parser to apply
                      -> ParserT s st m (a, Int) -- ^ (result, displacement)
withHorizDisplacement parser = do
  pos1 <- getPosition
  result <- parser
  pos2 <- getPosition
  return (result, sourceColumn pos2 - sourceColumn pos1)

-- | Applies a parser and returns the raw string that was parsed,
-- along with the value produced by the parser.
withRaw :: Stream [Char] m Char => ParsecT [Char] st m a -> ParsecT [Char] st m (a, [Char])
withRaw parser = do
  pos1 <- getPosition
  inp <- getInput
  result <- parser
  pos2 <- getPosition
  let (l1,c1) = (sourceLine pos1, sourceColumn pos1)
  let (l2,c2) = (sourceLine pos2, sourceColumn pos2)
  let inplines = take ((l2 - l1) + 1) $ lines inp
  let raw = case inplines of
                 []  -> ""
                 [l] -> take (c2 - c1) l
                 ls  -> unlines (init ls) ++ take (c2 - 1) (last ls)
  return (result, raw)
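
-- Illustration (result shown in the comment; not checked):
--
-- > parse (withRaw (romanNumeral True)) "" "XIV."   -- Right (14, "XIV")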

-- | Parses backslash, then applies character parser.
escaped :: Stream s m Char
        => ParserT s st m Char  -- ^ Parser for character to escape
        -> ParserT s st m Char
escaped parser = try $ char '\\' >> parser

-- | Parse character entity.
characterReference :: Stream s m Char => ParserT s st m Char
characterReference = try $ do
  char '&'
  ent <- many1Till nonspaceChar (char ';')
  let ent' = case ent of
                  '#':'X':xs -> '#':'x':xs  -- workaround tagsoup bug
                  '#':_      -> ent
                  _          -> ent ++ ";"
  case lookupEntity ent' of
       Just (c : _) -> return c
       _            -> fail "entity not found"
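
-- Illustration (result shown in the comment; not checked, and dependent on
-- tagsoup's entity table):
--
-- > parse characterReference "" "&amp;"   -- Right '&'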

-- | Parses an uppercase roman numeral and returns (UpperRoman, number).
upperRoman :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
upperRoman = do
  num <- romanNumeral True
  return (UpperRoman, num)

-- | Parses a lowercase roman numeral and returns (LowerRoman, number).
lowerRoman :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
lowerRoman = do
  num <- romanNumeral False
  return (LowerRoman, num)

-- | Parses a decimal numeral and returns (Decimal, number).
decimal :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
decimal = do
  num <- many1 digit
  return (Decimal, read num)

-- | Parses a '@' and optional label and
-- returns (DefaultStyle, [next example number]).  The next
-- example number is incremented in parser state, and the label
-- (if present) is added to the label table.
exampleNum :: Stream s m Char
           => ParserT s ParserState m (ListNumberStyle, Int)
exampleNum = do
  char '@'
  lab <- many (alphaNum <|> satisfy (\c -> c == '_' || c == '-'))
  st <- getState
  let num = stateNextExample st
  let newlabels = if null lab
                     then stateExamples st
                     else M.insert lab num $ stateExamples st
  updateState $ \s -> s{ stateNextExample = num + 1
                       , stateExamples    = newlabels }
  return (Example, num)

-- | Parses a '#' and returns (DefaultStyle, 1).
defaultNum :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
defaultNum = do
  char '#'
  return (DefaultStyle, 1)

-- | Parses a lowercase letter and returns (LowerAlpha, number).
lowerAlpha :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
lowerAlpha = do
  ch <- oneOf ['a'..'z']
  return (LowerAlpha, ord ch - ord 'a' + 1)

-- | Parses an uppercase letter and returns (UpperAlpha, number).
upperAlpha :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
upperAlpha = do
  ch <- oneOf ['A'..'Z']
  return (UpperAlpha, ord ch - ord 'A' + 1)

-- | Parses a roman numeral i or I.
romanOne :: Stream s m Char => ParserT s st m (ListNumberStyle, Int)
romanOne = (char 'i' >> return (LowerRoman, 1)) <|>
           (char 'I' >> return (UpperRoman, 1))

-- | Parses an ordered list marker and returns list attributes.
anyOrderedListMarker :: Stream s m Char => ParserT s ParserState m ListAttributes
anyOrderedListMarker = choice $
  [delimParser numParser | delimParser <- [inPeriod, inOneParen, inTwoParens],
                           numParser <- [decimal, exampleNum, defaultNum, romanOne,
                                         lowerAlpha, lowerRoman, upperAlpha, upperRoman]]

-- | Parses a list number (num) followed by a period, returns list attributes.
inPeriod :: Stream s m Char
         => ParserT s st m (ListNumberStyle, Int)
         -> ParserT s st m ListAttributes
inPeriod num = try $ do
  (style, start) <- num
  char '.'
  let delim = if style == DefaultStyle
                 then DefaultDelim
                 else Period
  return (start, style, delim)

-- | Parses a list number (num) followed by a paren, returns list attributes.
inOneParen :: Stream s m Char
           => ParserT s st m (ListNumberStyle, Int)
           -> ParserT s st m ListAttributes
inOneParen num = try $ do
  (style, start) <- num
  char ')'
  return (start, style, OneParen)

-- | Parses a list number (num) enclosed in parens, returns list attributes.
inTwoParens :: Stream s m Char
            => ParserT s st m (ListNumberStyle, Int)
            -> ParserT s st m ListAttributes
inTwoParens num = try $ do
  char '('
  (style, start) <- num
  char ')'
  return (start, style, TwoParens)

-- | Parses an ordered list marker with a given style and delimiter,
-- returns number.
orderedListMarker :: Stream s m Char
                  => ListNumberStyle
                  -> ListNumberDelim
                  -> ParserT s ParserState m Int
orderedListMarker style delim = do
  let num = defaultNum <|>  -- # can continue any kind of list
            case style of
                 DefaultStyle -> decimal
                 Example      -> exampleNum
                 Decimal      -> decimal
                 UpperRoman   -> upperRoman
                 LowerRoman   -> lowerRoman
                 UpperAlpha   -> upperAlpha
                 LowerAlpha   -> lowerAlpha
  let context = case delim of
                 DefaultDelim -> inPeriod
                 Period       -> inPeriod
                 OneParen     -> inOneParen
                 TwoParens    -> inTwoParens
  (start, _, _) <- context num
  return start
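
-- Illustration (result shown in the comment; not checked):
--
-- > runParser (orderedListMarker Decimal Period) defaultParserState "" "17. "
-- >    -- Right 17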

-- | Parses a character reference and returns a Str element.
charRef :: Stream s m Char => ParserT s st m Inline
charRef = do
  c <- characterReference
  return $ Str [c]

lineBlockLine :: Stream [Char] m Char => ParserT [Char] st m String
lineBlockLine = try $ do
  char '|'
  char ' '
  white <- many (spaceChar >> return '\160')
  notFollowedBy newline
  line <- anyLine
  continuations <- many (try $ char ' ' >> anyLine)
  return $ white ++ unwords (line : continuations)

blankLineBlockLine :: Stream [Char] m Char => ParserT [Char] st m Char
blankLineBlockLine = try (char '|' >> blankline)

-- | Parses an RST-style line block and returns a list of strings.
lineBlockLines :: Stream [Char] m Char => ParserT [Char] st m [String]
lineBlockLines = try $ do
  lines' <- many1 (lineBlockLine <|> ((:[]) <$> blankLineBlockLine))
  skipMany1 $ blankline <|> blankLineBlockLine
  return lines'
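
-- Illustration (result shown in the comment; not checked):
--
-- > parse lineBlockLines "" "| one\n| two\n\n"   -- Right ["one","two"]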

-- | Parse a table using 'headerParser', 'rowParser',
-- 'lineParser', and 'footerParser'.
tableWith :: (Stream s m Char, HasReaderOptions st,
              Functor mf, Applicative mf, Monad mf)
          => ParserT s st m (mf [Blocks], [Alignment], [Int])
          -> ([Int] -> ParserT s st m (mf [Blocks]))
          -> ParserT s st m sep
          -> ParserT s st m end
          -> ParserT s st m (mf Blocks)
tableWith headerParser rowParser lineParser footerParser = try $ do
  (heads, aligns, indices) <- headerParser
  lines' <- sequence <$> rowParser indices `sepEndBy1` lineParser
  footerParser
  numColumns <- getOption readerColumns
  let widths = if (indices == [])
                  then replicate (length aligns) 0.0
                  else widthsFromIndices numColumns indices
  return $ B.table mempty (zip aligns widths) <$> heads <*> lines'

-- Calculate relative widths of table columns, based on indices
widthsFromIndices :: Int      -- Number of columns on terminal
                  -> [Int]    -- Indices
                  -> [Double] -- Fractional relative sizes of columns
widthsFromIndices _ [] = []
widthsFromIndices numColumns' indices =
  let numColumns = max numColumns' (if null indices then 0 else last indices)
      lengths' = zipWith (-) indices (0:indices)
      lengths = reverse $
                case reverse lengths' of
                     []       -> []
                     [x]      -> [x]
                     -- compensate for the fact that intercolumn
                     -- spaces are counted in widths of all columns
                     -- but the last...
                     (x:y:zs) -> if x < y && y - x <= 2
                                    then y:y:zs
                                    else x:y:zs
      totLength = sum lengths
      quotient = if totLength > numColumns
                    then fromIntegral totLength
                    else fromIntegral numColumns
      fracs = map (\l -> (fromIntegral l) / quotient) lengths in
  tail fracs
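
-- Illustration (result shown in the comment; not checked).  The first index
-- is normally 0, and its zero-width "column" is dropped by the final 'tail':
--
-- > widthsFromIndices 80 [0, 20, 40]   -- [0.25, 0.25]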

---

-- Parse a grid table:  starts with row of '-' on top, then header
-- (which may be grid), then the rows,
-- which may be grid, separated by blank lines, and
-- ending with a footer (dashed line followed by blank line).
gridTableWith :: (Stream [Char] m Char, HasReaderOptions st,
                  Functor mf, Applicative mf, Monad mf)
              => ParserT [Char] st m (mf Blocks)  -- ^ Block list parser
              -> Bool                             -- ^ Headerless table
              -> ParserT [Char] st m (mf Blocks)
gridTableWith blocks headless =
  tableWith (gridTableHeader headless blocks) (gridTableRow blocks)
            (gridTableSep '-') gridTableFooter

gridTableSplitLine :: [Int] -> String -> [String]
gridTableSplitLine indices line = map removeFinalBar $ tail $
  splitStringByIndices (init indices) $ trimr line

gridPart :: Stream s m Char => Char -> ParserT s st m (Int, Int)
gridPart ch = do
  dashes <- many1 (char ch)
  char '+'
  return (length dashes, length dashes + 1)

gridDashedLines :: Stream s m Char => Char -> ParserT s st m [(Int,Int)]
gridDashedLines ch = try $ char '+' >> many1 (gridPart ch) <* blankline
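
-- Illustration (result shown in the comment; not checked): each pair is
-- (dash count, dash count plus the following '+'):
--
-- > parse (gridDashedLines '-') "" "+---+----+\n"   -- Right [(3,4),(4,5)]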

removeFinalBar :: String -> String
removeFinalBar =
  reverse . dropWhile (`elem` " \t") . dropWhile (=='|') . reverse

-- | Separator between rows of grid table.
gridTableSep :: Stream s m Char => Char -> ParserT s st m Char
gridTableSep ch = try $ gridDashedLines ch >> return '\n'

-- | Parse header for a grid table.
gridTableHeader :: (Stream [Char] m Char, Functor mf, Applicative mf, Monad mf)
                => Bool -- ^ Headerless table
                -> ParserT [Char] st m (mf Blocks)
                -> ParserT [Char] st m (mf [Blocks], [Alignment], [Int])
gridTableHeader headless blocks = try $ do
  optional blanklines
  dashes <- gridDashedLines '-'
  rawContent <- if headless
                   then return $ repeat ""
                   else many1
                        (notFollowedBy (gridTableSep '=') >> char '|' >>
                          many1Till anyChar newline)
  if headless
     then return ()
     else gridTableSep '=' >> return ()
  let lines'   = map snd dashes
  let indices  = scanl (+) 0 lines'
  let aligns   = replicate (length lines') AlignDefault
  -- RST does not have a notion of alignments
  let rawHeads = if headless
                    then replicate (length dashes) ""
                    else map (intercalate " ") $ transpose
                         $ map (gridTableSplitLine indices) rawContent
  heads <- fmap sequence . mapM (parseFromString blocks) $ map trim rawHeads
  return (heads, aligns, indices)

gridTableRawLine :: Stream s m Char => [Int] -> ParserT s st m [String]
gridTableRawLine indices = do
  char '|'
  line <- many1Till anyChar newline
  return (gridTableSplitLine indices line)

-- | Parse row of grid table.
gridTableRow :: (Stream [Char] m Char, Functor mf, Applicative mf, Monad mf)
             => ParserT [Char] st m (mf Blocks)
             -> [Int]
             -> ParserT [Char] st m (mf [Blocks])
gridTableRow blocks indices = do
  colLines <- many1 (gridTableRawLine indices)
  let cols = map ((++ "\n") . unlines . removeOneLeadingSpace) $
               transpose colLines
  cells <- sequence <$> mapM (parseFromString blocks) cols
  return $ fmap (map compactifyCell) cells

removeOneLeadingSpace :: [String] -> [String]
removeOneLeadingSpace xs =
  if all startsWithSpace xs
     then map (drop 1) xs
     else xs
  where startsWithSpace ""    = True
        startsWithSpace (y:_) = y == ' '

compactifyCell :: Blocks -> Blocks
compactifyCell bs = head $ compactify [bs]

-- | Parse footer for a grid table.
gridTableFooter :: Stream s m Char => ParserT s st m [Char]
gridTableFooter = blanklines

---

-- | Removes the ParsecT layer from the monad transformer stack
readWithM :: (Monad m)
          => ParserT [Char] st m a       -- ^ parser
          -> st                          -- ^ initial state
          -> String                      -- ^ input
          -> m (Either PandocError a)
readWithM parser state input =
  mapLeft (PandocParsecError input) `liftM` runParserT parser state "source" input

-- | Parse a string with a given parser and state
readWith :: Parser [Char] st a
         -> st
         -> String
         -> Either PandocError a
readWith p t inp = runIdentity $ readWithM p t inp
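
-- Illustration (result shown in the comment; not checked):
--
-- > readWith (many1 digit) () "2017"   -- Right "2017"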
|
2010-07-04 13:43:45 -07:00
|
|
|
|
|
|
|
-- | Parse a string with @parser@ (for testing).
|
2016-07-14 23:38:44 -07:00
|
|
|
testStringWith :: (Show a)
|
2014-07-20 13:51:03 -07:00
|
|
|
=> ParserT [Char] ParserState Identity a
|
|
|
|
-> [Char]
|
2010-07-04 13:43:45 -07:00
|
|
|
-> IO ()
|
2012-09-23 22:53:34 -07:00
|
|
|
testStringWith parser str = UTF8.putStrLn $ show $
|
2010-07-04 13:43:45 -07:00
|
|
|
readWith parser defaultParserState str
|
|
|
|
|
|
|
|
-- | Parsing options.
|
|
|
|
data ParserState = ParserState
|
2012-07-25 11:08:06 -07:00
|
|
|
{ stateOptions :: ReaderOptions, -- ^ User options
|
2010-07-04 13:43:45 -07:00
      stateParserContext     :: ParserContext, -- ^ Inside list?
      stateQuoteContext      :: QuoteContext,  -- ^ Inside quoted environment?
      stateAllowLinks        :: Bool,          -- ^ Allow parsing of links
      stateMaxNestingLevel   :: Int,           -- ^ Max # of nested Strong/Emph
      stateLastStrPos        :: Maybe SourcePos, -- ^ Position after last str parsed
      stateKeys              :: KeyTable,      -- ^ List of reference keys
      stateHeaderKeys        :: KeyTable,      -- ^ List of implicit header ref keys
      stateSubstitutions     :: SubstTable,    -- ^ List of substitution references
      stateNotes             :: NoteTable,     -- ^ List of notes (raw bodies)
      stateNotes'            :: NoteTable',    -- ^ List of notes (parsed bodies)
      stateMeta              :: Meta,          -- ^ Document metadata
      stateMeta'             :: F Meta,        -- ^ Document metadata
      stateCitations         :: M.Map String String, -- ^ RST-style citations
      stateHeaderTable       :: [HeaderType],  -- ^ Ordered list of header types used
      stateHeaders           :: M.Map Inlines String, -- ^ List of headers and ids (used for implicit ref links)
      stateIdentifiers       :: Set.Set String, -- ^ Header identifiers used
      stateNextExample       :: Int,           -- ^ Number of next example
      stateExamples          :: M.Map String Int, -- ^ Map from example labels to numbers
      stateMacros            :: [Macro],       -- ^ List of macros defined so far
      stateRstDefaultRole    :: String,        -- ^ Current rST default interpreted text role
      stateRstCustomRoles    :: M.Map String (String, Maybe String, Attr),
                             -- ^ Current rST custom text roles.  The triple
                             -- represents: 1) base role, 2) optional format
                             -- (only for :raw: roles), 3) additional classes
                             -- (the rest of Attr is unused).
      stateCaption           :: Maybe Inlines, -- ^ Caption in current environment
      stateInHtmlBlock       :: Maybe String,  -- ^ Tag type of HTML block being parsed
      stateContainers        :: [String],      -- ^ Parent include files
      stateLogMessages       :: [LogMessage],  -- ^ Log messages
      stateMarkdownAttribute :: Bool           -- ^ True if in markdown=1 context
      }

instance Default ParserState where
  def = defaultParserState

instance HasMeta ParserState where
  setMeta field val st =
    st{ stateMeta = setMeta field val $ stateMeta st }
  deleteMeta field st =
    st{ stateMeta = deleteMeta field $ stateMeta st }

class HasReaderOptions st where
  extractReaderOptions :: st -> ReaderOptions
  getOption            :: (Stream s m t) => (ReaderOptions -> b) -> ParserT s st m b
  -- default
  getOption f = (f . extractReaderOptions) <$> getState

class HasQuoteContext st m where
  getQuoteContext :: (Stream s m t) => ParsecT s st m QuoteContext
  withQuoteContext :: QuoteContext -> ParsecT s st m a -> ParsecT s st m a

instance Monad m => HasQuoteContext ParserState m where
  getQuoteContext = stateQuoteContext <$> getState
  withQuoteContext context parser = do
    oldState <- getState
    let oldQuoteContext = stateQuoteContext oldState
    setState oldState { stateQuoteContext = context }
    result <- parser
    newState <- getState
    setState newState { stateQuoteContext = oldQuoteContext }
    return result
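
-- A minimal usage sketch (added for illustration; 'inline' stands for a
-- reader's own inline parser, which is not defined in this module).  Running
-- a parser inside a quote context lets 'failIfInQuoteContext' reject a second
-- opening quote of the same kind, so quotes cannot nest by accident:
--
-- > innerSingleQuote = withQuoteContext InSingleQuote $
-- >   B.singleQuoted . mconcat <$> many1Till inline singleQuoteEnd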

instance HasReaderOptions ParserState where
  extractReaderOptions = stateOptions
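
-- A sketch of how a reader with its own, tailored state can reuse these
-- shared functions (illustration only; 'MyState' and its fields are
-- hypothetical and not part of this module).  Only 'extractReaderOptions'
-- needs to be supplied; the default 'getOption' definition then works as-is:
--
-- > data MyState = MyState { myOptions :: ReaderOptions
-- >                        , myNotes   :: NoteTable }
-- >
-- > instance HasReaderOptions MyState where
-- >   extractReaderOptions = myOptions
-- >
-- > -- getOption readerTabStop :: Stream s m t => ParserT s MyState m Int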

class HasHeaderMap st where
  extractHeaderMap :: st -> M.Map Inlines String
  updateHeaderMap  :: (M.Map Inlines String -> M.Map Inlines String) ->
                      st -> st

instance HasHeaderMap ParserState where
  extractHeaderMap     = stateHeaders
  updateHeaderMap f st = st{ stateHeaders = f $ stateHeaders st }

class HasIdentifierList st where
  extractIdentifierList :: st -> Set.Set String
  updateIdentifierList  :: (Set.Set String -> Set.Set String) -> st -> st

instance HasIdentifierList ParserState where
  extractIdentifierList     = stateIdentifiers
  updateIdentifierList f st = st{ stateIdentifiers = f $ stateIdentifiers st }

class HasMacros st where
  extractMacros :: st -> [Macro]
  updateMacros  :: ([Macro] -> [Macro]) -> st -> st

instance HasMacros ParserState where
  extractMacros     = stateMacros
  updateMacros f st = st{ stateMacros = f $ stateMacros st }

class HasLastStrPosition st where
  setLastStrPos :: SourcePos -> st -> st
  getLastStrPos :: st -> Maybe SourcePos

instance HasLastStrPosition ParserState where
  setLastStrPos pos st = st{ stateLastStrPos = Just pos }
  getLastStrPos st     = stateLastStrPos st

class HasLogMessages st where
  addLogMessage  :: LogMessage -> st -> st
  getLogMessages :: st -> [LogMessage]

instance HasLogMessages ParserState where
  addLogMessage msg st = st{ stateLogMessages = msg : stateLogMessages st }
  getLogMessages st    = reverse $ stateLogMessages st

defaultParserState :: ParserState
defaultParserState =
    ParserState { stateOptions           = def,
                  stateParserContext     = NullState,
                  stateQuoteContext      = NoQuote,
                  stateAllowLinks        = True,
                  stateMaxNestingLevel   = 6,
                  stateLastStrPos        = Nothing,
                  stateKeys              = M.empty,
                  stateHeaderKeys        = M.empty,
                  stateSubstitutions     = M.empty,
                  stateNotes             = [],
                  stateNotes'            = [],
                  stateMeta              = nullMeta,
                  stateMeta'             = return nullMeta,
                  stateCitations         = M.empty,
                  stateHeaderTable       = [],
                  stateHeaders           = M.empty,
                  stateIdentifiers       = Set.empty,
                  stateNextExample       = 1,
                  stateExamples          = M.empty,
                  stateMacros            = [],
                  stateRstDefaultRole    = "title-reference",
                  stateRstCustomRoles    = M.empty,
                  stateCaption           = Nothing,
                  stateInHtmlBlock       = Nothing,
                  stateContainers        = [],
                  stateLogMessages       = [],
                  stateMarkdownAttribute = False
                  }

-- | Add a log message.
logMessage :: (Stream s m a, HasLogMessages st)
           => LogMessage -> ParserT s st m ()
logMessage msg = updateState (addLogMessage msg)

-- | Report all the accumulated log messages, according to verbosity level.
reportLogMessages :: (PandocMonad m, HasLogMessages st) => ParserT s st m ()
reportLogMessages = do
  msgs <- getLogMessages <$> getState
  mapM_ report msgs
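
-- A usage sketch (illustration only; 'parseBlocks' stands for a reader's
-- top-level block parser and is not defined here).  Messages accumulate in
-- the parser state via 'logMessage' and are handed to the 'PandocMonad'
-- reporting machinery once parsing is finished:
--
-- > parseMyDocument = do
-- >   blocks <- parseBlocks
-- >   st <- getState
-- >   reportLogMessages
-- >   return $ Pandoc (stateMeta st) (B.toList blocks)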

-- | Succeed only if the extension is enabled.
guardEnabled :: (Stream s m a, HasReaderOptions st) => Extension -> ParserT s st m ()
guardEnabled ext = getOption readerExtensions >>= guard . extensionEnabled ext

-- | Succeed only if the extension is disabled.
guardDisabled :: (Stream s m a, HasReaderOptions st) => Extension -> ParserT s st m ()
guardDisabled ext = getOption readerExtensions >>= guard . not . extensionEnabled ext
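
-- A small sketch (illustration only): a parser that should fire only when a
-- given extension is active simply starts with a guard, so the alternative
-- fails cleanly when the extension is off:
--
-- > exampleCitation = try $ do
-- >   guardEnabled Ext_citations
-- >   citeKey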

-- | Update the position on which the last string ended.
updateLastStrPos :: (Stream s m a, HasLastStrPosition st) => ParserT s st m ()
updateLastStrPos = getPosition >>= updateState . setLastStrPos

-- | Whether we are right after the end of a string.
notAfterString :: (Stream s m a, HasLastStrPosition st) => ParserT s st m Bool
notAfterString = do
  pos <- getPosition
  st  <- getState
  return $ getLastStrPos st /= Just pos
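
-- A sketch of how readers use this pair (illustration only; the word parser
-- below is hypothetical).  A "str" parser records where it ended, so that
-- openers such as 'singleQuoteStart' or intraword emphasis can check
-- 'notAfterString' and refuse to fire in the middle of a word:
--
-- > myWord = do
-- >   xs <- many1 alphaNum
-- >   updateLastStrPos
-- >   return $ B.str xs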

data HeaderType
    = SingleHeader Char  -- ^ Single line of characters underneath
    | DoubleHeader Char  -- ^ Lines of characters above and below
    deriving (Eq, Show)

data ParserContext
    = ListItemState  -- ^ Used when running parser on list item contents
    | NullState      -- ^ Default state
    deriving (Eq, Show)

data QuoteContext
    = InSingleQuote  -- ^ Used when parsing inside single quotes
    | InDoubleQuote  -- ^ Used when parsing inside double quotes
    | NoQuote        -- ^ Used when not parsing inside quotes
    deriving (Eq, Show)

type NoteTable = [(String, String)]

type NoteTable' = [(String, F Blocks)]  -- used in markdown reader

newtype Key = Key String deriving (Show, Read, Eq, Ord)

toKey :: String -> Key
toKey = Key . map toLower . unwords . words . unbracket
  where unbracket ('[':xs) | "]" `isSuffixOf` xs = take (length xs - 1) xs
        unbracket xs       = xs
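
-- Worked examples (illustration only): reference keys are compared
-- case-insensitively, with runs of whitespace collapsed and one level of
-- enclosing brackets stripped:
--
-- > toKey "[Foo   Bar]"  ==  Key "foo bar"
-- > toKey "Foo bar"      ==  Key "foo bar"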

type KeyTable = M.Map Key (Target, Attr)

type SubstTable = M.Map Key Inlines

-- | Add header to the list of headers in state, together
-- with its associated identifier.  If the identifier is null
-- and the auto_identifiers extension is set, generate a new
-- unique identifier, and update the list of identifiers
-- in state.  Issue a warning if an explicit identifier
-- is encountered that duplicates an earlier identifier
-- (explicit or automatically generated).
registerHeader :: (Stream s m a, HasReaderOptions st,
                   HasHeaderMap st, HasLogMessages st, HasIdentifierList st)
               => Attr -> Inlines -> ParserT s st m Attr
registerHeader (ident,classes,kvs) header' = do
  ids <- extractIdentifierList <$> getState
  exts <- getOption readerExtensions
  let insert' = M.insertWith (\_new old -> old)
  if null ident && Ext_auto_identifiers `extensionEnabled` exts
     then do
       let id' = uniqueIdent (B.toList header') ids
       let id'' = if Ext_ascii_identifiers `extensionEnabled` exts
                     then catMaybes $ map toAsciiChar id'
                     else id'
       updateState $ updateIdentifierList $ Set.insert id'
       updateState $ updateIdentifierList $ Set.insert id''
       updateState $ updateHeaderMap $ insert' header' id'
       return (id'',classes,kvs)
     else do
       unless (null ident) $ do
         when (ident `Set.member` ids) $ do
           pos <- getPosition
           logMessage $ DuplicateIdentifier ident pos
         updateState $ updateIdentifierList $ Set.insert ident
         updateState $ updateHeaderMap $ insert' header' ident
       return (ident,classes,kvs)
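
-- A usage sketch (illustration only; 'inline' stands for the reader's own
-- inline parser, and the setext-style syntax is just an example).  A header
-- parser hands its attributes and text to 'registerHeader', which fills in a
-- unique identifier when none was given and records it for implicit
-- reference links:
--
-- > setextHeader = try $ do
-- >   txt <- mconcat <$> many1Till inline newline
-- >   char '=' >> skipMany (char '=') >> blankline
-- >   attr <- registerHeader nullAttr txt
-- >   return $ B.headerWith attr 1 txt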

smartPunctuation :: (HasReaderOptions st, HasLastStrPosition st, HasQuoteContext st m, Stream s m Char)
                 => ParserT s st m Inlines
                 -> ParserT s st m Inlines
smartPunctuation inlineParser = do
  guardEnabled Ext_smart
  choice [ quoted inlineParser, apostrophe, dash, ellipses ]

apostrophe :: Stream s m Char => ParserT s st m Inlines
apostrophe = (char '\'' <|> char '\8217') >> return (B.str "\x2019")

quoted :: (HasLastStrPosition st, HasQuoteContext st m, Stream s m Char)
       => ParserT s st m Inlines
       -> ParserT s st m Inlines
quoted inlineParser = doubleQuoted inlineParser <|> singleQuoted inlineParser

singleQuoted :: (HasLastStrPosition st, HasQuoteContext st m, Stream s m Char)
             => ParserT s st m Inlines
             -> ParserT s st m Inlines
singleQuoted inlineParser = try $ do
  singleQuoteStart
  withQuoteContext InSingleQuote $ many1Till inlineParser singleQuoteEnd >>=
    return . B.singleQuoted . mconcat

doubleQuoted :: (HasQuoteContext st m, Stream s m Char)
             => ParserT s st m Inlines
             -> ParserT s st m Inlines
doubleQuoted inlineParser = try $ do
  doubleQuoteStart
  withQuoteContext InDoubleQuote $ manyTill inlineParser doubleQuoteEnd >>=
    return . B.doubleQuoted . mconcat

failIfInQuoteContext :: (HasQuoteContext st m, Stream s m t)
                     => QuoteContext
                     -> ParserT s st m ()
failIfInQuoteContext context = do
  context' <- getQuoteContext
  if context' == context
     then fail "already inside quotes"
     else return ()

charOrRef :: Stream s m Char => String -> ParserT s st m Char
charOrRef cs =
  oneOf cs <|> try (do
    c <- characterReference
    guard (c `elem` cs)
    return c)

singleQuoteStart :: (HasLastStrPosition st, HasQuoteContext st m, Stream s m Char)
                 => ParserT s st m ()
singleQuoteStart = do
  failIfInQuoteContext InSingleQuote
  -- single quote start can't be right after str
  guard =<< notAfterString
  () <$ charOrRef "'\8216\145"

singleQuoteEnd :: Stream s m Char
               => ParserT s st m ()
singleQuoteEnd = try $ do
  charOrRef "'\8217\146"
  notFollowedBy alphaNum

doubleQuoteStart :: (HasQuoteContext st m, Stream s m Char)
                 => ParserT s st m ()
doubleQuoteStart = do
  failIfInQuoteContext InDoubleQuote
  try $ do
    charOrRef "\"\8220\147"
    notFollowedBy . satisfy $ flip elem [' ', '\t', '\n']

doubleQuoteEnd :: Stream s m Char
               => ParserT s st m ()
doubleQuoteEnd = void (charOrRef "\"\8221\148")

ellipses :: Stream s m Char
         => ParserT s st m Inlines
ellipses = try (string "..." >> return (B.str "\8230"))

dash :: (HasReaderOptions st, Stream s m Char)
     => ParserT s st m Inlines
dash = try $ do
  oldDashes <- extensionEnabled Ext_old_dashes <$> getOption readerExtensions
  if oldDashes
     then do
       char '-'
       (char '-' >> return (B.str "\8212"))
         <|> (lookAhead digit >> return (B.str "\8211"))
     else do
       string "--"
       (char '-' >> return (B.str "\8212"))
         <|> return (B.str "\8211")
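
-- Behaviour sketch (illustration only, under the default reader options,
-- where Ext_old_dashes is disabled):
--
-- > runParser dash defaultParserState "" "--"      -- en dash,  B.str "\8211"
-- > runParser dash defaultParserState "" "---"     -- em dash,  B.str "\8212"
-- > runParser ellipses defaultParserState "" "..." -- ellipsis, B.str "\8230"
--
-- With Ext_old_dashes enabled, "--" yields an em dash and "-" immediately
-- before a digit yields an en dash.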

-- This is used to prevent exponential blowups for things like:
-- a**a*a**a*a**a*a**a*a**a*a**a*a**
nested :: Stream s m a
       => ParserT s ParserState m a
       -> ParserT s ParserState m a
nested p = do
  nestlevel <- stateMaxNestingLevel <$> getState
  guard $ nestlevel > 0
  updateState $ \st -> st{ stateMaxNestingLevel = stateMaxNestingLevel st - 1 }
  res <- p
  updateState $ \st -> st{ stateMaxNestingLevel = nestlevel }
  return res
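
-- A usage sketch (illustration only; 'inline' stands for the reader's inline
-- parser).  Wrapping the body of an emphasis parser in 'nested' bounds the
-- recursion depth by 'stateMaxNestingLevel', so pathological inputs fail
-- fast instead of backtracking exponentially:
--
-- > emphasis = B.emph <$> nested
-- >   (mconcat <$> (char '*' *> many1Till inline (char '*')))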

citeKey :: (Stream s m Char, HasLastStrPosition st)
        => ParserT s st m (Bool, String)
citeKey = try $ do
  guard =<< notAfterString
  suppress_author <- option False (char '-' *> return True)
  char '@'
  firstChar <- alphaNum <|> char '_' <|> char '*' -- @* for wildcard in nocite
  let regchar = satisfy (\c -> isAlphaNum c || c == '_')
  let internal p = try $ p <* lookAhead regchar
  rest <- many $ regchar <|> internal (oneOf ":.#$%&-+?<>~/") <|>
                 try (oneOf ":/" <* lookAhead (char '/'))
  let key = firstChar:rest
  return (suppress_author, key)
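
-- Worked examples (illustration only): "@item1" yields (False, "item1");
-- "-@doe99" yields (True, "doe99"), suppressing the author.  Internal
-- punctuation is kept only when followed by another key character, so
-- "@foo.bar." yields (False, "foo.bar") and leaves the final '.' unconsumed.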

token :: (Stream s m t)
      => (t -> String)
      -> (t -> SourcePos)
      -> (t -> Maybe a)
      -> ParsecT s st m a
token pp pos match = tokenPrim pp (\_ t _ -> pos t) match

--
-- Macros
--

-- | Parse a \newcommand or \newenvironment macro definition.
macro :: (Stream [Char] m Char, HasMacros st, HasReaderOptions st)
      => ParserT [Char] st m Blocks
macro = do
  apply <- getOption readerApplyMacros
  (m, def') <- withRaw pMacroDefinition
  if apply
     then do
       updateState $ \st -> updateMacros (m:) st
       return mempty
     else return $ rawBlock "latex" def'

-- | Apply current macros to string.
applyMacros' :: (HasReaderOptions st, HasMacros st, Stream [Char] m Char)
             => String
             -> ParserT [Char] st m String
applyMacros' target = do
  apply <- getOption readerApplyMacros
  if apply
     then do
       macros <- extractMacros <$> getState
       return $ applyMacros macros target
     else return target

infixr 5 <+?>
(<+?>) :: (Monoid a) => ParserT s st m a -> ParserT s st m a -> ParserT s st m a
a <+?> b = a >>= flip fmap (try b <|> return mempty) . (<>)
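
-- A usage sketch (illustration only): p <+?> q runs p and then appends the
-- result of q if it succeeds, or 'mempty' if it does not, without consuming
-- input on failure:
--
-- > letters = B.str <$> many1 letter
-- > digits  = B.str <$> many1 digit
-- > -- letters <+?> digits parses "ab12" as str "ab" <> str "12",
-- > -- and "ab" as just str "ab".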

extractIdClass :: Attr -> Attr
extractIdClass (ident, cls, kvs) = (ident', cls', kvs')
  where
    ident' = case lookup "id" kvs of
               Just v  -> v
               Nothing -> ident
    cls'   = case lookup "class" kvs of
               Just cl -> words cl
               Nothing -> cls
    -- drop the "id" and "class" entries that have been promoted into the
    -- identifier and class fields (the original `||` test kept everything)
    kvs'   = filter (\(k,_) -> k /= "id" && k /= "class") kvs
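
-- A worked example (illustration only):
--
-- > extractIdClass ("fallback", ["x"], [("id","sec1"),("class","a b"),("k","v")])
-- >   == ("sec1", ["a","b"], [("k","v")])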

insertIncludedFile :: PandocMonad m
                   => ParserT String ParserState m Blocks
                   -> [FilePath] -> FilePath
                   -> ParserT String ParserState m Blocks
insertIncludedFile blocks dirs f = do
  oldPos <- getPosition
  oldInput <- getInput
  containers <- stateContainers <$> getState
  when (f `elem` containers) $
    throwError $ PandocParseError $ "Include file loop at " ++ show oldPos
  updateState $ \s -> s{ stateContainers = f : stateContainers s }
  mbcontents <- readFileFromDirs dirs f
  contents <- case mbcontents of
        Just s  -> return s
        Nothing -> do
          report $ CouldNotLoadIncludeFile f oldPos
          return ""
  setPosition $ newPos f 1 1
  setInput contents
  bs <- blocks
  setInput oldInput
  setPosition oldPos
  updateState $ \s -> s{ stateContainers = tail $ stateContainers s }
  return bs
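
-- A usage sketch (illustration only; 'parseBlocks' stands for the reader's
-- block parser and the "%include" directive syntax is hypothetical).  The
-- included file is parsed with the current state, while 'stateContainers'
-- guards against include loops:
--
-- > includeFile = try $ do
-- >   string "%include"
-- >   skipSpaces
-- >   f <- manyTill anyChar newline
-- >   insertIncludedFile parseBlocks ["."] f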