Merge branch 'master' into groff_reader
This commit is contained in:
commit
a337685fe0
416 changed files with 17491 additions and 5810 deletions
|
@ -49,6 +49,10 @@ matrix:
|
|||
compiler: ": #GHC 8.2.2"
|
||||
addons: {apt: {packages: [cabal-install-2.0,ghc-8.2.2,happy-1.19.5], sources: [hvr-ghc]}}
|
||||
|
||||
- env: BUILD=cabal GHCVER=8.4.1 CABALVER=2.0 OPTS="-O0 -Wall -Wincomplete-record-updates -Wnoncanonical-monad-instances -Wnoncanonical-monadfail-instances -fno-warn-unused-do-bind -Werror" FLAGS="fast embed_data_files" CABALARGS="--enable-benchmarks"
|
||||
compiler: ": #GHC 8.4.1"
|
||||
addons: {apt: {packages: [cabal-install-2.0,ghc-8.4.1,happy-1.19.5], sources: [hvr-ghc]}}
|
||||
|
||||
# Build with the newest GHC and cabal-install. This is an accepted failure,
|
||||
# see below.
|
||||
# - env: BUILD=cabal GHCVER=head CABALVER=head CABALAGS="--allow-newer"
|
||||
|
@ -123,7 +127,7 @@ script:
|
|||
stack)
|
||||
ulimit -n 4096
|
||||
stack config set system-ghc --global true
|
||||
stack --no-terminal $ARGS test --fast --flag 'aeson:fast' --flag pandoc:embed_data_files --haddock --no-haddock-deps --ghc-options="$OPTS"
|
||||
stack --no-terminal $ARGS test --fast --flag 'aeson:fast' --flag pandoc:embed_data_files --haddock --no-haddock-deps --ghc-options="$OPTS" --test-arguments='--hide-successes'
|
||||
;;
|
||||
cabal)
|
||||
cabal sdist --output-directory=sourcedist && \
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
# Contributors
|
||||
|
||||
- Anabra
|
||||
- Arata Mizuki
|
||||
- Aaron Wolen
|
||||
- Albert Krewinkel
|
||||
|
@ -49,8 +50,10 @@
|
|||
- Emily Eisenberg
|
||||
- Eric Kow
|
||||
- Eric Seidel
|
||||
- Étienne Bersac
|
||||
- Felix Yan
|
||||
- Florian Eitel
|
||||
- Francesco Occhipinti
|
||||
- François Gannaz
|
||||
- Freiric Barral
|
||||
- Freirich Raabe
|
||||
|
@ -83,6 +86,7 @@
|
|||
- Jens Getreu
|
||||
- Jens Petersen
|
||||
- Jesse Rosenthal
|
||||
- Joe Hermaszewski
|
||||
- Joe Hillenbrand
|
||||
- John MacFarlane
|
||||
- John Muccigrosso
|
||||
|
@ -129,6 +133,7 @@
|
|||
- Nick Bart
|
||||
- Nicolas Kaiser
|
||||
- Nikolay Yakimov
|
||||
- Nokome Bentley
|
||||
- Oliver Matthews
|
||||
- Ophir Lifshitz
|
||||
- Or Neeman
|
||||
|
@ -186,6 +191,7 @@
|
|||
- lwolfsonkin
|
||||
- nkalvi
|
||||
- oltolm
|
||||
- quasicomputational
|
||||
- qerub
|
||||
- robabla
|
||||
- roblabla
|
||||
|
|
|
@ -205,6 +205,10 @@ placed in the source directory):
|
|||
Profiling
|
||||
---------
|
||||
|
||||
To diagnose a performance issue with parsing, first try using
|
||||
the `--trace` option. This will give you a record of when block
|
||||
parsers succeed, so you can spot backtracking issues.
|
||||
|
||||
To use the GHC profiler with cabal:
|
||||
|
||||
cabal clean
|
||||
|
|
35
COPYRIGHT
35
COPYRIGHT
|
@ -69,8 +69,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
Pandoc's templates (in `data/templates`) are dual-licensed GPL (v2 or
|
||||
higher, same as pandoc) and the BSD 3-clause license.
|
||||
Pandoc's templates (in `data/templates`) are dual-licensed as either
|
||||
GPL (v2 or higher, same as pandoc) or (at your option) the BSD
|
||||
3-clause license.
|
||||
|
||||
Copyright (c) 2014--2018, John MacFarlane
|
||||
|
||||
|
@ -118,6 +119,24 @@ Copyright (C) 2010-2018 Paul Rivier and John MacFarlane
|
|||
|
||||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
src/Text/Pandoc/Readers/TikiWiki.hs
|
||||
Copyright (C) 2017 Robin Lee Powell
|
||||
|
||||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
src/Text/Pandoc/Readers/JATS.hs
|
||||
Copyright (C) 2017-2018 Hamish Mackenzie
|
||||
|
||||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
src/Text/Pandoc/Readers/EPUB.hs
|
||||
Copyright (C) 2014-2018 Matthew Pickering
|
||||
|
||||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
src/Text/Pandoc/Readers/Org.hs
|
||||
src/Text/Pandoc/Readers/Org/*
|
||||
|
@ -127,20 +146,12 @@ Copyright (C) 2014-2018 Albert Krewinkel
|
|||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
data/LaTeXMathML.js
|
||||
Adapted by Jeff Knisely and Douglas Woodall from
|
||||
ASCIIMathML.js v. 1.4.7
|
||||
Copyright (C) 2005 Peter Jipsen
|
||||
src/Text/Pandoc/Readers/FB2.hs
|
||||
Copyright (C) 2018 Alexander Krotov
|
||||
|
||||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
data/MathMLinHTML.js
|
||||
Copyright (C) 2004 Peter Jipsen http://www.chapman.edu/~jipsen
|
||||
|
||||
Released under the GNU General Public License version 2 or later.
|
||||
|
||||
------------------------------------------------------------------------
|
||||
data/pandoc.lua
|
||||
Copyright (C) 2017-2018 Albert Krewinkel
|
||||
|
||||
|
|
|
@ -125,7 +125,7 @@ The easiest way to build pandoc from source is to use [stack]:
|
|||
2. Change to the pandoc source directory and issue the following commands:
|
||||
|
||||
stack setup
|
||||
stack install --test
|
||||
stack install
|
||||
|
||||
`stack setup` will automatically download the ghc compiler
|
||||
if you don't have it. `stack install` will install the
|
||||
|
@ -145,7 +145,7 @@ The easiest way to build pandoc from source is to use [stack]:
|
|||
|
||||
3. Use `cabal` to install pandoc and its dependencies:
|
||||
|
||||
cabal install pandoc --enable-tests
|
||||
cabal install pandoc
|
||||
|
||||
This procedure will install the released version of pandoc,
|
||||
which will be downloaded automatically from HackageDB.
|
||||
|
@ -272,6 +272,7 @@ test`.
|
|||
To run particular tests (pattern-matching on their names), use
|
||||
the `-p` option:
|
||||
|
||||
cabal install pandoc --enable-tests
|
||||
cabal test --test-options='-p markdown'
|
||||
|
||||
Or with stack:
|
||||
|
|
468
MANUAL.txt
468
MANUAL.txt
|
@ -1,6 +1,6 @@
|
|||
% Pandoc User's Guide
|
||||
% John MacFarlane
|
||||
% January 18, 2018
|
||||
% April 26, 2018
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
@ -13,37 +13,19 @@ Description
|
|||
Pandoc is a [Haskell] library for converting from one markup format to
|
||||
another, and a command-line tool that uses this library.
|
||||
|
||||
Pandoc can read [Markdown], [CommonMark], [PHP Markdown Extra],
|
||||
[GitHub-Flavored Markdown], [MultiMarkdown], and (subsets of) [Textile],
|
||||
[reStructuredText], [HTML], [LaTeX], [MediaWiki markup], [TWiki
|
||||
markup], [TikiWiki markup], [Creole 1.0], [Haddock markup], [OPML],
|
||||
[Emacs Org mode], [DocBook], [JATS], [Muse], [txt2tags], [Vimwiki],
|
||||
[EPUB], [ODT], and [Word docx].
|
||||
|
||||
Pandoc can write plain text, [Markdown],
|
||||
[CommonMark], [PHP Markdown Extra], [GitHub-Flavored Markdown],
|
||||
[MultiMarkdown], [reStructuredText], [XHTML], [HTML5], [LaTeX]
|
||||
\(including [`beamer`] slide shows\), [ConTeXt], [RTF], [OPML],
|
||||
[DocBook], [JATS], [OpenDocument], [ODT], [Word docx], [GNU Texinfo],
|
||||
[MediaWiki markup], [DokuWiki markup], [ZimWiki markup], [Haddock
|
||||
markup], [EPUB] \(v2 or v3\), [FictionBook2], [Textile], [groff man],
|
||||
[groff ms], [Emacs Org mode], [AsciiDoc], [InDesign ICML], [TEI
|
||||
Simple], [Muse], [PowerPoint] slide shows and [Slidy], [Slideous],
|
||||
[DZSlides], [reveal.js] or [S5] HTML slide shows. It can also produce
|
||||
[PDF] output on systems where LaTeX, ConTeXt, `pdfroff`,
|
||||
`wkhtmltopdf`, `prince`, or `weasyprint` is installed.
|
||||
Pandoc can convert between numerous markup and word processing formats,
|
||||
including, but not limited to, various flavors of [Markdown], [HTML],
|
||||
[LaTeX] and [Word docx]. For the full lists of input and output formats,
|
||||
see the `--from` and `--to` [options below][General options].
|
||||
Pandoc can also produce [PDF] output: see [creating a PDF], below.
|
||||
|
||||
Pandoc's enhanced version of Markdown includes syntax for [tables],
|
||||
[definition lists], [metadata blocks], [`Div` blocks][Extension:
|
||||
`fenced_divs`], [footnotes] and [citations], embedded
|
||||
[LaTeX][Extension: `raw_tex`] (including [math]), [Markdown inside HTML
|
||||
block elements][Extension: `markdown_in_html_blocks`], and much more.
|
||||
These enhancements, described further under [Pandoc's Markdown],
|
||||
can be disabled using the `markdown_strict` format.
|
||||
[definition lists], [metadata blocks], [footnotes], [citations], [math],
|
||||
and much more. See below under [Pandoc's Markdown].
|
||||
|
||||
Pandoc has a modular design: it consists of a set of readers, which parse
|
||||
text in a given format and produce a native representation of the document
|
||||
(like an _abstract syntax tree_ or AST), and a set of writers, which convert
|
||||
(an _abstract syntax tree_ or AST), and a set of writers, which convert
|
||||
this native representation into a target format. Thus, adding an input
|
||||
or output format requires only adding a reader or writer. Users can also
|
||||
run custom [pandoc filters] to modify the intermediate AST.
|
||||
|
@ -58,56 +40,6 @@ model. While conversions from pandoc's Markdown to all formats aspire
|
|||
to be perfect, conversions from formats more expressive than pandoc's
|
||||
Markdown can be expected to be lossy.
|
||||
|
||||
[Markdown]: http://daringfireball.net/projects/markdown/
|
||||
[CommonMark]: http://commonmark.org
|
||||
[PHP Markdown Extra]: https://michelf.ca/projects/php-markdown/extra/
|
||||
[GitHub-Flavored Markdown]: https://help.github.com/articles/github-flavored-markdown/
|
||||
[MultiMarkdown]: http://fletcherpenney.net/multimarkdown/
|
||||
[reStructuredText]: http://docutils.sourceforge.net/docs/ref/rst/introduction.html
|
||||
[S5]: http://meyerweb.com/eric/tools/s5/
|
||||
[Slidy]: http://www.w3.org/Talks/Tools/Slidy/
|
||||
[Slideous]: http://goessner.net/articles/slideous/
|
||||
[HTML]: http://www.w3.org/html/
|
||||
[HTML5]: http://www.w3.org/TR/html5/
|
||||
[polyglot markup]: https://www.w3.org/TR/html-polyglot/
|
||||
[XHTML]: http://www.w3.org/TR/xhtml1/
|
||||
[LaTeX]: http://latex-project.org
|
||||
[`beamer`]: https://ctan.org/pkg/beamer
|
||||
[Beamer User's Guide]: http://ctan.math.utah.edu/ctan/tex-archive/macros/latex/contrib/beamer/doc/beameruserguide.pdf
|
||||
[ConTeXt]: http://www.contextgarden.net/
|
||||
[RTF]: http://en.wikipedia.org/wiki/Rich_Text_Format
|
||||
[DocBook]: http://docbook.org
|
||||
[JATS]: https://jats.nlm.nih.gov
|
||||
[txt2tags]: http://txt2tags.org
|
||||
[EPUB]: http://idpf.org/epub
|
||||
[OPML]: http://dev.opml.org/spec2.html
|
||||
[OpenDocument]: http://opendocument.xml.org
|
||||
[ODT]: http://en.wikipedia.org/wiki/OpenDocument
|
||||
[Textile]: http://redcloth.org/textile
|
||||
[MediaWiki markup]: https://www.mediawiki.org/wiki/Help:Formatting
|
||||
[DokuWiki markup]: https://www.dokuwiki.org/dokuwiki
|
||||
[ZimWiki markup]: http://zim-wiki.org/manual/Help/Wiki_Syntax.html
|
||||
[TWiki markup]: http://twiki.org/cgi-bin/view/TWiki/TextFormattingRules
|
||||
[TikiWiki markup]: https://doc.tiki.org/Wiki-Syntax-Text#The_Markup_Language_Wiki-Syntax
|
||||
[Haddock markup]: https://www.haskell.org/haddock/doc/html/ch03s08.html
|
||||
[Creole 1.0]: http://www.wikicreole.org/wiki/Creole1.0
|
||||
[groff man]: http://man7.org/linux/man-pages/man7/groff_man.7.html
|
||||
[groff ms]: http://man7.org/linux/man-pages/man7/groff_ms.7.html
|
||||
[Haskell]: https://www.haskell.org
|
||||
[GNU Texinfo]: http://www.gnu.org/software/texinfo/
|
||||
[Emacs Org mode]: http://orgmode.org
|
||||
[AsciiDoc]: http://www.methods.co.nz/asciidoc/
|
||||
[DZSlides]: http://paulrouget.com/dzslides/
|
||||
[Word docx]: https://en.wikipedia.org/wiki/Office_Open_XML
|
||||
[PDF]: https://www.adobe.com/pdf/
|
||||
[reveal.js]: http://lab.hakim.se/reveal-js/
|
||||
[FictionBook2]: http://www.fictionbook.org/index.php/Eng:XML_Schema_Fictionbook_2.1
|
||||
[InDesign ICML]: http://wwwimages.adobe.com/www.adobe.com/content/dam/acom/en/devnet/indesign/sdk/cs6/idml/idml-cookbook.pdf
|
||||
[TEI Simple]: https://github.com/TEIC/TEI-Simple
|
||||
[Muse]: https://amusewiki.org/library/manual
|
||||
[PowerPoint]: https://en.wikipedia.org/wiki/Microsoft_PowerPoint
|
||||
[Vimwiki]: https://vimwiki.github.io
|
||||
|
||||
Using `pandoc`
|
||||
--------------
|
||||
|
||||
|
@ -283,20 +215,42 @@ General options
|
|||
|
||||
`-f` *FORMAT*, `-r` *FORMAT*, `--from=`*FORMAT*, `--read=`*FORMAT*
|
||||
|
||||
: Specify input format. *FORMAT* can be `native` (native Haskell),
|
||||
`json` (JSON version of native AST), `markdown` (pandoc's
|
||||
extended Markdown), `markdown_strict` (original unextended
|
||||
Markdown), `markdown_phpextra` (PHP Markdown Extra),
|
||||
`markdown_mmd` (MultiMarkdown), `gfm` (GitHub-Flavored Markdown),
|
||||
`commonmark` (CommonMark Markdown), `textile` (Textile), `rst`
|
||||
(reStructuredText), `html` (HTML), `docbook` (DocBook), `t2t`
|
||||
(txt2tags), `docx` (docx), `odt` (ODT), `epub` (EPUB), `opml` (OPML),
|
||||
`org` (Emacs Org mode), `mediawiki` (MediaWiki markup), `twiki` (TWiki
|
||||
markup), `tikiwiki` (TikiWiki markup), `creole` (Creole 1.0),
|
||||
`haddock` (Haddock markup), or `latex` (LaTeX).
|
||||
(`markdown_github` provides deprecated and less accurate support
|
||||
for Github-Flavored Markdown; please use `gfm` instead, unless you
|
||||
need to use extensions other than `smart`.)
|
||||
: Specify input format. *FORMAT* can be:
|
||||
|
||||
::: {#input-formats}
|
||||
- `commonmark` ([CommonMark] Markdown)
|
||||
- `creole` ([Creole 1.0])
|
||||
- `docbook` ([DocBook])
|
||||
- `docx` ([Word docx])
|
||||
- `epub` ([EPUB])
|
||||
- `fb2` ([FictionBook2] e-book)
|
||||
- `gfm` ([GitHub-Flavored Markdown]),
|
||||
or `markdown_github`, which provides deprecated and less accurate
|
||||
support for Github-Flavored Markdown; please use `gfm` instead,
|
||||
unless you need to use extensions other than `smart`.
|
||||
- `haddock` ([Haddock markup])
|
||||
- `html` ([HTML])
|
||||
- `jats` ([JATS] XML)
|
||||
- `json` (JSON version of native AST)
|
||||
- `latex` ([LaTeX])
|
||||
- `markdown` ([Pandoc's Markdown])
|
||||
- `markdown_mmd` ([MultiMarkdown])
|
||||
- `markdown_phpextra` ([PHP Markdown Extra])
|
||||
- `markdown_strict` (original unextended [Markdown])
|
||||
- `mediawiki` ([MediaWiki markup])
|
||||
- `muse` ([Muse])
|
||||
- `native` (native Haskell)
|
||||
- `odt` ([ODT])
|
||||
- `opml` ([OPML])
|
||||
- `org` ([Emacs Org mode])
|
||||
- `rst` ([reStructuredText])
|
||||
- `t2t` ([txt2tags])
|
||||
- `textile` ([Textile])
|
||||
- `tikiwiki` ([TikiWiki markup])
|
||||
- `twiki` ([TWiki markup])
|
||||
- `vimwiki` ([Vimwiki])
|
||||
:::
|
||||
|
||||
Extensions can be individually enabled or disabled by
|
||||
appending `+EXTENSION` or `-EXTENSION` to the format name.
|
||||
See [Extensions] below, for a list of extensions and
|
||||
|
@ -305,34 +259,64 @@ General options
|
|||
|
||||
`-t` *FORMAT*, `-w` *FORMAT*, `--to=`*FORMAT*, `--write=`*FORMAT*
|
||||
|
||||
: Specify output format. *FORMAT* can be `native` (native Haskell),
|
||||
`json` (JSON version of native AST), `plain` (plain text),
|
||||
`markdown` (pandoc's extended Markdown), `markdown_strict`
|
||||
(original unextended Markdown), `markdown_phpextra` (PHP Markdown
|
||||
Extra), `markdown_mmd` (MultiMarkdown), `gfm` (GitHub-Flavored
|
||||
Markdown), `commonmark` (CommonMark Markdown), `rst`
|
||||
(reStructuredText), `html4` (XHTML 1.0 Transitional), `html` or
|
||||
`html5` (HTML5/XHTML [polyglot markup]), `latex` (LaTeX), `beamer`
|
||||
(LaTeX beamer slide show), `context` (ConTeXt), `man` (groff man),
|
||||
`mediawiki` (MediaWiki markup), `dokuwiki` (DokuWiki markup),
|
||||
`zimwiki` (ZimWiki markup), `textile` (Textile), `org` (Emacs Org
|
||||
mode), `texinfo` (GNU Texinfo), `opml` (OPML), `docbook` or
|
||||
`docbook4` (DocBook 4), `docbook5` (DocBook 5), `jats` (JATS XML),
|
||||
`opendocument` (OpenDocument), `odt` (OpenOffice text document),
|
||||
`docx` (Word docx), `haddock` (Haddock markup), `rtf` (rich text
|
||||
format), `epub2` (EPUB v2 book), `epub` or `epub3` (EPUB v3),
|
||||
`fb2` (FictionBook2 e-book), `asciidoc` (AsciiDoc), `icml`
|
||||
(InDesign ICML), `tei` (TEI Simple), `slidy` (Slidy HTML and
|
||||
JavaScript slide show), `slideous` (Slideous HTML and JavaScript
|
||||
slide show), `dzslides` (DZSlides HTML5 + JavaScript slide show),
|
||||
`revealjs` (reveal.js HTML5 + JavaScript slide show), `s5` (S5
|
||||
HTML and JavaScript slide show), `pptx` (PowerPoint slide show) or
|
||||
the path of a custom lua writer (see [Custom writers],
|
||||
below). (`markdown_github` provides deprecated and less accurate
|
||||
support for Github-Flavored Markdown; please use `gfm` instead,
|
||||
unless you use extensions that do not work with `gfm`.) Note that
|
||||
`odt`, `docx`, and `epub` output will not be directed to *stdout*
|
||||
unless forced with `-o -`. Extensions can be individually enabled or
|
||||
: Specify output format. *FORMAT* can be:
|
||||
|
||||
::: {#output-formats}
|
||||
- `asciidoc` ([AsciiDoc])
|
||||
- `beamer` ([LaTeX beamer][`beamer`] slide show)
|
||||
- `commonmark` ([CommonMark] Markdown)
|
||||
- `context` ([ConTeXt])
|
||||
- `docbook` or `docbook4` ([DocBook] 4)
|
||||
- `docbook5` (DocBook 5)
|
||||
- `docx` ([Word docx])
|
||||
- `dokuwiki` ([DokuWiki markup])
|
||||
- `epub` or `epub3` ([EPUB] v3 book)
|
||||
- `epub2` (EPUB v2)
|
||||
- `fb2` ([FictionBook2] e-book)
|
||||
- `gfm` ([GitHub-Flavored Markdown]),
|
||||
or `markdown_github`, which provides deprecated and less accurate
|
||||
support for Github-Flavored Markdown; please use `gfm` instead,
|
||||
unless you use extensions that do not work with `gfm`.
|
||||
- `haddock` ([Haddock markup])
|
||||
- `html` or `html5` ([HTML], i.e. [HTML5]/XHTML [polyglot markup])
|
||||
- `html4` ([XHTML] 1.0 Transitional)
|
||||
- `icml` ([InDesign ICML])
|
||||
- `jats` ([JATS] XML)
|
||||
- `json` (JSON version of native AST)
|
||||
- `latex` ([LaTeX])
|
||||
- `man` ([groff man])
|
||||
- `markdown` ([Pandoc's Markdown])
|
||||
- `markdown_mmd` ([MultiMarkdown])
|
||||
- `markdown_phpextra` ([PHP Markdown Extra])
|
||||
- `markdown_strict` (original unextended [Markdown])
|
||||
- `mediawiki` ([MediaWiki markup])
|
||||
- `ms` ([groff ms])
|
||||
- `muse` ([Muse]),
|
||||
- `native` (native Haskell),
|
||||
- `odt` ([OpenOffice text document][ODT])
|
||||
- `opml` ([OPML])
|
||||
- `opendocument` ([OpenDocument])
|
||||
- `org` ([Emacs Org mode])
|
||||
- `plain` (plain text),
|
||||
- `pptx` ([PowerPoint] slide show)
|
||||
- `rst` ([reStructuredText])
|
||||
- `rtf` ([Rich Text Format])
|
||||
- `texinfo` ([GNU Texinfo])
|
||||
- `textile` ([Textile])
|
||||
- `slideous` ([Slideous] HTML and JavaScript slide show)
|
||||
- `slidy` ([Slidy] HTML and JavaScript slide show)
|
||||
- `dzslides` ([DZSlides] HTML5 + JavaScript slide show),
|
||||
- `revealjs` ([reveal.js] HTML5 + JavaScript slide show)
|
||||
- `s5` ([S5] HTML and JavaScript slide show)
|
||||
- `tei` ([TEI Simple])
|
||||
- `zimwiki` ([ZimWiki markup])
|
||||
- the path of a custom lua writer, see [Custom writers] below
|
||||
:::
|
||||
|
||||
Note that `odt`, `docx`, and `epub` output will not be directed
|
||||
to *stdout* unless forced with `-o -`.
|
||||
|
||||
Extensions can be individually enabled or
|
||||
disabled by appending `+EXTENSION` or `-EXTENSION` to the format
|
||||
name. See [Extensions] below, for a list of extensions and their
|
||||
names. See `--list-output-formats` and `--list-extensions`, below.
|
||||
|
@ -424,6 +408,56 @@ General options
|
|||
|
||||
: Show usage message.
|
||||
|
||||
[Markdown]: http://daringfireball.net/projects/markdown/
|
||||
[CommonMark]: http://commonmark.org
|
||||
[PHP Markdown Extra]: https://michelf.ca/projects/php-markdown/extra/
|
||||
[GitHub-Flavored Markdown]: https://help.github.com/articles/github-flavored-markdown/
|
||||
[MultiMarkdown]: http://fletcherpenney.net/multimarkdown/
|
||||
[reStructuredText]: http://docutils.sourceforge.net/docs/ref/rst/introduction.html
|
||||
[S5]: http://meyerweb.com/eric/tools/s5/
|
||||
[Slidy]: http://www.w3.org/Talks/Tools/Slidy/
|
||||
[Slideous]: http://goessner.net/articles/slideous/
|
||||
[HTML]: http://www.w3.org/html/
|
||||
[HTML5]: http://www.w3.org/TR/html5/
|
||||
[polyglot markup]: https://www.w3.org/TR/html-polyglot/
|
||||
[XHTML]: http://www.w3.org/TR/xhtml1/
|
||||
[LaTeX]: http://latex-project.org
|
||||
[`beamer`]: https://ctan.org/pkg/beamer
|
||||
[Beamer User's Guide]: http://ctan.math.utah.edu/ctan/tex-archive/macros/latex/contrib/beamer/doc/beameruserguide.pdf
|
||||
[ConTeXt]: http://www.contextgarden.net/
|
||||
[Rich Text Format]: http://en.wikipedia.org/wiki/Rich_Text_Format
|
||||
[DocBook]: http://docbook.org
|
||||
[JATS]: https://jats.nlm.nih.gov
|
||||
[txt2tags]: http://txt2tags.org
|
||||
[EPUB]: http://idpf.org/epub
|
||||
[OPML]: http://dev.opml.org/spec2.html
|
||||
[OpenDocument]: http://opendocument.xml.org
|
||||
[ODT]: http://en.wikipedia.org/wiki/OpenDocument
|
||||
[Textile]: http://redcloth.org/textile
|
||||
[MediaWiki markup]: https://www.mediawiki.org/wiki/Help:Formatting
|
||||
[DokuWiki markup]: https://www.dokuwiki.org/dokuwiki
|
||||
[ZimWiki markup]: http://zim-wiki.org/manual/Help/Wiki_Syntax.html
|
||||
[TWiki markup]: http://twiki.org/cgi-bin/view/TWiki/TextFormattingRules
|
||||
[TikiWiki markup]: https://doc.tiki.org/Wiki-Syntax-Text#The_Markup_Language_Wiki-Syntax
|
||||
[Haddock markup]: https://www.haskell.org/haddock/doc/html/ch03s08.html
|
||||
[Creole 1.0]: http://www.wikicreole.org/wiki/Creole1.0
|
||||
[groff man]: http://man7.org/linux/man-pages/man7/groff_man.7.html
|
||||
[groff ms]: http://man7.org/linux/man-pages/man7/groff_ms.7.html
|
||||
[Haskell]: https://www.haskell.org
|
||||
[GNU Texinfo]: http://www.gnu.org/software/texinfo/
|
||||
[Emacs Org mode]: http://orgmode.org
|
||||
[AsciiDoc]: http://www.methods.co.nz/asciidoc/
|
||||
[DZSlides]: http://paulrouget.com/dzslides/
|
||||
[Word docx]: https://en.wikipedia.org/wiki/Office_Open_XML
|
||||
[PDF]: https://www.adobe.com/pdf/
|
||||
[reveal.js]: http://lab.hakim.se/reveal-js/
|
||||
[FictionBook2]: http://www.fictionbook.org/index.php/Eng:XML_Schema_Fictionbook_2.1
|
||||
[InDesign ICML]: http://wwwimages.adobe.com/www.adobe.com/content/dam/acom/en/devnet/indesign/sdk/cs6/idml/idml-cookbook.pdf
|
||||
[TEI Simple]: https://github.com/TEIC/TEI-Simple
|
||||
[Muse]: https://amusewiki.org/library/manual
|
||||
[PowerPoint]: https://en.wikipedia.org/wiki/Microsoft_PowerPoint
|
||||
[Vimwiki]: https://vimwiki.github.io
|
||||
|
||||
Reader options
|
||||
--------------
|
||||
|
||||
|
@ -521,17 +555,27 @@ Reader options
|
|||
|
||||
return {{Str = expand_hello_world}}
|
||||
|
||||
In order of preference, pandoc will look for lua filters in
|
||||
|
||||
1. a specified full or relative path (executable or
|
||||
non-executable)
|
||||
|
||||
2. `$DATADIR/filters` (executable or non-executable)
|
||||
where `$DATADIR` is the user data directory (see
|
||||
`--data-dir`, above).
|
||||
|
||||
`-M` *KEY*[`=`*VAL*], `--metadata=`*KEY*[`:`*VAL*]
|
||||
|
||||
: Set the metadata field *KEY* to the value *VAL*. A value specified
|
||||
on the command line overrides a value specified in the document.
|
||||
on the command line overrides a value specified in the document
|
||||
using [YAML metadata blocks][Extension: `yaml_metadata_block`].
|
||||
Values will be parsed as YAML boolean or string values. If no value is
|
||||
specified, the value will be treated as Boolean true. Like
|
||||
`--variable`, `--metadata` causes template variables to be set.
|
||||
But unlike `--variable`, `--metadata` affects the metadata of the
|
||||
underlying document (which is accessible from filters and may be
|
||||
printed in some output formats).
|
||||
printed in some output formats) and metadata values will be escaped
|
||||
when inserted into the template.
|
||||
|
||||
`-p`, `--preserve-tabs`
|
||||
|
||||
|
@ -598,7 +642,8 @@ General writer options
|
|||
: Produce output with an appropriate header and footer (e.g. a
|
||||
standalone HTML, LaTeX, TEI, or RTF file, not a fragment). This option
|
||||
is set automatically for `pdf`, `epub`, `epub3`, `fb2`, `docx`, and `odt`
|
||||
output.
|
||||
output. For `native` output, this option causes metadata to
|
||||
be included; otherwise, metadata is suppressed.
|
||||
|
||||
`--template=`*FILE*
|
||||
|
||||
|
@ -795,9 +840,10 @@ Options affecting specific writers
|
|||
|
||||
`--ascii`
|
||||
|
||||
: Use only ASCII characters in output. Currently supported only for
|
||||
HTML and DocBook output (which uses numerical entities instead of
|
||||
UTF-8 when this option is selected).
|
||||
: Use only ASCII characters in output. Currently supported for
|
||||
XML and HTML formats (which use numerical entities instead of
|
||||
UTF-8 when this option is selected) and for groff ms and man
|
||||
(which use hexadecimal escapes).
|
||||
|
||||
`--reference-links`
|
||||
|
||||
|
@ -1171,54 +1217,8 @@ of the following options.
|
|||
not specified, a link to the KaTeX CDN will be inserted. Note that this
|
||||
option does not imply `--katex`.
|
||||
|
||||
`-m` [*URL*], `--latexmathml`[`=`*URL*]
|
||||
|
||||
: *Deprecated.*
|
||||
Use the [LaTeXMathML] script to display embedded TeX math in HTML output.
|
||||
TeX math will be displayed between `$` or `$$` characters and put in
|
||||
`<span>` tags with class `LaTeX`. The LaTeXMathML JavaScript will then
|
||||
change it to MathML. Note that currently only Firefox and Safari
|
||||
(and select e-book readers) natively support MathML.
|
||||
To insert a link the `LaTeXMathML.js` script, provide a *URL*.
|
||||
|
||||
`--jsmath`[`=`*URL*]
|
||||
|
||||
: *Deprecated.*
|
||||
Use [jsMath] (the predecessor of MathJax) to display embedded TeX
|
||||
math in HTML output. TeX math will be put inside `<span>` tags
|
||||
(for inline math) or `<div>` tags (for display math) with class
|
||||
`math` and rendered by the jsMath script. The *URL* should point to
|
||||
the script (e.g. `jsMath/easy/load.js`); if provided, it will be linked
|
||||
to in the header of standalone HTML documents. If a *URL* is not provided,
|
||||
no link to the jsMath load script will be inserted; it is then
|
||||
up to the author to provide such a link in the HTML template.
|
||||
|
||||
`--gladtex`
|
||||
|
||||
: *Deprecated.*
|
||||
Enclose TeX math in `<eq>` tags in HTML output. The resulting HTML
|
||||
can then be processed by [gladTeX] to produce images of the typeset
|
||||
formulas and an HTML file with links to these images.
|
||||
So, the procedure is:
|
||||
|
||||
pandoc -s --gladtex input.md -o myfile.htex
|
||||
gladtex -d myfile-images myfile.htex
|
||||
# produces myfile.html and images in myfile-images
|
||||
|
||||
`--mimetex`[`=`*URL*]
|
||||
|
||||
: *Deprecated.*
|
||||
Render TeX math using the [mimeTeX] CGI script, which generates an
|
||||
image for each TeX formula. This should work in all browsers. If
|
||||
*URL* is not specified, it is assumed that the script is at
|
||||
`/cgi-bin/mimetex.cgi`.
|
||||
|
||||
[MathML]: http://www.w3.org/Math/
|
||||
[LaTeXMathML]: http://math.etsu.edu/LaTeXMathML/
|
||||
[jsMath]: http://www.math.union.edu/~dpvc/jsmath/
|
||||
[MathJax]: https://www.mathjax.org
|
||||
[gladTeX]: http://ans.hsh.no/home/mgg/gladtex/
|
||||
[mimeTeX]: http://www.forkosh.com/mimetex.html
|
||||
[KaTeX]: https://github.com/Khan/KaTeX
|
||||
|
||||
Options for wrapper scripts
|
||||
|
@ -1266,23 +1266,22 @@ directory (see `--data-dir`, above). *Exceptions:*
|
|||
- For `pdf` output, customize the `default.latex` template
|
||||
(or the `default.context` template, if you use `-t context`,
|
||||
or the `default.ms` template, if you use `-t ms`, or the
|
||||
`default.html5` template, if you use `-t html5`).
|
||||
`default.html` template, if you use `-t html`).
|
||||
- `docx` has no template (however, you can use
|
||||
`--reference-doc` to customize the output).
|
||||
|
||||
Templates contain *variables*, which allow for the inclusion of
|
||||
arbitrary information at any point in the file. Variables may be set
|
||||
within the document using [YAML metadata blocks][Extension:
|
||||
`yaml_metadata_block`]. They may also be set at the
|
||||
command line using the `-V/--variable` option: variables set in this
|
||||
way override metadata fields with the same name.
|
||||
arbitrary information at any point in the file. They may be set at the
|
||||
command line using the `-V/--variable` option. If a variable is not set,
|
||||
pandoc will look for the key in the document's metadata – which can be set
|
||||
using either [YAML metadata blocks][Extension: `yaml_metadata_block`]
|
||||
or with the `--metadata` option.
|
||||
|
||||
Variables set by pandoc
|
||||
-----------------------
|
||||
|
||||
Some variables are set automatically by pandoc. These vary somewhat
|
||||
depending on the output format, but include metadata fields as well
|
||||
as the following:
|
||||
depending on the output format, but include the following:
|
||||
|
||||
`sourcefile`, `outputfile`
|
||||
: source and destination filenames, as given on the command line.
|
||||
|
@ -1467,6 +1466,9 @@ LaTeX variables are used when [creating a PDF].
|
|||
: option for document class, e.g. `oneside`; may be repeated
|
||||
for multiple options
|
||||
|
||||
`beameroption`
|
||||
: In beamer, add extra beamer option with `\setbeameroption{}`
|
||||
|
||||
`geometry`
|
||||
: option for [`geometry`] package, e.g. `margin=1in`;
|
||||
may be repeated for multiple options
|
||||
|
@ -1624,12 +1626,22 @@ Variables for ConTeXt
|
|||
`lof`, `lot`
|
||||
: include list of figures, list of tables
|
||||
|
||||
`pdfa`
|
||||
: adds to the preamble the setup necessary to generate PDF/A-1b:2005.
|
||||
To successfully generate PDF/A the required ICC color profiles have to
|
||||
be available and the content and all included files (such as images)
|
||||
have to be standard conforming. The ICC profiles can be obtained
|
||||
from [ConTeXt ICC Profiles]. See also [ConTeXt PDFA] for more
|
||||
details.
|
||||
|
||||
[ConTeXt Paper Setup]: http://wiki.contextgarden.net/PaperSetup
|
||||
[ConTeXt Layout]: http://wiki.contextgarden.net/Layout
|
||||
[ConTeXt Font Switching]: http://wiki.contextgarden.net/Font_Switching
|
||||
[ConTeXt Color]: http://wiki.contextgarden.net/Color
|
||||
[ConTeXt Headers and Footers]: http://wiki.contextgarden.net/Headers_and_Footers
|
||||
[ConTeXt Indentation]: http://wiki.contextgarden.net/Indentation
|
||||
[ConTeXt ICC Profiles]: http://wiki.contextgarden.net/PDFX#ICC_profiles
|
||||
[ConTeXt PDFA]: http://wiki.contextgarden.net/PDF/A
|
||||
[`setupwhitespace`]: http://wiki.contextgarden.net/Command/setupwhitespace
|
||||
[`setupinterlinespace`]: http://wiki.contextgarden.net/Command/setupinterlinespace
|
||||
[`setuppagenumbering`]: http://wiki.contextgarden.net/Command/setuppagenumbering
|
||||
|
@ -1689,10 +1701,24 @@ Templates may contain conditionals. The syntax is as follows:
|
|||
Y
|
||||
$endif$
|
||||
|
||||
This will include `X` in the template if `variable` has a non-null
|
||||
value; otherwise it will include `Y`. `X` and `Y` are placeholders for
|
||||
any valid template text, and may include interpolated variables or other
|
||||
conditionals. The `$else$` section may be omitted.
|
||||
This will include `X` in the template if `variable` has a truthy
|
||||
value; otherwise it will include `Y`. Here a truthy value is any
|
||||
of the following:
|
||||
|
||||
- a string that is not entirely white space,
|
||||
- a non-empty array where the first value is truthy,
|
||||
- any number (including zero),
|
||||
- any object,
|
||||
- the boolean `true` (to specify the boolean `true`
|
||||
value using YAML metadata or the `--metadata` flag,
|
||||
use `y`, `Y`, `yes`, `Yes`, `YES`, `true`, `True`,
|
||||
`TRUE`, `on`, `On`, or `ON`; with the `--variable`
|
||||
flag, simply omit a value for the variable, e.g.
|
||||
`--variable draft`).
|
||||
|
||||
`X` and `Y` are placeholders for any valid template text,
|
||||
and may include interpolated variables or other conditionals.
|
||||
The `$else$` section may be omitted.
|
||||
|
||||
When variables can have multiple values (for example, `author` in
|
||||
a multi-author document), you can use the `$for$` keyword:
|
||||
|
@ -1954,9 +1980,9 @@ ordinary HTML (without bird tracks).
|
|||
writes HTML with the Haskell code in bird tracks, so it can be copied
|
||||
and pasted as literate Haskell source.
|
||||
|
||||
Note that GHC expects the bird tracks in the first column, so indentend literate
|
||||
code blocks (e.g. inside an itemized environment) will not be picked up by the
|
||||
Haskell compiler.
|
||||
Note that GHC expects the bird tracks in the first column, so indented
|
||||
literate code blocks (e.g. inside an itemized environment) will not be
|
||||
picked up by the Haskell compiler.
|
||||
|
||||
Other extensions
|
||||
----------------
|
||||
|
@ -1972,7 +1998,7 @@ input formats
|
|||
: `docx`, `html`
|
||||
|
||||
output formats
|
||||
: `markdown`, `docx`, `odt`, `opendocument`, `html`
|
||||
: `docx`, `odt`, `opendocument`, `html`
|
||||
|
||||
#### Extension: `styles` #### {#ext-styles}
|
||||
|
||||
|
@ -3086,7 +3112,8 @@ the [`raw_attribute` extension](#extension-raw_attribute)), or it
|
|||
will be interpreted as markdown. For example:
|
||||
|
||||
header-includes:
|
||||
- ```{=latex}
|
||||
- |
|
||||
```{=latex}
|
||||
\let\oldsection\section
|
||||
\renewcommand{\section}[1]{\clearpage\oldsection{#1}}
|
||||
```
|
||||
|
@ -3405,9 +3432,24 @@ And the following produces a raw `html` inline element:
|
|||
|
||||
This is `<a>html</a>`{=html}
|
||||
|
||||
This can be useful to insert raw xml into `docx` documents, e.g.
|
||||
a pagebreak:
|
||||
|
||||
```{=openxml}
|
||||
<w:p>
|
||||
<w:r>
|
||||
<w:br w:type="page"/>
|
||||
</w:r>
|
||||
</w:p>
|
||||
```
|
||||
|
||||
The format name should match the target format name (see
|
||||
`-t/--to`, above, for a list, or use `pandoc
|
||||
--list-output-formats`).
|
||||
--list-output-formats`). Use `openxml` for `docx` output,
|
||||
`opendocument` for `odt` output, `html5` for `epub3` output,
|
||||
`html4` for `epub2` output, and `latex`, `beamer`,
|
||||
`ms`, or `html5` for `pdf` output (depending on what you
|
||||
use for `--pdf-engine`).
|
||||
|
||||
This extension presupposes that the relevant kind of
|
||||
inline code or fenced code block is enabled. Thus, for
|
||||
|
@ -4384,6 +4426,50 @@ the [Beamer User's Guide] may also be used: `allowdisplaybreaks`,
|
|||
`allowframebreaks`, `b`, `c`, `t`, `environment`, `label`, `plain`,
|
||||
`shrink`.
|
||||
|
||||
Background in reveal.js
|
||||
-----------------------
|
||||
|
||||
Background images can be added to self-contained reveal.js slideshows.
|
||||
|
||||
For the same image on every slide, use the reveal.js configuration
|
||||
option `parallaxBackgroundImage` either in the YAML metadata block
|
||||
or as a command-line variable. You can also set
|
||||
`parallaxBackgroundHorizontal` and `parallaxBackgroundVertical` the same
|
||||
way and must also set `parallaxBackgroundSize` to have your values
|
||||
take effect.
|
||||
|
||||
To set an image for a particular slide, add
|
||||
`{data-background-image="/path/to/image"}`
|
||||
to the first slide-level header on the slide (which may even be empty).
|
||||
|
||||
In reveal.js's overview mode, the parallaxBackgroundImage will show up
|
||||
only on the first slide.
|
||||
|
||||
Other background settings also work on individual slides, including
|
||||
`data-background-size`, `data-background-repeat`, `data-background-color`,
|
||||
`data-transition`, and `data-transition-speed`.
|
||||
|
||||
See the [reveal.js
|
||||
documentation](https://github.com/hakimel/reveal.js#slide-backgrounds)
|
||||
for more details.
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
---
|
||||
title: My Slideshow
|
||||
parallaxBackgroundImage: /path/to/my/background_image.png
|
||||
---
|
||||
|
||||
## Slide One
|
||||
|
||||
Slide 1 has background_image.png as its background.
|
||||
|
||||
## {data-background-image="/path/to/special_image.jpg"}
|
||||
|
||||
Slide 2 has a special image for its background, even though the header has no content.
|
||||
```
|
||||
|
||||
Creating EPUBs with pandoc
|
||||
==========================
|
||||
|
||||
|
@ -4562,8 +4648,8 @@ And with the extension:
|
|||
:::
|
||||
|
||||
::: {custom-style="BodyText"}
|
||||
This is text with an [*emphasized*]{custom-style="Emphatic"} text style.
|
||||
And this is text with a [**strengthened**]{custom-style="Strengthened"}
|
||||
This is text with an [emphasized]{custom-style="Emphatic"} text style.
|
||||
And this is text with a [strengthened]{custom-style="Strengthened"}
|
||||
text style.
|
||||
:::
|
||||
|
||||
|
|
19
Makefile
19
Makefile
|
@ -54,9 +54,9 @@ debpkg: man/pandoc.1
|
|||
macospkg: man/pandoc.1
|
||||
./macos/make_macos_package.sh
|
||||
|
||||
winpkg: pandoc-$(version)-windows.msi pandoc-$(version)-windows.zip
|
||||
winpkg: pandoc-$(version)-windows-i386.msi pandoc-$(version)-windows-i386.zip pandoc-$(version)-windows-x86_64.msi pandoc-$(version)-windows-x86_64.zip
|
||||
|
||||
pandoc-$(version)-windows.zip: pandoc-$(version)-windows.msi
|
||||
pandoc-$(version)-windows-%.zip: pandoc-$(version)-windows-%.msi
|
||||
-rm -rf wintmp && \
|
||||
msiextract -C wintmp $< && \
|
||||
cd wintmp/"Program Files" && \
|
||||
|
@ -66,10 +66,17 @@ pandoc-$(version)-windows.zip: pandoc-$(version)-windows.msi
|
|||
cd ../.. && \
|
||||
rm -rf wintmp
|
||||
|
||||
pandoc-$(version)-windows.msi:
|
||||
wget 'https://ci.appveyor.com/api/projects/jgm/pandoc/artifacts/windows/pandoc-windows-i386.msi?branch=$(BRANCH)' -O pandoc.msi && \
|
||||
osslsigncode sign -pkcs12 ~/Private/ComodoCodeSigning.exp2019.p12 -in pandoc.msi -i http://johnmacfarlane.net/ -t http://timestamp.comodoca.com/ -out $@ -askpass
|
||||
rm pandoc.msi
|
||||
pandoc-$(version)-windows-%.msi: pandoc-windows-%.msi
|
||||
osslsigncode sign -pkcs12 ~/Private/ComodoCodeSigning.exp2019.p12 -in $< -i http://johnmacfarlane.net/ -t http://timestamp.comodoca.com/ -out $@ -askpass
|
||||
rm $<
|
||||
|
||||
pandoc-windows-i386.msi:
|
||||
JOBID=$(shell curl 'https://ci.appveyor.com/api/projects/jgm/pandoc' | jq -r '.build.jobs[0].jobId') && \
|
||||
wget "https://ci.appveyor.com/api/buildjobs/$$JOBID/artifacts/windows%2F$@" -O $@
|
||||
|
||||
pandoc-windows-x86_64.msi:
|
||||
JOBID=$(shell curl 'https://ci.appveyor.com/api/projects/jgm/pandoc' | jq -r '.build.jobs[1].jobId') && \
|
||||
wget "https://ci.appveyor.com/api/buildjobs/$$JOBID/artifacts/windows%2F$@" -O $@
|
||||
|
||||
man/pandoc.1: MANUAL.txt man/pandoc.1.template
|
||||
pandoc $< -f markdown-smart -t man -s --template man/pandoc.1.template \
|
||||
|
|
220
README.md
220
README.md
|
@ -22,93 +22,153 @@ groups](https://img.shields.io/badge/pandoc-discuss-red.svg?style=social)](https
|
|||
|
||||
## The universal markup converter
|
||||
|
||||
<div id="description">
|
||||
Pandoc is a [Haskell](http://haskell.org) library for converting from
|
||||
one markup format to another, and a command-line tool that uses this
|
||||
library. It can convert *from*
|
||||
|
||||
Pandoc is a [Haskell](https://www.haskell.org) library for converting
|
||||
from one markup format to another, and a command-line tool that uses
|
||||
this library.
|
||||
<div id="input-formats">
|
||||
|
||||
Pandoc can read
|
||||
[Markdown](http://daringfireball.net/projects/markdown/),
|
||||
[CommonMark](http://commonmark.org), [PHP Markdown
|
||||
Extra](https://michelf.ca/projects/php-markdown/extra/),
|
||||
[GitHub-Flavored
|
||||
Markdown](https://help.github.com/articles/github-flavored-markdown/),
|
||||
[MultiMarkdown](http://fletcherpenney.net/multimarkdown/), and (subsets
|
||||
of) [Textile](http://redcloth.org/textile),
|
||||
[reStructuredText](http://docutils.sourceforge.net/docs/ref/rst/introduction.html),
|
||||
[HTML](http://www.w3.org/html/), [LaTeX](http://latex-project.org),
|
||||
[MediaWiki markup](https://www.mediawiki.org/wiki/Help:Formatting),
|
||||
[TWiki markup](http://twiki.org/cgi-bin/view/TWiki/TextFormattingRules),
|
||||
[TikiWiki
|
||||
markup](https://doc.tiki.org/Wiki-Syntax-Text#The_Markup_Language_Wiki-Syntax),
|
||||
[Creole 1.0](http://www.wikicreole.org/wiki/Creole1.0), [Haddock
|
||||
markup](https://www.haskell.org/haddock/doc/html/ch03s08.html),
|
||||
[OPML](http://dev.opml.org/spec2.html), [Emacs Org
|
||||
mode](http://orgmode.org), [DocBook](http://docbook.org),
|
||||
[JATS](https://jats.nlm.nih.gov),
|
||||
[Muse](https://amusewiki.org/library/manual),
|
||||
[txt2tags](http://txt2tags.org), [Vimwiki](https://vimwiki.github.io),
|
||||
[EPUB](http://idpf.org/epub),
|
||||
[ODT](http://en.wikipedia.org/wiki/OpenDocument), and [Word
|
||||
docx](https://en.wikipedia.org/wiki/Office_Open_XML).
|
||||
- `commonmark` ([CommonMark](http://commonmark.org) Markdown)
|
||||
- `creole` ([Creole 1.0](http://www.wikicreole.org/wiki/Creole1.0))
|
||||
- `docbook` ([DocBook](http://docbook.org))
|
||||
- `docx` ([Word docx](https://en.wikipedia.org/wiki/Office_Open_XML))
|
||||
- `epub` ([EPUB](http://idpf.org/epub))
|
||||
- `fb2`
|
||||
([FictionBook2](http://www.fictionbook.org/index.php/Eng:XML_Schema_Fictionbook_2.1)
|
||||
e-book)
|
||||
- `gfm` ([GitHub-Flavored
|
||||
Markdown](https://help.github.com/articles/github-flavored-markdown/)),
|
||||
or `markdown_github`, which provides deprecated and less accurate
|
||||
support for Github-Flavored Markdown; please use `gfm` instead,
|
||||
unless you need to use extensions other than `smart`.
|
||||
- `haddock` ([Haddock
|
||||
markup](https://www.haskell.org/haddock/doc/html/ch03s08.html))
|
||||
- `html` ([HTML](http://www.w3.org/html/))
|
||||
- `jats` ([JATS](https://jats.nlm.nih.gov) XML)
|
||||
- `json` (JSON version of native AST)
|
||||
- `latex` ([LaTeX](http://latex-project.org))
|
||||
- `markdown` ([Pandoc’s Markdown](#pandocs-markdown))
|
||||
- `markdown_mmd`
|
||||
([MultiMarkdown](http://fletcherpenney.net/multimarkdown/))
|
||||
- `markdown_phpextra` ([PHP Markdown
|
||||
Extra](https://michelf.ca/projects/php-markdown/extra/))
|
||||
- `markdown_strict` (original unextended
|
||||
[Markdown](http://daringfireball.net/projects/markdown/))
|
||||
- `mediawiki` ([MediaWiki
|
||||
markup](https://www.mediawiki.org/wiki/Help:Formatting))
|
||||
- `muse` ([Muse](https://amusewiki.org/library/manual))
|
||||
- `native` (native Haskell)
|
||||
- `odt` ([ODT](http://en.wikipedia.org/wiki/OpenDocument))
|
||||
- `opml` ([OPML](http://dev.opml.org/spec2.html))
|
||||
- `org` ([Emacs Org mode](http://orgmode.org))
|
||||
- `rst`
|
||||
([reStructuredText](http://docutils.sourceforge.net/docs/ref/rst/introduction.html))
|
||||
- `t2t` ([txt2tags](http://txt2tags.org))
|
||||
- `textile` ([Textile](http://redcloth.org/textile))
|
||||
- `tikiwiki` ([TikiWiki
|
||||
markup](https://doc.tiki.org/Wiki-Syntax-Text#The_Markup_Language_Wiki-Syntax))
|
||||
- `twiki` ([TWiki
|
||||
markup](http://twiki.org/cgi-bin/view/TWiki/TextFormattingRules))
|
||||
- `vimwiki` ([Vimwiki](https://vimwiki.github.io))
|
||||
|
||||
Pandoc can write plain text,
|
||||
[Markdown](http://daringfireball.net/projects/markdown/),
|
||||
[CommonMark](http://commonmark.org), [PHP Markdown
|
||||
Extra](https://michelf.ca/projects/php-markdown/extra/),
|
||||
[GitHub-Flavored
|
||||
Markdown](https://help.github.com/articles/github-flavored-markdown/),
|
||||
[MultiMarkdown](http://fletcherpenney.net/multimarkdown/),
|
||||
[reStructuredText](http://docutils.sourceforge.net/docs/ref/rst/introduction.html),
|
||||
[XHTML](http://www.w3.org/TR/xhtml1/),
|
||||
[HTML5](http://www.w3.org/TR/html5/), [LaTeX](http://latex-project.org)
|
||||
(including [`beamer`](https://ctan.org/pkg/beamer) slide shows),
|
||||
[ConTeXt](http://www.contextgarden.net/),
|
||||
[RTF](http://en.wikipedia.org/wiki/Rich_Text_Format),
|
||||
[OPML](http://dev.opml.org/spec2.html), [DocBook](http://docbook.org),
|
||||
[JATS](https://jats.nlm.nih.gov),
|
||||
[OpenDocument](http://opendocument.xml.org),
|
||||
[ODT](http://en.wikipedia.org/wiki/OpenDocument), [Word
|
||||
docx](https://en.wikipedia.org/wiki/Office_Open_XML), [GNU
|
||||
Texinfo](http://www.gnu.org/software/texinfo/), [MediaWiki
|
||||
markup](https://www.mediawiki.org/wiki/Help:Formatting), [DokuWiki
|
||||
markup](https://www.dokuwiki.org/dokuwiki), [ZimWiki
|
||||
markup](http://zim-wiki.org/manual/Help/Wiki_Syntax.html), [Haddock
|
||||
markup](https://www.haskell.org/haddock/doc/html/ch03s08.html),
|
||||
[EPUB](http://idpf.org/epub) (v2 or v3),
|
||||
[FictionBook2](http://www.fictionbook.org/index.php/Eng:XML_Schema_Fictionbook_2.1),
|
||||
[Textile](http://redcloth.org/textile), [groff
|
||||
man](http://man7.org/linux/man-pages/man7/groff_man.7.html), [groff
|
||||
ms](http://man7.org/linux/man-pages/man7/groff_ms.7.html), [Emacs Org
|
||||
mode](http://orgmode.org),
|
||||
[AsciiDoc](http://www.methods.co.nz/asciidoc/), [InDesign
|
||||
ICML](http://wwwimages.adobe.com/www.adobe.com/content/dam/acom/en/devnet/indesign/sdk/cs6/idml/idml-cookbook.pdf),
|
||||
[TEI Simple](https://github.com/TEIC/TEI-Simple),
|
||||
[Muse](https://amusewiki.org/library/manual),
|
||||
[PowerPoint](https://en.wikipedia.org/wiki/Microsoft_PowerPoint) slide
|
||||
shows and [Slidy](http://www.w3.org/Talks/Tools/Slidy/),
|
||||
[Slideous](http://goessner.net/articles/slideous/),
|
||||
[DZSlides](http://paulrouget.com/dzslides/),
|
||||
[reveal.js](http://lab.hakim.se/reveal-js/) or
|
||||
[S5](http://meyerweb.com/eric/tools/s5/) HTML slide shows. It can also
|
||||
produce [PDF](https://www.adobe.com/pdf/) output on systems where LaTeX,
|
||||
ConTeXt, `pdfroff`, `wkhtmltopdf`, `prince`, or `weasyprint` is
|
||||
installed.
|
||||
</div>
|
||||
|
||||
It can convert *to*
|
||||
|
||||
<div id="output-formats">
|
||||
|
||||
- `asciidoc` ([AsciiDoc](http://www.methods.co.nz/asciidoc/))
|
||||
- `beamer` ([LaTeX beamer](https://ctan.org/pkg/beamer) slide show)
|
||||
- `commonmark` ([CommonMark](http://commonmark.org) Markdown)
|
||||
- `context` ([ConTeXt](http://www.contextgarden.net/))
|
||||
- `docbook` or `docbook4` ([DocBook](http://docbook.org) 4)
|
||||
- `docbook5` (DocBook 5)
|
||||
- `docx` ([Word docx](https://en.wikipedia.org/wiki/Office_Open_XML))
|
||||
- `dokuwiki` ([DokuWiki markup](https://www.dokuwiki.org/dokuwiki))
|
||||
- `epub` or `epub3` ([EPUB](http://idpf.org/epub) v3 book)
|
||||
- `epub2` (EPUB v2)
|
||||
- `fb2`
|
||||
([FictionBook2](http://www.fictionbook.org/index.php/Eng:XML_Schema_Fictionbook_2.1)
|
||||
e-book)
|
||||
- `gfm` ([GitHub-Flavored
|
||||
Markdown](https://help.github.com/articles/github-flavored-markdown/)),
|
||||
or `markdown_github`, which provides deprecated and less accurate
|
||||
support for Github-Flavored Markdown; please use `gfm` instead,
|
||||
unless you use extensions that do not work with `gfm`.
|
||||
- `haddock` ([Haddock
|
||||
markup](https://www.haskell.org/haddock/doc/html/ch03s08.html))
|
||||
- `html` or `html5` ([HTML](http://www.w3.org/html/), i.e.
|
||||
[HTML5](http://www.w3.org/TR/html5/)/XHTML [polyglot
|
||||
markup](https://www.w3.org/TR/html-polyglot/))
|
||||
- `html4` ([XHTML](http://www.w3.org/TR/xhtml1/) 1.0 Transitional)
|
||||
- `icml` ([InDesign
|
||||
ICML](http://wwwimages.adobe.com/www.adobe.com/content/dam/acom/en/devnet/indesign/sdk/cs6/idml/idml-cookbook.pdf))
|
||||
- `jats` ([JATS](https://jats.nlm.nih.gov) XML)
|
||||
- `json` (JSON version of native AST)
|
||||
- `latex` ([LaTeX](http://latex-project.org))
|
||||
- `man` ([groff
|
||||
man](http://man7.org/linux/man-pages/man7/groff_man.7.html))
|
||||
- `markdown` ([Pandoc’s Markdown](#pandocs-markdown))
|
||||
- `markdown_mmd`
|
||||
([MultiMarkdown](http://fletcherpenney.net/multimarkdown/))
|
||||
- `markdown_phpextra` ([PHP Markdown
|
||||
Extra](https://michelf.ca/projects/php-markdown/extra/))
|
||||
- `markdown_strict` (original unextended
|
||||
[Markdown](http://daringfireball.net/projects/markdown/))
|
||||
- `mediawiki` ([MediaWiki
|
||||
markup](https://www.mediawiki.org/wiki/Help:Formatting))
|
||||
- `ms` ([groff
|
||||
ms](http://man7.org/linux/man-pages/man7/groff_ms.7.html))
|
||||
- `muse` ([Muse](https://amusewiki.org/library/manual)),
|
||||
- `native` (native Haskell),
|
||||
- `odt` ([OpenOffice text
|
||||
document](http://en.wikipedia.org/wiki/OpenDocument))
|
||||
- `opml` ([OPML](http://dev.opml.org/spec2.html))
|
||||
- `opendocument` ([OpenDocument](http://opendocument.xml.org))
|
||||
- `org` ([Emacs Org mode](http://orgmode.org))
|
||||
- `plain` (plain text),
|
||||
- `pptx`
|
||||
([PowerPoint](https://en.wikipedia.org/wiki/Microsoft_PowerPoint)
|
||||
slide show)
|
||||
- `rst`
|
||||
([reStructuredText](http://docutils.sourceforge.net/docs/ref/rst/introduction.html))
|
||||
- `rtf` ([Rich Text
|
||||
Format](http://en.wikipedia.org/wiki/Rich_Text_Format))
|
||||
- `texinfo` ([GNU Texinfo](http://www.gnu.org/software/texinfo/))
|
||||
- `textile` ([Textile](http://redcloth.org/textile))
|
||||
- `slideous` ([Slideous](http://goessner.net/articles/slideous/) HTML
|
||||
and JavaScript slide show)
|
||||
- `slidy` ([Slidy](http://www.w3.org/Talks/Tools/Slidy/) HTML and
|
||||
JavaScript slide show)
|
||||
- `dzslides` ([DZSlides](http://paulrouget.com/dzslides/) HTML5 +
|
||||
JavaScript slide show),
|
||||
- `revealjs` ([reveal.js](http://lab.hakim.se/reveal-js/) HTML5 +
|
||||
JavaScript slide show)
|
||||
- `s5` ([S5](http://meyerweb.com/eric/tools/s5/) HTML and JavaScript
|
||||
slide show)
|
||||
- `tei` ([TEI Simple](https://github.com/TEIC/TEI-Simple))
|
||||
- `zimwiki` ([ZimWiki
|
||||
markup](http://zim-wiki.org/manual/Help/Wiki_Syntax.html))
|
||||
- the path of a custom lua writer, see [Custom
|
||||
writers](#custom-writers) below
|
||||
|
||||
</div>
|
||||
|
||||
Pandoc can also produce PDF output via LaTeX, Groff ms, or HTML.
|
||||
|
||||
Pandoc’s enhanced version of Markdown includes syntax for tables,
|
||||
definition lists, metadata blocks, `Div` blocks, footnotes and
|
||||
citations, embedded LaTeX (including math), Markdown inside HTML block
|
||||
elements, and much more. These enhancements, described further under
|
||||
Pandoc’s Markdown, can be disabled using the `markdown_strict` format.
|
||||
definition lists, metadata blocks, footnotes, citations, math, and much
|
||||
more. See the User’s Manual below under [Pandoc’s
|
||||
Markdown](https://pandoc.org/MANUAL.html#pandocs-markdown).
|
||||
|
||||
Pandoc has a modular design: it consists of a set of readers, which
|
||||
parse text in a given format and produce a native representation of the
|
||||
document (like an *abstract syntax tree* or AST), and a set of writers,
|
||||
which convert this native representation into a target format. Thus,
|
||||
adding an input or output format requires only adding a reader or
|
||||
writer. Users can also run custom [pandoc
|
||||
filters](http://pandoc.org/filters.html) to modify the intermediate AST.
|
||||
document (an *abstract syntax tree* or AST), and a set of writers, which
|
||||
convert this native representation into a target format. Thus, adding an
|
||||
input or output format requires only adding a reader or writer. Users
|
||||
can also run custom pandoc filters to modify the intermediate AST (see
|
||||
the documentation for [filters](https://pandoc.org/filters.html) and
|
||||
[lua filters](https://pandoc.org/lua-filters.html)).
|
||||
|
||||
Because pandoc’s intermediate representation of a document is less
|
||||
expressive than many of the formats it converts between, one should not
|
||||
|
@ -120,8 +180,6 @@ While conversions from pandoc’s Markdown to all formats aspire to be
|
|||
perfect, conversions from formats more expressive than pandoc’s Markdown
|
||||
can be expected to be lossy.
|
||||
|
||||
</div>
|
||||
|
||||
## Installing
|
||||
|
||||
Here’s [how to install pandoc](INSTALL.md).
|
||||
|
|
|
@ -19,9 +19,44 @@ Pandoc
|
|||
The universal markup converter
|
||||
------------------------------
|
||||
|
||||
::: description
|
||||
Pandoc is a [Haskell] library for converting from one markup format to
|
||||
another, and a command-line tool that uses this library. It can convert *from*
|
||||
|
||||
::: {#input-formats}
|
||||
:::
|
||||
|
||||
It can convert *to*
|
||||
|
||||
::: {#output-formats}
|
||||
:::
|
||||
|
||||
Pandoc can also produce PDF output via LaTeX, Groff ms, or HTML.
|
||||
|
||||
Pandoc's enhanced version of Markdown includes syntax for tables,
|
||||
definition lists, metadata blocks, footnotes, citations, math,
|
||||
and much more. See the User's Manual below under
|
||||
[Pandoc's Markdown](https://pandoc.org/MANUAL.html#pandocs-markdown).
|
||||
|
||||
Pandoc has a modular design: it consists of a set of readers, which parse
|
||||
text in a given format and produce a native representation of the document
|
||||
(an _abstract syntax tree_ or AST), and a set of writers, which convert
|
||||
this native representation into a target format. Thus, adding an input
|
||||
or output format requires only adding a reader or writer. Users can also
|
||||
run custom pandoc filters to modify the intermediate AST (see
|
||||
the documentation for [filters](https://pandoc.org/filters.html)
|
||||
and [lua filters](https://pandoc.org/lua-filters.html)).
|
||||
|
||||
Because pandoc's intermediate representation of a document is less
|
||||
expressive than many of the formats it converts between, one should
|
||||
not expect perfect conversions between every format and every other.
|
||||
Pandoc attempts to preserve the structural elements of a document, but
|
||||
not formatting details such as margin size. And some document elements,
|
||||
such as complex tables, may not fit into pandoc's simple document
|
||||
model. While conversions from pandoc's Markdown to all formats aspire
|
||||
to be perfect, conversions from formats more expressive than pandoc's
|
||||
Markdown can be expected to be lossy.
|
||||
|
||||
|
||||
Installing
|
||||
----------
|
||||
|
||||
|
@ -52,3 +87,4 @@ License
|
|||
any kind. (See COPYRIGHT for full copyright and warranty notices.)
|
||||
|
||||
[GPL]: http://www.gnu.org/copyleft/gpl.html "GNU General Public License"
|
||||
[Haskell]: http://haskell.org
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
image: Visual Studio 2013
|
||||
clone_folder: "c:\\pandoc"
|
||||
environment:
|
||||
global:
|
||||
|
@ -10,10 +11,10 @@ environment:
|
|||
STACK_ROOT: "c:\\sr32"
|
||||
STACK: "%STACK_ROOT%\\stack.exe"
|
||||
STACK_FLAGS: "--flag=hslua:lua_32bits"
|
||||
# - STACK_VERSION: "windows-x86_64"
|
||||
# STACK_ROOT: "c:\\sr64"
|
||||
# STACK: "%STACK_ROOT%\\stack.exe"
|
||||
# STACK_FLAGS: ""
|
||||
- STACK_VERSION: "windows-x86_64"
|
||||
STACK_ROOT: "c:\\sr64"
|
||||
STACK: "%STACK_ROOT%\\stack.exe"
|
||||
STACK_FLAGS: ""
|
||||
|
||||
skip_commits:
|
||||
files:
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-
|
||||
Copyright (C) 2012-2018 John MacFarlane <jgm@berkeley.edu>
|
||||
|
@ -16,6 +17,7 @@ You should have received a copy of the GNU General Public License
|
|||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
-}
|
||||
import Prelude
|
||||
import Text.Pandoc
|
||||
import qualified Text.Pandoc.UTF8 as UTF8
|
||||
import qualified Data.ByteString as B
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
import Prelude
|
||||
import Weigh
|
||||
import Text.Pandoc
|
||||
import Data.Text (Text)
|
||||
|
|
709
changelog
709
changelog
|
@ -1,3 +1,708 @@
|
|||
pandoc (2.2)
|
||||
|
||||
* New input format: `fb2` (FictionBook2) (Alexander Krotov).
|
||||
|
||||
* Make `--ascii` work for all XML formats (ICML, OPML, JATS,...),
|
||||
and for `ms` and `man`.
|
||||
|
||||
* Remove deprecated `--latexmathml`, `--gladtex`, `--mimetex`, `--jsmath`, `-m`,
|
||||
`--asciimathml` options.
|
||||
|
||||
* New module Text.Pandoc.Readers.FB2, exporting readFB2 (Alexander
|
||||
Krotov, API change).
|
||||
|
||||
* Markdown reader:
|
||||
|
||||
+ Allow empty key-value attributes, like `title=""` (#2944).
|
||||
+ Handle table w/o following blank line in fenced div (#4560).
|
||||
+ Remove "fallback" for `doubleQuote` parser. Previously the
|
||||
parser tried to be efficient -- if no end double quote was found,
|
||||
it would just return the contents. But this could backfire in a
|
||||
case `**this should "be bold**`, since the fallback would return
|
||||
the content `"be bold**` and the closing boldface delimiter
|
||||
would never be encountered.
|
||||
+ Improve computation of the relative width of the last column in a
|
||||
multiline table, so we can round-trip tables without constantly
|
||||
shrinking the last column.
|
||||
|
||||
* EPUB reader:
|
||||
|
||||
+ Fix images with space in file path (#4344).
|
||||
|
||||
* LaTeX reader:
|
||||
|
||||
+ Properly resolve section numbers with `\ref` and chapters (#4529).
|
||||
+ Parse sloppypar environment (#4517, Marc Schreiber).
|
||||
+ Improve handling of raw LaTeX (for markdown etc.) (#4589, #4594).
|
||||
Previously there were some bugs in how macros were handled.
|
||||
+ Support `\MakeUppercase`, `\MakeLowercase`, `\uppercase`, `\lowercase`,
|
||||
and also `\MakeTextUppercase` and `\MakeTextLowercase` from textcase
|
||||
(#4959).
|
||||
|
||||
* Textile reader:
|
||||
|
||||
+ Fixed tables with no body rows (#4513).
|
||||
Previously these raised an exception.
|
||||
|
||||
* Mediawiki reader:
|
||||
|
||||
+ Improve table parsing (#4508). This fixes detection of table
|
||||
attributes and also handles `!` characters in cells.
|
||||
|
||||
* DocBook reader:
|
||||
|
||||
+ Properly handle title in `section` element (#4526).
|
||||
Previously we just got `section_title` for `section` (though `sect1`,
|
||||
`sect2`, etc. were handled properly).
|
||||
+ Read tex math as output by asciidoctor (#4569, Joe Hermaszewski).
|
||||
|
||||
* Docx reader:
|
||||
|
||||
+ Combine adjacent CodeBlocks with the same attributes into
|
||||
a single CodeBlock. This prevents a multiline codeblock in
|
||||
Word from being read as different paragraphs.
|
||||
|
||||
* RST reader:
|
||||
|
||||
+ Allow < 3 spaces indent under directives (#4579).
|
||||
+ Fix anonymous redirects with backticks (#4598).
|
||||
|
||||
* Muse reader (Alexander Krotov):
|
||||
|
||||
+ Add support for Text::Amuse multiline headings.
|
||||
+ Add `<math>` tag support.
|
||||
+ Add support for `<biblio>` and `<play>` tags.
|
||||
+ Allow links to have empty descriptions.
|
||||
+ Require block `<literal>` tags to be on separate lines.
|
||||
+ Allow `-` in anchors.
|
||||
+ Allow verse to be indented.
|
||||
+ Allow nested footnotes.
|
||||
+ Internal improvements.
|
||||
|
||||
* Muse writer (Alexander Krotov):
|
||||
|
||||
+ Escape `>` only at the beginning of a line.
|
||||
+ Escape `]` in image title.
|
||||
+ Escape `]` brackets in URLs as `%5D`.
|
||||
+ Only escape brackets when necessary.
|
||||
+ Escape ordered list markers.
|
||||
+ Do not escape list markers unless preceded by space.
|
||||
+ Escape strings starting with space.
|
||||
+ Escape semicolons and markers after line break.
|
||||
+ Escape `;` to avoid accidental comments.
|
||||
+ Don't break headers, line blocks and tables with line breaks.
|
||||
+ Correctly output empty headings.
|
||||
+ Escape horizontal rule only if at the beginning of the line.
|
||||
+ Escape definition list terms starting with list markers.
|
||||
+ Place header IDs before header.
|
||||
+ Improve span writing.
|
||||
+ Do not join Spans in normalization.
|
||||
+ Don't align ordered list items.
|
||||
+ Remove key-value pairs from attributes before normalization.
|
||||
+ Enable `--wrap=preserve` for all tests by default.
|
||||
+ Reduced `<verbatim>` tags in output.
|
||||
+ Internal changes.
|
||||
|
||||
* RST writer:
|
||||
|
||||
+ Use more consistent indentation (#4563). Previously we
|
||||
used an odd mix of 3- and 4-space indentation. Now we use 3-space
|
||||
indentation, except for ordered lists, where indentation must
|
||||
depend on the width of the list marker.
|
||||
+ Flatten nested inlines (#4368, Francesco Occhipinti).
|
||||
Nested inlines are not valid RST syntax, so we flatten them following
|
||||
some readability criteria discussed in #4368.
|
||||
|
||||
* EPUB writer:
|
||||
|
||||
+ Ensure that `pagetitle` is always set, even when structured titles
|
||||
are used. This prevents spurious warnings about empty title
|
||||
elements (#4486).
|
||||
|
||||
* FB2 writer (Alexander Krotov):
|
||||
|
||||
+ Output links inline instead of producing notes. Previously all links
|
||||
were turned into footnotes with unclickable URLs inside.
|
||||
+ Allow emphasis and notes in titles.
|
||||
+ Don't intersperse paragraph with empty lines.
|
||||
+ Convert metadata value `abstract` to book annotation.
|
||||
+ Use `<empty-line />` for `HorizontalRule` rather than `LineBreak`.
|
||||
FB2 does not have a way to represent line breaks inside paragraphs;
|
||||
previously we used `<empty-line />` elements, but these are not allowed
|
||||
inside paragraphs.
|
||||
|
||||
* Powerpoint writer (Jesse Rosenthal):
|
||||
|
||||
+ Handle Quoted Inlines (#4532).
|
||||
+ Simplify code with `ParseXml`.
|
||||
+ Allow fallback options when looking for placeholder type.
|
||||
+ Check reference-doc for all layouts.
|
||||
+ Simplify speaker notes logic.
|
||||
+ Change notes state to a simpler per-slide value.
|
||||
+ Remove `Maybe` from `SpeakerNotes` in `Slide`. `mempty`
|
||||
means no speaker notes.
|
||||
+ Add tests for improved speaker notes.
|
||||
+ Handle speaker notes earlier in the conversion process.
|
||||
+ Keep notes with related blocks (#4477). Some blocks automatically
|
||||
split slides (imgs, tables, `column` divs). We assume that any
|
||||
speaker notes immediately following these are connected to these
|
||||
elements, and keep them with the related blocks, splitting after them.
|
||||
+ Remove `docProps/thumbnail.jpeg` in data dir (Jesse Rosenthal, #4588).
|
||||
It contained a nonfree ICC color calibration profile and is not needed
|
||||
for production of a powerpoint document.
|
||||
|
||||
* Markdown writer:
|
||||
|
||||
+ Include a blank line at the end of the row in a single-row multiline
|
||||
table, to prevent it from being interpreted as a simple table (#4578).
|
||||
|
||||
* CommonMark writer:
|
||||
|
||||
+ Correctly ignore LaTeX raw blocks when `raw_tex` is not
|
||||
enabled (#4527, quasicomputational).
|
||||
|
||||
* EPUB writer:
|
||||
|
||||
+ Add `epub:type="footnotes"` to notes section in EPUB3 (#4489).
|
||||
|
||||
* LaTeX writer:
|
||||
|
||||
+ In beamer, don't use format specifier for default ordered lists
|
||||
(#4556). This gives better results for styles that put ordered list
|
||||
markers in boxes or circles.
|
||||
+ Update `\lstinline` delimiters (#4369, Tim Parenti).
|
||||
|
||||
* Ms writer:
|
||||
|
||||
+ Use `\f[R]` rather than `\f[]` to reset font (#4552).
|
||||
+ Use `\f[BI]` and `\f[CB]` in headers, instead of `\f[I]` and `\f[C]`,
|
||||
since the header font is automatically bold (#4552).
|
||||
+ Use `\f[CB]` rather than `\f[BC]` for monospace bold (#4552).
|
||||
+ Create pdf anchor for a Div with an identifier (#4515).
|
||||
+ Escape `/` character in anchor ids (#4515).
|
||||
+ Improve escaping for anchor ids: we now use _uNNN_ instead of uNNN
|
||||
to avoid ambiguity.
|
||||
|
||||
* Man writer:
|
||||
|
||||
+ Don't escape U+2019 as `'` (#4550).
|
||||
|
||||
* Text.Pandoc.Options:
|
||||
|
||||
+ Removed `JsMath`, `LaTeXMathML`, and `GladTeX` constructors from
|
||||
`Text.Pandoc.Options.HTMLMathMethod` [API change].
|
||||
|
||||
* Text.Pandoc.Class:
|
||||
|
||||
+ `writeMedia`: unescape URI-escaping in file path. This avoids
|
||||
writing things like `file%20one.png` to the file system.
|
||||
|
||||
* Text.Pandoc.Parsing:
|
||||
|
||||
+ Fix `romanNumeral` parser (#4480). We previously accepted 'DDC'
|
||||
as 1100.
|
||||
+ `uri`: don't treat `*` characters at end as part of URI (#4561).
|
||||
|
||||
* Text.Pandoc.MIME:
|
||||
|
||||
+ Use the alias `application/eps` for EPS (#2067).
|
||||
This will ensure that we retain the eps extension after reading the
|
||||
image into a mediabag and writing it again.
|
||||
|
||||
* Text.Pandoc.PDF:
|
||||
|
||||
+ Use `withTempDir` in `html2pdf`.
|
||||
+ With `xelatex`, don't compress images until the last run (#4484).
|
||||
This saves time for image-heavy documents.
|
||||
+ Don't try to convert EPS files (#2067). `pdflatex` converts them
|
||||
itself, and JuicyPixels can't do it.
|
||||
+ For `pdflatex`, use a temp directory in the working directory.
|
||||
Otherwise we can have problems with the EPS conversion pdflatex
|
||||
tries to do, which can't operate on a file above the working
|
||||
directory without `--shell-escape`.
|
||||
|
||||
* Changes to tests to accommodate changes in pandoc-types.
|
||||
In <https://github.com/jgm/pandoc-types/pull/36> we changed
|
||||
the table builder to pad cells. This commit changes tests
|
||||
(and two readers) to accord with this behavior.
|
||||
|
||||
* Set default extensions for `beamer` same as `latex`.
|
||||
|
||||
* LaTeX template:
|
||||
|
||||
+ Add `beameroption` variable (#4359, Étienne Bersac).
|
||||
+ Use `pgfpages` package; this is needed for notes on second
|
||||
screen in beamer (Étienne Bersac).
|
||||
+ Add `background-image` variable (#4601, John Muccigrosso).
|
||||
|
||||
* reveal.js template: Add `background-image` variable (#4600,
|
||||
John Muccigrosso).
|
||||
|
||||
* ms template: Fix date. Previously `.ND` was used, but this only
|
||||
works if you have a title page, which we don't. Thanks to @teoric.
|
||||
|
||||
* Removed pragmas for unused extensions (#4506, Anabra).
|
||||
|
||||
* Fix bash completion for `--print-default-data-file` (#4549).
|
||||
Previously this looked in the filesystem, even if pandoc
|
||||
was compiled with `embed_data_files` (and sometimes it looked
|
||||
in a nonexistent build directory). Now the bash completion
|
||||
script just includes a hard-coded list of data file names.
|
||||
|
||||
* MANUAL:
|
||||
|
||||
+ Clarify template vs metadata variables (#4501, Mauro Bieg).
|
||||
+ Fix raw content example (#4479, Mauro Bieg).
|
||||
+ Specify that you use html for raw output in epub.
|
||||
+ Add examples for raw docx blocks (#4472, Tristan Stenner).
|
||||
The documentation states that the target format name should match
|
||||
the output format, which isn't the case for `docx`/`openxml` and
|
||||
some others.
|
||||
+ Don't say that `empty_paragraphs` affects markdown output (#4540).
|
||||
+ Consolidate input/output format documentation (#4577, Mauro Bieg).
|
||||
|
||||
* New README template. Take in/out formats from manual.
|
||||
|
||||
* Fix example in lua-filters docs (#4459, HeirOfNorton).
|
||||
|
||||
* Use the `-threaded` GHC flag when building benchmarks (#4587,
|
||||
Francesco Occhipinti).
|
||||
|
||||
* Bump temporary upper bound to 1.4.
|
||||
|
||||
* Use pandoc-citeproc 0.14.3.1.
|
||||
|
||||
* Use texmath-0.10.1.2 (fixes escapes in math in ms, #4597).
|
||||
|
||||
* Removed old lib directory. This was used for something long ago,
|
||||
but plays no role now.
|
||||
|
||||
* Removed unneeded data file `LaTeXMathML.js`.
|
||||
|
||||
* Create 64- and 32-bit versions of Windows binary packages.
|
||||
|
||||
pandoc (2.1.3)
|
||||
|
||||
* Docx reader (Jesse Rosenthal):
|
||||
|
||||
+ Add tests for nested smart tags.
|
||||
+ Parse nested smart tags.
|
||||
+ Make unwrapSDT into a general `unwrap` function that can unwrap both
|
||||
nested SDT tags and smartTags. This makes the SmartTags constructor in
|
||||
the Docx type unnecessary, so we remove it (#4446).
|
||||
+ Remove unused `docxWarnings` (Alexander Krotov).
|
||||
|
||||
* RST reader: Allow unicode bullet characters (#4454).
|
||||
|
||||
* Haddock reader: Better table handling, using haddock-library's
|
||||
new table support, if compiled against a version that
|
||||
includes it. Note that tables with col/rowspans will not
|
||||
translate well into Pandoc.
|
||||
|
||||
* Muse reader (Alexander Krotov):
|
||||
|
||||
+ Require closing tag to have the same indentation as opening.
|
||||
+ Do not reparse blocks inside unclosed block tag (#4425).
|
||||
+ Parse `<class>` tag (supported by Emacs Muse).
|
||||
+ Do not produce empty Str element for unindented verse lines.
|
||||
+ Don't allow footnote references inside links.
|
||||
+ Allow URL to be empty.
|
||||
+ Require that comment semicolons are in the first column (#4551).
|
||||
+ Various internal improvements.
|
||||
|
||||
* LaTeX reader:
|
||||
|
||||
+ Add support to parse unit string of `\SI` command (closes #4296,
|
||||
Marc Schreiber).
|
||||
|
||||
* Haddock writer: we now always render tables as
grid tables, since Haddock supports these.
|
||||
|
||||
* DokuWiki writer: rewrite backSlashLineBreaks (#4445, Mauro Bieg).
|
||||
|
||||
* Docx writer: Fixed formatting of `DefaultStyle` ordered lists in
|
||||
docx writer. We want decimal for the top level, not lower roman.
|
||||
|
||||
* RST writer:
|
||||
|
||||
+ Strip whitespace at beginning and ending of inline containers
|
||||
(#4327, Francesco Occhipinti).
|
||||
+ Filter out empty inline containers (#4434). There is nothing in
|
||||
RST that corresponds to e.g. `Emph []`, so we just ignore elements
|
||||
like this (Francesco Occhipinti).
|
||||
|
||||
* Muse writer (Alexander Krotov):
|
||||
|
||||
+ Support spans with anchors.
|
||||
+ Replace smallcaps with emphasis before normalization.
|
||||
+ Output smallcaps as emphasis.
|
||||
+ Expand Cite before list normalization.
|
||||
+ Write empty inline lists as `<verbatim></verbatim>`.
|
||||
+ Remove empty Str from the beginning of inline lists during normalization.
|
||||
+ Escape "-" to avoid creating bullet lists.
|
||||
+ Fix math expansion for more than one expression per paragraph.
|
||||
+ Expand math before inline list normalization.
|
||||
|
||||
* Dokuwiki writer: fix LineBreaks in Tables (#4313, Mauro Bieg).
|
||||
|
||||
* Ms writer:
|
||||
|
||||
+ Asciify pdf anchors, since unicode anchors don't work (#4436).
|
||||
Internal links should be converted automatically, so this shouldn't
|
||||
affect users directly.
|
||||
+ Don't escape hyphens as `\-`; that's for a minus sign (#4467).
|
||||
|
||||
* Beamer writer: put hyperlink after `\begin{frame}` and not in the title
|
||||
(#4307). If it's in the title, then we get a titlebar on slides with
|
||||
the `plain` attribute, when the id is non-null. This fixes a regression
|
||||
in 2.0.
|
||||
|
||||
* EPUB writer: Remove notes from TOC in nav.xhtml (#4453, Mauro Bieg).
|
||||
|
||||
* JATS writer: Remove extraneous, significant whitespace (#4335,
|
||||
Nokome Bentley).
|
||||
|
||||
* html2pdf: inject base tag with current working directory (#4413, Mauro
|
||||
Bieg). This helps ensure that linked resources are included.
|
||||
|
||||
* Add Semigroup instances for everything for which we defined a
|
||||
Monoid instance previously (API change):
|
||||
|
||||
+ Text.Pandoc.Class.FileTree.
|
||||
+ Text.Pandoc.Translations.Translations.
|
||||
+ Text.Pandoc.Extensions.Extensions.
|
||||
+ Text.Pandoc.Readers.Odt.StyleReader.Styles.
|
||||
+ Text.Pandoc.Pretty.Doc.
|
||||
+ Text.Pandoc.MediaBag.MediaBag.
|
||||
|
||||
* Add custom Prelude to give clean code for Monoid and Semigroup
|
||||
that works with ghc 7.10-8.4. The custom Prelude (`prelude/Prelude`)
|
||||
is used for ghc versions < 8.4. `NoImplicitPrelude` is used
|
||||
in all source files, and Prelude is explicitly imported
|
||||
(this is necessary for ghci to work properly with the custom prelude).
|
||||
|
||||
* Text.Pandoc.Writers.Shared (Francesco Occhipinti):
|
||||
|
||||
+ Export `stripLeadingTrailingSpace`.
|
||||
+ Don't wrap lines in grid tables when `--wrap=none` (#4320).
|
||||
+ `gridTable`: Don't wrap lines in tables when `--wrap=none`. Instead,
|
||||
expand cells, even if it results in cells that don't respect relative
|
||||
widths or surpass page column width. This change affects RST,
|
||||
Markdown, and Haddock writers.
|
||||
|
||||
* Raise error if someone tries to print docx, odt, etc. template (#4441).
|
||||
|
||||
* LaTeX template: Provide `bidi` package's option using
|
||||
`\PassOptionsToPackage` (#4357, Václav Haisman). This avoid a
|
||||
clash when `polyglossia` loads it first and then it is loaded again
|
||||
for XeLaTeX.
|
||||
|
||||
* ConTeXt template: Added `pdfa` variable to generate PDF/A (#4294, Henri
|
||||
Menke). Instructions on how to install the ICC profiles on ConTeXt
|
||||
standalone can be found in the wiki:
|
||||
<http://wiki.contextgarden.net/PDFX#ICC_profiles>.
|
||||
If the ICC profiles are not available the log will contain error
|
||||
messages.
|
||||
|
||||
* Use latest pandoc-types, skylighting
|
||||
|
||||
* Use latest pandoc-citeproc in binary package.
|
||||
|
||||
* Bump upper bound for time, criterion, haddock-library, exceptions,
|
||||
http-types, aeson, haddock-library.
|
||||
|
||||
* Bump upper bound tasty-quickcheck 0.10 (#4429, Felix Yan).
|
||||
|
||||
* pandoc.cabal: fix up other-extensions and language fields.
|
||||
Language is now consistently `Haskell2010`, and other-extensions
|
||||
is consistently `NoImplicitPrelude`. Everything else to be specified
|
||||
in the module header as needed.
|
||||
|
||||
* Removed `old-locale` flag and Text.Pandoc.Compat.Time.
|
||||
This is no longer necessary since we no longer support ghc 7.8.
|
||||
|
||||
* Make `weigh-pandoc` into a benchmark program.
|
||||
Remove `weigh-pandoc` flag. `weigh-pandoc` is now built (and run)
|
||||
automatically when you build (and run) benchmarks.
|
||||
|
||||
* MANUAL: add instructions for background images reveal.js (#4325, John
|
||||
Muccigrosso).
|
||||
|
||||
* appveyor: use VS 2013 environment instead of VS 2015 for Windows builds.
|
||||
|
||||
|
||||
pandoc (2.1.2)
|
||||
|
||||
* Markdown reader:
|
||||
|
||||
+ Fix parsing bug with nested fenced divs (#4281). Previously we allowed
|
||||
"nonindent spaces" before the opening and closing `:::`, but this
|
||||
interfered with list parsing, so now we require the fences to be flush with
|
||||
the margin of the containing block.
|
||||
|
||||
* Commonmark reader:
|
||||
|
||||
+ `raw_html` is now on by default. It can be disabled explicitly
|
||||
using `-f commonmark-raw_html`.
|
||||
|
||||
* Org reader (Albert Krewinkel):
|
||||
|
||||
+ Move citation tests to separate module.
|
||||
+ Allow changing emphasis syntax (#4378). The characters allowed before
|
||||
and after emphasis can be configured via `#+pandoc-emphasis-pre` and
|
||||
`#+pandoc-emphasis-post`, respectively. This makes it possible to change which
|
||||
strings are recognized as emphasized text on a per-document or even
|
||||
per-paragraph basis. Example:
|
||||
|
||||
#+pandoc-emphasis-pre: "-\t ('\"{"
|
||||
#+pandoc-emphasis-post: "-\t\n .,:!?;'\")}["
|
||||
|
||||
* LaTeX reader:
|
||||
|
||||
+ Fixed comments inside citations (#4374).
|
||||
+ Fix regression in package options including underscore (#4424).
|
||||
+ Make `--trace` work.
|
||||
+ Fixed parsing of `tabular*` environment (#4279).
|
||||
|
||||
* RST reader:
|
||||
|
||||
+ Fix regression in parsing of headers with trailing space (#4280).
|
||||
|
||||
* Muse reader (Alexander Krotov):
|
||||
|
||||
+ Enable `<literal>` tags even if amuse extension is enabled.
|
||||
Amusewiki disables <literal> tags for security reasons.
|
||||
If user wants similar behavior in pandoc, RawBlocks and RawInlines
|
||||
can be removed or replaced with filters.
|
||||
+ Remove space prefix from `<literal>` tag contents.
|
||||
+ Do not consume whitespace while looking for closing end tag.
|
||||
+ Convert alphabetical list markers to decimal in round-trip test.
|
||||
Alphabetical lists are an addition of Text::Amuse.
|
||||
They are not present in Emacs Muse and can be ambiguous
|
||||
when list starts with "i.", "c." etc.
|
||||
+ Allow `<quote>` and other tags to be indented.
|
||||
+ Allow single colon in definition list term.
|
||||
+ Fix parsing of verse in lists.
|
||||
+ Improved parsing efficiency. Avoid `parseFromString`.
|
||||
Lists are parsed in linear instead of exponential time now.
|
||||
+ Replace ParserState with MuseState.
|
||||
+ Prioritize lists with roman numerals over alphabetical lists.
|
||||
This is to make sure "i." starts a roman numbered list,
|
||||
instead of a list with letter "i" (followed by "j", "k", ...).
|
||||
+ Fix directive parsing.
|
||||
+ Parse definition lists with multiple descriptions.
|
||||
+ Parse next list item before parsing more item contents.
|
||||
+ Fixed a bug: headers did not terminate lists.
|
||||
+ Move indentation parsing from `definitionListItem` to `definitionList`.
|
||||
+ Paragraph indentation does not indicate nested quote.
|
||||
Muse allows indentation to indicate quotation or alignment,
|
||||
but only on the top level, not within a <quote> or list.
|
||||
+ Require that block tags are on separate lines.
|
||||
Text::Amuse already explicitly requires it anyway.
|
||||
+ Fix matching of closing inline tags.
|
||||
+ Various internal changes.
|
||||
+ Fix parsing of nested definition lists.
|
||||
+ Require only one space for nested definition list indentation.
|
||||
+ Do not remove trailing whitespace from `<code>`.
|
||||
+ Fix parsing of trailing whitespace. Newline after whitespace now
|
||||
results in softbreak instead of space.
|
||||
|
||||
* Docx reader (Jesse Rosenthal, except where noted):
|
||||
|
||||
+ Handle nested sdt tags (#4415).
|
||||
+ Don't look up dependent run styles if `+styles` is enabled.
|
||||
+ Move pandoc inline styling inside custom-style span.
|
||||
+ Read custom styles (#1843). This will read all paragraph and
|
||||
character classes as divs and spans, respectively. Dependent styles
|
||||
will still be resolved, but will be wrapped with appropriate style
|
||||
tags. It is controlled by the `+styles` extension (`-f docx+styles`).
|
||||
This can be used in conjunction with the `custom-style` feature in the
|
||||
docx writer for a pandoc-docx editing workflow. Users can convert from
|
||||
an input docx, reading the custom-styles, and then use that same input
|
||||
docx file as a reference-doc for producing an output docx file. Styles
|
||||
will be maintained across the conversion, even if pandoc doesn't
|
||||
understand them.
|
||||
+ Small change to Fields hyperlink parser. Previously, unquoted string
|
||||
required a space at the end of the line (and consumed it). Now we
|
||||
either take a space (and don't consume it), or end of input.
|
||||
+ Pick table width from the longest row or header (Francesco Occhipinti,
|
||||
#4360).
|
||||
|
||||
* Muse writer (Alexander Krotov):
|
||||
|
||||
+ Change verse markup: `> ` instead of `<verse>` tag.
|
||||
+ Remove empty strings during inline normalization.
|
||||
+ Don't indent nested definition lists.
|
||||
+ Use unicode quotes for quoted text.
|
||||
+ Write image width specified in percent in Text::Amuse mode.
|
||||
+ Don't wrap displayMath into `<verse>`.
|
||||
+ Escape nonbreaking space (`~~`).
|
||||
+ Join code with different attributes during normalization.
|
||||
+ Indent lists inside Div.
|
||||
+ Support definitions with multiple descriptions.
|
||||
|
||||
* Powerpoint writer (Jesse Rosenthal):
|
||||
|
||||
+ Use table styles. This will use the default table style in the
reference-doc file. As a result, tables will be easier to use with
a template and will match its color scheme.
|
||||
+ Remove empty slides. Because of the way that slides were split, these
|
||||
could be accidentally produced by comments after images. When animations
|
||||
are added, there will be a way to add an empty slide with either
|
||||
incremental lists or pauses.
|
||||
+ Implement syntax highlighting. Note that background colors can't
|
||||
be implemented in PowerPoint, so highlighting styles that require
|
||||
these will be incomplete.
|
||||
+ New test framework for pptx. We now compare the output of the
|
||||
Powerpoint writer with files that we know to (a) not be corrupt,
|
||||
and (b) to show the desired output behavior (details below).
|
||||
+ Add `notesMaster` to `presentation.xml` if necessary.
|
||||
+ Ignore links and (end)notes in speaker notes.
|
||||
+ Output speaker notes.
|
||||
+ Read speaker note templates conditionally. If there are speaker
|
||||
notes in the presentation, we read in the notesMasters templates
|
||||
from the reference pptx file.
|
||||
+ Fix deletion track changes (#4303, Jesse Rosenthal).
|
||||
|
||||
* Markdown writer: properly escape @ to avoid capture as citation
|
||||
(#4366).
|
||||
|
||||
* LaTeX writer:
|
||||
|
||||
+ Put hypertarget inside figure environment (#4388).
|
||||
This works around a problem with the endfloat package and
|
||||
makes pandoc's output compatible with it.
|
||||
+ Fix image height with percentage (#4389). This previously caused
|
||||
the image to be resized to a percentage of textwidth, rather than
|
||||
textheight.
|
||||
|
||||
* ConTeXt writer (Henri Menke):
|
||||
|
||||
+ New section syntax and support `--section-divs` (#2609).
|
||||
`\section[my-header]{My Header}` ->
|
||||
`\section[title={My Header},reference={my-header}]`.
|
||||
The ConTeXt writer now supports the `--section-divs` option to
|
||||
write sections in the fenced style, with `\startsection` and
|
||||
`\stopsection`.
|
||||
+ xtables: correct wrong usage of caption (Henri Menke).
|
||||
|
||||
* Docx writer:
|
||||
|
||||
+ Fix image resizing with multiple images (#3930, Andrew Pritchard).
|
||||
+ Use new golden framework (Jesse Rosenthal).
|
||||
+ Make more deterministic to facilitate testing (Jesse Rosenthal).
|
||||
- `getUniqueId` now calls to the state to get an incremented digit,
|
||||
instead of calling to P.uniqueHash.
|
||||
- we always start the PRNG in mkNumbering/mkAbstractNum with the same
|
||||
seed (1848), so our randoms should be the same each time.
|
||||
+ Fix ids in comment writing (Jesse Rosenthal). Comments from
|
||||
`--track-changes=all` were producing corrupt docx, because the
|
||||
writer was trying to get id from the `(ID,_,_)` field of
|
||||
the attributes, and ignoring the "id" entry in the key-value pairs. We
|
||||
now check both.
|
||||
|
||||
* Ms writer: Added papersize variable.
|
||||
|
||||
* TEI writer:
|
||||
|
||||
+ Use `height` instead of `depth` for images (#4331).
|
||||
+ Ensure that id prefix is always used.
|
||||
+ Don't emit `role` attribute; that was a leftover from the
|
||||
Docbook writer.
|
||||
+ Use 'xml:id', not 'id' attribute (#4371).
|
||||
|
||||
* AsciiDoc writer:
|
||||
|
||||
+ Do not output implicit heading IDs (#4363, Alexander
|
||||
Krotov). Convert to `asciidoc-auto_identifiers` for old behaviour.
|
||||
|
||||
* RST writer:
|
||||
|
||||
+ Remove `blockToRST'` moving its logic into `fixBlocks`
|
||||
(Francesco Occhipinti).
|
||||
+ Insert comment between lists and quotes (#4248, Francesco Occhipinti).
|
||||
|
||||
* RST template: remove definition of 'math' role as raw.
|
||||
This used to be needed prior to v 0.8 of docutils, but
|
||||
now math support is built-in.
|
||||
|
||||
* Slides: Use divs to set incremental/non-incremental (#4381,
|
||||
Jesse Rosenthal). The old method (list inside blockquote) still
|
||||
works, but we are encouraging the use of divs with class
|
||||
`incremental` or `nonincremental`.
|
||||
|
||||
* Text.Pandoc.ImageSize:
|
||||
|
||||
+ Make image size detection for PDFs more robust (#4322).
|
||||
+ Determine image size for PDFs (#4322).
|
||||
+ EMF Image size support (#4375, Andrew Pritchard).
|
||||
|
||||
* Text.Pandoc.Extensions:
|
||||
|
||||
+ Add `Ext_styles` (Jesse Rosenthal, API change). This will be used in
|
||||
the docx reader (defaulting to off) to read paragraph and character
|
||||
styles not understood by pandoc (as divs and spans, respectively).
|
||||
+ Made `Ext_raw_html` default for `commonmark` format.
|
||||
|
||||
* Text.Pandoc.Parsing:
|
||||
|
||||
+ Export `manyUntil` (Alexander Krotov, API change).
|
||||
+ Export improved `sepBy1` (Alexander Krotov).
|
||||
+ Export list marker parsers: `upperRoman`, `lowerRoman`,
|
||||
`decimal`, `lowerAlpha`, `upperAlpha` (Alexander Krotov, API change).
|
||||
|
||||
* Tests/Lua: fix tests on windows (Albert Krewinkel).
|
||||
|
||||
* Lua: register script name in global variable (#4393). The name of the Lua
|
||||
script which is executed is made available in the global Lua variable
|
||||
`PANDOC_SCRIPT_FILE`, both for Lua filters and custom writers.
|
||||
|
||||
* Tests: Abstract powerpoint tests out to OOXML tests (Jesse Rosenthal).
|
||||
There is very little pptx-specific in these tests, so we abstract out
|
||||
the basic testing function so it can be used for docx as well. This
|
||||
should allow us to catch some errors in the docx writer that slipped
|
||||
by the roundtrip testing.
|
||||
|
||||
* Lua filters: store constructors in registry (Albert Krewinkel). Lua
|
||||
functions used to construct AST element values are stored in the Lua
|
||||
registry for quicker access. Getting a value from the registry is much
|
||||
faster than getting a global value (partly due to idiosyncrasies of hslua);
|
||||
this change results in a considerable performance boost.
|
||||
|
||||
* Documentation:
|
||||
|
||||
+ `doc/org.md` Add draft of Org-mode documentation (Albert Krewinkel).
|
||||
+ `doc/lua-filters.md`: document global vars set for filters
|
||||
(Albert Krewinkel).
|
||||
+ INSTALL.md: mention Stack version. (#4343, Adam Brandizzi).
|
||||
+ MANUAL: add documentation on custom styles (Jesse Rosenthal).
|
||||
+ MANUAL.txt: Document incremental and nonincremental divs (Jesse
|
||||
Rosenthal). Blockquoted lists are still described, but fenced divs are
|
||||
presented in preference.
|
||||
+ MANUAL.txt: document header and footer variables (newmana).
|
||||
+ MANUAL.txt: self-contained implies standalone (#4304, Daniel Lublin).
|
||||
+ CONTRIBUTING.md: label was renamed. (#4310, Alexander Brandizzi).
|
||||
|
||||
* Require tagsoup 0.14.3 (#4282), fixing HTML tokenization bug.
|
||||
|
||||
* Use latest texmath.
|
||||
|
||||
* Use latest pandoc-citeproc.
|
||||
|
||||
* Allow exceptions 0.9.
|
||||
|
||||
* Require aeson-pretty 0.8.5 (#4394).
|
||||
|
||||
* Bump blaze-markup, blaze-html lower bounds to 0.8, 0.9 (#4334).
|
||||
|
||||
* Update tagsoup to 0.14.6 (Alexander Krotov, #4282).
|
||||
|
||||
* Removed ghc-prof-options. As of cabal 1.24, sensible defaults are used.
|
||||
|
||||
* Update default.nix to current nixpkgs-unstable for hslua-0.9.5 (#4348,
|
||||
jarlg).
|
||||
|
||||
|
||||
pandoc (2.1.1)
|
||||
|
||||
* Markdown reader:
|
||||
|
@ -1299,7 +2004,7 @@ pandoc (2.0)
|
|||
directory. However, the working directory must be explicitly
|
||||
specified if the `--resource-path` option is used.
|
||||
|
||||
* Added --abbreviations=FILE option for custom abbreviations file
|
||||
* Added `--abbreviations=FILE` option for custom abbreviations file
|
||||
(#256). Default abbreviations file (`data/abbreviations`) contains
|
||||
a list of strings that will be recognized by pandoc's
|
||||
Markdown parser as abbreviations. (A nonbreaking space will
|
||||
|
@ -1704,7 +2409,7 @@ pandoc (2.0)
|
|||
* HTML reader: parse a span with class `smallcaps` as `SmallCaps`.
|
||||
|
||||
* LaTeX reader:
|
||||
|
||||
|
||||
+ Implemented `\graphicspath` (#736).
|
||||
+ Properly handle column prefixes/suffixes. For example, in
|
||||
`\begin{tabular}{>{$}l<{$}>{$}l<{$} >{$}l<{$}}`
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -4,7 +4,7 @@
|
|||
|
||||
_pandoc()
|
||||
{
|
||||
local cur prev opts lastc informats outformats datadir
|
||||
local cur prev opts lastc informats outformats datafiles
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
|
@ -14,7 +14,7 @@ _pandoc()
|
|||
informats="%s"
|
||||
outformats="%s"
|
||||
highlight_styles="%s"
|
||||
datadir="%s"
|
||||
datafiles="%s"
|
||||
|
||||
case "${prev}" in
|
||||
--from|-f|--read|-r)
|
||||
|
@ -34,7 +34,7 @@ _pandoc()
|
|||
return 0
|
||||
;;
|
||||
--print-default-data-file)
|
||||
COMPREPLY=( $(compgen -W "reference.odt reference.docx $(find ${datadir} | sed -e 's/.*\/data\///')" -- ${cur}) )
|
||||
COMPREPLY=( $(compgen -W "${datafiles}" -- ${cur}) )
|
||||
return 0
|
||||
;;
|
||||
--wrap)
|
||||
|
|
Binary file not shown.
|
@ -38,6 +38,13 @@ $endif$
|
|||
$if(pagenumbering)$
|
||||
\setuppagenumbering[$for(pagenumbering)$$pagenumbering$$sep$,$endfor$]
|
||||
$endif$
|
||||
$if(pdfa)$
|
||||
% attempt to generate PDF/A
|
||||
\setupbackend
|
||||
[format=PDF/A-1b:2005,
|
||||
intent=sRGB IEC61966-2.1,
|
||||
profile=sRGB.icc]
|
||||
$endif$
|
||||
|
||||
% use microtypography
|
||||
\definefontfeature[default][default][script=latn, protrusion=quality, expansion=quality, itlc=yes, textitalics=yes, onum=yes, pnum=yes]
|
||||
|
@ -47,7 +54,7 @@ $endif$
|
|||
|
||||
\setupbodyfontenvironment[default][em=italic] % use italic as em, not slanted
|
||||
|
||||
\definefallbackfamily[mainface][rm][DejaVu Serif][preset=range:greek, force=yes]
|
||||
\definefallbackfamily[mainface][rm][CMU Serif][preset=range:greek, force=yes]
|
||||
\definefontfamily[mainface][rm][$if(mainfont)$$mainfont$$else$Latin Modern Roman$endif$]
|
||||
\definefontfamily[mainface][mm][$if(mathfont)$$mathfont$$else$Latin Modern Math$endif$]
|
||||
\definefontfamily[mainface][ss][$if(sansfont)$$sansfont$$else$Latin Modern Sans$endif$]
|
||||
|
|
|
@ -29,7 +29,7 @@ $highlighting-css$
|
|||
</style>
|
||||
$endif$
|
||||
$for(css)$
|
||||
<link rel="stylesheet" href="$css$">
|
||||
<link rel="stylesheet" href="$css$" />
|
||||
$endfor$
|
||||
$if(math)$
|
||||
$math$
|
||||
|
|
|
@ -2,14 +2,24 @@
|
|||
\PassOptionsToPackage{hyphens}{url}
|
||||
$if(colorlinks)$
|
||||
\PassOptionsToPackage{dvipsnames,svgnames*,x11names*}{xcolor}
|
||||
$endif$
|
||||
%
|
||||
$endif$$if(dir)$$if(latex-dir-rtl)$
|
||||
\PassOptionsToPackage{RTLdocument}{bidi}
|
||||
$endif$$endif$%
|
||||
\documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$babel-lang$,$endif$$if(papersize)$$papersize$paper,$endif$$if(beamer)$ignorenonframetext,$if(handout)$handout,$endif$$if(aspectratio)$aspectratio=$aspectratio$,$endif$$endif$$for(classoption)$$classoption$$sep$,$endfor$]{$documentclass$}
|
||||
$if(beamer)$
|
||||
$if(background-image)$
|
||||
\usebackgroundtemplate{%
|
||||
\includegraphics[width=\paperwidth]{$background-image$}%
|
||||
}
|
||||
$endif$
|
||||
\usepackage{pgfpages}
|
||||
\setbeamertemplate{caption}[numbered]
|
||||
\setbeamertemplate{caption label separator}{: }
|
||||
\setbeamercolor{caption name}{fg=normal text.fg}
|
||||
\beamertemplatenavigationsymbols$if(navigation)$$navigation$$else$empty$endif$
|
||||
$for(beameroption)$
|
||||
\setbeameroption{$beameroption$}
|
||||
$endfor$
|
||||
$endif$
|
||||
$if(beamerarticle)$
|
||||
\usepackage{beamerarticle} % needs to be loaded first
|
||||
|
@ -287,11 +297,7 @@ $endif$
|
|||
$if(dir)$
|
||||
\ifxetex
|
||||
% load bidi as late as possible as it modifies e.g. graphicx
|
||||
$if(latex-dir-rtl)$
|
||||
\usepackage[RTLdocument]{bidi}
|
||||
$else$
|
||||
\usepackage{bidi}
|
||||
$endif$
|
||||
\fi
|
||||
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
|
||||
\TeXXeTstate=1
|
||||
|
|
|
@ -90,7 +90,10 @@ $for(author)$
|
|||
$author$
|
||||
$endfor$
|
||||
$if(date)$
|
||||
.ND "$date$"
|
||||
.AU
|
||||
.sp 0.5
|
||||
.ft R
|
||||
$date$
|
||||
$endif$
|
||||
$if(abstract)$
|
||||
.AB
|
||||
|
|
|
@ -197,6 +197,11 @@ $endif$
|
|||
$if(parallaxBackgroundImage)$
|
||||
// Parallax background image
|
||||
parallaxBackgroundImage: '$parallaxBackgroundImage$', // e.g. "'https://s3.amazonaws.com/hakim-static/reveal-js/reveal-parallax-1.jpg'"
|
||||
$else$
|
||||
$if(background-image)$
|
||||
// Parallax background image
|
||||
parallaxBackgroundImage: '$background-image$', // e.g. "'https://s3.amazonaws.com/hakim-static/reveal-js/reveal-parallax-1.jpg'"
|
||||
$endif$
|
||||
$endif$
|
||||
$if(parallaxBackgroundSize)$
|
||||
// Parallax background size
|
||||
|
|
|
@ -135,9 +135,42 @@ Elements without matching functions are left untouched.
See [module documentation](#module-pandoc) for a list of pandoc
elements.

The global `FORMAT` is set to the format of the pandoc writer
being used (`html5`, `latex`, etc.), so the behavior of a filter
can be made conditional on the eventual output format.

## Global variables

Pandoc passes additional data to Lua filters by setting global
variables.

`FORMAT`
:   The global `FORMAT` is set to the format of the pandoc
    writer being used (`html5`, `latex`, etc.), so the behavior
    of a filter can be made conditional on the eventual output
    format.

`PANDOC_READER_OPTIONS`
:   Table of the options which were provided to the parser.

`PANDOC_VERSION`
:   Contains the pandoc version as a numerically indexed table,
    most significant number first. E.g., for pandoc 2.1.1, the
    value of the variable is a table `{2, 1, 1}`. Use
    `table.concat(PANDOC_VERSION, '.')` to produce a version
    string. This variable is also set in custom writers.

`PANDOC_API_VERSION`
:   Contains the version of the pandoc-types API against which
    pandoc was compiled. It is given as a numerically indexed
    table, most significant number first. E.g., if pandoc was
    compiled against pandoc-types 1.17.3, then the value of the
    variable will be a table `{1, 17, 3}`. Use
    `table.concat(PANDOC_API_VERSION, '.')` to produce a version
    string from this table. This variable is also set in custom
    writers.

`PANDOC_SCRIPT_FILE`
:   The name used to invoke the filter. This value can be used
    to find files relative to the script file. This variable is
    also set in custom writers.

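As a rough sketch of how these globals might be consulted in a filter
(the specific behavior below is only an example, not a recommended
pattern):

```lua
-- An illustrative filter that uses the globals described above.
if PANDOC_VERSION ~= nil and PANDOC_VERSION[1] < 2 then
  io.stderr:write("warning: this filter assumes pandoc 2.x\n")
end

function Header(el)
  -- FORMAT names the writer in use, e.g. "latex" or "html5".
  if FORMAT == "latex" and el.level == 1 then
    -- Start each top-level section on a new page, in LaTeX output only.
    return { pandoc.RawBlock("latex", "\\clearpage"), el }
  end
  return el
end
```
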
# Pandoc Module
|
||||
|
||||
|
@ -318,7 +351,7 @@ local vars = {}
|
|||
function get_vars (meta)
|
||||
for k, v in pairs(meta) do
|
||||
if v.t == 'MetaInlines' then
|
||||
vars["$" .. k .. "$"] = v
|
||||
vars["$" .. k .. "$"] = {table.unpack(v)}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
|
95
doc/org.md
Normal file
|
@ -0,0 +1,95 @@
|
|||
---
|
||||
title: Org-mode features and differences
|
||||
author: Albert Krewinkel
|
||||
---
|
||||
|
||||
Pandoc handles org files very similarly to Emacs org-mode.
|
||||
However, there are differences worth highlighting.
|
||||
|
||||
|
||||
Citations
|
||||
=========
|
||||
|
||||
Emacs org-mode lacks an official citation syntax, leading to
|
||||
multiple syntaxes coexisting. Pandoc recognizes four different
|
||||
syntaxes for citations.
|
||||
|
||||
Berkeley-style citations
|
||||
------------------------
|
||||
|
||||
The semi-official Org-mode citation syntax is based on John
MacFarlane's Pandoc syntax and org-oriented enhancements
contributed by Richard Lawrence and others. It's dubbed Berkeley
syntax due to the place of activity of its main contributors.
|
||||
|
||||
Example:
|
||||
|
||||
See @john_doe_2006.
|
||||
[cite: See; @Mandelkern1981; and @Watson1953]
|
||||
[(cite): See; @Mandelkern1981; and @Watson1953]
|
||||
|
||||
|
||||
org-ref citations
|
||||
-----------------
|
||||
|
||||
The [org-ref] package is in wide use to handle citations and has
|
||||
excellent tooling support in Emacs. Its citation syntax is
geared towards users in the natural sciences but is still
quite flexible.
|
||||
|
||||
cite:doe_john_2000
|
||||
citep:doe_jane_1989
|
||||
[[citep:Dominik201408][See page 20 of::, for example]]
|
||||
|
||||
|
||||
Pandoc-Markdown-like syntax
|
||||
---------------------------
|
||||
|
||||
Historically, Markdown-style citation syntax was the first to be
added to pandoc's org reader. It is almost identical to
|
||||
Markdown's citation syntax.
|
||||
|
||||
Example:
|
||||
|
||||
[prefix @citekey suffix]
|
||||
[see @doe2000 p. 23-42]
|
||||
|
||||
|
||||
LaTeX-Syntax
|
||||
------------
|
||||
|
||||
Use normal latex citation commands like `\cite{x}` or
|
||||
`\citet{y}`.
|
||||
|
||||
[org-ref]: https://github.com/jkitchin/org-ref
|
||||
|
||||
|
||||
Emphasis rules
|
||||
==============
|
||||
|
||||
Org-mode uses complex rules to decide whether a string
|
||||
represents emphasized text. In Emacs, this can be customized via
|
||||
the variable `org-emphasis-regexp-components`. A variable like
|
||||
this doesn't fit well with pandoc's model. Instead, it is
|
||||
possible to use special lines to change these values:
|
||||
|
||||
#+pandoc-emphasis-pre: "-\t ('\"{"
|
||||
#+pandoc-emphasis-post: "-\t\n .,:!?;'\")}["
|
||||
|
||||
The above describes the default values of these variables. The
|
||||
arguments must be valid (Haskell) strings. If interpretation of
|
||||
the argument as a string fails, the default is restored.
|
||||
|
||||
Changing emphasis rules only affects the part of the document
following the special lines. To alter parsing behavior for the
whole document, they must appear among its first lines. It is
|
||||
also possible to change the values temporarily for selected
|
||||
sections only. The string `test` in the following snippet will
|
||||
be read as emphasized text, while the rest of the document will
|
||||
be parsed using default emphasis rules:
|
||||
|
||||
#+pandoc-emphasis-pre: "["
|
||||
#+pandoc-emphasis-post: "]"
|
||||
[/test/]
|
||||
#+pandoc-emphasis-pre:
|
||||
#+pandoc-emphasis-post:
|
|
@ -1,6 +0,0 @@
|
|||
symbol.hs: symbol.txt
|
||||
runghc parseUnicodeMapping.hs symbol.txt
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
-rm symbol.hs
|
|
@ -1,40 +0,0 @@
|
|||
import System.FilePath
|
||||
import Text.Parsec
|
||||
import Data.Char
|
||||
import System.Environment
|
||||
import Control.Applicative hiding (many)
|
||||
import Data.List
|
||||
|
||||
main :: IO ()
|
||||
main = (head <$> getArgs) >>= parseUnicodeMapping
|
||||
|
||||
|
||||
parseUnicodeMapping :: FilePath -> IO ()
|
||||
parseUnicodeMapping fname = do
|
||||
fin <- readFile fname
|
||||
let mapname = dropExtension . takeFileName $ fname
|
||||
let res = runParse fin
|
||||
let header = "-- Generated from " ++ fname ++ "\n" ++
|
||||
mapname ++ " :: [(Char, Char)]\n" ++ mapname ++" =\n [ "
|
||||
let footer = "]"
|
||||
writeFile (replaceExtension fname ".hs")
|
||||
(header ++ (concat $ intersperse "\n , " (map show res)) ++ footer)
|
||||
|
||||
type Unicode = Char
|
||||
|
||||
runParse :: String -> [(Char, Unicode)]
|
||||
runParse s= either (error . show) id (parse parseMap "" s)
|
||||
|
||||
anyline = manyTill anyChar newline
|
||||
|
||||
getHexChar :: Parsec String () Char
|
||||
getHexChar = do
|
||||
[(c,_)] <- readLitChar . ("\\x" ++) <$> many1 hexDigit
|
||||
return c
|
||||
|
||||
parseMap :: Parsec String () [(Char, Unicode)]
|
||||
parseMap = do
|
||||
skipMany (char '#' >> anyline)
|
||||
many (flip (,) <$> getHexChar <* tab <*> getHexChar <* anyline)
|
||||
|
||||
|
|
@ -1,256 +0,0 @@
|
|||
#
|
||||
# Name: Adobe Symbol Encoding to Unicode
|
||||
# Unicode version: 2.0
|
||||
# Table version: 1.0
|
||||
# Date: 2011 July 12
|
||||
#
|
||||
# Copyright (c) 1991-2011 Unicode, Inc. All Rights reserved.
|
||||
#
|
||||
# This file is provided as-is by Unicode, Inc. (The Unicode Consortium). No
|
||||
# claims are made as to fitness for any particular purpose. No warranties of
|
||||
# any kind are expressed or implied. The recipient agrees to determine
|
||||
# applicability of information provided. If this file has been provided on
|
||||
# magnetic media by Unicode, Inc., the sole remedy for any claim will be
|
||||
# exchange of defective media within 90 days of receipt.
|
||||
#
|
||||
# Unicode, Inc. hereby grants the right to freely use the information
|
||||
# supplied in this file in the creation of products supporting the
|
||||
# Unicode Standard, and to make copies of this file in any form for
|
||||
# internal or external distribution as long as this notice remains
|
||||
# attached.
|
||||
#
|
||||
# Format: 4 tab-delimited fields:
|
||||
#
|
||||
# (1) The Unicode value (in hexadecimal)
|
||||
# (2) The Symbol Encoding code point (in hexadecimal)
|
||||
# (3) # Unicode name
|
||||
# (4) # PostScript character name
|
||||
#
|
||||
# General Notes:
|
||||
#
|
||||
# The Unicode values in this table were produced as the result of applying
|
||||
# the algorithm described in the section "Populating a Unicode space" in the
|
||||
# document "Unicode and Glyph Names," at
|
||||
# http://partners.adobe.com/asn/developer/typeforum/unicodegn.html
|
||||
# to the characters in Symbol. Note that some characters, such as "space",
|
||||
# are mapped to 2 Unicode values. 29 characters have assignments in the
|
||||
# Corporate Use Subarea; these are indicated by "(CUS)" in field 4. Refer to
|
||||
# the above document for more details.
|
||||
#
|
||||
# 2011 July 12: The above link is no longer valid. For comparable,
|
||||
# more current information, see the document, "Glyph", at:
|
||||
# <http://www.adobe.com/devnet/opentype/archives/glyph.html>
|
||||
#
|
||||
# Revision History:
|
||||
#
|
||||
# [v1.0, 2011 July 12]
|
||||
# Updated terms of use to current wording.
|
||||
# Updated contact information and document link.
|
||||
# No changes to the mapping data.
|
||||
#
|
||||
# [v0.2, 30 March 1999]
|
||||
# Different algorithm to produce Unicode values (see notes above) results in
|
||||
# some character codes being mapped to 2 Unicode values; use of Corporate
|
||||
# Use subarea values; addition of the euro character; changed assignments of
|
||||
# some characters such as the COPYRIGHT SIGNs and RADICAL EXTENDER. Updated
|
||||
# Unicode names to Unicode 2.0 names.
|
||||
#
|
||||
# [v0.1, 5 May 1995] First release.
|
||||
#
|
||||
# Use the Unicode reporting form <http://www.unicode.org/reporting.html>
|
||||
# for any questions or comments or to report errors in the data.
|
||||
#
|
||||
0020 20 # SPACE # space
|
||||
00A0 20 # NO-BREAK SPACE # space
|
||||
0021 21 # EXCLAMATION MARK # exclam
|
||||
2200 22 # FOR ALL # universal
|
||||
0023 23 # NUMBER SIGN # numbersign
|
||||
2203 24 # THERE EXISTS # existential
|
||||
0025 25 # PERCENT SIGN # percent
|
||||
0026 26 # AMPERSAND # ampersand
|
||||
220B 27 # CONTAINS AS MEMBER # suchthat
|
||||
0028 28 # LEFT PARENTHESIS # parenleft
|
||||
0029 29 # RIGHT PARENTHESIS # parenright
|
||||
2217 2A # ASTERISK OPERATOR # asteriskmath
|
||||
002B 2B # PLUS SIGN # plus
|
||||
002C 2C # COMMA # comma
|
||||
2212 2D # MINUS SIGN # minus
|
||||
002E 2E # FULL STOP # period
|
||||
002F 2F # SOLIDUS # slash
|
||||
0030 30 # DIGIT ZERO # zero
|
||||
0031 31 # DIGIT ONE # one
|
||||
0032 32 # DIGIT TWO # two
|
||||
0033 33 # DIGIT THREE # three
|
||||
0034 34 # DIGIT FOUR # four
|
||||
0035 35 # DIGIT FIVE # five
|
||||
0036 36 # DIGIT SIX # six
|
||||
0037 37 # DIGIT SEVEN # seven
|
||||
0038 38 # DIGIT EIGHT # eight
|
||||
0039 39 # DIGIT NINE # nine
|
||||
003A 3A # COLON # colon
|
||||
003B 3B # SEMICOLON # semicolon
|
||||
003C 3C # LESS-THAN SIGN # less
|
||||
003D 3D # EQUALS SIGN # equal
|
||||
003E 3E # GREATER-THAN SIGN # greater
|
||||
003F 3F # QUESTION MARK # question
|
||||
2245 40 # APPROXIMATELY EQUAL TO # congruent
|
||||
0391 41 # GREEK CAPITAL LETTER ALPHA # Alpha
|
||||
0392 42 # GREEK CAPITAL LETTER BETA # Beta
|
||||
03A7 43 # GREEK CAPITAL LETTER CHI # Chi
|
||||
0394 44 # GREEK CAPITAL LETTER DELTA # Delta
|
||||
2206 44 # INCREMENT # Delta
|
||||
0395 45 # GREEK CAPITAL LETTER EPSILON # Epsilon
|
||||
03A6 46 # GREEK CAPITAL LETTER PHI # Phi
|
||||
0393 47 # GREEK CAPITAL LETTER GAMMA # Gamma
|
||||
0397 48 # GREEK CAPITAL LETTER ETA # Eta
|
||||
0399 49 # GREEK CAPITAL LETTER IOTA # Iota
|
||||
03D1 4A # GREEK THETA SYMBOL # theta1
|
||||
039A 4B # GREEK CAPITAL LETTER KAPPA # Kappa
|
||||
039B 4C # GREEK CAPITAL LETTER LAMDA # Lambda
|
||||
039C 4D # GREEK CAPITAL LETTER MU # Mu
|
||||
039D 4E # GREEK CAPITAL LETTER NU # Nu
|
||||
039F 4F # GREEK CAPITAL LETTER OMICRON # Omicron
|
||||
03A0 50 # GREEK CAPITAL LETTER PI # Pi
|
||||
0398 51 # GREEK CAPITAL LETTER THETA # Theta
|
||||
03A1 52 # GREEK CAPITAL LETTER RHO # Rho
|
||||
03A3 53 # GREEK CAPITAL LETTER SIGMA # Sigma
|
||||
03A4 54 # GREEK CAPITAL LETTER TAU # Tau
|
||||
03A5 55 # GREEK CAPITAL LETTER UPSILON # Upsilon
|
||||
03C2 56 # GREEK SMALL LETTER FINAL SIGMA # sigma1
|
||||
03A9 57 # GREEK CAPITAL LETTER OMEGA # Omega
|
||||
2126 57 # OHM SIGN # Omega
|
||||
039E 58 # GREEK CAPITAL LETTER XI # Xi
|
||||
03A8 59 # GREEK CAPITAL LETTER PSI # Psi
|
||||
0396 5A # GREEK CAPITAL LETTER ZETA # Zeta
|
||||
005B 5B # LEFT SQUARE BRACKET # bracketleft
|
||||
2234 5C # THEREFORE # therefore
|
||||
005D 5D # RIGHT SQUARE BRACKET # bracketright
|
||||
22A5 5E # UP TACK # perpendicular
|
||||
005F 5F # LOW LINE # underscore
|
||||
F8E5 60 # RADICAL EXTENDER # radicalex (CUS)
|
||||
03B1 61 # GREEK SMALL LETTER ALPHA # alpha
|
||||
03B2 62 # GREEK SMALL LETTER BETA # beta
|
||||
03C7 63 # GREEK SMALL LETTER CHI # chi
|
||||
03B4 64 # GREEK SMALL LETTER DELTA # delta
|
||||
03B5 65 # GREEK SMALL LETTER EPSILON # epsilon
|
||||
03C6 66 # GREEK SMALL LETTER PHI # phi
|
||||
03B3 67 # GREEK SMALL LETTER GAMMA # gamma
|
||||
03B7 68 # GREEK SMALL LETTER ETA # eta
|
||||
03B9 69 # GREEK SMALL LETTER IOTA # iota
|
||||
03D5 6A # GREEK PHI SYMBOL # phi1
|
||||
03BA 6B # GREEK SMALL LETTER KAPPA # kappa
|
||||
03BB 6C # GREEK SMALL LETTER LAMDA # lambda
|
||||
00B5 6D # MICRO SIGN # mu
|
||||
03BC 6D # GREEK SMALL LETTER MU # mu
|
||||
03BD 6E # GREEK SMALL LETTER NU # nu
|
||||
03BF 6F # GREEK SMALL LETTER OMICRON # omicron
|
||||
03C0 70 # GREEK SMALL LETTER PI # pi
|
||||
03B8 71 # GREEK SMALL LETTER THETA # theta
|
||||
03C1 72 # GREEK SMALL LETTER RHO # rho
|
||||
03C3 73 # GREEK SMALL LETTER SIGMA # sigma
|
||||
03C4 74 # GREEK SMALL LETTER TAU # tau
|
||||
03C5 75 # GREEK SMALL LETTER UPSILON # upsilon
|
||||
03D6 76 # GREEK PI SYMBOL # omega1
|
||||
03C9 77 # GREEK SMALL LETTER OMEGA # omega
|
||||
03BE 78 # GREEK SMALL LETTER XI # xi
|
||||
03C8 79 # GREEK SMALL LETTER PSI # psi
|
||||
03B6 7A # GREEK SMALL LETTER ZETA # zeta
|
||||
007B 7B # LEFT CURLY BRACKET # braceleft
|
||||
007C 7C # VERTICAL LINE # bar
|
||||
007D 7D # RIGHT CURLY BRACKET # braceright
|
||||
223C 7E # TILDE OPERATOR # similar
|
||||
20AC A0 # EURO SIGN # Euro
|
||||
03D2 A1 # GREEK UPSILON WITH HOOK SYMBOL # Upsilon1
|
||||
2032 A2 # PRIME # minute
|
||||
2264 A3 # LESS-THAN OR EQUAL TO # lessequal
|
||||
2044 A4 # FRACTION SLASH # fraction
|
||||
2215 A4 # DIVISION SLASH # fraction
|
||||
221E A5 # INFINITY # infinity
|
||||
0192 A6 # LATIN SMALL LETTER F WITH HOOK # florin
|
||||
2663 A7 # BLACK CLUB SUIT # club
|
||||
2666 A8 # BLACK DIAMOND SUIT # diamond
|
||||
2665 A9 # BLACK HEART SUIT # heart
|
||||
2660 AA # BLACK SPADE SUIT # spade
|
||||
2194 AB # LEFT RIGHT ARROW # arrowboth
|
||||
2190 AC # LEFTWARDS ARROW # arrowleft
|
||||
2191 AD # UPWARDS ARROW # arrowup
|
||||
2192 AE # RIGHTWARDS ARROW # arrowright
|
||||
2193 AF # DOWNWARDS ARROW # arrowdown
|
||||
00B0 B0 # DEGREE SIGN # degree
|
||||
00B1 B1 # PLUS-MINUS SIGN # plusminus
|
||||
2033 B2 # DOUBLE PRIME # second
|
||||
2265 B3 # GREATER-THAN OR EQUAL TO # greaterequal
|
||||
00D7 B4 # MULTIPLICATION SIGN # multiply
|
||||
221D B5 # PROPORTIONAL TO # proportional
|
||||
2202 B6 # PARTIAL DIFFERENTIAL # partialdiff
|
||||
2022 B7 # BULLET # bullet
|
||||
00F7 B8 # DIVISION SIGN # divide
|
||||
2260 B9 # NOT EQUAL TO # notequal
|
||||
2261 BA # IDENTICAL TO # equivalence
|
||||
2248 BB # ALMOST EQUAL TO # approxequal
|
||||
2026 BC # HORIZONTAL ELLIPSIS # ellipsis
|
||||
F8E6 BD # VERTICAL ARROW EXTENDER # arrowvertex (CUS)
|
||||
F8E7 BE # HORIZONTAL ARROW EXTENDER # arrowhorizex (CUS)
|
||||
21B5 BF # DOWNWARDS ARROW WITH CORNER LEFTWARDS # carriagereturn
|
||||
2135 C0 # ALEF SYMBOL # aleph
|
||||
2111 C1 # BLACK-LETTER CAPITAL I # Ifraktur
|
||||
211C C2 # BLACK-LETTER CAPITAL R # Rfraktur
|
||||
2118 C3 # SCRIPT CAPITAL P # weierstrass
|
||||
2297 C4 # CIRCLED TIMES # circlemultiply
|
||||
2295 C5 # CIRCLED PLUS # circleplus
|
||||
2205 C6 # EMPTY SET # emptyset
|
||||
2229 C7 # INTERSECTION # intersection
|
||||
222A C8 # UNION # union
|
||||
2283 C9 # SUPERSET OF # propersuperset
|
||||
2287 CA # SUPERSET OF OR EQUAL TO # reflexsuperset
|
||||
2284 CB # NOT A SUBSET OF # notsubset
|
||||
2282 CC # SUBSET OF # propersubset
|
||||
2286 CD # SUBSET OF OR EQUAL TO # reflexsubset
|
||||
2208 CE # ELEMENT OF # element
|
||||
2209 CF # NOT AN ELEMENT OF # notelement
|
||||
2220 D0 # ANGLE # angle
|
||||
2207 D1 # NABLA # gradient
|
||||
F6DA D2 # REGISTERED SIGN SERIF # registerserif (CUS)
|
||||
F6D9 D3 # COPYRIGHT SIGN SERIF # copyrightserif (CUS)
|
||||
F6DB D4 # TRADE MARK SIGN SERIF # trademarkserif (CUS)
|
||||
220F D5 # N-ARY PRODUCT # product
|
||||
221A D6 # SQUARE ROOT # radical
|
||||
22C5 D7 # DOT OPERATOR # dotmath
|
||||
00AC D8 # NOT SIGN # logicalnot
|
||||
2227 D9 # LOGICAL AND # logicaland
|
||||
2228 DA # LOGICAL OR # logicalor
|
||||
21D4 DB # LEFT RIGHT DOUBLE ARROW # arrowdblboth
|
||||
21D0 DC # LEFTWARDS DOUBLE ARROW # arrowdblleft
|
||||
21D1 DD # UPWARDS DOUBLE ARROW # arrowdblup
|
||||
21D2 DE # RIGHTWARDS DOUBLE ARROW # arrowdblright
|
||||
21D3 DF # DOWNWARDS DOUBLE ARROW # arrowdbldown
|
||||
25CA E0 # LOZENGE # lozenge
|
||||
2329 E1 # LEFT-POINTING ANGLE BRACKET # angleleft
|
||||
F8E8 E2 # REGISTERED SIGN SANS SERIF # registersans (CUS)
|
||||
F8E9 E3 # COPYRIGHT SIGN SANS SERIF # copyrightsans (CUS)
|
||||
F8EA E4 # TRADE MARK SIGN SANS SERIF # trademarksans (CUS)
|
||||
2211 E5 # N-ARY SUMMATION # summation
|
||||
F8EB E6 # LEFT PAREN TOP # parenlefttp (CUS)
|
||||
F8EC E7 # LEFT PAREN EXTENDER # parenleftex (CUS)
|
||||
F8ED E8 # LEFT PAREN BOTTOM # parenleftbt (CUS)
|
||||
F8EE E9 # LEFT SQUARE BRACKET TOP # bracketlefttp (CUS)
|
||||
F8EF EA # LEFT SQUARE BRACKET EXTENDER # bracketleftex (CUS)
|
||||
F8F0 EB # LEFT SQUARE BRACKET BOTTOM # bracketleftbt (CUS)
|
||||
F8F1 EC # LEFT CURLY BRACKET TOP # bracelefttp (CUS)
|
||||
F8F2 ED # LEFT CURLY BRACKET MID # braceleftmid (CUS)
|
||||
F8F3 EE # LEFT CURLY BRACKET BOTTOM # braceleftbt (CUS)
|
||||
F8F4 EF # CURLY BRACKET EXTENDER # braceex (CUS)
|
||||
232A F1 # RIGHT-POINTING ANGLE BRACKET # angleright
|
||||
222B F2 # INTEGRAL # integral
|
||||
2320 F3 # TOP HALF INTEGRAL # integraltp
|
||||
F8F5 F4 # INTEGRAL EXTENDER # integralex (CUS)
|
||||
2321 F5 # BOTTOM HALF INTEGRAL # integralbt
|
||||
F8F6 F6 # RIGHT PAREN TOP # parenrighttp (CUS)
|
||||
F8F7 F7 # RIGHT PAREN EXTENDER # parenrightex (CUS)
|
||||
F8F8 F8 # RIGHT PAREN BOTTOM # parenrightbt (CUS)
|
||||
F8F9 F9 # RIGHT SQUARE BRACKET TOP # bracketrighttp (CUS)
|
||||
F8FA FA # RIGHT SQUARE BRACKET EXTENDER # bracketrightex (CUS)
|
||||
F8FB FB # RIGHT SQUARE BRACKET BOTTOM # bracketrightbt (CUS)
|
||||
F8FC FC # RIGHT CURLY BRACKET TOP # bracerighttp (CUS)
|
||||
F8FD FD # RIGHT CURLY BRACKET MID # bracerightmid (CUS)
|
||||
F8FE FE # RIGHT CURLY BRACKET BOTTOM # bracerightbt (CUS)
|
|
@ -1,18 +1,25 @@
|
|||
# USE ALPINE LINUX
|
||||
FROM alpine:edge
|
||||
FROM alpine
|
||||
RUN apk update
|
||||
# INSTALL BASIC DEV TOOLS, GHC, GMP & ZLIB
|
||||
RUN echo "https://s3-us-west-2.amazonaws.com/alpine-ghc/8.0" >> /etc/apk/repositories
|
||||
ADD https://raw.githubusercontent.com/mitchty/alpine-ghc/master/mitch.tishmack%40gmail.com-55881c97.rsa.pub \
|
||||
/etc/apk/keys/mitch.tishmack@gmail.com-55881c97.rsa.pub
|
||||
RUN apk update
|
||||
RUN apk add alpine-sdk git ca-certificates ghc cabal stack zlib-dev \
|
||||
dpkg fakeroot sed gawk grep bash linux-headers
|
||||
RUN stack update
|
||||
RUN apk add alpine-sdk git ca-certificates ghc gmp-dev zlib-dev bash dpkg fakeroot
|
||||
# GRAB A RECENT BINARY OF STACK
|
||||
RUN curl -L https://www.stackage.org/stack/linux-x86_64-static | tar xz --wildcards --strip-components=1 -C /usr/local/bin '*/stack'
|
||||
# COMPRESS WITH UPX
|
||||
ADD https://github.com/lalyos/docker-upx/releases/download/v3.91/upx /usr/local/bin/upx
|
||||
RUN chmod 755 /usr/local/bin/upx
|
||||
RUN ulimit -n 4096
|
||||
RUN stack config set system-ghc --global true
|
||||
RUN stack --resolver lts-9 setup --install-cabal 2.0.1.1
|
||||
#RUN mkdir -p /etc/stack
|
||||
#RUN echo "build: { split-objs: true }" > /etc/stack/config.yaml
|
||||
RUN mkdir -p /usr/src/
|
||||
WORKDIR /usr/src/
|
||||
RUN git clone https://github.com/jgm/pandoc
|
||||
RUN git clone https://github.com/jgm/pandoc
|
||||
WORKDIR /usr/src/pandoc
|
||||
RUN stack install --stack-yaml stack.lts9.yaml \
|
||||
--only-dependencies \
|
||||
|
|
580
man/pandoc.1
|
@ -1,5 +1,5 @@
|
|||
.\"t
|
||||
.TH PANDOC 1 "January 18, 2018" "pandoc 2.1.1"
|
||||
.TH PANDOC 1 "April 26, 2018" "pandoc 2.2"
|
||||
.SH NAME
|
||||
pandoc - general markup converter
|
||||
.SH SYNOPSIS
|
||||
|
@ -10,34 +10,21 @@ pandoc - general markup converter
|
|||
Pandoc is a Haskell library for converting from one markup format to
|
||||
another, and a command\-line tool that uses this library.
|
||||
.PP
|
||||
Pandoc can read Markdown, CommonMark, PHP Markdown Extra,
|
||||
GitHub\-Flavored Markdown, MultiMarkdown, and (subsets of) Textile,
|
||||
reStructuredText, HTML, LaTeX, MediaWiki markup, TWiki markup, TikiWiki
|
||||
markup, Creole 1.0, Haddock markup, OPML, Emacs Org mode, DocBook, JATS,
|
||||
Muse, txt2tags, Vimwiki, EPUB, ODT, and Word docx.
|
||||
.PP
|
||||
Pandoc can write plain text, Markdown, CommonMark, PHP Markdown Extra,
|
||||
GitHub\-Flavored Markdown, MultiMarkdown, reStructuredText, XHTML,
|
||||
HTML5, LaTeX (including \f[C]beamer\f[] slide shows), ConTeXt, RTF,
|
||||
OPML, DocBook, JATS, OpenDocument, ODT, Word docx, GNU Texinfo,
|
||||
MediaWiki markup, DokuWiki markup, ZimWiki markup, Haddock markup, EPUB
|
||||
(v2 or v3), FictionBook2, Textile, groff man, groff ms, Emacs Org mode,
|
||||
AsciiDoc, InDesign ICML, TEI Simple, Muse, PowerPoint slide shows and
|
||||
Slidy, Slideous, DZSlides, reveal.js or S5 HTML slide shows.
|
||||
It can also produce PDF output on systems where LaTeX, ConTeXt,
|
||||
\f[C]pdfroff\f[], \f[C]wkhtmltopdf\f[], \f[C]prince\f[], or
|
||||
\f[C]weasyprint\f[] is installed.
|
||||
Pandoc can convert between numerous markup and word processing formats,
|
||||
including, but not limited to, various flavors of Markdown, HTML, LaTeX
|
||||
and Word docx.
|
||||
For the full lists of input and output formats, see the
|
||||
\f[C]\-\-from\f[] and \f[C]\-\-to\f[] options below.
|
||||
Pandoc can also produce PDF output: see creating a PDF, below.
|
||||
.PP
|
||||
Pandoc\[aq]s enhanced version of Markdown includes syntax for tables,
|
||||
definition lists, metadata blocks, \f[C]Div\f[] blocks, footnotes and
|
||||
citations, embedded LaTeX (including math), Markdown inside HTML block
|
||||
elements, and much more.
|
||||
These enhancements, described further under Pandoc\[aq]s Markdown, can
|
||||
be disabled using the \f[C]markdown_strict\f[] format.
|
||||
definition lists, metadata blocks, footnotes, citations, math, and much
|
||||
more.
|
||||
See below under Pandoc\[aq]s Markdown.
|
||||
.PP
|
||||
Pandoc has a modular design: it consists of a set of readers, which
|
||||
parse text in a given format and produce a native representation of the
|
||||
document (like an \f[I]abstract syntax tree\f[] or AST), and a set of
|
||||
document (an \f[I]abstract syntax tree\f[] or AST), and a set of
|
||||
writers, which convert this native representation into a target format.
|
||||
Thus, adding an input or output format requires only adding a reader or
|
||||
writer.
|
||||
|
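To make the reader/writer split described above concrete, here is a minimal sketch (not part of this patch) using the pandoc 2.x Haskell API; it assumes only runIO, handleError, readMarkdown, and writeRST as exported from Text.Pandoc:

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- A reader parses Markdown into the shared AST (Pandoc); a writer renders
-- that same AST as reStructuredText. Changing formats means swapping the
-- reader or the writer, nothing else.
import Text.Pandoc
import qualified Data.Text.IO as TIO

main :: IO ()
main = do
  result <- runIO $ do
    ast <- readMarkdown def "# Hello *pandoc*"
    writeRST def ast
  rst <- handleError result
  TIO.putStrLn rst
```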
@ -175,7 +162,8 @@ When using an HTML/CSS\-to\-PDF\-engine, \f[C]\-\-css\f[] affects the
|
|||
output.
|
||||
If \f[C]wkhtmltopdf\f[] is used, then the variables
|
||||
\f[C]margin\-left\f[], \f[C]margin\-right\f[], \f[C]margin\-top\f[],
|
||||
\f[C]margin\-bottom\f[], and \f[C]papersize\f[] will affect the output.
|
||||
\f[C]margin\-bottom\f[], \f[C]footer\-html\f[], \f[C]header\-html\f[]
|
||||
and \f[C]papersize\f[] will affect the output.
|
||||
.PP
|
||||
To debug the PDF creation, it can be useful to look at the intermediate
|
||||
representation: instead of \f[C]\-o\ test.pdf\f[], use for example
|
||||
|
@ -228,69 +216,184 @@ pandoc\ \-f\ html\ \-t\ markdown\ \-\-request\-header\ User\-Agent:"Mozilla/5.0"
|
|||
.TP
|
||||
.B \f[C]\-f\f[] \f[I]FORMAT\f[], \f[C]\-r\f[] \f[I]FORMAT\f[], \f[C]\-\-from=\f[]\f[I]FORMAT\f[], \f[C]\-\-read=\f[]\f[I]FORMAT\f[]
|
||||
Specify input format.
|
||||
\f[I]FORMAT\f[] can be \f[C]native\f[] (native Haskell), \f[C]json\f[]
|
||||
(JSON version of native AST), \f[C]markdown\f[] (pandoc\[aq]s extended
|
||||
Markdown), \f[C]markdown_strict\f[] (original unextended Markdown),
|
||||
\f[C]markdown_phpextra\f[] (PHP Markdown Extra), \f[C]markdown_mmd\f[]
|
||||
(MultiMarkdown), \f[C]gfm\f[] (GitHub\-Flavored Markdown),
|
||||
\f[C]commonmark\f[] (CommonMark Markdown), \f[C]textile\f[] (Textile),
|
||||
\f[C]rst\f[] (reStructuredText), \f[C]html\f[] (HTML), \f[C]docbook\f[]
|
||||
(DocBook), \f[C]t2t\f[] (txt2tags), \f[C]docx\f[] (docx), \f[C]odt\f[]
|
||||
(ODT), \f[C]epub\f[] (EPUB), \f[C]opml\f[] (OPML), \f[C]org\f[] (Emacs
|
||||
Org mode), \f[C]mediawiki\f[] (MediaWiki markup), \f[C]twiki\f[] (TWiki
|
||||
markup), \f[C]tikiwiki\f[] (TikiWiki markup), \f[C]creole\f[] (Creole
|
||||
1.0), \f[C]haddock\f[] (Haddock markup), or \f[C]latex\f[] (LaTeX).
|
||||
(\f[C]markdown_github\f[] provides deprecated and less accurate support
|
||||
for Github\-Flavored Markdown; please use \f[C]gfm\f[] instead, unless
|
||||
you need to use extensions other than \f[C]smart\f[].) Extensions can be
|
||||
individually enabled or disabled by appending \f[C]+EXTENSION\f[] or
|
||||
\f[C]\-EXTENSION\f[] to the format name.
|
||||
\f[I]FORMAT\f[] can be:
|
||||
.RS
|
||||
.IP \[bu] 2
|
||||
\f[C]commonmark\f[] (CommonMark Markdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]creole\f[] (Creole 1.0)
|
||||
.IP \[bu] 2
|
||||
\f[C]docbook\f[] (DocBook)
|
||||
.IP \[bu] 2
|
||||
\f[C]docx\f[] (Word docx)
|
||||
.IP \[bu] 2
|
||||
\f[C]epub\f[] (EPUB)
|
||||
.IP \[bu] 2
|
||||
\f[C]fb2\f[] (FictionBook2 e\-book)
|
||||
.IP \[bu] 2
|
||||
\f[C]gfm\f[] (GitHub\-Flavored Markdown), or \f[C]markdown_github\f[],
|
||||
which provides deprecated and less accurate support for Github\-Flavored
|
||||
Markdown; please use \f[C]gfm\f[] instead, unless you need to use
|
||||
extensions other than \f[C]smart\f[].
|
||||
.IP \[bu] 2
|
||||
\f[C]haddock\f[] (Haddock markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]html\f[] (HTML)
|
||||
.IP \[bu] 2
|
||||
\f[C]jats\f[] (JATS XML)
|
||||
.IP \[bu] 2
|
||||
\f[C]json\f[] (JSON version of native AST)
|
||||
.IP \[bu] 2
|
||||
\f[C]latex\f[] (LaTeX)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown\f[] (Pandoc\[aq]s Markdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown_mmd\f[] (MultiMarkdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown_phpextra\f[] (PHP Markdown Extra)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown_strict\f[] (original unextended Markdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]mediawiki\f[] (MediaWiki markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]muse\f[] (Muse)
|
||||
.IP \[bu] 2
|
||||
\f[C]native\f[] (native Haskell)
|
||||
.IP \[bu] 2
|
||||
\f[C]odt\f[] (ODT)
|
||||
.IP \[bu] 2
|
||||
\f[C]opml\f[] (OPML)
|
||||
.IP \[bu] 2
|
||||
\f[C]org\f[] (Emacs Org mode)
|
||||
.IP \[bu] 2
|
||||
\f[C]rst\f[] (reStructuredText)
|
||||
.IP \[bu] 2
|
||||
\f[C]t2t\f[] (txt2tags)
|
||||
.IP \[bu] 2
|
||||
\f[C]textile\f[] (Textile)
|
||||
.IP \[bu] 2
|
||||
\f[C]tikiwiki\f[] (TikiWiki markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]twiki\f[] (TWiki markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]vimwiki\f[] (Vimwiki)
|
||||
.PP
|
||||
Extensions can be individually enabled or disabled by appending
|
||||
\f[C]+EXTENSION\f[] or \f[C]\-EXTENSION\f[] to the format name.
|
||||
See Extensions below, for a list of extensions and their names.
|
||||
See \f[C]\-\-list\-input\-formats\f[] and \f[C]\-\-list\-extensions\f[],
|
||||
below.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-t\f[] \f[I]FORMAT\f[], \f[C]\-w\f[] \f[I]FORMAT\f[], \f[C]\-\-to=\f[]\f[I]FORMAT\f[], \f[C]\-\-write=\f[]\f[I]FORMAT\f[]
|
||||
Specify output format.
|
||||
\f[I]FORMAT\f[] can be \f[C]native\f[] (native Haskell), \f[C]json\f[]
|
||||
(JSON version of native AST), \f[C]plain\f[] (plain text),
|
||||
\f[C]markdown\f[] (pandoc\[aq]s extended Markdown),
|
||||
\f[C]markdown_strict\f[] (original unextended Markdown),
|
||||
\f[C]markdown_phpextra\f[] (PHP Markdown Extra), \f[C]markdown_mmd\f[]
|
||||
(MultiMarkdown), \f[C]gfm\f[] (GitHub\-Flavored Markdown),
|
||||
\f[C]commonmark\f[] (CommonMark Markdown), \f[C]rst\f[]
|
||||
(reStructuredText), \f[C]html4\f[] (XHTML 1.0 Transitional),
|
||||
\f[C]html\f[] or \f[C]html5\f[] (HTML5/XHTML polyglot markup),
|
||||
\f[C]latex\f[] (LaTeX), \f[C]beamer\f[] (LaTeX beamer slide show),
|
||||
\f[C]context\f[] (ConTeXt), \f[C]man\f[] (groff man), \f[C]mediawiki\f[]
|
||||
(MediaWiki markup), \f[C]dokuwiki\f[] (DokuWiki markup),
|
||||
\f[C]zimwiki\f[] (ZimWiki markup), \f[C]textile\f[] (Textile),
|
||||
\f[C]org\f[] (Emacs Org mode), \f[C]texinfo\f[] (GNU Texinfo),
|
||||
\f[C]opml\f[] (OPML), \f[C]docbook\f[] or \f[C]docbook4\f[] (DocBook 4),
|
||||
\f[C]docbook5\f[] (DocBook 5), \f[C]jats\f[] (JATS XML),
|
||||
\f[C]opendocument\f[] (OpenDocument), \f[C]odt\f[] (OpenOffice text
|
||||
document), \f[C]docx\f[] (Word docx), \f[C]haddock\f[] (Haddock markup),
|
||||
\f[C]rtf\f[] (rich text format), \f[C]epub2\f[] (EPUB v2 book),
|
||||
\f[C]epub\f[] or \f[C]epub3\f[] (EPUB v3), \f[C]fb2\f[] (FictionBook2
|
||||
e\-book), \f[C]asciidoc\f[] (AsciiDoc), \f[C]icml\f[] (InDesign ICML),
|
||||
\f[C]tei\f[] (TEI Simple), \f[C]slidy\f[] (Slidy HTML and JavaScript
|
||||
slide show), \f[C]slideous\f[] (Slideous HTML and JavaScript slide
|
||||
show), \f[C]dzslides\f[] (DZSlides HTML5 + JavaScript slide show),
|
||||
\f[C]revealjs\f[] (reveal.js HTML5 + JavaScript slide show), \f[C]s5\f[]
|
||||
(S5 HTML and JavaScript slide show), \f[C]pptx\f[] (PowerPoint slide
|
||||
show) or the path of a custom lua writer (see Custom writers, below).
|
||||
(\f[C]markdown_github\f[] provides deprecated and less accurate support
|
||||
for Github\-Flavored Markdown; please use \f[C]gfm\f[] instead, unless
|
||||
you use extensions that do not work with \f[C]gfm\f[].) Note that
|
||||
\f[C]odt\f[], \f[C]docx\f[], and \f[C]epub\f[] output will not be
|
||||
directed to \f[I]stdout\f[] unless forced with \f[C]\-o\ \-\f[].
|
||||
\f[I]FORMAT\f[] can be:
|
||||
.RS
|
||||
.IP \[bu] 2
|
||||
\f[C]asciidoc\f[] (AsciiDoc)
|
||||
.IP \[bu] 2
|
||||
\f[C]beamer\f[] (LaTeX beamer slide show)
|
||||
.IP \[bu] 2
|
||||
\f[C]commonmark\f[] (CommonMark Markdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]context\f[] (ConTeXt)
|
||||
.IP \[bu] 2
|
||||
\f[C]docbook\f[] or \f[C]docbook4\f[] (DocBook 4)
|
||||
.IP \[bu] 2
|
||||
\f[C]docbook5\f[] (DocBook 5)
|
||||
.IP \[bu] 2
|
||||
\f[C]docx\f[] (Word docx)
|
||||
.IP \[bu] 2
|
||||
\f[C]dokuwiki\f[] (DokuWiki markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]epub\f[] or \f[C]epub3\f[] (EPUB v3 book)
|
||||
.IP \[bu] 2
|
||||
\f[C]epub2\f[] (EPUB v2)
|
||||
.IP \[bu] 2
|
||||
\f[C]fb2\f[] (FictionBook2 e\-book)
|
||||
.IP \[bu] 2
|
||||
\f[C]gfm\f[] (GitHub\-Flavored Markdown), or \f[C]markdown_github\f[],
|
||||
which provides deprecated and less accurate support for Github\-Flavored
|
||||
Markdown; please use \f[C]gfm\f[] instead, unless you use extensions
|
||||
that do not work with \f[C]gfm\f[].
|
||||
.IP \[bu] 2
|
||||
\f[C]haddock\f[] (Haddock markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]html\f[] or \f[C]html5\f[] (HTML, i.e.
|
||||
HTML5/XHTML polyglot markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]html4\f[] (XHTML 1.0 Transitional)
|
||||
.IP \[bu] 2
|
||||
\f[C]icml\f[] (InDesign ICML)
|
||||
.IP \[bu] 2
|
||||
\f[C]jats\f[] (JATS XML)
|
||||
.IP \[bu] 2
|
||||
\f[C]json\f[] (JSON version of native AST)
|
||||
.IP \[bu] 2
|
||||
\f[C]latex\f[] (LaTeX)
|
||||
.IP \[bu] 2
|
||||
\f[C]man\f[] (groff man)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown\f[] (Pandoc\[aq]s Markdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown_mmd\f[] (MultiMarkdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown_phpextra\f[] (PHP Markdown Extra)
|
||||
.IP \[bu] 2
|
||||
\f[C]markdown_strict\f[] (original unextended Markdown)
|
||||
.IP \[bu] 2
|
||||
\f[C]mediawiki\f[] (MediaWiki markup)
|
||||
.IP \[bu] 2
|
||||
\f[C]ms\f[] (groff ms)
|
||||
.IP \[bu] 2
|
||||
\f[C]muse\f[] (Muse),
|
||||
.IP \[bu] 2
|
||||
\f[C]native\f[] (native Haskell),
|
||||
.IP \[bu] 2
|
||||
\f[C]odt\f[] (OpenOffice text document)
|
||||
.IP \[bu] 2
|
||||
\f[C]opml\f[] (OPML)
|
||||
.IP \[bu] 2
|
||||
\f[C]opendocument\f[] (OpenDocument)
|
||||
.IP \[bu] 2
|
||||
\f[C]org\f[] (Emacs Org mode)
|
||||
.IP \[bu] 2
|
||||
\f[C]plain\f[] (plain text),
|
||||
.IP \[bu] 2
|
||||
\f[C]pptx\f[] (PowerPoint slide show)
|
||||
.IP \[bu] 2
|
||||
\f[C]rst\f[] (reStructuredText)
|
||||
.IP \[bu] 2
|
||||
\f[C]rtf\f[] (Rich Text Format)
|
||||
.IP \[bu] 2
|
||||
\f[C]texinfo\f[] (GNU Texinfo)
|
||||
.IP \[bu] 2
|
||||
\f[C]textile\f[] (Textile)
|
||||
.IP \[bu] 2
|
||||
\f[C]slideous\f[] (Slideous HTML and JavaScript slide show)
|
||||
.IP \[bu] 2
|
||||
\f[C]slidy\f[] (Slidy HTML and JavaScript slide show)
|
||||
.IP \[bu] 2
|
||||
\f[C]dzslides\f[] (DZSlides HTML5 + JavaScript slide show),
|
||||
.IP \[bu] 2
|
||||
\f[C]revealjs\f[] (reveal.js HTML5 + JavaScript slide show)
|
||||
.IP \[bu] 2
|
||||
\f[C]s5\f[] (S5 HTML and JavaScript slide show)
|
||||
.IP \[bu] 2
|
||||
\f[C]tei\f[] (TEI Simple)
|
||||
.IP \[bu] 2
|
||||
\f[C]zimwiki\f[] (ZimWiki markup)
|
||||
.IP \[bu] 2
|
||||
the path of a custom lua writer, see Custom writers below
|
||||
.PP
|
||||
Note that \f[C]odt\f[], \f[C]docx\f[], and \f[C]epub\f[] output will not
|
||||
be directed to \f[I]stdout\f[] unless forced with \f[C]\-o\ \-\f[].
|
||||
.PP
|
||||
Extensions can be individually enabled or disabled by appending
|
||||
\f[C]+EXTENSION\f[] or \f[C]\-EXTENSION\f[] to the format name.
|
||||
See Extensions below, for a list of extensions and their names.
|
||||
See \f[C]\-\-list\-output\-formats\f[] and
|
||||
\f[C]\-\-list\-extensions\f[], below.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-o\f[] \f[I]FILE\f[], \f[C]\-\-output=\f[]\f[I]FILE\f[]
|
||||
|
@ -531,19 +634,29 @@ end
|
|||
return\ {{Str\ =\ expand_hello_world}}
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
In order of preference, pandoc will look for lua filters in
|
||||
.IP "1." 3
|
||||
a specified full or relative path (executable or non\-executable)
|
||||
.IP "2." 3
|
||||
\f[C]$DATADIR/filters\f[] (executable or non\-executable) where
|
||||
\f[C]$DATADIR\f[] is the user data directory (see
|
||||
\f[C]\-\-data\-dir\f[], above).
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-M\f[] \f[I]KEY\f[][\f[C]=\f[]\f[I]VAL\f[]], \f[C]\-\-metadata=\f[]\f[I]KEY\f[][\f[C]:\f[]\f[I]VAL\f[]]
|
||||
Set the metadata field \f[I]KEY\f[] to the value \f[I]VAL\f[].
|
||||
A value specified on the command line overrides a value specified in the
|
||||
document.
|
||||
document using [YAML metadata
|
||||
blocks][Extension:\f[C]yaml_metadata_block\f[]].
|
||||
Values will be parsed as YAML boolean or string values.
|
||||
If no value is specified, the value will be treated as Boolean true.
|
||||
Like \f[C]\-\-variable\f[], \f[C]\-\-metadata\f[] causes template
|
||||
variables to be set.
|
||||
But unlike \f[C]\-\-variable\f[], \f[C]\-\-metadata\f[] affects the
|
||||
metadata of the underlying document (which is accessible from filters
|
||||
and may be printed in some output formats).
|
||||
and may be printed in some output formats) and metadata values will be
|
||||
escaped when inserted into the template.
|
||||
.RS
|
||||
.RE
|
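As an aside (not part of this patch): at the AST level, --metadata corresponds roughly to setting a field on the document's Meta, sketched here with pandoc-types' Text.Pandoc.Builder; the field name and value are invented for illustration.

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- Sketch: -M author="Jane Doe" is, in effect, a setMeta on the parsed
-- document, visible to filters and to writers that print metadata.
import Text.Pandoc.Builder

addAuthor :: Pandoc -> Pandoc
addAuthor = setMeta "author" ("Jane Doe" :: Inlines)
```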
||||
.TP
|
||||
|
@ -821,6 +934,7 @@ line, or when resources used in a document must be downloaded).
|
|||
Produce a standalone HTML file with no external dependencies, using
|
||||
\f[C]data:\f[] URIs to incorporate the contents of linked scripts,
|
||||
stylesheets, images, and videos.
|
||||
Implies \f[C]\-\-standalone\f[].
|
||||
The resulting file should be "self\-contained," in the sense that it
|
||||
needs no external files and no net access to be displayed properly by a
|
||||
browser.
|
||||
|
@ -850,8 +964,9 @@ Use \f[C]<q>\f[] tags for quotes in HTML.
|
|||
.TP
|
||||
.B \f[C]\-\-ascii\f[]
|
||||
Use only ASCII characters in output.
|
||||
Currently supported only for HTML and DocBook output (which uses
|
||||
numerical entities instead of UTF\-8 when this option is selected).
|
||||
Currently supported for XML and HTML formats (which use numerical
|
||||
entities instead of UTF\-8 when this option is selected) and for groff
|
||||
ms and man (which use hexadecimal escapes).
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
|
@ -1044,6 +1159,34 @@ default \f[C]reference.odt\f[]:
|
|||
Then open \f[C]custom\-reference.odt\f[] in LibreOffice, modify the
|
||||
styles as you wish, and save the file.
|
||||
.RE
|
||||
.TP
|
||||
.B PowerPoint
|
||||
Any template included with a recent install of Microsoft PowerPoint
|
||||
(either with \f[C]\&.pptx\f[] or \f[C]\&.potx\f[] extension) should
|
||||
work, as will most templates derived from these.
|
||||
.RS
|
||||
.PP
|
||||
The specific requirement is that the template should contain the
|
||||
following four layouts as its first four layouts:
|
||||
.IP "1." 3
|
||||
Title Slide
|
||||
.IP "2." 3
|
||||
Title and Content
|
||||
.IP "3." 3
|
||||
Section Header
|
||||
.IP "4." 3
|
||||
Two Content
|
||||
.PP
|
||||
All templates included with a recent version of MS PowerPoint will fit
|
||||
these criteria.
|
||||
(You can click on \f[C]Layout\f[] under the \f[C]Home\f[] menu to
|
||||
check.)
|
||||
.PP
|
||||
You can also modify the default \f[C]reference.pptx\f[]: first run
|
||||
\f[C]pandoc\ \-\-print\-default\-data\-file\ reference.pptx\ >\ custom\-reference.pptx\f[],
|
||||
and then modify \f[C]custom\-reference.pptx\f[] in MS PowerPoint (pandoc
|
||||
will use the first four layout slides, as mentioned above).
|
||||
.RE
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-\-epub\-cover\-image=\f[]\f[I]FILE\f[]
|
||||
|
@ -1270,60 +1413,6 @@ inserted.
|
|||
Note that this option does not imply \f[C]\-\-katex\f[].
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-m\f[] [\f[I]URL\f[]], \f[C]\-\-latexmathml\f[][\f[C]=\f[]\f[I]URL\f[]]
|
||||
\f[I]Deprecated.\f[] Use the LaTeXMathML script to display embedded TeX
|
||||
math in HTML output.
|
||||
TeX math will be displayed between \f[C]$\f[] or \f[C]$$\f[] characters
|
||||
and put in \f[C]<span>\f[] tags with class \f[C]LaTeX\f[].
|
||||
The LaTeXMathML JavaScript will then change it to MathML.
|
||||
Note that currently only Firefox and Safari (and select e\-book readers)
|
||||
natively support MathML.
|
||||
To insert a link the \f[C]LaTeXMathML.js\f[] script, provide a
|
||||
\f[I]URL\f[].
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-\-jsmath\f[][\f[C]=\f[]\f[I]URL\f[]]
|
||||
\f[I]Deprecated.\f[] Use jsMath (the predecessor of MathJax) to display
|
||||
embedded TeX math in HTML output.
|
||||
TeX math will be put inside \f[C]<span>\f[] tags (for inline math) or
|
||||
\f[C]<div>\f[] tags (for display math) with class \f[C]math\f[] and
|
||||
rendered by the jsMath script.
|
||||
The \f[I]URL\f[] should point to the script (e.g.
|
||||
\f[C]jsMath/easy/load.js\f[]); if provided, it will be linked to in the
|
||||
header of standalone HTML documents.
|
||||
If a \f[I]URL\f[] is not provided, no link to the jsMath load script
|
||||
will be inserted; it is then up to the author to provide such a link in
|
||||
the HTML template.
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-\-gladtex\f[]
|
||||
\f[I]Deprecated.\f[] Enclose TeX math in \f[C]<eq>\f[] tags in HTML
|
||||
output.
|
||||
The resulting HTML can then be processed by gladTeX to produce images of
|
||||
the typeset formulas and an HTML file with links to these images.
|
||||
So, the procedure is:
|
||||
.RS
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
pandoc\ \-s\ \-\-gladtex\ input.md\ \-o\ myfile.htex
|
||||
gladtex\ \-d\ myfile\-images\ myfile.htex
|
||||
#\ produces\ myfile.html\ and\ images\ in\ myfile\-images
|
||||
\f[]
|
||||
.fi
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]\-\-mimetex\f[][\f[C]=\f[]\f[I]URL\f[]]
|
||||
\f[I]Deprecated.\f[] Render TeX math using the mimeTeX CGI script, which
|
||||
generates an image for each TeX formula.
|
||||
This should work in all browsers.
|
||||
If \f[I]URL\f[] is not specified, it is assumed that the script is at
|
||||
\f[C]/cgi\-bin/mimetex.cgi\f[].
|
||||
.RS
|
||||
.RE
|
||||
.SS Options for wrapper scripts
|
||||
.TP
|
||||
.B \f[C]\-\-dump\-args\f[]
|
||||
|
@ -1389,23 +1478,25 @@ template.
|
|||
For \f[C]pdf\f[] output, customize the \f[C]default.latex\f[] template
|
||||
(or the \f[C]default.context\f[] template, if you use
|
||||
\f[C]\-t\ context\f[], or the \f[C]default.ms\f[] template, if you use
|
||||
\f[C]\-t\ ms\f[], or the \f[C]default.html5\f[] template, if you use
|
||||
\f[C]\-t\ html5\f[]).
|
||||
\f[C]\-t\ ms\f[], or the \f[C]default.html\f[] template, if you use
|
||||
\f[C]\-t\ html\f[]).
|
||||
.IP \[bu] 2
|
||||
\f[C]docx\f[] has no template (however, you can use
|
||||
\f[C]\-\-reference\-doc\f[] to customize the output).
|
||||
.PP
|
||||
Templates contain \f[I]variables\f[], which allow for the inclusion of
|
||||
arbitrary information at any point in the file.
|
||||
Variables may be set within the document using YAML metadata blocks.
|
||||
They may also be set at the command line using the
|
||||
\f[C]\-V/\-\-variable\f[] option: variables set in this way override
|
||||
metadata fields with the same name.
|
||||
They may be set at the command line using the \f[C]\-V/\-\-variable\f[]
|
||||
option.
|
||||
If a variable is not set, pandoc will look for the key in the
|
||||
document\[aq]s metadata \[en] which can be set using either [YAML
|
||||
metadata blocks][Extension:\f[C]yaml_metadata_block\f[]] or with the
|
||||
\f[C]\-\-metadata\f[] option.
|
||||
.SS Variables set by pandoc
|
||||
.PP
|
||||
Some variables are set automatically by pandoc.
|
||||
These vary somewhat depending on the output format, but include metadata
|
||||
fields as well as the following:
|
||||
These vary somewhat depending on the output format, but include the
|
||||
following:
|
||||
.TP
|
||||
.B \f[C]sourcefile\f[], \f[C]outputfile\f[]
|
||||
source and destination filenames, as given on the command line.
|
||||
|
@ -1648,6 +1739,11 @@ option for document class, e.g.
|
|||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]beameroption\f[]
|
||||
In beamer, add extra beamer option with \f[C]\\setbeameroption{}\f[]
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]geometry\f[]
|
||||
option for \f[C]geometry\f[] package, e.g.
|
||||
\f[C]margin=1in\f[]; may be repeated for multiple options
|
||||
|
@ -1891,6 +1987,16 @@ include table of contents (can also be set using
|
|||
include list of figures, list of tables
|
||||
.RS
|
||||
.RE
|
||||
.TP
|
||||
.B \f[C]pdfa\f[]
|
||||
adds to the preamble the setup necessary to generate PDF/A\-1b:2005.
|
||||
To successfully generate PDF/A the required ICC color profiles have to
|
||||
be available and the content and all included files (such as images)
|
||||
have to be standard conforming.
|
||||
The ICC profiles can be obtained from ConTeXt ICC Profiles.
|
||||
See also ConTeXt PDFA for more details.
|
||||
.RS
|
||||
.RE
|
||||
.SS Variables for man pages
|
||||
.TP
|
||||
.B \f[C]section\f[]
|
||||
|
@ -2319,7 +2425,7 @@ pandoc\ \-f\ markdown+lhs\ \-t\ html+lhs
|
|||
writes HTML with the Haskell code in bird tracks, so it can be copied
|
||||
and pasted as literate Haskell source.
|
||||
.PP
|
||||
Note that GHC expects the bird tracks in the first column, so indentend
|
||||
Note that GHC expects the bird tracks in the first column, so indented
|
||||
literate code blocks (e.g.
|
||||
inside an itemized environment) will not be picked up by the Haskell
|
||||
compiler.
|
||||
|
@ -2337,8 +2443,19 @@ This extension can be enabled/disabled for the following formats:
|
|||
.RE
|
||||
.TP
|
||||
.B output formats
|
||||
\f[C]markdown\f[], \f[C]docx\f[], \f[C]odt\f[], \f[C]opendocument\f[],
|
||||
\f[C]html\f[]
|
||||
\f[C]docx\f[], \f[C]odt\f[], \f[C]opendocument\f[], \f[C]html\f[]
|
||||
.RS
|
||||
.RE
|
||||
.SS Extension: \f[C]styles\f[]
|
||||
.PP
|
||||
Read all docx styles as divs (for paragraph styles) and spans (for
|
||||
character styles) regardless of whether pandoc understands the meaning
|
||||
of these styles.
|
||||
This can be used with docx custom styles.
|
||||
Disabled by default.
|
||||
.TP
|
||||
.B input formats
|
||||
\f[C]docx\f[]
|
||||
.RS
|
||||
.RE
|
||||
.SS Extension: \f[C]amuse\f[]
|
||||
|
@ -2741,7 +2858,7 @@ Here \f[C]mycode\f[] is an identifier, \f[C]haskell\f[] and
|
|||
with value \f[C]100\f[].
|
||||
Some output formats can use this information to do syntax highlighting.
|
||||
Currently, the only output formats that use this information are HTML,
|
||||
LaTeX, Docx, and Ms.
|
||||
LaTeX, Docx, Ms, and PowerPoint.
|
||||
If highlighting is supported for your output format and language, then
|
||||
the code block above will appear highlighted, with numbered lines.
|
||||
(To see which languages are supported, type
|
||||
|
@ -3720,7 +3837,8 @@ For example:
|
|||
.nf
|
||||
\f[C]
|
||||
header\-includes:
|
||||
\-\ ```{=latex}
|
||||
\-\ |
|
||||
\ \ ```{=latex}
|
||||
\ \ \\let\\oldsection\\section
|
||||
\ \ \\renewcommand{\\section}[1]{\\clearpage\\oldsection{#1}}
|
||||
\ \ ```
|
||||
|
@ -4142,9 +4260,29 @@ This\ is\ `<a>html</a>`{=html}
|
|||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
This can be useful to insert raw xml into \f[C]docx\f[] documents, e.g.
|
||||
a pagebreak:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
```{=openxml}
|
||||
<w:p>
|
||||
\ \ <w:r>
|
||||
\ \ \ \ <w:br\ w:type="page"/>
|
||||
\ \ </w:r>
|
||||
</w:p>
|
||||
```
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
The format name should match the target format name (see
|
||||
\f[C]\-t/\-\-to\f[], above, for a list, or use
|
||||
\f[C]pandoc\ \-\-list\-output\-formats\f[]).
|
||||
Use \f[C]openxml\f[] for \f[C]docx\f[] output, \f[C]opendocument\f[] for
|
||||
\f[C]odt\f[] output, \f[C]html5\f[] for \f[C]epub3\f[] output,
|
||||
\f[C]html4\f[] for \f[C]epub2\f[] output, and \f[C]latex\f[],
|
||||
\f[C]beamer\f[], \f[C]ms\f[], or \f[C]html5\f[] for \f[C]pdf\f[] output
|
||||
(depending on what you use for \f[C]\-\-pdf\-engine\f[]).
|
||||
.PP
|
||||
This extension presupposes that the relevant kind of inline code or
|
||||
fenced code block is enabled.
|
||||
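The same page break can also be produced programmatically; a minimal sketch (not part of this patch) using pandoc-types' Text.Pandoc.Builder, where the "openxml" tag plays the role of the {=openxml} attribute above:

```haskell
{-# LANGUAGE OverloadedStrings #-}
-- Sketch: a raw OpenXML block only takes effect in docx output; other
-- writers drop it, matching the behavior described for raw attributes.
import Text.Pandoc.Builder

pageBreak :: Blocks
pageBreak = rawBlock "openxml"
  "<w:p><w:r><w:br w:type=\"page\"/></w:r></w:p>"

sample :: Pandoc
sample = doc $ mconcat [para "Before the break.", pageBreak, para "After the break."]
```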
|
@ -5204,9 +5342,42 @@ reveal.js.
|
|||
By default, these writers produce lists that display "all at once." If
|
||||
you want your lists to display incrementally (one item at a time), use
|
||||
the \f[C]\-i\f[] option.
|
||||
If you want a particular list to depart from the default (that is, to
|
||||
If you want a particular list to depart from the default, put it in a
|
||||
\f[C]div\f[] block with class \f[C]incremental\f[] or
|
||||
\f[C]nonincremental\f[].
|
||||
So, for example, using the \f[C]fenced\ div\f[] syntax, the following
|
||||
would be incremental regardless of the document default:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
:::\ incremental
|
||||
|
||||
\-\ Eat\ spaghetti
|
||||
\-\ Drink\ wine
|
||||
|
||||
:::
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
or
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
:::\ nonincremental
|
||||
|
||||
\-\ Eat\ spaghetti
|
||||
\-\ Drink\ wine
|
||||
|
||||
:::
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
While using \f[C]incremental\f[] and \f[C]nonincremental\f[] divs is
|
||||
the recommended method of setting incremental lists on a per\-case
|
||||
basis, an older method is also supported: putting lists inside a
|
||||
blockquote will depart from the document default (that is, it will
|
||||
display incrementally without the \f[C]\-i\f[] option and all at once
|
||||
with the \f[C]\-i\f[] option), put it in a block quote:
|
||||
with the \f[C]\-i\f[] option):
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
|
@ -5215,7 +5386,7 @@ with the \f[C]\-i\f[] option), put it in a block quote:
|
|||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
In this way incremental and nonincremental lists can be mixed in a
|
||||
Both methods allow incremental and nonincremental lists to be mixed in a
|
||||
single document.
|
||||
.SS Inserting pauses
|
||||
.PP
|
||||
|
@ -5286,7 +5457,7 @@ This is recommended especially for bibliographies:
|
|||
.fi
|
||||
.SS Speaker notes
|
||||
.PP
|
||||
reveal.js has good support for speaker notes.
|
||||
Speaker notes are supported in reveal.js and PowerPoint (pptx) output.
|
||||
You can add notes to your Markdown document thus:
|
||||
.IP
|
||||
.nf
|
||||
|
@ -5302,8 +5473,11 @@ This\ is\ my\ note.
|
|||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
To show the notes window, press \f[C]s\f[] while viewing the
|
||||
presentation.
|
||||
To show the notes window in reveal.js, press \f[C]s\f[] while viewing
|
||||
the presentation.
|
||||
Speaker notes in PowerPoint will be available, as usual, in handouts and
|
||||
presenter view.
|
||||
.PP
|
||||
Notes are not yet supported for other slide formats, but the notes will
|
||||
not appear on the slides themselves.
|
||||
.SS Columns
|
||||
|
@ -5342,6 +5516,49 @@ All of the other frame attributes described in Section 8.1 of the Beamer
|
|||
User\[aq]s Guide may also be used: \f[C]allowdisplaybreaks\f[],
|
||||
\f[C]allowframebreaks\f[], \f[C]b\f[], \f[C]c\f[], \f[C]t\f[],
|
||||
\f[C]environment\f[], \f[C]label\f[], \f[C]plain\f[], \f[C]shrink\f[].
|
||||
.SS Background in reveal.js
|
||||
.PP
|
||||
Background images can be added to self\-contained reveal.js slideshows.
|
||||
.PP
|
||||
For the same image on every slide, use the reveal.js configuration
|
||||
option \f[C]parallaxBackgroundImage\f[] either in the YAML metadata
|
||||
block or as a command\-line variable.
|
||||
You can also set \f[C]parallaxBackgroundHorizontal\f[] and
|
||||
\f[C]parallaxBackgroundVertical\f[] the same way and must also set
|
||||
\f[C]parallaxBackgroundSize\f[] to have your values take effect.
|
||||
.PP
|
||||
To set an image for a particular slide, add
|
||||
\f[C]{data\-background\-image="/path/to/image"}\f[] to the first
|
||||
slide\-level header on the slide (which may even be empty).
|
||||
.PP
|
||||
In reveal.js\[aq]s overview mode, the parallaxBackgroundImage will show
|
||||
up only on the first slide.
|
||||
.PP
|
||||
Other background settings also work on individual slides, including
|
||||
\f[C]data\-background\-size\f[], \f[C]data\-background\-repeat\f[],
|
||||
\f[C]data\-background\-color\f[], \f[C]data\-transition\f[], and
|
||||
\f[C]data\-transition\-speed\f[].
|
||||
.PP
|
||||
See the reveal.js documentation for more details.
|
||||
.PP
|
||||
For example:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
\-\-\-
|
||||
title:\ My\ Slideshow
|
||||
parallaxBackgroundImage:\ /path/to/my/background_image.png
|
||||
\-\-\-
|
||||
|
||||
##\ Slide\ One
|
||||
|
||||
Slide\ 1\ has\ background_image.png\ as\ its\ background.
|
||||
|
||||
##\ {data\-background\-image="/path/to/special_image.jpg"}
|
||||
|
||||
Slide\ 2\ has\ a\ special\ image\ for\ its\ background,\ even\ though\ the\ header\ has\ no\ content.
|
||||
\f[]
|
||||
.fi
|
||||
.SH CREATING EPUBS WITH PANDOC
|
||||
.SS EPUB Metadata
|
||||
.PP
|
||||
|
@ -5532,7 +5749,62 @@ To see a list of highlight styles, type
|
|||
\f[C]pandoc\ \-\-list\-highlight\-styles\f[].
|
||||
.PP
|
||||
To disable highlighting, use the \f[C]\-\-no\-highlight\f[] option.
|
||||
.SH CUSTOM STYLES IN DOCX OUTPUT
|
||||
.SH CUSTOM STYLES IN DOCX
|
||||
.SS Input
|
||||
.PP
|
||||
The docx reader, by default, only reads those styles that it can convert
|
||||
into pandoc elements, either by direct conversion or interpreting the
|
||||
derivation of the input document\[aq]s styles.
|
||||
.PP
|
||||
By enabling the \f[C]styles\f[] extension in the docx reader
|
||||
(\f[C]\-f\ docx+styles\f[]), you can produce output that maintains the
|
||||
styles of the input document, using the \f[C]custom\-style\f[] class.
|
||||
Paragraph styles are interpreted as divs, while character styles are
|
||||
interpreted as spans.
|
||||
.PP
|
||||
For example, using the \f[C]custom\-style\-reference.docx\f[] file in
|
||||
the test directory, we have the following different outputs:
|
||||
.PP
|
||||
Without the \f[C]+styles\f[] extension:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
$\ pandoc\ test/docx/custom\-style\-reference.docx\ \-f\ docx\ \-t\ markdown
|
||||
This\ is\ some\ text.
|
||||
|
||||
This\ is\ text\ with\ an\ *emphasized*\ text\ style.\ And\ this\ is\ text\ with\ a
|
||||
**strengthened**\ text\ style.
|
||||
|
||||
>\ Here\ is\ a\ styled\ paragraph\ that\ inherits\ from\ Block\ Text.
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
And with the extension:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
$\ pandoc\ test/docx/custom\-style\-reference.docx\ \-f\ docx+styles\ \-t\ markdown
|
||||
|
||||
:::\ {custom\-style="FirstParagraph"}
|
||||
This\ is\ some\ text.
|
||||
:::
|
||||
|
||||
:::\ {custom\-style="BodyText"}
|
||||
This\ is\ text\ with\ an\ [emphasized]{custom\-style="Emphatic"}\ text\ style.
|
||||
And\ this\ is\ text\ with\ a\ [strengthened]{custom\-style="Strengthened"}
|
||||
text\ style.
|
||||
:::
|
||||
|
||||
:::\ {custom\-style="MyBlockStyle"}
|
||||
>\ Here\ is\ a\ styled\ paragraph\ that\ inherits\ from\ Block\ Text.
|
||||
:::
|
||||
\f[]
|
||||
.fi
|
||||
.PP
|
||||
With these custom styles, you can use your input document as a
|
||||
reference\-doc while creating docx output (see below), and maintain the
|
||||
same styles in your input and output files.
|
||||
.SS Output
|
||||
.PP
|
||||
By default, pandoc\[aq]s docx output applies a predefined set of styles
|
||||
for blocks such as paragraphs and block quotes, and uses largely default
|
||||
|
|
159
pandoc.cabal
|
@ -1,17 +1,17 @@
|
|||
name: pandoc
|
||||
version: 2.1.1
|
||||
version: 2.2
|
||||
cabal-version: >= 1.10
|
||||
build-type: Custom
|
||||
license: GPL
|
||||
license: GPL-2
|
||||
license-file: COPYING.md
|
||||
copyright: (c) 2006-2018 John MacFarlane
|
||||
author: John MacFarlane <jgm@berkeley.edu>
|
||||
maintainer: John MacFarlane <jgm@berkeley.edu>
|
||||
bug-reports: https://github.com/jgm/pandoc/issues
|
||||
stability: alpha
|
||||
homepage: http://pandoc.org
|
||||
homepage: https://pandoc.org
|
||||
category: Text
|
||||
tested-with: GHC == 7.10.3, GHC == 8.0.2, GHC == 8.2.2
|
||||
tested-with: GHC == 7.10.3, GHC == 8.0.2, GHC == 8.2.2, GHC == 8.4.1
|
||||
synopsis: Conversion between markup formats
|
||||
description: Pandoc is a Haskell library for converting from one markup
|
||||
format to another, and a command-line tool that uses
|
||||
|
@ -19,11 +19,11 @@ description: Pandoc is a Haskell library for converting from one markup
|
|||
(subsets of) HTML, reStructuredText, LaTeX, DocBook, JATS,
|
||||
MediaWiki markup, TWiki markup, TikiWiki markup, Creole 1.0,
|
||||
Haddock markup, OPML, Emacs Org-Mode, Emacs Muse, txt2tags,
|
||||
Vimwiki, Word Docx, ODT, and Textile, and it can write
|
||||
Markdown, reStructuredText, XHTML, HTML 5, LaTeX, ConTeXt,
|
||||
DocBook, JATS, OPML, TEI, OpenDocument, ODT, Word docx,
|
||||
RTF, MediaWiki, DokuWiki, ZimWiki, Textile, groff man,
|
||||
groff ms, plain text, Emacs Org-Mode, AsciiDoc,
|
||||
Vimwiki, Word Docx, ODT, EPUB, FictionBook2, and Textile,
|
||||
and it can write Markdown, reStructuredText, XHTML, HTML 5,
|
||||
LaTeX, ConTeXt, DocBook, JATS, OPML, TEI, OpenDocument,
|
||||
ODT, Word docx, RTF, MediaWiki, DokuWiki, ZimWiki, Textile,
|
||||
groff man, groff ms, plain text, Emacs Org-Mode, AsciiDoc,
|
||||
Haddock markup, EPUB (v2 and v3), FictionBook2, InDesign
|
||||
ICML, Muse, LaTeX beamer slides, PowerPoint, and several
|
||||
kinds of HTML/JavaScript slide shows (S5, Slidy, Slideous,
|
||||
|
@ -102,7 +102,6 @@ data-files:
|
|||
data/odt/META-INF/manifest.xml
|
||||
-- source files for reference.pptx
|
||||
data/pptx/_rels/.rels
|
||||
data/pptx/docProps/thumbnail.jpeg
|
||||
data/pptx/docProps/app.xml
|
||||
data/pptx/docProps/core.xml
|
||||
data/pptx/ppt/slideLayouts/_rels/slideLayout1.xml.rels
|
||||
|
@ -149,8 +148,6 @@ data-files:
|
|||
data/pptx/[Content_Types].xml
|
||||
-- stylesheet for EPUB writer
|
||||
data/epub.css
|
||||
-- data for LaTeXMathML writer
|
||||
data/LaTeXMathML.js
|
||||
-- data for dzslides writer
|
||||
data/dzslides/template.html
|
||||
-- default abbreviations file
|
||||
|
@ -304,6 +301,8 @@ extra-source-files:
|
|||
test/fb2/images-embedded.html
|
||||
test/fb2/images-embedded.fb2
|
||||
test/fb2/test-small.png
|
||||
test/fb2/reader/*.fb2
|
||||
test/fb2/reader/*.native
|
||||
test/fb2/test.jpg
|
||||
test/docx/*.docx
|
||||
test/docx/golden/*.docx
|
||||
|
@ -335,18 +334,10 @@ flag trypandoc
|
|||
Description: Build trypandoc cgi executable.
|
||||
Default: False
|
||||
|
||||
flag weigh-pandoc
|
||||
Description: Build weigh-pandoc to measure memory usage.
|
||||
Default: False
|
||||
|
||||
flag network-uri
|
||||
Description: Get Network.URI from the network-uri package
|
||||
Default: True
|
||||
|
||||
flag old-locale
|
||||
Description: Use old-locale and time < 1.5
|
||||
Default: False
|
||||
|
||||
custom-setup
|
||||
setup-depends: base, Cabal
|
||||
|
||||
|
@ -357,12 +348,13 @@ library
|
|||
unordered-containers >= 0.2 && < 0.3,
|
||||
parsec >= 3.1 && < 3.2,
|
||||
mtl >= 2.2 && < 2.3,
|
||||
exceptions >= 0.8 && < 0.9,
|
||||
exceptions >= 0.8 && < 0.11,
|
||||
filepath >= 1.1 && < 1.5,
|
||||
process >= 1.2.3 && < 1.7,
|
||||
directory >= 1 && < 1.4,
|
||||
bytestring >= 0.9 && < 0.11,
|
||||
text >= 0.11 && < 1.3,
|
||||
time >= 1.5 && < 1.10,
|
||||
safe >= 0.3 && < 0.4,
|
||||
zip-archive >= 0.2.3.4 && < 0.4,
|
||||
HTTP >= 4000.0.5 && < 4000.4,
|
||||
|
@ -370,25 +362,25 @@ library
|
|||
xml >= 1.3.12 && < 1.4,
|
||||
split >= 0.2 && < 0.3,
|
||||
random >= 1 && < 1.2,
|
||||
pandoc-types >= 1.17.3 && < 1.18,
|
||||
aeson >= 0.7 && < 1.3,
|
||||
pandoc-types >= 1.17.4.2 && < 1.18,
|
||||
aeson >= 0.7 && < 1.4,
|
||||
aeson-pretty >= 0.8.5 && < 0.9,
|
||||
tagsoup >= 0.14.3 && < 0.15,
|
||||
tagsoup >= 0.14.6 && < 0.15,
|
||||
base64-bytestring >= 0.1 && < 1.1,
|
||||
zlib >= 0.5 && < 0.7,
|
||||
skylighting >= 0.5.1 && < 0.7,
|
||||
skylighting >= 0.5.1 && < 0.8,
|
||||
data-default >= 0.4 && < 0.8,
|
||||
temporary >= 1.1 && < 1.3,
|
||||
temporary >= 1.1 && < 1.4,
|
||||
blaze-html >= 0.9 && < 0.10,
|
||||
blaze-markup >= 0.8 && < 0.9,
|
||||
yaml >= 0.8.8.2 && < 0.9,
|
||||
scientific >= 0.2 && < 0.4,
|
||||
vector >= 0.10 && < 0.13,
|
||||
hslua >= 0.9.5 && < 0.10,
|
||||
hslua >= 0.9.5 && < 0.9.6,
|
||||
hslua-module-text >= 0.1.2 && < 0.2,
|
||||
binary >= 0.5 && < 0.10,
|
||||
SHA >= 1.6 && < 1.7,
|
||||
haddock-library >= 1.1 && < 1.5,
|
||||
haddock-library >= 1.1 && < 1.6,
|
||||
deepseq >= 1.3 && < 1.5,
|
||||
JuicyPixels >= 3.1.6.1 && < 3.3,
|
||||
Glob >= 0.7 && < 0.10,
|
||||
|
@ -396,17 +388,18 @@ library
|
|||
doctemplates >= 0.2.1 && < 0.3,
|
||||
http-client >= 0.4.30 && < 0.6,
|
||||
http-client-tls >= 0.2.4 && < 0.4,
|
||||
http-types >= 0.8 && < 0.12,
|
||||
http-types >= 0.8 && < 0.13,
|
||||
case-insensitive >= 1.2 && < 1.3
|
||||
if impl(ghc < 8.0)
|
||||
build-depends: semigroups == 0.18.*
|
||||
if impl(ghc < 8.4)
|
||||
hs-source-dirs: prelude
|
||||
other-modules: Prelude
|
||||
build-depends: base-compat >= 0.9
|
||||
if os(windows)
|
||||
cpp-options: -D_WINDOWS
|
||||
else
|
||||
build-depends: unix >= 2.4 && < 2.8
|
||||
if flag(old-locale)
|
||||
build-depends: old-locale >= 1 && < 1.1,
|
||||
time >= 1.2 && < 1.5
|
||||
else
|
||||
build-depends: time >= 1.5 && < 1.9
|
||||
if flag(network-uri)
|
||||
build-depends: network-uri >= 2.6 && < 2.7, network >= 2.6
|
||||
else
|
||||
|
@ -418,12 +411,8 @@ library
|
|||
if os(windows)
|
||||
cpp-options: -D_WINDOWS
|
||||
ghc-options: -Wall -fno-warn-unused-do-bind
|
||||
ghc-prof-options: -fprof-auto-exported
|
||||
default-language: Haskell98
|
||||
other-extensions: PatternGuards, OverloadedStrings,
|
||||
ScopedTypeVariables, GeneralizedNewtypeDeriving,
|
||||
RelaxedPolyRec, DeriveDataTypeable, TypeSynonymInstances,
|
||||
FlexibleInstances
|
||||
default-language: Haskell2010
|
||||
other-extensions: NoImplicitPrelude
|
||||
hs-source-dirs: src
|
||||
|
||||
exposed-modules: Text.Pandoc,
|
||||
|
@ -459,6 +448,7 @@ library
|
|||
Text.Pandoc.Readers.EPUB,
|
||||
Text.Pandoc.Readers.Muse,
|
||||
Text.Pandoc.Readers.Man,
|
||||
Text.Pandoc.Readers.FB2,
|
||||
Text.Pandoc.Writers,
|
||||
Text.Pandoc.Writers.Native,
|
||||
Text.Pandoc.Writers.Docbook,
|
||||
|
@ -555,22 +545,23 @@ library
|
|||
Text.Pandoc.UUID,
|
||||
Text.Pandoc.Translations,
|
||||
Text.Pandoc.Slides,
|
||||
Text.Pandoc.Compat.Time,
|
||||
Paths_pandoc
|
||||
|
||||
buildable: True
|
||||
|
||||
executable pandoc
|
||||
build-depends: pandoc, base >= 4.7 && < 5
|
||||
if impl(ghc < 8.0)
|
||||
build-depends: semigroups == 0.18.*
|
||||
if impl(ghc < 8.4)
|
||||
hs-source-dirs: prelude
|
||||
other-modules: Prelude
|
||||
build-depends: base-compat >= 0.9
|
||||
ghc-options: -rtsopts -with-rtsopts=-K16m -Wall -fno-warn-unused-do-bind -threaded
|
||||
ghc-prof-options: -fprof-auto-exported -rtsopts -with-rtsopts=-K16m
|
||||
if flag(static)
|
||||
ld-options: -static
|
||||
default-language: Haskell98
|
||||
other-extensions: PatternGuards, OverloadedStrings,
|
||||
ScopedTypeVariables, GeneralizedNewtypeDeriving,
|
||||
RelaxedPolyRec, DeriveDataTypeable, TypeSynonymInstances,
|
||||
FlexibleInstances
|
||||
default-language: Haskell2010
|
||||
other-extensions: NoImplicitPrelude
|
||||
hs-source-dirs: .
|
||||
main-is: pandoc.hs
|
||||
buildable: True
|
||||
|
@ -580,27 +571,38 @@ executable trypandoc
|
|||
main-is: trypandoc.hs
|
||||
hs-source-dirs: trypandoc
|
||||
default-language: Haskell2010
|
||||
other-extensions: NoImplicitPrelude
|
||||
if flag(trypandoc)
|
||||
build-depends: base, aeson, pandoc,
|
||||
text, wai-extra, wai >= 0.3, http-types
|
||||
buildable: True
|
||||
else
|
||||
buildable: False
|
||||
if impl(ghc < 8.0)
|
||||
build-depends: semigroups == 0.18.*
|
||||
if impl(ghc < 8.4)
|
||||
hs-source-dirs: prelude
|
||||
other-modules: Prelude
|
||||
build-depends: base-compat >= 0.9
|
||||
|
||||
executable weigh-pandoc
|
||||
benchmark weigh-pandoc
|
||||
type: exitcode-stdio-1.0
|
||||
main-is: weigh-pandoc.hs
|
||||
hs-source-dirs: benchmark
|
||||
if flag(weigh-pandoc)
|
||||
build-depends: pandoc,
|
||||
base >= 4.2 && < 5,
|
||||
text,
|
||||
weigh >= 0.0 && < 0.1,
|
||||
mtl >= 2.2 && < 2.3
|
||||
buildable: True
|
||||
else
|
||||
buildable: False
|
||||
ghc-options: -rtsopts -Wall -fno-warn-unused-do-bind
|
||||
default-language: Haskell98
|
||||
build-depends: pandoc,
|
||||
base >= 4.2 && < 5,
|
||||
text,
|
||||
weigh >= 0.0 && < 0.1,
|
||||
mtl >= 2.2 && < 2.3
|
||||
if impl(ghc < 8.0)
|
||||
build-depends: semigroups == 0.18.*
|
||||
if impl(ghc < 8.4)
|
||||
hs-source-dirs: prelude
|
||||
other-modules: Prelude
|
||||
build-depends: base-compat >= 0.9
|
||||
ghc-options: -rtsopts -Wall -fno-warn-unused-do-bind -threaded
|
||||
default-language: Haskell2010
|
||||
other-extensions: NoImplicitPrelude
|
||||
|
||||
test-suite test-pandoc
|
||||
type: exitcode-stdio-1.0
|
||||
|
@ -608,20 +610,20 @@ test-suite test-pandoc
|
|||
hs-source-dirs: test
|
||||
build-depends: base >= 4.2 && < 5,
|
||||
pandoc,
|
||||
pandoc-types >= 1.17.3 && < 1.18,
|
||||
pandoc-types >= 1.17.4.2 && < 1.18,
|
||||
bytestring >= 0.9 && < 0.11,
|
||||
base64-bytestring >= 0.1 && < 1.1,
|
||||
text >= 0.11 && < 1.3,
|
||||
time >= 1.5 && < 1.9,
|
||||
time >= 1.5 && < 1.10,
|
||||
directory >= 1 && < 1.4,
|
||||
filepath >= 1.1 && < 1.5,
|
||||
hslua >= 0.9 && < 0.10,
|
||||
hslua >= 0.9.5 && < 0.9.6,
|
||||
process >= 1.2.3 && < 1.7,
|
||||
temporary >= 1.1 && < 1.3,
|
||||
temporary >= 1.1 && < 1.4,
|
||||
Diff >= 0.2 && < 0.4,
|
||||
tasty >= 0.11 && < 1.1,
|
||||
tasty-hunit >= 0.9 && < 0.11,
|
||||
tasty-quickcheck >= 0.8 && < 0.10,
|
||||
tasty-quickcheck >= 0.8 && < 0.11,
|
||||
tasty-golden >= 2.3 && < 2.4,
|
||||
QuickCheck >= 2.4 && < 2.12,
|
||||
containers >= 0.4.2.1 && < 0.6,
|
||||
|
@ -629,11 +631,12 @@ test-suite test-pandoc
|
|||
zip-archive >= 0.2.3.4 && < 0.4,
|
||||
xml >= 1.3.12 && < 1.4,
|
||||
Glob >= 0.7 && < 0.10
|
||||
if flag(old-locale)
|
||||
build-depends: old-locale >= 1 && < 1.1,
|
||||
time >= 1.2 && < 1.5
|
||||
else
|
||||
build-depends: time >= 1.5 && < 1.9
|
||||
if impl(ghc < 8.0)
|
||||
build-depends: semigroups == 0.18.*
|
||||
if impl(ghc < 8.4)
|
||||
hs-source-dirs: prelude
|
||||
other-modules: Prelude
|
||||
build-depends: base-compat >= 0.9
|
||||
other-modules: Tests.Old
|
||||
Tests.Command
|
||||
Tests.Helpers
|
||||
|
@ -652,6 +655,7 @@ test-suite test-pandoc
|
|||
Tests.Readers.Org.Block.Table
|
||||
Tests.Readers.Org.Directive
|
||||
Tests.Readers.Org.Inline
|
||||
Tests.Readers.Org.Inline.Citation
|
||||
Tests.Readers.Org.Inline.Note
|
||||
Tests.Readers.Org.Inline.Smart
|
||||
Tests.Readers.Org.Meta
|
||||
|
@ -664,6 +668,7 @@ test-suite test-pandoc
|
|||
Tests.Readers.Muse
|
||||
Tests.Readers.Creole
|
||||
Tests.Readers.Man
|
||||
Tests.Readers.FB2
|
||||
Tests.Writers.Native
|
||||
Tests.Writers.ConTeXt
|
||||
Tests.Writers.Docbook
|
||||
|
@ -682,7 +687,8 @@ test-suite test-pandoc
|
|||
Tests.Writers.Powerpoint
|
||||
Tests.Writers.OOXML
|
||||
ghc-options: -rtsopts -Wall -fno-warn-unused-do-bind -threaded
|
||||
default-language: Haskell98
|
||||
default-language: Haskell2010
|
||||
other-extensions: NoImplicitPrelude
|
||||
|
||||
benchmark benchmark-pandoc
|
||||
type: exitcode-stdio-1.0
|
||||
|
@ -692,6 +698,13 @@ benchmark benchmark-pandoc
|
|||
time, bytestring, containers,
|
||||
base >= 4.2 && < 5,
|
||||
text >= 0.11 && < 1.3,
|
||||
criterion >= 1.0 && < 1.4
|
||||
ghc-options: -rtsopts -Wall -fno-warn-unused-do-bind
|
||||
default-language: Haskell98
|
||||
criterion >= 1.0 && < 1.5
|
||||
if impl(ghc < 8.0)
|
||||
build-depends: semigroups == 0.18.*
|
||||
if impl(ghc < 8.4)
|
||||
hs-source-dirs: prelude
|
||||
other-modules: Prelude
|
||||
build-depends: base-compat >= 0.9
|
||||
ghc-options: -rtsopts -Wall -fno-warn-unused-do-bind -threaded
|
||||
default-language: Haskell2010
|
||||
other-extensions: NoImplicitPrelude
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2006-2018 John MacFarlane <jgm@berkeley.edu>
|
||||
|
||||
|
@ -29,6 +30,7 @@ Parses command-line options and calls the appropriate readers and
|
|||
writers.
|
||||
-}
|
||||
module Main where
|
||||
import Prelude
|
||||
import qualified Control.Exception as E
|
||||
import Text.Pandoc.App (convertWithOpts, defaultOpts, options, parseOptions)
|
||||
import Text.Pandoc.Error (handleError)
|
||||
|
|
17
prelude/Prelude.hs
Normal file
|
@ -0,0 +1,17 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE PackageImports #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
-- The intent is that this Prelude provide the API of
|
||||
-- the base 4.11 Prelude in a way that is portable for
|
||||
-- all base versions.
|
||||
|
||||
module Prelude
|
||||
(
|
||||
module Prelude.Compat
|
||||
, Semigroup(..)
|
||||
)
|
||||
where
|
||||
|
||||
import Prelude.Compat
|
||||
import Data.Semigroup (Semigroup(..)) -- includes (<>)
|
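A hypothetical module (not in the patch) showing how this shim is meant to be used: with NoImplicitPrelude enabled and the prelude/ directory on the source path for GHC < 8.4, a plain import Prelude brings Semigroup's (<>) into scope on every supported compiler.

```haskell
{-# LANGUAGE NoImplicitPrelude #-}
-- On GHC < 8.4 this "Prelude" is the shim above (Prelude.Compat plus
-- Semigroup); on GHC >= 8.4 it is the real Prelude, which already
-- exports (<>). Either way the code below compiles unchanged.
module Example where

import Prelude

greeting :: String
greeting = "Hello, " <> "pandoc!"
```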
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
{-# LANGUAGE DeriveGeneric #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
|
@ -42,6 +43,7 @@ module Text.Pandoc.App (
|
|||
, options
|
||||
, applyFilters
|
||||
) where
|
||||
import Prelude
|
||||
import qualified Control.Exception as E
|
||||
import Control.Monad
|
||||
import Control.Monad.Except (catchError, throwError)
|
||||
|
@ -50,11 +52,10 @@ import Data.Aeson (defaultOptions)
|
|||
import Data.Aeson.TH (deriveJSON)
|
||||
import qualified Data.ByteString as BS
|
||||
import qualified Data.ByteString.Lazy as B
|
||||
import Data.Char (toLower, toUpper)
|
||||
import Data.Char (toLower, toUpper, isAscii, ord)
|
||||
import Data.List (find, intercalate, isPrefixOf, isSuffixOf, sort)
|
||||
import qualified Data.Map as M
|
||||
import Data.Maybe (fromMaybe, isJust, isNothing)
|
||||
import Data.Monoid
|
||||
import qualified Data.Set as Set
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as T
|
||||
|
@ -65,7 +66,12 @@ import Data.Yaml (decode)
|
|||
import qualified Data.Yaml as Yaml
|
||||
import GHC.Generics
|
||||
import Network.URI (URI (..), parseURI)
|
||||
#ifdef EMBED_DATA_FILES
|
||||
import Text.Pandoc.Data (dataFiles)
|
||||
#else
|
||||
import System.Directory (getDirectoryContents)
|
||||
import Paths_pandoc (getDataDir)
|
||||
#endif
|
||||
import Data.Aeson.Encode.Pretty (encodePretty', Config(..), keyOrder,
|
||||
defConfig, Indent(..), NumberFormat(..))
|
||||
import Skylighting (Style, Syntax (..), defaultSyntaxMap, parseTheme,
|
||||
|
@ -87,7 +93,7 @@ import Text.Pandoc.Highlighting (highlightingStyles)
|
|||
import Text.Pandoc.PDF (makePDF)
|
||||
import Text.Pandoc.SelfContained (makeDataURI, makeSelfContained)
|
||||
import Text.Pandoc.Shared (eastAsianLineBreakFilter, stripEmptyParagraphs,
|
||||
headerShift, isURI, ordNub, safeRead, tabFilter)
|
||||
headerShift, isURI, ordNub, safeRead, tabFilter, uriPathToPath)
|
||||
import qualified Text.Pandoc.UTF8 as UTF8
|
||||
import Text.Pandoc.Writers.Math (defaultKaTeXURL, defaultMathJaxURL)
|
||||
import Text.Pandoc.XML (toEntities)
|
||||
|
@ -217,17 +223,16 @@ convertWithOpts opts = do
|
|||
then pdfWriterAndProg (optWriter opts) (optPdfEngine opts)
|
||||
else return (nonPdfWriterName $ optWriter opts, Nothing)
|
||||
|
||||
let format = baseWriterName
|
||||
let format = map toLower $ baseWriterName
|
||||
$ takeFileName writerName -- in case path to lua script
|
||||
|
||||
-- disabling the custom writer for now
|
||||
(writer, writerExts) <-
|
||||
if ".lua" `isSuffixOf` format
|
||||
-- note: use non-lowercased version writerName
|
||||
then return (TextWriter
|
||||
(\o d -> writeCustom writerName o d)
|
||||
:: Writer PandocIO, mempty)
|
||||
else case getWriter writerName of
|
||||
else case getWriter (map toLower writerName) of
|
||||
Left e -> E.throwIO $ PandocAppError $
|
||||
if format == "pdf"
|
||||
then e ++
|
||||
|
@ -351,12 +356,6 @@ convertWithOpts opts = do
|
|||
maybe return (addStringAsVariable "epub-cover-image")
|
||||
(optEpubCoverImage opts)
|
||||
>>=
|
||||
(\vars -> case optHTMLMathMethod opts of
|
||||
LaTeXMathML Nothing -> do
|
||||
s <- UTF8.toString <$> readDataFile "LaTeXMathML.js"
|
||||
return $ ("mathml-script", s) : vars
|
||||
_ -> return vars)
|
||||
>>=
|
||||
(\vars -> if format == "dzslides"
|
||||
then do
|
||||
dztempl <- UTF8.toString <$> readDataFile
|
||||
|
@ -513,16 +512,19 @@ convertWithOpts opts = do
|
|||
let htmlFormat = format `elem`
|
||||
["html","html4","html5","s5","slidy",
|
||||
"slideous","dzslides","revealjs"]
|
||||
handleEntities = if (htmlFormat ||
|
||||
format == "docbook4" ||
|
||||
format == "docbook5" ||
|
||||
format == "docbook") && optAscii opts
|
||||
then toEntities
|
||||
else id
|
||||
escape
|
||||
| optAscii opts
|
||||
, htmlFormat || format == "docbook4" ||
|
||||
format == "docbook5" || format == "docbook" ||
|
||||
format == "jats" || format == "opml" ||
|
||||
format == "icml" = toEntities
|
||||
| optAscii opts
|
||||
, format == "ms" || format == "man" = groffEscape
|
||||
| otherwise = id
|
||||
addNl = if standalone
|
||||
then id
|
||||
else (<> T.singleton '\n')
|
||||
output <- (addNl . handleEntities) <$> f writerOptions doc
|
||||
output <- (addNl . escape) <$> f writerOptions doc
|
||||
writerFn eol outputFile =<<
|
||||
if optSelfContained opts && htmlFormat
|
||||
-- TODO not maximally efficient; change type
|
||||
|
@ -530,6 +532,12 @@ convertWithOpts opts = do
|
|||
then T.pack <$> makeSelfContained (T.unpack output)
|
||||
else return output
|
||||
|
||||
groffEscape :: Text -> Text
|
||||
groffEscape = T.concatMap toUchar
|
||||
where toUchar c
|
||||
| isAscii c = T.singleton c
|
||||
| otherwise = T.pack $ printf "\\[u%04X]" (ord c)
|
||||
|
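A standalone sketch of the escaping rule introduced above for --ascii with groff ms/man output (the helper name here is invented; it mirrors groffEscape): ASCII passes through, anything else becomes a \[uXXXX] escape.

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Data.Char (isAscii, ord)
import qualified Data.Text as T
import qualified Data.Text.IO as TIO
import Text.Printf (printf)

-- Mirrors the groffEscape helper: keep ASCII, hex-escape the rest.
escapeForGroff :: T.Text -> T.Text
escapeForGroff = T.concatMap $ \c ->
  if isAscii c
     then T.singleton c
     else T.pack (printf "\\[u%04X]" (ord c))

main :: IO ()
main = TIO.putStrLn (escapeForGroff "café")   -- prints: caf\[u00E9]
```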
||||
type Transform = Pandoc -> Pandoc
|
||||
|
||||
isTextFormat :: String -> Bool
|
||||
|
@ -729,6 +737,7 @@ defaultReaderName fallback (x:xs) =
|
|||
".odt" -> "odt"
|
||||
".pdf" -> "pdf" -- so we get an "unknown reader" error
|
||||
".doc" -> "doc" -- so we get an "unknown reader" error
|
||||
".fb2" -> "fb2"
|
||||
_ -> defaultReaderName fallback xs
|
||||
|
||||
-- Determine default writer based on output file extension
|
||||
|
@ -786,7 +795,7 @@ readSource src = case parseURI src of
|
|||
readURI src
|
||||
| uriScheme u == "file:" ->
|
||||
liftIO $ UTF8.toText <$>
|
||||
BS.readFile (uriPath u)
|
||||
BS.readFile (uriPathToPath $ uriPath u)
|
||||
_ -> liftIO $ UTF8.toText <$>
|
||||
BS.readFile src
|
||||
|
||||
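The move to uriPathToPath above mainly matters for Windows drive-letter file URIs; a small sketch (using only Network.URI, everything else is illustrative) of what uriPath returns in that case:

```haskell
-- For a URI like file:///C:/docs/notes.md, uriPath keeps a leading "/"
-- (and any percent-escapes); uriPathToPath is meant to turn that into a
-- usable local path before the file is read.
import Network.URI (parseURI, uriPath, uriScheme)

main :: IO ()
main =
  case parseURI "file:///C:/docs/notes.md" of
    Just u  -> putStrLn (uriScheme u ++ " " ++ uriPath u)
    Nothing -> putStrLn "not a valid URI"
-- expected output: file: /C:/docs/notes.md
```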
|
@ -834,8 +843,7 @@ options =
|
|||
|
||||
, Option "tw" ["to","write"]
|
||||
(ReqArg
|
||||
(\arg opt -> return opt { optWriter =
|
||||
Just (map toLower arg) })
|
||||
(\arg opt -> return opt { optWriter = Just arg })
|
||||
"FORMAT")
|
||||
""
|
||||
|
||||
|
@ -967,6 +975,9 @@ options =
|
|||
setUserDataDir Nothing
|
||||
getDefaultTemplate arg
|
||||
case templ of
|
||||
Right "" -> do -- e.g. for docx, odt, json:
|
||||
E.throwIO $ PandocCouldNotFindDataFileError
|
||||
("templates/default." ++ arg)
|
||||
Right t -> UTF8.hPutStr stdout t
|
||||
Left e -> E.throwIO e
|
||||
exitSuccess)
|
||||
|
@ -1392,40 +1403,6 @@ options =
|
|||
"URL")
|
||||
"" -- Use KaTeX for HTML Math
|
||||
|
||||
, Option "m" ["latexmathml", "asciimathml"]
|
||||
(OptArg
|
||||
(\arg opt -> do
|
||||
deprecatedOption "--latexmathml, --asciimathml, -m" ""
|
||||
return opt { optHTMLMathMethod = LaTeXMathML arg })
|
||||
"URL")
|
||||
"" -- "Use LaTeXMathML script in html output"
|
||||
|
||||
, Option "" ["mimetex"]
|
||||
(OptArg
|
||||
(\arg opt -> do
|
||||
deprecatedOption "--mimetex" ""
|
||||
let url' = case arg of
|
||||
Just u -> u ++ "?"
|
||||
Nothing -> "/cgi-bin/mimetex.cgi?"
|
||||
return opt { optHTMLMathMethod = WebTeX url' })
|
||||
"URL")
|
||||
"" -- "Use mimetex for HTML math"
|
||||
|
||||
, Option "" ["jsmath"]
|
||||
(OptArg
|
||||
(\arg opt -> do
|
||||
deprecatedOption "--jsmath" ""
|
||||
return opt { optHTMLMathMethod = JsMath arg})
|
||||
"URL")
|
||||
"" -- "Use jsMath for HTML math"
|
||||
|
||||
, Option "" ["gladtex"]
|
||||
(NoArg
|
||||
(\opt -> do
|
||||
deprecatedOption "--gladtex" ""
|
||||
return opt { optHTMLMathMethod = GladTeX }))
|
||||
"" -- "Use gladtex for HTML math"
|
||||
|
||||
, Option "" ["abbreviations"]
|
||||
(ReqArg
|
||||
(\arg opt -> return opt { optAbbreviations = Just arg })
|
||||
|
@ -1471,7 +1448,7 @@ options =
|
|||
, Option "" ["bash-completion"]
|
||||
(NoArg
|
||||
(\_ -> do
|
||||
ddir <- getDataDir
|
||||
datafiles <- getDataFileNames
|
||||
tpl <- runIOorExplode $
|
||||
UTF8.toString <$>
|
||||
readDefaultDataFile "bash_completion.tpl"
|
||||
|
@ -1483,7 +1460,7 @@ options =
|
|||
(unwords readersNames)
|
||||
(unwords writersNames)
|
||||
(unwords $ map fst highlightingStyles)
|
||||
ddir
|
||||
(unwords datafiles)
|
||||
exitSuccess ))
|
||||
"" -- "Print bash completion script"
|
||||
|
||||
|
@ -1557,6 +1534,16 @@ options =
|
|||
|
||||
]
|
||||
|
||||
getDataFileNames :: IO [FilePath]
|
||||
getDataFileNames = do
|
||||
#ifdef EMBED_DATA_FILES
|
||||
let allDataFiles = map fst dataFiles
|
||||
#else
|
||||
allDataFiles <- filter (\x -> x /= "." && x /= "..") <$>
|
||||
(getDataDir >>= getDirectoryContents)
|
||||
#endif
|
||||
return $ "reference.docx" : "reference.odt" : "reference.pptx" : allDataFiles
|
||||
|
||||
-- Returns usage message
|
||||
usageMessage :: String -> [OptDescr (Opt -> IO Opt)] -> String
|
||||
usageMessage programName = usageInfo (programName ++ " [OPTIONS] [FILES]")
|
||||
|
|
|
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2013-2018 John MacFarlane <jgm@berkeley.edu>

@@ -30,6 +31,7 @@ ascii equivalents (used in constructing HTML identifiers).
-}
module Text.Pandoc.Asciify (toAsciiChar)
where
import Prelude
import Data.Char (isAscii)
import qualified Data.Map as M

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2017–2018 John MacFarlane <jgm@berkeley.edu>

@@ -34,6 +35,7 @@ module Text.Pandoc.BCP47 (
, renderLang
)
where
import Prelude
import Control.Monad (guard)
import Data.Char (isAlphaNum, isAscii, isLetter, isLower, isUpper, toLower,
toUpper)

@@ -1,9 +1,11 @@
{-# LANGUAGE NoImplicitPrelude #-}
module Text.Pandoc.CSS ( foldOrElse
, pickStyleAttrProps
, pickStylesToKVs
)
where

import Prelude
import Text.Pandoc.Shared (trim)
import Text.Parsec
import Text.Parsec.String

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2017–2018 John MacFarlane <jgm@berkeley.edu>

@@ -34,6 +35,7 @@ module Text.Pandoc.CSV (
ParseError
) where

import Prelude
import Control.Monad (void)
import Data.Text (Text)
import qualified Data.Text as T
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE DeriveFunctor #-}

@@ -96,6 +97,7 @@ module Text.Pandoc.Class ( PandocMonad(..)
, Translations
) where

import Prelude
import Prelude hiding (readFile)
import System.Random (StdGen, next, mkStdGen)
import qualified System.Random as IO (newStdGen)

@@ -106,10 +108,11 @@ import Data.List (stripPrefix)
import qualified Data.Unique as IO (newUnique)
import qualified Text.Pandoc.UTF8 as UTF8
import qualified System.Directory as Directory
import Text.Pandoc.Compat.Time (UTCTime)
import Data.Time (UTCTime)
import Text.Pandoc.Logging
import Text.Pandoc.Shared (uriPathToPath)
import Text.Parsec (ParsecT, getPosition, sourceLine, sourceName)
import qualified Text.Pandoc.Compat.Time as IO (getCurrentTime)
import qualified Data.Time as IO (getCurrentTime)
import Text.Pandoc.MIME (MimeType, getMimeType, extensionFromMimeType)
import Text.Pandoc.Definition
import Data.Digest.Pure.SHA (sha1, showDigest)

@@ -475,6 +478,14 @@ liftIOError f u = do
Left e -> throwError $ PandocIOError u e
Right r -> return r

-- | Show potential IO errors to the user continuing execution anyway
logIOError :: IO () -> PandocIO ()
logIOError f = do
res <- liftIO $ tryIOError f
case res of
Left e -> report $ IgnoredIOError (E.displayException e)
Right _ -> pure ()

instance PandocMonad PandocIO where
lookupEnv = liftIO . IO.lookupEnv
getCurrentTime = liftIO IO.getCurrentTime

@@ -588,7 +599,7 @@ downloadOrRead s = do
-- We don't want to treat C:/ as a scheme:
Just u' | length (uriScheme u') > 2 -> openURL (show u')
Just u' | uriScheme u' == "file:" ->
readLocalFile $ dropWhile (=='/') (uriPath u')
readLocalFile $ uriPathToPath (uriPath u')
_ -> readLocalFile fp -- get from local file system
where readLocalFile f = do
resourcePath <- getResourcePath

@@ -853,14 +864,14 @@ writeMedia :: FilePath -> MediaBag -> FilePath -> PandocIO ()
writeMedia dir mediabag subpath = do
-- we join and split to convert a/b/c to a\b\c on Windows;
-- in zip containers all paths use /
let fullpath = dir </> normalise subpath
let fullpath = dir </> unEscapeString (normalise subpath)
let mbcontents = lookupMedia subpath mediabag
case mbcontents of
Nothing -> throwError $ PandocResourceNotFound subpath
Just (_, bs) -> do
report $ Extracting fullpath
liftIOError (createDirectoryIfMissing True) (takeDirectory fullpath)
liftIOError (\p -> BL.writeFile p bs) fullpath
logIOError $ BL.writeFile fullpath bs

adjustImagePath :: FilePath -> [FilePath] -> Inline -> Inline
adjustImagePath dir paths (Image attr lab (src, tit))

@@ -923,7 +934,7 @@ data FileInfo = FileInfo { infoFileMTime :: UTCTime
}

newtype FileTree = FileTree {unFileTree :: M.Map FilePath FileInfo}
deriving (Monoid)
deriving (Semigroup, Monoid)

getFileInfo :: FilePath -> FileTree -> Maybe FileInfo
getFileInfo fp tree =
@@ -1,30 +0,0 @@
{-# LANGUAGE CPP #-}

{-
This compatibility module is needed because, in time 1.5, the
`defaultTimeLocale` function was moved from System.Locale (in the
old-locale library) into Data.Time.

We support both behaviors because time 1.4 is a boot library for GHC
7.8. time 1.5 is a boot library for GHC 7.10.

When support is dropped for GHC 7.8, this module may be obsoleted.
-}

#if MIN_VERSION_time(1,5,0)
module Text.Pandoc.Compat.Time (
module Data.Time
)
where
import Data.Time

#else
module Text.Pandoc.Compat.Time (
module Data.Time,
defaultTimeLocale
)
where
import Data.Time
import System.Locale ( defaultTimeLocale )

#endif
@@ -1,7 +1,9 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE TemplateHaskell #-}

module Text.Pandoc.Data (dataFiles) where

import Prelude
import qualified Data.ByteString as B
import Data.FileEmbed
import System.FilePath (splitDirectories)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2015 John MacFarlane <jgm@berkeley.edu>

@@ -28,6 +29,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Emoji symbol lookup from canonical string identifier.
-}
module Text.Pandoc.Emoji ( emojis ) where
import Prelude
import qualified Data.Map as M

emojis :: M.Map String String

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-

@@ -34,6 +35,7 @@ module Text.Pandoc.Error (
PandocError(..),
handleError) where

import Prelude
import Control.Exception (Exception)
import Data.Typeable (Typeable)
import GHC.Generics (Generic)
@@ -1,3 +1,9 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
{-
Copyright (C) 2012-2018 John MacFarlane <jgm@berkeley.edu>

@@ -15,10 +21,6 @@ You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE TemplateHaskell #-}

{- |
Module : Text.Pandoc.Extensions

@@ -47,6 +49,7 @@ module Text.Pandoc.Extensions ( Extension(..)
, githubMarkdownExtensions
, multimarkdownExtensions )
where
import Prelude
import Data.Aeson (FromJSON (..), ToJSON (..), defaultOptions)
import Data.Aeson.TH (deriveJSON)
import Data.Bits (clearBit, setBit, testBit, (.|.))

@@ -59,9 +62,11 @@ import Text.Parsec
newtype Extensions = Extensions Integer
deriving (Show, Read, Eq, Ord, Data, Typeable, Generic, ToJSON, FromJSON)

instance Semigroup Extensions where
(Extensions a) <> (Extensions b) = Extensions (a .|. b)
instance Monoid Extensions where
mempty = Extensions 0
mappend (Extensions a) (Extensions b) = Extensions (a .|. b)
mappend = (<>)

extensionsFromList :: [Extension] -> Extensions
extensionsFromList = foldr enableExtension emptyExtensions

@@ -317,6 +322,8 @@ getDefaultExtensions "muse" = extensionsFromList
Ext_auto_identifiers]
getDefaultExtensions "plain" = plainExtensions
getDefaultExtensions "gfm" = githubMarkdownExtensions
getDefaultExtensions "commonmark" = extensionsFromList
[Ext_raw_html]
getDefaultExtensions "org" = extensionsFromList
[Ext_citations,
Ext_auto_identifiers]

@@ -338,6 +345,10 @@ getDefaultExtensions "latex" = extensionsFromList
[Ext_smart,
Ext_latex_macros,
Ext_auto_identifiers]
getDefaultExtensions "beamer" = extensionsFromList
[Ext_smart,
Ext_latex_macros,
Ext_auto_identifiers]
getDefaultExtensions "context" = extensionsFromList
[Ext_smart,
Ext_auto_identifiers]
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2006-2017 John MacFarlane <jgm@berkeley.edu>

@@ -33,6 +34,7 @@ module Text.Pandoc.Filter
, applyFilters
) where

import Prelude
import Data.Aeson (defaultOptions)
import Data.Aeson.TH (deriveJSON)
import Data.Foldable (foldrM)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2006-2018 John MacFarlane <jgm@berkeley.edu>

@@ -29,6 +30,7 @@ Programmatically modifications of pandoc documents via JSON filters.
-}
module Text.Pandoc.Filter.JSON (apply) where

import Prelude
import Control.Monad (unless, when)
import Control.Monad.Trans (MonadIO (liftIO))
import Data.Aeson (eitherDecode', encode)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2006-2018 John MacFarlane <jgm@berkeley.edu>

@@ -29,6 +30,7 @@ Apply Lua filters to modify a pandoc documents programmatically.
-}
module Text.Pandoc.Filter.Lua (apply) where

import Prelude
import Control.Exception (throw)
import Text.Pandoc.Class (PandocIO)
import Text.Pandoc.Definition (Pandoc)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2006-2018 John MacFarlane <jgm@berkeley.edu>

@@ -31,6 +32,7 @@ module Text.Pandoc.Filter.Path
( expandFilterPath
) where

import Prelude
import Text.Pandoc.Class (PandocMonad, fileExists, getUserDataDir)
import System.FilePath ((</>), isRelative)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2008-2018 John MacFarlane <jgm@berkeley.edu>

@@ -49,6 +50,7 @@ module Text.Pandoc.Highlighting ( highlightingStyles
, fromListingsLanguage
, toListingsLanguage
) where
import Prelude
import Control.Monad
import Data.Char (toLower)
import qualified Data.Map as M
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings, ScopedTypeVariables, CPP #-}
{-# OPTIONS_GHC -fno-warn-type-defaults #-}
{-

@@ -49,6 +50,7 @@ module Text.Pandoc.ImageSize ( ImageType(..)
, showInPixel
, showFl
) where
import Prelude
import Data.ByteString (ByteString, unpack)
import qualified Data.ByteString.Char8 as B
import qualified Data.ByteString.Lazy as BL

@@ -126,7 +128,7 @@ imageType img = case B.take 4 img of
| B.take 4 (B.drop 1 $ B.dropWhile (/=' ') img) == "EPSF"
-> return Eps
"\x01\x00\x00\x00"
| B.take 4 (B.drop 40 img) == " EMF"
| B.take 4 (B.drop 40 img) == " EMF"
-> return Emf
_ -> mzero

@@ -361,9 +363,9 @@ svgSize opts img = do
, dpiX = dpi
, dpiY = dpi
}

emfSize :: ByteString -> Maybe ImageSize
emfSize img =
emfSize img =
let
parseheader = runGetOrFail $ do
skip 0x18 -- 0x00

@@ -388,11 +390,11 @@ emfSize img =
, dpiX = fromIntegral dpiW
, dpiY = fromIntegral dpiH
}
in
in
case parseheader . BL.fromStrict $ img of
Left _ -> Nothing
Right (_, _, size) -> Just size

jpegSize :: ByteString -> Either String ImageSize
jpegSize img =
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}

@@ -39,6 +40,7 @@ module Text.Pandoc.Logging (
, messageVerbosity
) where

import Prelude
import Control.Monad (mzero)
import Data.Aeson
import Data.Aeson.Encode.Pretty (Config (..), defConfig, encodePretty',

@@ -83,6 +85,7 @@ data LogMessage =
| InlineNotRendered Inline
| BlockNotRendered Block
| DocxParserWarning String
| IgnoredIOError String
| CouldNotFetchResource String String
| CouldNotDetermineImageSize String String
| CouldNotConvertImage String String

@@ -99,6 +102,7 @@ data LogMessage =
| Deprecated String String
| NoTranslation String
| CouldNotLoadTranslations String String
| UnexpectedXmlElement String String
deriving (Show, Eq, Data, Ord, Typeable, Generic)

instance ToJSON LogMessage where

@@ -172,6 +176,8 @@ instance ToJSON LogMessage where
["contents" .= toJSON bl]
DocxParserWarning s ->
["contents" .= Text.pack s]
IgnoredIOError s ->
["contents" .= Text.pack s]
CouldNotFetchResource fp s ->
["path" .= Text.pack fp,
"message" .= Text.pack s]

@@ -209,6 +215,9 @@ instance ToJSON LogMessage where
CouldNotLoadTranslations lang msg ->
["lang" .= Text.pack lang,
"message" .= Text.pack msg]
UnexpectedXmlElement element parent ->
["element" .= Text.pack element,
"parent" .= Text.pack parent]

showPos :: SourcePos -> String

@@ -259,6 +268,8 @@ showLogMessage msg =
"Not rendering " ++ show bl
DocxParserWarning s ->
"Docx parser warning: " ++ s
IgnoredIOError s ->
"IO Error (ignored): " ++ s
CouldNotFetchResource fp s ->
"Could not fetch resource '" ++ fp ++ "'" ++
if null s then "" else ": " ++ s

@@ -303,6 +314,8 @@ showLogMessage msg =
CouldNotLoadTranslations lang m ->
"Could not load translations for " ++ lang ++
if null m then "" else '\n' : m
UnexpectedXmlElement element parent ->
"Unexpected XML element " ++ element ++ " in " ++ parent

messageVerbosity:: LogMessage -> Verbosity
messageVerbosity msg =

@@ -324,6 +337,7 @@ messageVerbosity msg =
InlineNotRendered{} -> INFO
BlockNotRendered{} -> INFO
DocxParserWarning{} -> INFO
IgnoredIOError{} -> WARNING
CouldNotFetchResource{} -> WARNING
CouldNotDetermineImageSize{} -> WARNING
CouldNotConvertImage{} -> WARNING

@@ -340,3 +354,4 @@ messageVerbosity msg =
Deprecated{} -> WARNING
NoTranslation{} -> WARNING
CouldNotLoadTranslations{} -> WARNING
UnexpectedXmlElement {} -> WARNING
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2017–2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -31,13 +32,14 @@ module Text.Pandoc.Lua
, runPandocLua
) where

import Prelude
import Control.Monad ((>=>))
import Foreign.Lua (FromLuaStack (peek), Lua, LuaException (..),
Status (OK), ToLuaStack (push))
import Text.Pandoc.Class (PandocIO)
import Text.Pandoc.Definition (Pandoc)
import Text.Pandoc.Lua.Filter (LuaFilter, walkMWithLuaFilter)
import Text.Pandoc.Lua.Init (runPandocLua)
import Text.Pandoc.Lua.Init (runPandocLua, registerScriptPath)
import Text.Pandoc.Lua.Util (popValue)
import Text.Pandoc.Options (ReaderOptions)
import qualified Foreign.Lua as Lua

@@ -55,11 +57,12 @@ runLuaFilter' :: ReaderOptions -> FilePath -> String
runLuaFilter' ropts filterPath format pd = do
registerFormat
registerReaderOptions
registerScriptPath filterPath
top <- Lua.gettop
stat <- Lua.dofile filterPath
if stat /= OK
then do
luaErrMsg <- peek (-1) <* Lua.pop 1
luaErrMsg <- popValue
Lua.throwLuaError luaErrMsg
else do
newtop <- Lua.gettop

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE FlexibleContexts #-}

module Text.Pandoc.Lua.Filter ( LuaFilterFunction

@@ -10,6 +11,7 @@ module Text.Pandoc.Lua.Filter ( LuaFilterFunction
, blockElementNames
, inlineElementNames
) where
import Prelude
import Control.Monad (mplus, unless, when, (>=>))
import Control.Monad.Catch (finally)
import Text.Pandoc.Definition
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -31,8 +32,10 @@ module Text.Pandoc.Lua.Init
, runPandocLua
, initLuaState
, luaPackageParams
, registerScriptPath
) where

import Prelude
import Control.Monad.Trans (MonadIO (..))
import Data.Data (Data, dataTypeConstrs, dataTypeOf, showConstr)
import Data.IORef (newIORef, readIORef)

@@ -88,6 +91,11 @@ initLuaState luaPkgParams = do
loadScriptFromDataDir (luaPkgDataDir luaPkgParams) "init.lua"
putConstructorsInRegistry

registerScriptPath :: FilePath -> Lua ()
registerScriptPath fp = do
Lua.push fp
Lua.setglobal "PANDOC_SCRIPT_FILE"

putConstructorsInRegistry :: Lua ()
putConstructorsInRegistry = do
Lua.getglobal "pandoc"

@@ -101,7 +109,7 @@ putConstructorsInRegistry = do
Lua.pop 1
where
constrsToReg :: Data a => a -> Lua ()
constrsToReg = mapM_ putInReg . map showConstr . dataTypeConstrs . dataTypeOf
constrsToReg = mapM_ (putInReg . showConstr) . dataTypeConstrs . dataTypeOf

putInReg :: String -> Lua ()
putInReg name = do

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -29,6 +30,7 @@ module Text.Pandoc.Lua.Module.MediaBag
( pushModule
) where

import Prelude
import Control.Monad (zipWithM_)
import Data.IORef (IORef, modifyIORef', readIORef)
import Data.Maybe (fromMaybe)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -30,6 +31,7 @@ module Text.Pandoc.Lua.Module.Pandoc
( pushModule
) where

import Prelude
import Control.Monad (when)
import Data.Default (Default (..))
import Data.Maybe (fromMaybe)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -29,6 +30,7 @@ module Text.Pandoc.Lua.Module.Utils
( pushModule
) where

import Prelude
import Control.Applicative ((<|>))
import Data.Default (def)
import Foreign.Lua (FromLuaStack, Lua, LuaInteger, NumResults)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -32,6 +33,7 @@ module Text.Pandoc.Lua.Packages
, installPandocPackageSearcher
) where

import Prelude
import Control.Monad (forM_)
import Data.ByteString.Char8 (unpack)
import Data.IORef (IORef)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2012-2018 John MacFarlane <jgm@berkeley.edu>
2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -33,6 +34,7 @@ StackValue instances for pandoc types.
-}
module Text.Pandoc.Lua.StackInstances () where

import Prelude
import Control.Applicative ((<|>))
import Control.Monad (when)
import Control.Monad.Catch (finally)
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright © 2012-2018 John MacFarlane <jgm@berkeley.edu>
2017-2018 Albert Krewinkel <tarleb+pandoc@moltkeplatz.de>

@@ -46,6 +47,7 @@ module Text.Pandoc.Lua.Util
, dostring'
) where

import Prelude
import Control.Monad (when)
import Control.Monad.Catch (finally)
import Data.ByteString.Char8 (unpack)

@@ -132,7 +134,7 @@ class PushViaCall a where
instance PushViaCall (Lua ()) where
pushViaCall' fn pushArgs num = do
Lua.push fn
Lua.rawget (Lua.registryindex)
Lua.rawget Lua.registryindex
pushArgs
call num 1

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2011-2018 John MacFarlane <jgm@berkeley.edu>

@@ -29,6 +30,7 @@ Mime type lookup for ODT writer.
-}
module Text.Pandoc.MIME ( MimeType, getMimeType, getMimeTypeDef,
extensionFromMimeType )where
import Prelude
import Data.Char (toLower)
import Data.List (isPrefixOf, isSuffixOf)
import qualified Data.Map as M

@@ -172,7 +174,7 @@ mimeTypesList = -- List borrowed from happstack-server.
,("eml","message/rfc822")
,("ent","chemical/x-ncbi-asn1-ascii")
,("eot","application/vnd.ms-fontobject")
,("eps","application/postscript")
,("eps","application/eps")
,("etx","text/x-setext")
,("exe","application/x-msdos-program")
,("ez","application/andrew-inset")
@@ -1,3 +1,5 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-

@@ -36,6 +38,7 @@ module Text.Pandoc.MediaBag (
insertMedia,
mediaDirectory,
) where
import Prelude
import qualified Data.ByteString.Lazy as BL
import Data.Data (Data)
import qualified Data.Map as M

@@ -50,7 +53,7 @@ import Text.Pandoc.MIME (MimeType, getMimeTypeDef)
-- can be used for an empty 'MediaBag', and '<>' can be used to append
-- two 'MediaBag's.
newtype MediaBag = MediaBag (M.Map [String] (MimeType, BL.ByteString))
deriving (Monoid, Data, Typeable)
deriving (Semigroup, Monoid, Data, Typeable)

instance Show MediaBag where
show bag = "MediaBag " ++ show (mediaDirectory bag)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TemplateHaskell #-}

@@ -46,6 +47,7 @@ module Text.Pandoc.Options ( module Text.Pandoc.Extensions
, def
, isEnabled
) where
import Prelude
import Data.Aeson (defaultOptions)
import Data.Aeson.TH (deriveJSON)
import Data.Data (Data)

@@ -104,9 +106,6 @@ defaultAbbrevs = Set.fromList
data EPUBVersion = EPUB2 | EPUB3 deriving (Eq, Show, Read, Data, Typeable, Generic)

data HTMLMathMethod = PlainMath
| LaTeXMathML (Maybe String) -- url of LaTeXMathML.js
| JsMath (Maybe String) -- url of jsMath load script
| GladTeX
| WebTeX String -- url of TeX->image script.
| MathML
| MathJax String -- url of MathJax.js
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}

@@ -32,6 +33,7 @@ Conversion of LaTeX documents to PDF.
-}
module Text.Pandoc.PDF ( makePDF ) where

import Prelude
import qualified Codec.Picture as JP
import qualified Control.Exception as E
import Control.Monad (unless, when)

@@ -41,10 +43,8 @@ import Data.ByteString.Lazy (ByteString)
import qualified Data.ByteString.Lazy as BL
import qualified Data.ByteString.Lazy.Char8 as BC
import Data.Maybe (fromMaybe)
import Data.Monoid ((<>))
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.IO as TextIO
import System.Directory
import System.Environment
import System.Exit (ExitCode (..))

@@ -61,7 +61,7 @@ import Text.Pandoc.Error (PandocError (PandocPDFProgramNotFoundError))
import Text.Pandoc.MIME (getMimeType)
import Text.Pandoc.Options (HTMLMathMethod (..), WriterOptions (..))
import Text.Pandoc.Process (pipeProcess)
import Text.Pandoc.Shared (inDirectory, stringify, withTempDir)
import Text.Pandoc.Shared (inDirectory, stringify)
import qualified Text.Pandoc.UTF8 as UTF8
import Text.Pandoc.Walk (walkM)
import Text.Pandoc.Writers.Shared (getField, metaToJSON)

@@ -127,9 +127,11 @@ makePDF "pdfroff" pdfargs writer opts doc = do
verbosity <- getVerbosity
liftIO $ ms2pdf verbosity args source
makePDF program pdfargs writer opts doc = do
let withTemp = if takeBaseName program == "context"
then withTempDirectory "."
else withTempDir
-- With context and latex, we create a temp directory within
-- the working directory, since pdflatex sometimes tries to
-- use tools like epstopdf.pl, which are restricted if run
-- on files outside the working directory.
let withTemp = withTempDirectory "."
commonState <- getCommonState
verbosity <- getVerbosity
liftIO $ withTemp "tex2pdf." $ \tmpdir -> do

@@ -170,6 +172,8 @@ convertImage tmpdir fname =
Just "image/png" -> doNothing
Just "image/jpeg" -> doNothing
Just "application/pdf" -> doNothing
-- Note: eps is converted by pdflatex using epstopdf.pl
Just "application/eps" -> doNothing
Just "image/svg+xml" -> E.catch (do
(exit, _) <- pipeProcess Nothing "rsvg-convert"
["-f","pdf","-a","-o",pdfOut,fname] BL.empty

@@ -274,7 +278,12 @@ runTeXProgram verbosity program args runNumber numRuns tmpDir source = do
let file' = file
#endif
let programArgs = ["-halt-on-error", "-interaction", "nonstopmode",
"-output-directory", tmpDir'] ++ args ++ [file']
"-output-directory", tmpDir'] ++
-- see #4484, only compress images on last run:
if program == "xelatex" && runNumber < numRuns
then ["-output-driver", "xdvipdfmx -z0"]
else []
++ args ++ [file']
env' <- getEnvironment
let sep = [searchPathSeparator]
let texinputs = maybe (tmpDir' ++ sep) ((tmpDir' ++ sep) ++)

@@ -354,9 +363,14 @@ html2pdf :: Verbosity -- ^ Verbosity level
-> Text -- ^ HTML5 source
-> IO (Either ByteString ByteString)
html2pdf verbosity program args source = do
-- write HTML to temp file so we don't have to rewrite
-- all links in `a`, `img`, `style`, `script`, etc. tags,
-- and piping to weasyprint didn't work on Windows either.
file <- withTempFile "." "html2pdf.html" $ \fp _ -> return fp
pdfFile <- withTempFile "." "html2pdf.pdf" $ \fp _ -> return fp
BS.writeFile file $ UTF8.fromText source
let pdfFileArgName = ["-o" | program == "prince"]
let programArgs = args ++ ["-"] ++ pdfFileArgName ++ [pdfFile]
let programArgs = args ++ [file] ++ pdfFileArgName ++ [pdfFile]
env' <- getEnvironment
when (verbosity >= INFO) $ do
putStrLn "[makePDF] Command line:"

@@ -365,15 +379,16 @@ html2pdf verbosity program args source = do
putStrLn "[makePDF] Environment:"
mapM_ print env'
putStr "\n"
putStrLn "[makePDF] Contents of intermediate HTML:"
TextIO.putStr source
putStrLn $ "[makePDF] Contents of " ++ file ++ ":"
BL.readFile file >>= BL.putStr
putStr "\n"
(exit, out) <- E.catch
(pipeProcess (Just env') program programArgs $ BL.fromStrict $ UTF8.fromText source)
(pipeProcess (Just env') program programArgs BL.empty)
(\(e :: IOError) -> if isDoesNotExistError e
then E.throwIO $
PandocPDFProgramNotFoundError program
else E.throwIO e)
removeFile file
when (verbosity >= INFO) $ do
BL.hPutStr stdout out
putStr "\n"
@@ -1,3 +1,5 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE ExplicitForAll #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}

@@ -133,7 +135,7 @@ module Text.Pandoc.Parsing ( takeWhileP,
extractIdClass,
insertIncludedFile,
insertIncludedFileF,
-- * Re-exports from Text.Pandoc.Parsec
-- * Re-exports from Text.Parsec
Stream,
runParser,
runParserT,

@@ -194,6 +196,7 @@ module Text.Pandoc.Parsing ( takeWhileP,
)
where

import Prelude
import Control.Monad.Identity
import Control.Monad.Reader
import Data.Char (chr, isAlphaNum, isAscii, isAsciiUpper, isHexDigit,

@@ -202,7 +205,6 @@ import Data.Default
import Data.List (intercalate, isSuffixOf, transpose)
import qualified Data.Map as M
import Data.Maybe (mapMaybe, fromMaybe)
import Data.Monoid ((<>))
import qualified Data.Set as Set
import Data.Text (Text)
import Text.HTML.TagSoup.Entity (lookupEntity)

@@ -250,10 +252,11 @@ returnF = return . return
trimInlinesF :: Future s Inlines -> Future s Inlines
trimInlinesF = liftM trimInlines

instance Monoid a => Monoid (Future s a) where
instance Semigroup a => Semigroup (Future s a) where
(<>) = liftM2 (<>)
instance (Semigroup a, Monoid a) => Monoid (Future s a) where
mempty = return mempty
mappend = liftM2 mappend
mconcat = liftM mconcat . sequence
mappend = (<>)

-- | Parse characters while a predicate is true.
takeWhileP :: Monad m

@@ -529,15 +532,15 @@ romanNumeral upperCase = do
map char romanDigits
thousands <- ((1000 *) . length) <$> many thousand
ninehundreds <- option 0 $ try $ hundred >> thousand >> return 900
fivehundreds <- ((500 *) . length) <$> many fivehundred
fivehundreds <- option 0 $ 500 <$ fivehundred
fourhundreds <- option 0 $ try $ hundred >> fivehundred >> return 400
hundreds <- ((100 *) . length) <$> many hundred
nineties <- option 0 $ try $ ten >> hundred >> return 90
fifties <- ((50 *) . length) <$> many fifty
fifties <- option 0 $ (50 <$ fifty)
forties <- option 0 $ try $ ten >> fifty >> return 40
tens <- ((10 *) . length) <$> many ten
nines <- option 0 $ try $ one >> ten >> return 9
fives <- ((5 *) . length) <$> many five
fives <- option 0 $ (5 <$ five)
fours <- option 0 $ try $ one >> five >> return 4
ones <- length <$> many one
let total = thousands + ninehundreds + fivehundreds + fourhundreds +

@@ -590,7 +593,7 @@ uri = try $ do
-- http://en.wikipedia.org/wiki/State_of_emergency_(disambiguation)
-- as a URL, while NOT picking up the closing paren in
-- (http://wikipedia.org). So we include balanced parens in the URL.
let isWordChar c = isAlphaNum c || c `elem` "#$%*+/@\\_-&="
let isWordChar c = isAlphaNum c || c `elem` "#$%+/@\\_-&="
let wordChar = satisfy isWordChar
let percentEscaped = try $ char '%' >> skipMany1 (satisfy isHexDigit)
let entity = () <$ characterReference

@@ -1437,7 +1440,7 @@ token pp pos match = tokenPrim pp (\_ t _ -> pos t) match

infixr 5 <+?>
(<+?>) :: (Monoid a) => ParserT s st m a -> ParserT s st m a -> ParserT s st m a
a <+?> b = a >>= flip fmap (try b <|> return mempty) . (<>)
a <+?> b = a >>= flip fmap (try b <|> return mempty) . mappend

extractIdClass :: Attr -> Attr
extractIdClass (ident, cls, kvs) = (ident', cls', kvs')
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-

@@ -77,12 +78,12 @@ module Text.Pandoc.Pretty (
)

where
import Prelude
import Control.Monad
import Control.Monad.State.Strict
import Data.Char (isSpace)
import Data.Foldable (toList)
import Data.List (intersperse)
import Data.Monoid ((<>))
import Data.Sequence (Seq, ViewL (..), fromList, mapWithIndex, singleton, viewl,
(<|))
import qualified Data.Sequence as Seq

@@ -112,7 +113,7 @@ data D = Text Int String
deriving (Show, Eq)

newtype Doc = Doc { unDoc :: Seq D }
deriving (Monoid, Show, Eq)
deriving (Semigroup, Monoid, Show, Eq)

instance IsString Doc where
fromString = text

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2013-2018 John MacFarlane <jgm@berkeley.edu>

@@ -29,6 +30,7 @@ ByteString variant of 'readProcessWithExitCode'.
-}
module Text.Pandoc.Process (pipeProcess)
where
import Prelude
import Control.Concurrent (forkIO, newEmptyMVar, putMVar, takeMVar)
import Control.Exception
import Control.Monad (unless)
@@ -1,6 +1,7 @@
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE MonoLocalBinds #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE ScopedTypeVariables #-}

{-
Copyright (C) 2006-2018 John MacFarlane <jgm@berkeley.edu>

@@ -64,11 +65,13 @@ module Text.Pandoc.Readers
, readTxt2Tags
, readEPUB
, readMuse
, readFB2
-- * Miscellaneous
, getReader
, getDefaultExtensions
) where

import Prelude
import Control.Monad.Except (throwError)
import Data.Aeson
import qualified Data.ByteString.Lazy as BL

@@ -84,6 +87,7 @@ import Text.Pandoc.Readers.Creole
import Text.Pandoc.Readers.DocBook
import Text.Pandoc.Readers.Docx
import Text.Pandoc.Readers.EPUB
import Text.Pandoc.Readers.FB2
import Text.Pandoc.Readers.Haddock
import Text.Pandoc.Readers.HTML (readHtml)
import Text.Pandoc.Readers.JATS (readJATS)

@@ -143,6 +147,7 @@ readers = [ ("native" , TextReader readNative)
,("epub" , ByteStringReader readEPUB)
,("muse" , TextReader readMuse)
,("man" , TextReader readMan)
,("fb2" , TextReader readFB2)
]

-- | Retrieve reader, extensions based on formatSpec (format+extensions).

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2015-2018 John MacFarlane <jgm@berkeley.edu>

@@ -32,6 +33,7 @@ CommonMark is a strongly specified variant of Markdown: http://commonmark.org.
module Text.Pandoc.Readers.CommonMark (readCommonMark)
where

import Prelude
import CMarkGFM
import Control.Monad.State
import Data.Char (isAlphaNum, isLetter, isSpace, toLower)
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2017 Sascha Wilde <wilde@sha-bang.de>

@@ -35,10 +36,10 @@ Conversion of creole text to 'Pandoc' document.
module Text.Pandoc.Readers.Creole ( readCreole
) where

import Prelude
import Control.Monad.Except (guard, liftM2, throwError)
import qualified Data.Foldable as F
import Data.Maybe (fromMaybe)
import Data.Monoid
import Data.Text (Text)
import qualified Data.Text as T
import qualified Text.Pandoc.Builder as B

@@ -67,7 +68,7 @@ type CRLParser = ParserT [Char] ParserState
-- Utility functions
--

(<+>) :: (Monad m, Monoid a) => m a -> m a -> m a
(<+>) :: (Monad m, Semigroup a) => m a -> m a -> m a
(<+>) = liftM2 (<>)

-- we have to redefine `enclosed' from Text.Pandoc.Parsing, because it
@@ -1,5 +1,35 @@
{-# LANGUAGE ExplicitForAll #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2006-2018 John MacFarlane <jgm@berkeley.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}

{- |
Module : Text.Pandoc.Readers.DocBook
Copyright : Copyright (C) 2006-2018 John MacFarlane
License : GNU GPL, version 2 or above

Maintainer : John MacFarlane <jgm@berkeley.edu>
Stability : alpha
Portability : portable

Conversion of DocBook XML to 'Pandoc' document.
-}
module Text.Pandoc.Readers.DocBook ( readDocBook ) where
import Prelude
import Control.Monad.State.Strict
import Data.Char (isSpace, toUpper)
import Data.Default

@@ -235,7 +265,7 @@ List of all DocBook tags, with [x] indicating implemented,
[ ] manvolnum - A reference volume number
[x] markup - A string of formatting markup in text that is to be
represented literally
[ ] mathphrase - A mathematical phrase, an expression that can be represented
[x] mathphrase - A mathematical phrase, an expression that can be represented
with ordinary text and a small amount of markup
[ ] medialabel - A name that identifies the physical medium on which some
information resides

@@ -697,6 +727,8 @@ parseBlock (Elem e) =
"bibliodiv" -> sect 1
"biblioentry" -> parseMixed para (elContent e)
"bibliomixed" -> parseMixed para (elContent e)
"equation" -> para <$> equation e displayMath
"informalequation" -> para <$> equation e displayMath
"glosssee" -> para . (\ils -> text "See " <> ils <> str ".")
<$> getInlines e
"glossseealso" -> para . (\ils -> text "See also " <> ils <> str ".")

@@ -923,9 +955,9 @@ parseInline (CRef ref) =
return $ maybe (text $ map toUpper ref) text $ lookupEntity ref
parseInline (Elem e) =
case qName (elName e) of
"equation" -> equation displayMath
"informalequation" -> equation displayMath
"inlineequation" -> equation math
"equation" -> equation e displayMath
"informalequation" -> equation e displayMath
"inlineequation" -> equation e math
"subscript" -> subscript <$> innerInlines
"superscript" -> superscript <$> innerInlines
"inlinemediaobject" -> getMediaobject e

@@ -1004,13 +1036,6 @@ parseInline (Elem e) =
_ -> innerInlines
where innerInlines = (trimInlines . mconcat) <$>
mapM parseInline (elContent e)
equation constructor = return $ mconcat $
map (constructor . writeTeX)
$ rights
$ map (readMathML . showElement . everywhere (mkT removePrefix))
$ filterChildren (\x -> qName (elName x) == "math" &&
qPrefix (elName x) == Just "mml") e
removePrefix elname = elname { qPrefix = Nothing }
codeWithLang = do
let classes' = case attrValue "language" e of
"" -> []

@@ -1048,6 +1073,7 @@ parseInline (Elem e) =
| not (null xrefLabel) = xrefLabel
| otherwise = case qName (elName el) of
"chapter" -> descendantContent "title" el
"section" -> descendantContent "title" el
"sect1" -> descendantContent "title" el
"sect2" -> descendantContent "title" el
"sect3" -> descendantContent "title" el

@@ -1060,3 +1086,45 @@ parseInline (Elem e) =
xrefLabel = attrValue "xreflabel" el
descendantContent name = maybe "???" strContent
. filterElementName (\n -> qName n == name)

-- | Extract a math equation from an element
--
-- asciidoc can generate Latex math in CDATA sections.
--
-- Note that if some MathML can't be parsed it is silently ignored!
equation
:: Monad m
=> Element
-- ^ The element from which to extract a mathematical equation
-> (String -> Inlines)
-- ^ A constructor for some Inlines, taking the TeX code as input
-> m Inlines
equation e constructor =
return $ mconcat $ map constructor $ mathMLEquations ++ latexEquations
where
mathMLEquations :: [String]
mathMLEquations = map writeTeX $ rights $ readMath
(\x -> qName (elName x) == "math" && qPrefix (elName x) == Just "mml")
(readMathML . showElement)

latexEquations :: [String]
latexEquations = readMath (\x -> qName (elName x) == "mathphrase")
(concat . fmap showVerbatimCData . elContent)

readMath :: (Element -> Bool) -> (Element -> b) -> [b]
readMath childPredicate fromElement =
( map (fromElement . everywhere (mkT removePrefix))
$ filterChildren childPredicate e
)

-- | Get the actual text stored in a verbatim CData block. 'showContent'
-- returns the text still surrounded by the [[CDATA]] tags.
--
-- Returns 'showContent' if this is not a verbatim CData
showVerbatimCData :: Content -> String
showVerbatimCData (Text (CData CDataVerbatim d _)) = d
showVerbatimCData c = showContent c

-- | Set the prefix of a name to 'Nothing'
removePrefix :: QName -> QName
removePrefix elname = elname { qPrefix = Nothing }
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE PatternGuards #-}

@@ -74,6 +75,7 @@ module Text.Pandoc.Readers.Docx
( readDocx
) where

import Prelude
import Codec.Archive.Zip
import Control.Monad.Reader
import Control.Monad.State.Strict

@@ -122,7 +124,6 @@ data DState = DState { docxAnchorMap :: M.Map String String
, docxImmedPrevAnchor :: Maybe String
, docxMediaBag :: MediaBag
, docxDropCap :: Inlines
, docxWarnings :: [String]
-- keep track of (numId, lvl) values for
-- restarting
, docxListState :: M.Map (String, String) Integer

@@ -135,18 +136,16 @@ instance Default DState where
, docxImmedPrevAnchor = Nothing
, docxMediaBag = mempty
, docxDropCap = mempty
, docxWarnings = []
, docxListState = M.empty
, docxPrevPara = mempty
}

data DEnv = DEnv { docxOptions :: ReaderOptions
, docxInHeaderBlock :: Bool
, docxCustomStyleAlready :: Bool
}

instance Default DEnv where
def = DEnv def False False
def = DEnv def False

type DocxContext m = ReaderT DEnv (StateT DState m)

@@ -252,103 +251,88 @@ parPartToString _ = ""
blacklistedCharStyles :: [String]
blacklistedCharStyles = ["Hyperlink"]

resolveDependentRunStyle :: RunStyle -> RunStyle
resolveDependentRunStyle :: PandocMonad m => RunStyle -> DocxContext m RunStyle
resolveDependentRunStyle rPr
| Just (s, _) <- rStyle rPr, s `elem` blacklistedCharStyles =
rPr
| Just (_, cs) <- rStyle rPr =
let rPr' = resolveDependentRunStyle cs
in
RunStyle { isBold = case isBold rPr of
Just bool -> Just bool
Nothing -> isBold rPr'
, isItalic = case isItalic rPr of
Just bool -> Just bool
Nothing -> isItalic rPr'
, isSmallCaps = case isSmallCaps rPr of
Just bool -> Just bool
Nothing -> isSmallCaps rPr'
, isStrike = case isStrike rPr of
Just bool -> Just bool
Nothing -> isStrike rPr'
, rVertAlign = case rVertAlign rPr of
Just valign -> Just valign
Nothing -> rVertAlign rPr'
, rUnderline = case rUnderline rPr of
Just ulstyle -> Just ulstyle
Nothing -> rUnderline rPr'
, rStyle = rStyle rPr }
| otherwise = rPr

extraRunStyleInfo :: PandocMonad m => RunStyle -> DocxContext m (Inlines -> Inlines)
extraRunStyleInfo rPr
| Just (s, _) <- rStyle rPr = do
already <- asks docxCustomStyleAlready
return rPr
| Just (_, cs) <- rStyle rPr = do
opts <- asks docxOptions
return $ if isEnabled Ext_styles opts && not already
then spanWith ("", [], [("custom-style", s)])
else id
| otherwise = return id
if isEnabled Ext_styles opts
then return rPr
else do rPr' <- resolveDependentRunStyle cs
return $
RunStyle { isBold = case isBold rPr of
Just bool -> Just bool
Nothing -> isBold rPr'
, isItalic = case isItalic rPr of
Just bool -> Just bool
Nothing -> isItalic rPr'
, isSmallCaps = case isSmallCaps rPr of
Just bool -> Just bool
Nothing -> isSmallCaps rPr'
, isStrike = case isStrike rPr of
Just bool -> Just bool
Nothing -> isStrike rPr'
, rVertAlign = case rVertAlign rPr of
Just valign -> Just valign
Nothing -> rVertAlign rPr'
, rUnderline = case rUnderline rPr of
Just ulstyle -> Just ulstyle
Nothing -> rUnderline rPr'
, rStyle = rStyle rPr }
| otherwise = return rPr

runStyleToTransform :: PandocMonad m => RunStyle -> DocxContext m (Inlines -> Inlines)
runStyleToTransform rPr
| Just (s, _) <- rStyle rPr
, s `elem` spansToKeep = do
let rPr' = rPr{rStyle = Nothing}
transform <- runStyleToTransform rPr'
transform <- runStyleToTransform rPr{rStyle = Nothing}
return $ spanWith ("", [s], []) . transform
| Just (s, _) <- rStyle rPr = do
opts <- asks docxOptions
let extraInfo = if isEnabled Ext_styles opts
then spanWith ("", [], [("custom-style", s)])
else id
transform <- runStyleToTransform rPr{rStyle = Nothing}
return $ extraInfo . transform
| Just True <- isItalic rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {isItalic = Nothing}
return $ extraInfo . emph . transform
transform <- runStyleToTransform rPr{isItalic = Nothing}
return $ emph . transform
| Just True <- isBold rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {isBold = Nothing}
return $ extraInfo . strong . transform
transform <- runStyleToTransform rPr{isBold = Nothing}
return $ strong . transform
| Just True <- isSmallCaps rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {isSmallCaps = Nothing}
return $ extraInfo . smallcaps . transform
transform <- runStyleToTransform rPr{isSmallCaps = Nothing}
return $ smallcaps . transform
| Just True <- isStrike rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {isStrike = Nothing}
return $ extraInfo . strikeout . transform
transform <- runStyleToTransform rPr{isStrike = Nothing}
return $ strikeout . transform
| Just SupScrpt <- rVertAlign rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {rVertAlign = Nothing}
return $ extraInfo . superscript . transform
transform <- runStyleToTransform rPr{rVertAlign = Nothing}
return $ superscript . transform
| Just SubScrpt <- rVertAlign rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {rVertAlign = Nothing}
return $ extraInfo . subscript . transform
transform <- runStyleToTransform rPr{rVertAlign = Nothing}
return $ subscript . transform
| Just "single" <- rUnderline rPr = do
extraInfo <- extraRunStyleInfo rPr
transform <- local (\e -> e{docxCustomStyleAlready = True}) $
runStyleToTransform rPr {rUnderline = Nothing}
return $ extraInfo . underlineSpan . transform
| otherwise = extraRunStyleInfo rPr
transform <- runStyleToTransform rPr{rUnderline = Nothing}
return $ underlineSpan . transform
| otherwise = return id

runToInlines :: PandocMonad m => Run -> DocxContext m Inlines
runToInlines (Run rs runElems)
| Just (s, _) <- rStyle rs
, s `elem` codeStyles =
let rPr = resolveDependentRunStyle rs
codeString = code $ concatMap runElemToString runElems
in
return $ case rVertAlign rPr of
Just SupScrpt -> superscript codeString
Just SubScrpt -> subscript codeString
_ -> codeString
, s `elem` codeStyles = do
rPr <- resolveDependentRunStyle rs
let codeString = code $ concatMap runElemToString runElems
return $ case rVertAlign rPr of
Just SupScrpt -> superscript codeString
Just SubScrpt -> subscript codeString
_ -> codeString
| otherwise = do
let ils = smushInlines (map runElemToInlines runElems)
transform <- runStyleToTransform $ resolveDependentRunStyle rs
return $ transform ils
rPr <- resolveDependentRunStyle rs
let ils = smushInlines (map runElemToInlines runElems)
transform <- runStyleToTransform rPr
return $ transform ils
runToInlines (Footnote bps) = do
blksList <- smushBlocks <$> mapM bodyPartToBlocks bps
return $ note blksList

@@ -385,7 +369,7 @@ blocksToInlinesWarn cmtId blks = do
parPartToInlines :: PandocMonad m => ParPart -> DocxContext m Inlines
parPartToInlines parPart =
case parPart of
(BookMark _ anchor) | notElem anchor dummyAnchors -> do
(BookMark _ anchor) | anchor `notElem` dummyAnchors -> do
inHdrBool <- asks docxInHeaderBlock
ils <- parPartToInlines' parPart
immedPrevAnchor <- gets docxImmedPrevAnchor

@@ -478,8 +462,6 @@ parPartToInlines' (ExternalHyperLink target runs) = do
return $ link target "" ils
parPartToInlines' (PlainOMath exps) =
return $ math $ writeTeX exps
parPartToInlines' (SmartTag runs) =
smushInlines <$> mapM runToInlines runs
parPartToInlines' (Field info runs) =
case info of
HyperlinkField url -> parPartToInlines' $ ExternalHyperLink url runs

@@ -706,6 +688,10 @@ bodyPartToBlocks (Tbl cap _ look parts@(r:rs)) = do
rowLength :: Row -> Int
rowLength (Row c) = length c

-- pad cells. New Text.Pandoc.Builder will do that for us,
-- so this is for compatibility while we switch over.
let cells' = map (\row -> take width (row ++ repeat mempty)) cells

hdrCells <- case hdr of
Just r' -> rowToBlocksList r'
Nothing -> return $ replicate width mempty

@@ -718,7 +704,7 @@ bodyPartToBlocks (Tbl cap _ look parts@(r:rs)) = do
let alignments = replicate width AlignDefault
widths = replicate width 0 :: [Double]

return $ table caption (zip alignments widths) hdrCells cells
return $ table caption (zip alignments widths) hdrCells cells'
bodyPartToBlocks (OMathPara e) =
return $ para $ displayMath (writeTeX e)
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE TypeSynonymInstances #-}

@@ -7,6 +8,7 @@ module Text.Pandoc.Readers.Docx.Combine ( smushInlines
)
where

import Prelude
import Data.List
import Data.Sequence (ViewL (..), ViewR (..), viewl, viewr, (><), (|>))
import qualified Data.Sequence as Seq (null)

@@ -133,6 +135,10 @@ combineBlocks bs cs
| bs' :> BlockQuote bs'' <- viewr (unMany bs)
, BlockQuote cs'' :< cs' <- viewl (unMany cs) =
Many $ (bs' |> BlockQuote (bs'' <> cs'')) >< cs'
| bs' :> CodeBlock attr codeStr <- viewr (unMany bs)
, CodeBlock attr' codeStr' :< cs' <- viewl (unMany cs)
, attr == attr' =
Many $ (bs' |> CodeBlock attr (codeStr <> "\n" <> codeStr')) >< cs'
combineBlocks bs cs = bs <> cs

instance (Monoid a, Eq a) => Eq (Modifier a) where

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2014-2018 Jesse Rosenthal <jrosenthal@jhu.edu>

@@ -32,6 +33,7 @@ module Text.Pandoc.Readers.Docx.Fields ( FieldInfo(..)
, parseFieldInfo
) where

import Prelude
import Text.Parsec
import Text.Parsec.String (Parser)

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-
Copyright (C) 2014-2018 Jesse Rosenthal <jrosenthal@jhu.edu>

@@ -33,6 +34,7 @@ module Text.Pandoc.Readers.Docx.Lists ( blocksToBullets
, listParagraphDivs
) where

import Prelude
import Data.List
import Data.Maybe
import Text.Pandoc.Generic (bottomUp)
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE ViewPatterns #-}

@@ -58,6 +59,7 @@ module Text.Pandoc.Readers.Docx.Parse ( Docx(..)
, archiveToDocx
, archiveToDocxWithWarnings
) where
import Prelude
import Codec.Archive.Zip
import Control.Applicative ((<|>))
import Control.Monad.Except

@@ -132,21 +134,23 @@ mapD f xs =
in
concatMapM handler xs

unwrapSDT :: NameSpaces -> Content -> [Content]
unwrapSDT ns (Elem element)
unwrap :: NameSpaces -> Content -> [Content]
unwrap ns (Elem element)
| isElem ns "w" "sdt" element
, Just sdtContent <- findChildByName ns "w" "sdtContent" element
= map Elem $ elChildren sdtContent
unwrapSDT _ content = [content]
= concatMap ((unwrap ns) . Elem) (elChildren sdtContent)
| isElem ns "w" "smartTag" element
= concatMap ((unwrap ns) . Elem) (elChildren element)
unwrap _ content = [content]

unwrapSDTchild :: NameSpaces -> Content -> Content
unwrapSDTchild ns (Elem element) =
Elem $ element { elContent = concatMap (unwrapSDT ns) (elContent element) }
unwrapSDTchild _ content = content
unwrapChild :: NameSpaces -> Content -> Content
unwrapChild ns (Elem element) =
Elem $ element { elContent = concatMap (unwrap ns) (elContent element) }
unwrapChild _ content = content

walkDocument' :: NameSpaces -> XMLC.Cursor -> XMLC.Cursor
walkDocument' ns cur =
let modifiedCur = XMLC.modifyContent (unwrapSDTchild ns) cur
let modifiedCur = XMLC.modifyContent (unwrapChild ns) cur
in
case XMLC.nextDF modifiedCur of
Just cur' -> walkDocument' ns cur'

@@ -275,7 +279,6 @@ data ParPart = PlainRun Run
| Drawing FilePath String String B.ByteString Extent -- title, alt
| Chart -- placeholder for now
| PlainOMath [Exp]
| SmartTag [Run]
| Field FieldInfo [Run]
| NullParPart -- when we need to return nothing, but
-- not because of an error.

@@ -825,10 +828,6 @@ elemToParPart ns element
| Just change <- getTrackedChange ns element = do
runs <- mapD (elemToRun ns) (elChildren element)
return $ ChangedRuns change runs
elemToParPart ns element
| isElem ns "w" "smartTag" element = do
runs <- mapD (elemToRun ns) (elChildren element)
return $ SmartTag runs
elemToParPart ns element
| isElem ns "w" "bookmarkStart" element
, Just bmId <- findAttrByName ns "w" "id" element
@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
module Text.Pandoc.Readers.Docx.StyleMap ( StyleMaps(..)
, alterMap
, getMap

@@ -7,6 +8,7 @@ module Text.Pandoc.Readers.Docx.StyleMap ( StyleMaps(..)
, hasStyleName
) where

import Prelude
import Control.Monad.State.Strict
import Data.Char (toLower)
import qualified Data.Map as M

@@ -1,3 +1,4 @@
{-# LANGUAGE NoImplicitPrelude #-}
module Text.Pandoc.Readers.Docx.Util (
NameSpaces
, elemName

@@ -8,6 +9,7 @@ module Text.Pandoc.Readers.Docx.Util (
, findAttrByName
) where

import Prelude
import Data.Maybe (mapMaybe)
import Text.XML.Light
@ -1,12 +1,41 @@
|
|||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-# LANGUAGE ViewPatterns #-}
|
||||
{-
|
||||
Copyright (C) 2014-2018 Matthew Pickering
|
||||
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-# LANGUAGE ViewPatterns #-}
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
-}
|
||||
|
||||
{- |
|
||||
Module : Text.Pandoc.Readers.EPUB
|
||||
Copyright : Copyright (C) 2014-2018 Matthew Pickering
|
||||
License : GNU GPL, version 2 or above
|
||||
|
||||
Maintainer : John MacFarlane <jgm@berkeley.edu>
|
||||
Stability : alpha
|
||||
Portability : portable
|
||||
|
||||
Conversion of EPUB to 'Pandoc' document.
|
||||
-}
|
||||
|
||||
module Text.Pandoc.Readers.EPUB
|
||||
(readEPUB)
|
||||
where
|
||||
|
||||
import Prelude
|
||||
import Codec.Archive.Zip (Archive (..), Entry, findEntryByPath, fromEntry,
|
||||
toArchiveOrFail)
|
||||
import Control.DeepSeq (NFData, deepseq)
|
||||
|
@ -16,7 +45,6 @@ import qualified Data.ByteString.Lazy as BL (ByteString)
|
|||
import Data.List (isInfixOf, isPrefixOf)
|
||||
import qualified Data.Map as M (Map, elems, fromList, lookup)
|
||||
import Data.Maybe (fromMaybe, mapMaybe)
|
||||
import Data.Monoid ((<>))
|
||||
import qualified Data.Text.Lazy as TL
|
||||
import qualified Data.Text.Lazy.Encoding as TL
|
||||
import Network.URI (unEscapeString)
|
||||
|
@ -93,7 +121,7 @@ fetchImages mimes root arc (query iq -> links) =
|
|||
mapM_ (uncurry3 insertMedia) (mapMaybe getEntry links)
|
||||
where
|
||||
getEntry link =
|
||||
let abslink = normalise (root </> link) in
|
||||
let abslink = normalise (unEscapeString (root </> link)) in
|
||||
(link , lookup link mimes, ) . fromEntry
|
||||
<$> findEntryByPath abslink arc
|
||||
|
||||
|
@ -264,7 +292,7 @@ findAttrE :: PandocMonad m => QName -> Element -> m String
|
|||
findAttrE q e = mkE "findAttr" $ findAttr q e
|
||||
|
||||
findEntryByPathE :: PandocMonad m => FilePath -> Archive -> m Entry
|
||||
findEntryByPathE (normalise -> path) a =
|
||||
findEntryByPathE (normalise . unEscapeString -> path) a =
|
||||
mkE ("No entry on path: " ++ path) $ findEntryByPath path a
|
||||
|
||||
parseXMLDocE :: PandocMonad m => String -> m Element
|
||||
|
|
src/Text/Pandoc/Readers/FB2.hs (new file, 404 lines)

@@ -0,0 +1,404 @@
|
|||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-
|
||||
Copyright (C) 2018 Alexander Krotov <ilabdsf@gmail.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
-}
|
||||
|
||||
{- |
|
||||
Module : Text.Pandoc.Readers.FB2
|
||||
Copyright : Copyright (C) 2018 Alexander Krotov
|
||||
License : GNU GPL, version 2 or above
|
||||
|
||||
Maintainer : Alexander Krotov <ilabdsf@gmail.com>
|
||||
Stability : alpha
|
||||
Portability : portable
|
||||
|
||||
Conversion of FB2 to 'Pandoc' document.
|
||||
-}
|
||||
|
||||
{-
|
||||
|
||||
TODO:
|
||||
- Tables
|
||||
- Named styles
|
||||
- Parse ID attribute for all elements that have it
|
||||
|
||||
-}
|
||||
|
||||
module Text.Pandoc.Readers.FB2 ( readFB2 ) where
|
||||
import Prelude
|
||||
import Control.Monad.Except (throwError)
|
||||
import Control.Monad.State.Strict
|
||||
import Data.ByteString.Lazy.Char8 ( pack )
|
||||
import Data.ByteString.Base64.Lazy
|
||||
import Data.Char (isSpace, toUpper)
|
||||
import Data.Functor
|
||||
import Data.List (dropWhileEnd, intersperse)
|
||||
import Data.List.Split (splitOn)
|
||||
import Data.Text (Text)
|
||||
import Data.Default
|
||||
import Data.Maybe
|
||||
import Text.HTML.TagSoup.Entity (lookupEntity)
|
||||
import Text.Pandoc.Builder
|
||||
import Text.Pandoc.Class (PandocMonad, insertMedia, report)
|
||||
import Text.Pandoc.Error
|
||||
import Text.Pandoc.Logging
|
||||
import Text.Pandoc.Options
|
||||
import Text.Pandoc.Shared (crFilter)
|
||||
import Text.XML.Light
|
||||
|
||||
type FB2 m = StateT FB2State m
|
||||
|
||||
data FB2State = FB2State{ fb2SectionLevel :: Int
|
||||
, fb2Meta :: Meta
|
||||
, fb2Authors :: [String]
|
||||
} deriving Show
|
||||
|
||||
instance Default FB2State where
|
||||
def = FB2State{ fb2SectionLevel = 1
|
||||
, fb2Meta = mempty
|
||||
, fb2Authors = []
|
||||
}
|
||||
|
||||
instance HasMeta FB2State where
|
||||
setMeta field v s = s {fb2Meta = setMeta field v (fb2Meta s)}
|
||||
deleteMeta field s = s {fb2Meta = deleteMeta field (fb2Meta s)}
|
||||
|
||||
readFB2 :: PandocMonad m => ReaderOptions -> Text -> m Pandoc
|
||||
readFB2 _ inp = do
|
||||
(bs, st) <- runStateT (mapM parseBlock $ parseXML (crFilter inp)) def
|
||||
let authors = if null $ fb2Authors st
|
||||
then id
|
||||
else setMeta "author" (map text $ reverse $ fb2Authors st)
|
||||
pure $ Pandoc (authors $ fb2Meta st) (toList . mconcat $ bs)
|
||||
|
||||
-- * Utility functions
|
||||
|
||||
trim :: String -> String
|
||||
trim = dropWhileEnd isSpace . dropWhile isSpace
|
||||
|
||||
removeHash :: String -> String
|
||||
removeHash ('#':xs) = xs
|
||||
removeHash xs = xs
|
||||
|
||||
convertEntity :: String -> String
|
||||
convertEntity e = fromMaybe (map toUpper e) (lookupEntity e)
|
||||
|
||||
parseInline :: PandocMonad m => Content -> FB2 m Inlines
|
||||
parseInline (Elem e) =
|
||||
case qName $ elName e of
|
||||
"strong" -> strong <$> parseStyleType e
|
||||
"emphasis" -> emph <$> parseStyleType e
|
||||
"style" -> parseNamedStyle e
|
||||
"a" -> parseLinkType e
|
||||
"strikethrough" -> strikeout <$> parseStyleType e
|
||||
"sub" -> subscript <$> parseStyleType e
|
||||
"sup" -> superscript <$> parseStyleType e
|
||||
"code" -> pure $ code $ strContent e
|
||||
"image" -> parseInlineImageElement e
|
||||
name -> throwError $ PandocParseError ("Couldn't parse FB2 file: unexpected element " ++ name ++ ".")
|
||||
parseInline (Text x) = pure $ text $ cdData x
|
||||
parseInline (CRef r) = pure $ str $ convertEntity r
|
||||
|
||||
parseSubtitle :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseSubtitle e = headerWith ("", ["unnumbered"], []) <$> gets fb2SectionLevel <*> parsePType e
|
||||
|
||||
-- * Root element parser
|
||||
|
||||
parseBlock :: PandocMonad m => Content -> FB2 m Blocks
|
||||
parseBlock (Elem e) =
|
||||
case qName $ elName e of
|
||||
"?xml" -> pure mempty
|
||||
"FictionBook" -> mconcat <$> mapM parseFictionBookChild (elChildren e)
|
||||
name -> report (UnexpectedXmlElement name "root") $> mempty
|
||||
parseBlock _ = pure mempty
|
||||
|
||||
-- | Parse a child of @\<FictionBook>@ element.
|
||||
parseFictionBookChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseFictionBookChild e =
|
||||
case qName $ elName e of
|
||||
"stylesheet" -> pure mempty -- stylesheet is ignored
|
||||
"description" -> mempty <$ mapM_ parseDescriptionChild (elChildren e)
|
||||
"body" -> mconcat <$> mapM parseBodyChild (elChildren e)
|
||||
"binary" -> mempty <$ parseBinaryElement e
|
||||
name -> report (UnexpectedXmlElement name "FictionBook") $> mempty
|
||||
|
||||
-- | Parse a child of @\<description>@ element.
|
||||
parseDescriptionChild :: PandocMonad m => Element -> FB2 m ()
|
||||
parseDescriptionChild e =
|
||||
case qName $ elName e of
|
||||
"title-info" -> mapM_ parseTitleInfoChild (elChildren e)
|
||||
"src-title-info" -> pure () -- ignore
|
||||
"document-info" -> pure ()
|
||||
"publish-info" -> pure ()
|
||||
"custom-info" -> pure ()
|
||||
"output" -> pure ()
|
||||
name -> throwError $ PandocParseError ("Couldn't parse FB2 file: unexpected element " ++ name ++ "in description.")
|
||||
|
||||
-- | Parse a child of @\<body>@ element.
|
||||
parseBodyChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseBodyChild e =
|
||||
case qName $ elName e of
|
||||
"image" -> parseImageElement e
|
||||
"title" -> header <$> gets fb2SectionLevel <*> parseTitleType (elContent e)
|
||||
"epigraph" -> parseEpigraph e
|
||||
"section" -> parseSection e
|
||||
name -> throwError $ PandocParseError ("Couldn't parse FB2 file: unexpected element " ++ name ++ " in body.")
|
||||
|
||||
-- | Parse a @\<binary>@ element.
|
||||
parseBinaryElement :: PandocMonad m => Element -> FB2 m ()
|
||||
parseBinaryElement e =
|
||||
case (findAttr (QName "id" Nothing Nothing) e, findAttr (QName "content-type" Nothing Nothing) e) of
|
||||
(Nothing, _) -> throwError $ PandocParseError "<binary> element must have an \"id\" attribute"
|
||||
(Just _, Nothing) -> throwError $ PandocParseError "<binary> element must have a \"content-type\" attribute"
|
||||
(Just filename, contentType) -> insertMedia filename contentType (decodeLenient (pack (strContent e)))
|
||||
|
||||
-- * Type parsers
|
||||
|
||||
-- | Parse @authorType@
|
||||
parseAuthor :: PandocMonad m => Element -> FB2 m String
|
||||
parseAuthor e = unwords <$> mapM parseAuthorChild (elChildren e)
|
||||
|
||||
parseAuthorChild :: PandocMonad m => Element -> FB2 m String
|
||||
parseAuthorChild e =
|
||||
case qName $ elName e of
|
||||
"first-name" -> pure $ strContent e
|
||||
"middle-name" -> pure $ strContent e
|
||||
"last-name" -> pure $ strContent e
|
||||
"nickname" -> pure $ strContent e
|
||||
"home-page" -> pure $ strContent e
|
||||
"email" -> pure $ strContent e
|
||||
name -> throwError $ PandocParseError ("Couldn't parse FB2 file: unexpected element " ++ name ++ " in author.")
|
||||
|
||||
-- | Parse @titleType@
|
||||
parseTitle :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseTitle e = header <$> gets fb2SectionLevel <*> parseTitleType (elContent e)
|
||||
|
||||
parseTitleType :: PandocMonad m => [Content] -> FB2 m Inlines
|
||||
parseTitleType c = mconcat . intersperse linebreak . catMaybes <$> mapM parseTitleContent c
|
||||
|
||||
parseTitleContent :: PandocMonad m => Content -> FB2 m (Maybe Inlines)
|
||||
parseTitleContent (Elem e) =
|
||||
case qName $ elName e of
|
||||
"p" -> Just <$> parsePType e
|
||||
"empty-line" -> pure $ Just mempty
|
||||
_ -> pure mempty
|
||||
parseTitleContent _ = pure Nothing
|
||||
|
||||
-- | Parse @imageType@
|
||||
parseImageElement :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseImageElement e =
|
||||
case href of
|
||||
Just src -> pure $ para $ imageWith (imgId, [], []) (removeHash src) title alt
|
||||
Nothing -> throwError $ PandocParseError "Couldn't parse FB2 file: image without href."
|
||||
where alt = maybe mempty str $ findAttr (QName "alt" Nothing Nothing) e
|
||||
title = fromMaybe "" $ findAttr (QName "title" Nothing Nothing) e
|
||||
imgId = fromMaybe "" $ findAttr (QName "id" Nothing Nothing) e
|
||||
href = findAttr (QName "href" (Just "http://www.w3.org/1999/xlink") Nothing) e
|
||||
|
||||
-- | Parse @pType@
|
||||
parsePType :: PandocMonad m => Element -> FB2 m Inlines
|
||||
parsePType = parseStyleType -- TODO add support for optional "id" and "style" attributes
|
||||
|
||||
-- | Parse @citeType@
|
||||
parseCite :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseCite e = blockQuote . mconcat <$> mapM parseCiteChild (elChildren e)
|
||||
|
||||
-- | Parse @citeType@ child
|
||||
parseCiteChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseCiteChild e =
|
||||
case qName $ elName e of
|
||||
"p" -> para <$> parsePType e
|
||||
"poem" -> parsePoem e
|
||||
"empty-line" -> pure horizontalRule
|
||||
"subtitle" -> parseSubtitle e
|
||||
"table" -> parseTable e
|
||||
"text-author" -> para <$> parsePType e
|
||||
name -> report (UnexpectedXmlElement name "cite") $> mempty
|
||||
|
||||
-- | Parse @poemType@
|
||||
parsePoem :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parsePoem e = mconcat <$> mapM parsePoemChild (elChildren e)
|
||||
|
||||
parsePoemChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parsePoemChild e =
|
||||
case qName $ elName e of
|
||||
"title" -> parseTitle e
|
||||
"subtitle" -> parseSubtitle e
|
||||
"epigraph" -> parseEpigraph e
|
||||
"stanza" -> parseStanza e
|
||||
"text-author" -> para <$> parsePType e
|
||||
"date" -> pure $ para $ text $ strContent e
|
||||
name -> report (UnexpectedXmlElement name "poem") $> mempty
|
||||
|
||||
parseStanza :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseStanza e = fromList . joinLineBlocks . toList . mconcat <$> mapM parseStanzaChild (elChildren e)
|
||||
|
||||
joinLineBlocks :: [Block] -> [Block]
|
||||
joinLineBlocks (LineBlock xs:LineBlock ys:zs) = joinLineBlocks (LineBlock (xs ++ ys) : zs)
|
||||
joinLineBlocks (x:xs) = x:joinLineBlocks xs
|
||||
joinLineBlocks [] = []
|
||||
|
||||
parseStanzaChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseStanzaChild e =
|
||||
case qName $ elName e of
|
||||
"title" -> parseTitle e
|
||||
"subtitle" -> parseSubtitle e
|
||||
"v" -> lineBlock . (:[]) <$> parsePType e
|
||||
name -> report (UnexpectedXmlElement name "stanza") $> mempty
|
||||
|
||||
-- | Parse @epigraphType@
|
||||
parseEpigraph :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseEpigraph e =
|
||||
divWith (divId, ["epigraph"], []) . mconcat <$> mapM parseEpigraphChild (elChildren e)
|
||||
where divId = fromMaybe "" $ findAttr (QName "id" Nothing Nothing) e
|
||||
|
||||
parseEpigraphChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseEpigraphChild e =
|
||||
case qName $ elName e of
|
||||
"p" -> para <$> parsePType e
|
||||
"poem" -> parsePoem e
|
||||
"cite" -> parseCite e
|
||||
"empty-line" -> pure horizontalRule
|
||||
"text-author" -> para <$> parsePType e
|
||||
name -> report (UnexpectedXmlElement name "epigraph") $> mempty
|
||||
|
||||
-- | Parse @annotationType@
|
||||
parseAnnotation :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseAnnotation e = mconcat <$> mapM parseAnnotationChild (elChildren e)
|
||||
|
||||
parseAnnotationChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseAnnotationChild e =
|
||||
case qName $ elName e of
|
||||
"p" -> para <$> parsePType e
|
||||
"poem" -> parsePoem e
|
||||
"cite" -> parseCite e
|
||||
"subtitle" -> parseSubtitle e
|
||||
"table" -> parseTable e
|
||||
"empty-line" -> pure horizontalRule
|
||||
name -> report (UnexpectedXmlElement name "annotation") $> mempty
|
||||
|
||||
-- | Parse @sectionType@
|
||||
parseSection :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseSection e = do
|
||||
n <- gets fb2SectionLevel
|
||||
modify $ \st -> st{ fb2SectionLevel = n + 1 }
|
||||
let sectionId = fromMaybe "" $ findAttr (QName "id" Nothing Nothing) e
|
||||
bs <- divWith (sectionId, ["section"], []) . mconcat <$> mapM parseSectionChild (elChildren e)
|
||||
modify $ \st -> st{ fb2SectionLevel = n }
|
||||
pure bs
|
||||
|
||||
parseSectionChild :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseSectionChild e =
|
||||
case qName $ elName e of
|
||||
"title" -> parseBodyChild e
|
||||
"epigraph" -> parseEpigraph e
|
||||
"image" -> parseImageElement e
|
||||
"annotation" -> parseAnnotation e
|
||||
"poem" -> parsePoem e
|
||||
"cite" -> parseCite e
|
||||
"empty-line" -> pure horizontalRule
|
||||
"table" -> parseTable e
|
||||
"subtitle" -> parseSubtitle e
|
||||
"p" -> para <$> parsePType e
|
||||
"section" -> parseSection e
|
||||
name -> report (UnexpectedXmlElement name "section") $> mempty
|
||||
|
||||
-- | parse @styleType@
|
||||
parseStyleType :: PandocMonad m => Element -> FB2 m Inlines
|
||||
parseStyleType e = mconcat <$> mapM parseInline (elContent e)
|
||||
|
||||
-- | Parse @namedStyleType@
|
||||
parseNamedStyle :: PandocMonad m => Element -> FB2 m Inlines
|
||||
parseNamedStyle e = do
|
||||
content <- mconcat <$> mapM parseNamedStyleChild (elContent e)
|
||||
let lang = maybeToList $ ("lang",) <$> findAttr (QName "lang" Nothing (Just "xml")) e
|
||||
case findAttr (QName "name" Nothing Nothing) e of
|
||||
Just name -> pure $ spanWith ("", [name], lang) content
|
||||
Nothing -> throwError $ PandocParseError "Couldn't parse FB2 file: link without required name."
|
||||
|
||||
parseNamedStyleChild :: PandocMonad m => Content -> FB2 m Inlines
|
||||
parseNamedStyleChild (Elem e) =
|
||||
case qName (elName e) of
|
||||
"strong" -> strong <$> parseStyleType e
|
||||
"emphasis" -> emph <$> parseStyleType e
|
||||
"style" -> parseNamedStyle e
|
||||
"a" -> parseLinkType e
|
||||
"strikethrough" -> strikeout <$> parseStyleType e
|
||||
"sub" -> subscript <$> parseStyleType e
|
||||
"sup" -> superscript <$> parseStyleType e
|
||||
"code" -> pure $ code $ strContent e
|
||||
"image" -> parseInlineImageElement e
|
||||
name -> throwError $ PandocParseError ("Couldn't parse FB2 file: unexpected element " ++ name ++ ".")
|
||||
parseNamedStyleChild x = parseInline x
|
||||
|
||||
-- | Parse @linkType@
|
||||
parseLinkType :: PandocMonad m => Element -> FB2 m Inlines
|
||||
parseLinkType e = do
|
||||
content <- mconcat <$> mapM parseStyleLinkType (elContent e)
|
||||
case findAttr (QName "href" (Just "http://www.w3.org/1999/xlink") Nothing) e of
|
||||
Just href -> pure $ link href "" content
|
||||
Nothing -> throwError $ PandocParseError "Couldn't parse FB2 file: link without required href."
|
||||
|
||||
-- | Parse @styleLinkType@
|
||||
parseStyleLinkType :: PandocMonad m => Content -> FB2 m Inlines
|
||||
parseStyleLinkType x@(Elem e) =
|
||||
case qName (elName e) of
|
||||
"a" -> throwError $ PandocParseError "Couldn't parse FB2 file: links cannot be nested."
|
||||
_ -> parseInline x
|
||||
parseStyleLinkType x = parseInline x
|
||||
|
||||
-- | Parse @tableType@
|
||||
parseTable :: PandocMonad m => Element -> FB2 m Blocks
|
||||
parseTable _ = pure mempty -- TODO: tables are not supported yet
|
||||
|
||||
-- | Parse @title-infoType@
|
||||
parseTitleInfoChild :: PandocMonad m => Element -> FB2 m ()
|
||||
parseTitleInfoChild e =
|
||||
case qName (elName e) of
|
||||
"genre" -> pure ()
|
||||
"author" -> parseAuthor e >>= \author -> modify (\st -> st {fb2Authors = author:fb2Authors st})
|
||||
"book-title" -> modify (setMeta "title" (text $ strContent e))
|
||||
"annotation" -> parseAnnotation e >>= modify . setMeta "abstract"
|
||||
"keywords" -> modify (setMeta "keywords" (map (MetaString . trim) $ splitOn "," $ strContent e))
|
||||
"date" -> modify (setMeta "date" (text $ strContent e))
|
||||
"coverpage" -> parseCoverPage e
|
||||
"lang" -> pure ()
|
||||
"src-lang" -> pure ()
|
||||
"translator" -> pure ()
|
||||
"sequence" -> pure ()
|
||||
name -> throwError $ PandocParseError ("Couldn't parse FB2 file: unexpected element " ++ name ++ " in title-info.")
|
||||
|
||||
parseCoverPage :: PandocMonad m => Element -> FB2 m ()
|
||||
parseCoverPage e =
|
||||
case findChild (QName "image" (Just "http://www.gribuser.ru/xml/fictionbook/2.0") Nothing) e of
|
||||
Just img -> case href of
|
||||
Just src -> modify (setMeta "cover-image" (MetaString $ removeHash src))
|
||||
Nothing -> pure ()
|
||||
where href = findAttr (QName "href" (Just "http://www.w3.org/1999/xlink") Nothing) img
|
||||
Nothing -> pure ()
|
||||
|
||||
-- | Parse @inlineImageType@ element
|
||||
parseInlineImageElement :: PandocMonad m
|
||||
=> Element
|
||||
-> FB2 m Inlines
|
||||
parseInlineImageElement e =
|
||||
case href of
|
||||
Just src -> pure $ imageWith ("", [], []) (removeHash src) "" alt
|
||||
Nothing -> throwError $ PandocParseError "Couldn't parse FB2 file: inline image without href."
|
||||
where alt = maybe mempty str $ findAttr (QName "alt" Nothing Nothing) e
|
||||
href = findAttr (QName "href" (Just "http://www.w3.org/1999/xlink") Nothing) e
|
|
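As an illustration (not part of the patch itself): the new reader is exported as readFB2 :: PandocMonad m => ReaderOptions -> Text -> m Pandoc, so it can be driven like any other pandoc reader. A minimal sketch, assuming pandoc's runIOorExplode helper from Text.Pandoc.Class and a hypothetical input file named book.fb2:

import Data.Default (def)                   -- default ReaderOptions
import qualified Data.Text.IO as TIO
import Text.Pandoc.Class (runIOorExplode)
import Text.Pandoc.Readers.FB2 (readFB2)

main :: IO ()
main = do
  raw <- TIO.readFile "book.fb2"            -- hypothetical file name
  doc <- runIOorExplode (readFB2 def raw)   -- parse FB2 markup into a Pandoc AST
  print doc                                 -- Pandoc has a Show instance

In normal use the reader would presumably be reached through pandoc's format dispatch (e.g. --from=fb2) rather than imported directly.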
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE FlexibleInstances #-}
|
||||
{-# LANGUAGE MultiParamTypeClasses #-}
|
||||
|
@ -42,6 +43,7 @@ module Text.Pandoc.Readers.HTML ( readHtml
|
|||
, isCommentTag
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Applicative ((<|>))
|
||||
import Control.Arrow (first)
|
||||
import Control.Monad (guard, mplus, msum, mzero, unless, void)
|
||||
|
@ -54,7 +56,7 @@ import Data.List (isPrefixOf)
|
|||
import Data.List.Split (wordsBy, splitWhen)
|
||||
import qualified Data.Map as M
|
||||
import Data.Maybe (fromMaybe, isJust, isNothing)
|
||||
import Data.Monoid (First (..), (<>))
|
||||
import Data.Monoid (First (..))
|
||||
import qualified Data.Set as Set
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as T
|
||||
|
@ -508,14 +510,16 @@ pTable = try $ do
|
|||
[Plain _] -> True
|
||||
_ -> False
|
||||
let isSimple = all isSinglePlain $ concat (head':rows''')
|
||||
let cols = length $ if null head' then head rows''' else head'
|
||||
let cols = if null head'
|
||||
then maximum (map length rows''')
|
||||
else length head'
|
||||
-- add empty cells to short rows
|
||||
let addEmpties r = case cols - length r of
|
||||
n | n > 0 -> r <> replicate n mempty
|
||||
| otherwise -> r
|
||||
let rows = map addEmpties rows'''
|
||||
let aligns = case rows'' of
|
||||
(cs:_) -> map fst cs
|
||||
(cs:_) -> take cols $ map fst cs ++ repeat AlignDefault
|
||||
_ -> replicate cols AlignDefault
|
||||
let widths = if null widths'
|
||||
then if isSimple
|
||||
|
|
|
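As an illustration (not part of the patch itself): the new cols/addEmpties logic above pads short rows out to the widest row, so every row of the parsed HTML table ends up with the same number of cells. The same idea over plain strings, with padRows as an invented name rather than pandoc code:

-- Pad every row to the length of the longest row, mirroring addEmpties above.
padRows :: [[String]] -> [[String]]
padRows rows = map addEmpties rows
  where
    cols = if null rows then 0 else maximum (map length rows)
    addEmpties r = case cols - length r of
                     n | n > 0     -> r ++ replicate n ""
                       | otherwise -> r

main :: IO ()
main = mapM_ print (padRows [["a","b","c"], ["d"], ["e","f"]])
-- prints ["a","b","c"], then ["d","",""], then ["e","f",""]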
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
{- |
|
||||
Module : Text.Pandoc.Readers.Haddock
|
||||
|
@ -14,13 +15,13 @@ module Text.Pandoc.Readers.Haddock
|
|||
( readHaddock
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Monad.Except (throwError)
|
||||
import Data.List (intersperse, stripPrefix)
|
||||
import Data.Maybe (fromMaybe)
|
||||
import Data.Monoid ((<>))
|
||||
import Data.Text (Text, unpack)
|
||||
import Documentation.Haddock.Parser
|
||||
import Documentation.Haddock.Types
|
||||
import Documentation.Haddock.Types as H
|
||||
import Text.Pandoc.Builder (Blocks, Inlines)
|
||||
import qualified Text.Pandoc.Builder as B
|
||||
import Text.Pandoc.Class (PandocMonad)
|
||||
|
@ -86,6 +87,20 @@ docHToBlocks d' =
|
|||
DocProperty s -> B.codeBlockWith ("",["property","haskell"],[]) (trim s)
|
||||
DocExamples es -> mconcat $ map (\e ->
|
||||
makeExample ">>>" (exampleExpression e) (exampleResult e)) es
|
||||
#if MIN_VERSION_haddock_library(1,5,0)
|
||||
DocTable H.Table{ tableHeaderRows = headerRows
|
||||
, tableBodyRows = bodyRows
|
||||
}
|
||||
-> let toCells = map (docHToBlocks . tableCellContents) . tableRowCells
|
||||
(header, body) =
|
||||
if null headerRows
|
||||
then ([], map toCells bodyRows)
|
||||
else (toCells (head headerRows),
|
||||
map toCells (tail headerRows ++ bodyRows))
|
||||
colspecs = replicate (maximum (map length body))
|
||||
(AlignDefault, 0.0)
|
||||
in B.table mempty colspecs header body
|
||||
#endif
|
||||
|
||||
where inlineFallback = B.plain $ docHToInlines False d'
|
||||
consolidatePlains = B.fromList . consolidatePlains' . B.toList
|
||||
|
@ -134,6 +149,9 @@ docHToInlines isCode d' =
|
|||
DocAName s -> B.spanWith (s,["anchor"],[]) mempty
|
||||
DocProperty _ -> mempty
|
||||
DocExamples _ -> mempty
|
||||
#if MIN_VERSION_haddock_library(1,5,0)
|
||||
DocTable _ -> mempty
|
||||
#endif
|
||||
|
||||
-- | Create an 'Example', stripping superfluous characters as appropriate
|
||||
makeExample :: String -> String -> [String] -> Blocks
|
||||
|
|
|
@ -1,5 +1,37 @@
|
|||
{-# LANGUAGE ExplicitForAll, TupleSections #-}
|
||||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-
|
||||
Copyright (C) 2017-2018 Hamish Mackenzie
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
-}
|
||||
|
||||
{- |
|
||||
Module : Text.Pandoc.Readers.JATS
|
||||
Copyright : Copyright (C) 2017-2018 Hamish Mackenzie
|
||||
License : GNU GPL, version 2 or above
|
||||
|
||||
Maintainer : John MacFarlane <jgm@berkeley.edu>
|
||||
Stability : alpha
|
||||
Portability : portable
|
||||
|
||||
Conversion of JATS XML to 'Pandoc' document.
|
||||
-}
|
||||
|
||||
module Text.Pandoc.Readers.JATS ( readJATS ) where
|
||||
import Prelude
|
||||
import Control.Monad.State.Strict
|
||||
import Data.Char (isDigit, isSpace, toUpper)
|
||||
import Data.Default
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE FlexibleInstances #-}
|
||||
{-# LANGUAGE MultiParamTypeClasses #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
@ -42,11 +43,12 @@ module Text.Pandoc.Readers.LaTeX ( readLaTeX,
|
|||
untokenize
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Applicative (many, optional, (<|>))
|
||||
import Control.Monad
|
||||
import Control.Monad.Except (throwError)
|
||||
import Control.Monad.Trans (lift)
|
||||
import Data.Char (chr, isAlphaNum, isDigit, isLetter, ord, toLower)
|
||||
import Data.Char (chr, isAlphaNum, isDigit, isLetter, ord, toLower, toUpper)
|
||||
import Data.Default
|
||||
import Data.List (intercalate, isPrefixOf)
|
||||
import qualified Data.Map as M
|
||||
|
@ -60,7 +62,7 @@ import Text.Pandoc.BCP47 (Lang (..), renderLang)
|
|||
import Text.Pandoc.Builder
|
||||
import Text.Pandoc.Class (PandocMonad, PandocPure, getResourcePath, lookupEnv,
|
||||
readFileFromDirs, report, setResourcePath,
|
||||
setTranslations, translateTerm)
|
||||
setTranslations, translateTerm, trace)
|
||||
import Text.Pandoc.Error (PandocError (PandocMacroLoop, PandocParseError, PandocParsecError))
|
||||
import Text.Pandoc.Highlighting (fromListingsLanguage, languagesByExtension)
|
||||
import Text.Pandoc.ImageSize (numUnit, showFl)
|
||||
|
@ -74,6 +76,7 @@ import Text.Pandoc.Shared
|
|||
import qualified Text.Pandoc.Translations as Translations
|
||||
import Text.Pandoc.Walk
|
||||
import Text.Parsec.Pos
|
||||
import qualified Text.Pandoc.Builder as B
|
||||
|
||||
-- for debugging:
|
||||
-- import Text.Pandoc.Extensions (getDefaultExtensions)
|
||||
|
@ -161,6 +164,7 @@ data LaTeXState = LaTeXState{ sOptions :: ReaderOptions
|
|||
, sInTableCell :: Bool
|
||||
, sLastHeaderNum :: HeaderNum
|
||||
, sLabels :: M.Map String [Inline]
|
||||
, sHasChapters :: Bool
|
||||
, sToggles :: M.Map String Bool
|
||||
}
|
||||
deriving Show
|
||||
|
@ -180,6 +184,7 @@ defaultLaTeXState = LaTeXState{ sOptions = def
|
|||
, sInTableCell = False
|
||||
, sLastHeaderNum = HeaderNum []
|
||||
, sLabels = M.empty
|
||||
, sHasChapters = False
|
||||
, sToggles = M.empty
|
||||
}
|
||||
|
||||
|
@ -237,21 +242,30 @@ withVerbatimMode parser = do
|
|||
return result
|
||||
|
||||
rawLaTeXParser :: (PandocMonad m, HasMacros s, HasReaderOptions s)
|
||||
=> LP m a -> ParserT String s m (a, String)
|
||||
rawLaTeXParser parser = do
|
||||
=> LP m a -> LP m a -> ParserT String s m (a, String)
|
||||
rawLaTeXParser parser valParser = do
|
||||
inp <- getInput
|
||||
let toks = tokenize "source" $ T.pack inp
|
||||
pstate <- getState
|
||||
let lstate = def{ sOptions = extractReaderOptions pstate
|
||||
, sMacros = extractMacros pstate }
|
||||
let rawparser = (,) <$> withRaw parser <*> getState
|
||||
res <- lift $ runParserT rawparser lstate "chunk" toks
|
||||
case res of
|
||||
let lstate = def{ sOptions = extractReaderOptions pstate }
|
||||
let lstate' = lstate { sMacros = extractMacros pstate }
|
||||
let rawparser = (,) <$> withRaw valParser <*> getState
|
||||
res' <- lift $ runParserT (snd <$> withRaw parser) lstate "chunk" toks
|
||||
case res' of
|
||||
Left _ -> mzero
|
||||
Right ((val, raw), st) -> do
|
||||
updateState (updateMacros (sMacros st <>))
|
||||
rawstring <- takeP (T.length (untokenize raw))
|
||||
return (val, rawstring)
|
||||
Right toks' -> do
|
||||
res <- lift $ runParserT (do doMacros 0
|
||||
-- retokenize, applying macros
|
||||
ts <- many (satisfyTok (const True))
|
||||
setInput ts
|
||||
rawparser)
|
||||
lstate' "chunk" toks'
|
||||
case res of
|
||||
Left _ -> mzero
|
||||
Right ((val, raw), st) -> do
|
||||
updateState (updateMacros (sMacros st <>))
|
||||
_ <- takeP (T.length (untokenize toks'))
|
||||
return (val, T.unpack (untokenize raw))
|
||||
|
||||
applyMacros :: (PandocMonad m, HasMacros s, HasReaderOptions s)
|
||||
=> String -> ParserT String s m String
|
||||
|
@ -272,19 +286,18 @@ rawLaTeXBlock = do
|
|||
lookAhead (try (char '\\' >> letter))
|
||||
-- we don't want to apply newly defined latex macros to their own
|
||||
-- definitions:
|
||||
snd <$> rawLaTeXParser macroDef
|
||||
<|> ((snd <$> rawLaTeXParser (environment <|> blockCommand)) >>= applyMacros)
|
||||
snd <$> rawLaTeXParser (environment <|> macroDef <|> blockCommand) blocks
|
||||
|
||||
rawLaTeXInline :: (PandocMonad m, HasMacros s, HasReaderOptions s)
|
||||
=> ParserT String s m String
|
||||
rawLaTeXInline = do
|
||||
lookAhead (try (char '\\' >> letter))
|
||||
rawLaTeXParser (inlineEnvironment <|> inlineCommand') >>= applyMacros . snd
|
||||
snd <$> rawLaTeXParser (inlineEnvironment <|> inlineCommand') inlines
|
||||
|
||||
inlineCommand :: PandocMonad m => ParserT String ParserState m Inlines
|
||||
inlineCommand = do
|
||||
lookAhead (try (char '\\' >> letter))
|
||||
fst <$> rawLaTeXParser (inlineEnvironment <|> inlineCommand')
|
||||
fst <$> rawLaTeXParser (inlineEnvironment <|> inlineCommand') inlines
|
||||
|
||||
tokenize :: SourceName -> Text -> [Tok]
|
||||
tokenize sourcename = totoks (initialPos sourcename)
|
||||
|
@ -665,7 +678,7 @@ dosiunitx = do
|
|||
skipopts
|
||||
value <- tok
|
||||
valueprefix <- option "" $ bracketed tok
|
||||
unit <- tok
|
||||
unit <- inlineCommand' <|> tok
|
||||
let emptyOr160 "" = ""
|
||||
emptyOr160 _ = "\160"
|
||||
return . mconcat $ [valueprefix,
|
||||
|
@ -674,6 +687,12 @@ dosiunitx = do
|
|||
emptyOr160 unit,
|
||||
unit]
|
||||
|
||||
-- siunitx's \square command
|
||||
dosquare :: PandocMonad m => LP m Inlines
|
||||
dosquare = do
|
||||
unit <- inlineCommand' <|> tok
|
||||
return . mconcat $ [unit, "\178"]
|
||||
|
||||
lit :: String -> LP m Inlines
|
||||
lit = pure . str
|
||||
|
||||
|
@@ -1034,13 +1053,28 @@ dollarsMath :: PandocMonad m => LP m Inlines
dollarsMath = do
symbol '$'
display <- option False (True <$ symbol '$')
contents <- trim . toksToString <$>
many (notFollowedBy (symbol '$') >> anyTok)
if display
then
mathDisplay contents <$ try (symbol '$' >> symbol '$')
<|> (guard (null contents) >> return (mathInline ""))
else mathInline contents <$ symbol '$'
(do contents <- try $ T.unpack <$> pDollarsMath 0
if display
then (mathDisplay contents <$ symbol '$')
else return $ mathInline contents)
<|> (guard display >> return (mathInline ""))

-- Int is number of embedded groupings
pDollarsMath :: PandocMonad m => Int -> LP m Text
pDollarsMath n = do
Tok _ toktype t <- anyTok
case toktype of
Symbol | t == "$"
, n == 0 -> return mempty
| t == "\\" -> do
Tok _ _ t' <- anyTok
return (t <> t')
| t == "{" -> (t <>) <$> pDollarsMath (n+1)
| t == "}" ->
if n > 0
then (t <>) <$> pDollarsMath (n-1)
else mzero
_ -> (t <>) <$> pDollarsMath n

-- citations
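As an illustration (not part of the patch itself): the heart of the new pDollarsMath is a balanced scan that stops at the first '$' seen at brace depth 0, treats a backslash plus the following token as a unit, and fails on an unmatched '}'. A standalone sketch of the same scan over a plain String, with scanDollars as an invented name:

-- Return the math text before the closing '$' and the remaining input.
scanDollars :: Int -> String -> Maybe (String, String)
scanDollars 0 ('$':rest)    = Just ("", rest)                 -- closing dollar at depth 0
scanDollars n ('\\':c:rest) = fmap (prepend ['\\', c]) (scanDollars n rest)
scanDollars n ('{':rest)    = fmap (prepend "{") (scanDollars (n + 1) rest)
scanDollars n ('}':rest)
  | n > 0                   = fmap (prepend "}") (scanDollars (n - 1) rest)
  | otherwise               = Nothing                         -- unmatched closing brace
scanDollars n (c:rest)      = fmap (prepend [c]) (scanDollars n rest)
scanDollars _ []            = Nothing                         -- ran out of input

prepend :: String -> (String, String) -> (String, String)
prepend s (body, rest) = (s ++ body, rest)

-- scanDollars 0 "a_{1}+b$ tail" == Just ("a_{1}+b", " tail")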
@ -1161,7 +1195,7 @@ singleChar = try $ do
|
|||
else return $ Tok pos toktype t
|
||||
|
||||
opt :: PandocMonad m => LP m Inlines
|
||||
opt = bracketed inline
|
||||
opt = bracketed inline <|> (str . T.unpack <$> rawopt)
|
||||
|
||||
rawopt :: PandocMonad m => LP m Text
|
||||
rawopt = do
|
||||
|
@ -1304,6 +1338,12 @@ inlineCommands = M.union inlineLanguageCommands $ M.fromList
|
|||
, ("slshape", extractSpaces emph <$> inlines)
|
||||
, ("scshape", extractSpaces smallcaps <$> inlines)
|
||||
, ("bfseries", extractSpaces strong <$> inlines)
|
||||
, ("MakeUppercase", makeUppercase <$> tok)
|
||||
, ("MakeTextUppercase", makeUppercase <$> tok) -- textcase
|
||||
, ("uppercase", makeUppercase <$> tok)
|
||||
, ("MakeLowercase", makeLowercase <$> tok)
|
||||
, ("MakeTextLowercase", makeLowercase <$> tok)
|
||||
, ("lowercase", makeLowercase <$> tok)
|
||||
, ("/", pure mempty) -- italic correction
|
||||
, ("aa", lit "å")
|
||||
, ("AA", lit "Å")
|
||||
|
@ -1467,6 +1507,13 @@ inlineCommands = M.union inlineLanguageCommands $ M.fromList
|
|||
, ("acsp", doAcronymPlural "abbrv")
|
||||
-- siuntix
|
||||
, ("SI", dosiunitx)
|
||||
-- units of siuntix
|
||||
, ("celsius", lit "°C")
|
||||
, ("degreeCelsius", lit "°C")
|
||||
, ("gram", lit "g")
|
||||
, ("meter", lit "m")
|
||||
, ("milli", lit "m")
|
||||
, ("square", dosquare)
|
||||
-- hyphenat
|
||||
, ("bshyp", lit "\\\173")
|
||||
, ("fshyp", lit "/\173")
|
||||
|
@ -1497,6 +1544,16 @@ inlineCommands = M.union inlineLanguageCommands $ M.fromList
|
|||
, ("foreignlanguage", foreignlanguage)
|
||||
]
|
||||
|
||||
makeUppercase :: Inlines -> Inlines
|
||||
makeUppercase = fromList . walk (alterStr (map toUpper)) . toList
|
||||
|
||||
makeLowercase :: Inlines -> Inlines
|
||||
makeLowercase = fromList . walk (alterStr (map toLower)) . toList
|
||||
|
||||
alterStr :: (String -> String) -> Inline -> Inline
|
||||
alterStr f (Str xs) = Str (f xs)
|
||||
alterStr _ x = x
|
||||
|
||||
foreignlanguage :: PandocMonad m => LP m Inlines
|
||||
foreignlanguage = do
|
||||
babelLang <- T.unpack . untokenize <$> braced
|
||||
|
@ -1669,6 +1726,9 @@ treatAsBlock = Set.fromList
|
|||
, "clearpage"
|
||||
, "pagebreak"
|
||||
, "titleformat"
|
||||
, "listoffigures"
|
||||
, "listoftables"
|
||||
, "write"
|
||||
]
|
||||
|
||||
isInlineCommand :: Text -> Bool
|
||||
|
@ -1968,9 +2028,13 @@ section starred (ident, classes, kvs) lvl = do
|
|||
try (spaces >> controlSeq "label"
|
||||
>> spaces >> toksToString <$> braced)
|
||||
let classes' = if starred then "unnumbered" : classes else classes
|
||||
when (lvl == 0) $
|
||||
updateState $ \st -> st{ sHasChapters = True }
|
||||
unless starred $ do
|
||||
hn <- sLastHeaderNum <$> getState
|
||||
let num = incrementHeaderNum lvl hn
|
||||
hasChapters <- sHasChapters <$> getState
|
||||
let lvl' = lvl + if hasChapters then 1 else 0
|
||||
let num = incrementHeaderNum lvl' hn
|
||||
updateState $ \st -> st{ sLastHeaderNum = num }
|
||||
updateState $ \st -> st{ sLabels = M.insert lab
|
||||
[Str (renderHeaderNum num)]
|
||||
|
@ -2095,6 +2159,7 @@ environments :: PandocMonad m => M.Map Text (LP m Blocks)
|
|||
environments = M.fromList
|
||||
[ ("document", env "document" blocks)
|
||||
, ("abstract", mempty <$ (env "abstract" blocks >>= addMeta "abstract"))
|
||||
, ("sloppypar", env "sloppypar" $ blocks)
|
||||
, ("letter", env "letter" letterContents)
|
||||
, ("minipage", env "minipage" $
|
||||
skipopts *> spaces *> optional braced *> spaces *> blocks)
|
||||
|
@ -2126,19 +2191,6 @@ environments = M.fromList
|
|||
codeBlockWith attr <$> verbEnv "lstlisting")
|
||||
, ("minted", minted)
|
||||
, ("obeylines", obeylines)
|
||||
, ("displaymath", mathEnvWith para Nothing "displaymath")
|
||||
, ("equation", mathEnvWith para Nothing "equation")
|
||||
, ("equation*", mathEnvWith para Nothing "equation*")
|
||||
, ("gather", mathEnvWith para (Just "gathered") "gather")
|
||||
, ("gather*", mathEnvWith para (Just "gathered") "gather*")
|
||||
, ("multline", mathEnvWith para (Just "gathered") "multline")
|
||||
, ("multline*", mathEnvWith para (Just "gathered") "multline*")
|
||||
, ("eqnarray", mathEnvWith para (Just "aligned") "eqnarray")
|
||||
, ("eqnarray*", mathEnvWith para (Just "aligned") "eqnarray*")
|
||||
, ("align", mathEnvWith para (Just "aligned") "align")
|
||||
, ("align*", mathEnvWith para (Just "aligned") "align*")
|
||||
, ("alignat", mathEnvWith para (Just "aligned") "alignat")
|
||||
, ("alignat*", mathEnvWith para (Just "aligned") "alignat*")
|
||||
, ("tikzpicture", rawVerbEnv "tikzpicture")
|
||||
-- etoolbox
|
||||
, ("ifstrequal", ifstrequal)
|
||||
|
@ -2149,11 +2201,14 @@ environments = M.fromList
|
|||
]
|
||||
|
||||
environment :: PandocMonad m => LP m Blocks
|
||||
environment = do
|
||||
environment = try $ do
|
||||
controlSeq "begin"
|
||||
name <- untokenize <$> braced
|
||||
M.findWithDefault mzero name environments
|
||||
<|> rawEnv name
|
||||
M.findWithDefault mzero name environments <|>
|
||||
if M.member name (inlineEnvironments
|
||||
:: M.Map Text (LP PandocPure Inlines))
|
||||
then mzero
|
||||
else rawEnv name
|
||||
|
||||
env :: PandocMonad m => Text -> LP m a -> LP m a
|
||||
env name p = p <* end_ name
|
||||
|
@ -2532,13 +2587,16 @@ addTableCaption = walkM go
|
|||
|
||||
|
||||
block :: PandocMonad m => LP m Blocks
|
||||
block = (mempty <$ spaces1)
|
||||
block = do
|
||||
res <- (mempty <$ spaces1)
|
||||
<|> environment
|
||||
<|> include
|
||||
<|> macroDef
|
||||
<|> blockCommand
|
||||
<|> paragraph
|
||||
<|> grouped block
|
||||
trace (take 60 $ show $ B.toList res)
|
||||
return res
|
||||
|
||||
blocks :: PandocMonad m => LP m Blocks
|
||||
blocks = mconcat <$> many block
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2017-2018 John MacFarlane <jgm@berkeley.edu>
|
||||
|
||||
|
@ -34,6 +35,7 @@ module Text.Pandoc.Readers.LaTeX.Types ( Tok(..)
|
|||
, SourcePos
|
||||
)
|
||||
where
|
||||
import Prelude
|
||||
import Data.Text (Text)
|
||||
import Text.Parsec.Pos (SourcePos)
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE RelaxedPolyRec #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
|
||||
|
@ -32,6 +33,7 @@ Conversion of markdown-formatted plain text to 'Pandoc' document.
|
|||
-}
|
||||
module Text.Pandoc.Readers.Markdown ( readMarkdown ) where
|
||||
|
||||
import Prelude
|
||||
import Control.Monad
|
||||
import Control.Monad.Except (throwError)
|
||||
import Data.Char (isAlphaNum, isPunctuation, isSpace, toLower)
|
||||
|
@ -39,7 +41,6 @@ import qualified Data.HashMap.Strict as H
|
|||
import Data.List (intercalate, sortBy, transpose, elemIndex)
|
||||
import qualified Data.Map as M
|
||||
import Data.Maybe
|
||||
import Data.Monoid ((<>))
|
||||
import Data.Ord (comparing)
|
||||
import Data.Scientific (base10Exponent, coefficient)
|
||||
import qualified Data.Set as Set
|
||||
|
@ -162,7 +163,7 @@ inlinesInBalancedBrackets =
|
|||
stripBracket xs = if last xs == ']' then init xs else xs
|
||||
go :: PandocMonad m => Int -> MarkdownParser m ()
|
||||
go 0 = return ()
|
||||
go openBrackets =
|
||||
go openBrackets =
|
||||
(() <$ (escapedChar <|>
|
||||
code <|>
|
||||
rawHtmlInline <|>
|
||||
|
@ -673,6 +674,8 @@ keyValAttr = try $ do
|
|||
char '='
|
||||
val <- enclosed (char '"') (char '"') litChar
|
||||
<|> enclosed (char '\'') (char '\'') litChar
|
||||
<|> ("" <$ try (string "\"\""))
|
||||
<|> ("" <$ try (string "''"))
|
||||
<|> many (escapedChar' <|> noneOf " \t\n\r}")
|
||||
return $ \(id',cs,kvs) ->
|
||||
case key of
|
||||
|
@ -909,6 +912,17 @@ listContinuation continuationIndent = try $ do
|
|||
blanks <- many blankline
|
||||
return $ concat (x:xs) ++ blanks
|
||||
|
||||
-- Variant of blanklines that doesn't require blank lines
|
||||
-- before a fence or eof.
|
||||
blanklines' :: PandocMonad m => MarkdownParser m [Char]
|
||||
blanklines' = blanklines <|> try checkDivCloser
|
||||
where checkDivCloser = do
|
||||
guardEnabled Ext_fenced_divs
|
||||
divLevel <- stateFencedDivLevel <$> getState
|
||||
guard (divLevel >= 1)
|
||||
lookAhead divFenceEnd
|
||||
return ""
|
||||
|
||||
notFollowedByDivCloser :: PandocMonad m => MarkdownParser m ()
|
||||
notFollowedByDivCloser =
|
||||
guardDisabled Ext_fenced_divs <|>
|
||||
|
@ -1250,7 +1264,7 @@ alignType strLst len =
|
|||
|
||||
-- Parse a table footer - dashed lines followed by blank line.
|
||||
tableFooter :: PandocMonad m => MarkdownParser m String
|
||||
tableFooter = try $ skipNonindentSpaces >> many1 (dashedLine '-') >> blanklines
|
||||
tableFooter = try $ skipNonindentSpaces >> many1 (dashedLine '-') >> blanklines'
|
||||
|
||||
-- Parse a table separator - dashed line.
|
||||
tableSep :: PandocMonad m => MarkdownParser m Char
|
||||
|
@ -1261,7 +1275,7 @@ rawTableLine :: PandocMonad m
|
|||
=> [Int]
|
||||
-> MarkdownParser m [String]
|
||||
rawTableLine indices = do
|
||||
notFollowedBy' (blanklines <|> tableFooter)
|
||||
notFollowedBy' (blanklines' <|> tableFooter)
|
||||
line <- many1Till anyChar newline
|
||||
return $ map trim $ tail $
|
||||
splitStringByIndices (init indices) line
|
||||
|
@ -1299,7 +1313,7 @@ simpleTable headless = do
|
|||
(aligns, _widths, heads', lines') <-
|
||||
tableWith (simpleTableHeader headless) tableLine
|
||||
(return ())
|
||||
(if headless then tableFooter else tableFooter <|> blanklines)
|
||||
(if headless then tableFooter else tableFooter <|> blanklines')
|
||||
-- Simple tables get 0s for relative column widths (i.e., use default)
|
||||
return (aligns, replicate (length aligns) 0, heads', lines')
|
||||
|
||||
|
@ -1327,11 +1341,16 @@ multilineTableHeader headless = try $ do
|
|||
newline
|
||||
let (lengths, lines') = unzip dashes
|
||||
let indices = scanl (+) (length initSp) lines'
|
||||
-- compensate for the fact that intercolumn spaces are
|
||||
-- not included in the last index:
|
||||
let indices' = case reverse indices of
|
||||
[] -> []
|
||||
(x:xs) -> reverse (x+1:xs)
|
||||
rawHeadsList <- if headless
|
||||
then fmap (map (:[]) . tail .
|
||||
splitStringByIndices (init indices)) $ lookAhead anyLine
|
||||
splitStringByIndices (init indices')) $ lookAhead anyLine
|
||||
else return $ transpose $ map
|
||||
(tail . splitStringByIndices (init indices))
|
||||
(tail . splitStringByIndices (init indices'))
|
||||
rawContent
|
||||
let aligns = zipWith alignType rawHeadsList lengths
|
||||
let rawHeads = if headless
|
||||
|
@ -1339,7 +1358,7 @@ multilineTableHeader headless = try $ do
|
|||
else map (unlines . map trim) rawHeadsList
|
||||
heads <- fmap sequence $
|
||||
mapM ((parseFromString' (mconcat <$> many plain)).trim) rawHeads
|
||||
return (heads, aligns, indices)
|
||||
return (heads, aligns, indices')
|
||||
|
||||
-- Parse a grid table: starts with row of '-' on top, then header
|
||||
-- (which may be grid), then the rows,
|
||||
|
@ -2145,7 +2164,6 @@ singleQuoted = try $ do
|
|||
doubleQuoted :: PandocMonad m => MarkdownParser m (F Inlines)
|
||||
doubleQuoted = try $ do
|
||||
doubleQuoteStart
|
||||
contents <- mconcat <$> many (try $ notFollowedBy doubleQuoteEnd >> inline)
|
||||
withQuoteContext InDoubleQuote (doubleQuoteEnd >> return
|
||||
(fmap B.doubleQuoted . trimInlinesF $ contents))
|
||||
<|> return (return (B.str "\8220") <> contents)
|
||||
withQuoteContext InDoubleQuote $
|
||||
fmap B.doubleQuoted . trimInlinesF . mconcat <$>
|
||||
many1Till inline doubleQuoteEnd
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
{-# LANGUAGE FlexibleInstances #-}
|
||||
{-# LANGUAGE RelaxedPolyRec #-}
|
||||
{-# LANGUAGE TypeSynonymInstances #-}
|
||||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE RelaxedPolyRec #-}
|
||||
-- RelaxedPolyRec needed for inlinesBetween on GHC < 7
|
||||
{-
|
||||
Copyright (C) 2012-2018 John MacFarlane <jgm@berkeley.edu>
|
||||
|
@ -38,6 +37,7 @@ _ parse templates?
|
|||
-}
|
||||
module Text.Pandoc.Readers.MediaWiki ( readMediaWiki ) where
|
||||
|
||||
import Prelude
|
||||
import Control.Monad
|
||||
import Control.Monad.Except (throwError)
|
||||
import Data.Char (isDigit, isSpace)
|
||||
|
@ -45,7 +45,6 @@ import qualified Data.Foldable as F
|
|||
import Data.List (intercalate, intersperse, isPrefixOf)
|
||||
import qualified Data.Map as M
|
||||
import Data.Maybe (fromMaybe, maybeToList)
|
||||
import Data.Monoid ((<>))
|
||||
import Data.Sequence (ViewL (..), viewl, (<|))
|
||||
import qualified Data.Set as Set
|
||||
import Data.Text (Text, unpack)
|
||||
|
@ -231,7 +230,8 @@ para = do
|
|||
table :: PandocMonad m => MWParser m Blocks
|
||||
table = do
|
||||
tableStart
|
||||
styles <- option [] parseAttrs
|
||||
styles <- option [] $
|
||||
parseAttrs <* skipMany spaceChar <* optional (char '|')
|
||||
skipMany spaceChar
|
||||
optional blanklines
|
||||
let tableWidth = case lookup "width" styles of
|
||||
|
@@ -282,17 +282,29 @@ rowsep = try $ guardColumnOne *> skipSpaces *> sym "|-" <*

cellsep :: PandocMonad m => MWParser m ()
cellsep = try $ do
col <- sourceColumn <$> getPosition
skipSpaces
(char '|' *> notFollowedBy (oneOf "-}+") *> optional (char '|'))
<|> (char '!' *> optional (char '!'))
let pipeSep = do
char '|'
notFollowedBy (oneOf "-}+")
if col == 1
then optional (char '|')
else void (char '|')
let exclSep = do
char '!'
if col == 1
then optional (char '!')
else void (char '!')
pipeSep <|> exclSep

tableCaption :: PandocMonad m => MWParser m Inlines
tableCaption = try $ do
guardColumnOne
skipSpaces
sym "|+"
optional (try $ parseAttr *> skipSpaces *> char '|' *> skipSpaces)
(trimInlines . mconcat) <$> many (notFollowedBy (cellsep <|> rowsep) *> inline)
optional (try $ parseAttr *> skipSpaces *> char '|' *> blanklines)
(trimInlines . mconcat) <$>
many (notFollowedBy (cellsep <|> rowsep) *> inline)

tableRow :: PandocMonad m => MWParser m [((Alignment, Double), Blocks)]
tableRow = try $ skipMany htmlComment *> many tableCell
File diff suppressed because it is too large
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2011-2018 John MacFarlane <jgm@berkeley.edu>
|
||||
|
||||
|
@ -30,6 +31,7 @@ Conversion of a string representation of a pandoc type (@Pandoc@,
|
|||
-}
|
||||
module Text.Pandoc.Readers.Native ( readNative ) where
|
||||
|
||||
import Prelude
|
||||
import Text.Pandoc.Definition
|
||||
import Text.Pandoc.Options (ReaderOptions)
|
||||
import Text.Pandoc.Shared (safeRead)
|
||||
|
|
|
@ -1,5 +1,36 @@
|
|||
{-# LANGUAGE FlexibleContexts #-}
|
||||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2013-2018 John MacFarlane <jgm@berkeley.edu>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
-}
|
||||
|
||||
{- |
|
||||
Module : Text.Pandoc.Readers.OPML
|
||||
Copyright : Copyright (C) 2013-2018 John MacFarlane
|
||||
License : GNU GPL, version 2 or above
|
||||
|
||||
Maintainer : John MacFarlane <jgm@berkeley.edu>
|
||||
Stability : alpha
|
||||
Portability : portable
|
||||
|
||||
Conversion of OPML to 'Pandoc' document.
|
||||
-}
|
||||
|
||||
module Text.Pandoc.Readers.OPML ( readOPML ) where
|
||||
import Prelude
|
||||
import Control.Monad.State.Strict
|
||||
import Data.Char (toUpper)
|
||||
import Data.Default
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE PatternGuards #-}
|
||||
|
||||
{-
|
||||
|
@ -32,6 +33,7 @@ Entry point to the odt reader.
|
|||
|
||||
module Text.Pandoc.Readers.Odt ( readOdt ) where
|
||||
|
||||
import Prelude
|
||||
import Codec.Archive.Zip
|
||||
import qualified Text.XML.Light as XML
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
|
||||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE FlexibleInstances #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-
|
||||
|
@ -38,15 +38,13 @@ faster and easier to implement this way.
|
|||
|
||||
module Text.Pandoc.Readers.Odt.Arrows.State where
|
||||
|
||||
import Prelude
|
||||
import Prelude hiding (foldl, foldr)
|
||||
|
||||
import Control.Arrow
|
||||
import qualified Control.Category as Cat
|
||||
import Control.Monad
|
||||
|
||||
import Data.Foldable
|
||||
import Data.Monoid
|
||||
|
||||
import Text.Pandoc.Readers.Odt.Arrows.Utils
|
||||
import Text.Pandoc.Readers.Odt.Generic.Fallible
|
||||
|
||||
|
@ -131,7 +129,7 @@ withSubStateF' unlift a = ArrowState go
|
|||
-- and one with any function.
|
||||
foldS :: (Foldable f, Monoid m) => ArrowState s x m -> ArrowState s (f x) m
|
||||
foldS a = ArrowState $ \(s,f) -> foldr a' (s,mempty) f
|
||||
where a' x (s',m) = second (m <>) $ runArrowState a (s',x)
|
||||
where a' x (s',m) = second (mappend m) $ runArrowState a (s',x)
|
||||
|
||||
-- | Fold a state arrow through something 'Foldable'. Collect the results in a
|
||||
-- 'MonadPlus'.
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2015 Martin Linnemann <theCodingMarlin@googlemail.com>
|
||||
|
||||
|
@ -39,6 +40,7 @@ with an equivalent return value.
|
|||
-- We export everything
|
||||
module Text.Pandoc.Readers.Odt.Arrows.Utils where
|
||||
|
||||
import Prelude
|
||||
import Control.Arrow
|
||||
import Control.Monad (join)
|
||||
|
||||
|
@ -61,13 +63,13 @@ and6 :: (Arrow a)
|
|||
=> a b c0->a b c1->a b c2->a b c3->a b c4->a b c5
|
||||
-> a b (c0,c1,c2,c3,c4,c5 )
|
||||
|
||||
and3 a b c = (and2 a b ) &&& c
|
||||
and3 a b c = and2 a b &&& c
|
||||
>>^ \((z,y ) , x) -> (z,y,x )
|
||||
and4 a b c d = (and3 a b c ) &&& d
|
||||
and4 a b c d = and3 a b c &&& d
|
||||
>>^ \((z,y,x ) , w) -> (z,y,x,w )
|
||||
and5 a b c d e = (and4 a b c d ) &&& e
|
||||
and5 a b c d e = and4 a b c d &&& e
|
||||
>>^ \((z,y,x,w ) , v) -> (z,y,x,w,v )
|
||||
and6 a b c d e f = (and5 a b c d e ) &&& f
|
||||
and6 a b c d e f = and5 a b c d e &&& f
|
||||
>>^ \((z,y,x,w,v ) , u) -> (z,y,x,w,v,u )
|
||||
|
||||
liftA2 :: (Arrow a) => (x -> y -> z) -> a b x -> a b y -> a b z
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
|
||||
|
||||
{-
|
||||
Copyright (C) 2015 Martin Linnemann <theCodingMarlin@googlemail.com>
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE Arrows #-}
|
||||
{-# LANGUAGE PatternGuards #-}
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
|
@ -39,6 +40,7 @@ module Text.Pandoc.Readers.Odt.ContentReader
|
|||
, read_body
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Applicative hiding (liftA, liftA2, liftA3)
|
||||
import Control.Arrow
|
||||
|
||||
|
@ -520,7 +522,7 @@ matchingElement :: (Monoid e)
|
|||
matchingElement ns name reader = (ns, name, asResultAccumulator reader)
|
||||
where
|
||||
asResultAccumulator :: (ArrowChoice a, Monoid m) => a m m -> a m (Fallible m)
|
||||
asResultAccumulator a = liftAsSuccess $ keepingTheValue a >>% (<>)
|
||||
asResultAccumulator a = liftAsSuccess $ keepingTheValue a >>% mappend
|
||||
|
||||
--
|
||||
matchChildContent' :: (Monoid result)
|
||||
|
@ -554,7 +556,7 @@ read_plain_text = fst ^&&& read_plain_text' >>% recover
|
|||
read_plain_text' = ( second ( arr extractText )
|
||||
>>^ spreadChoice >>?! second text
|
||||
)
|
||||
>>?% (<>)
|
||||
>>?% mappend
|
||||
--
|
||||
extractText :: XML.Content -> Fallible String
|
||||
extractText (XML.Text cData) = succeedWith (XML.cdData cData)
|
||||
|
@ -565,7 +567,7 @@ read_text_seq = matchingElement NsText "sequence"
|
|||
$ matchChildContent [] read_plain_text
|
||||
|
||||
|
||||
-- specifically. I honor that, although the current implementation of '(<>)'
|
||||
-- specifically. I honor that, although the current implementation of 'mappend'
|
||||
-- for 'Inlines' in "Text.Pandoc.Builder" will collapse them again.
|
||||
-- The rational is to be prepared for future modifications.
|
||||
read_spaces :: InlineMatcher
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
|
||||
|
||||
{-
|
||||
|
@ -38,8 +39,7 @@ compatible instances of "ArrowChoice".
|
|||
|
||||
-- We export everything
|
||||
module Text.Pandoc.Readers.Odt.Generic.Fallible where
|
||||
|
||||
import Data.Monoid ((<>))
|
||||
import Prelude
|
||||
|
||||
-- | Default for now. Will probably become a class at some point.
|
||||
type Failure = ()
|
||||
|
@ -90,7 +90,7 @@ collapseEither (Right (Right x)) = Right x
|
|||
-- (possibly combined) non-error. If both values represent an error, an error
|
||||
-- is returned.
|
||||
chooseMax :: (Monoid a, Monoid b) => Either a b -> Either a b -> Either a b
|
||||
chooseMax = chooseMaxWith (<>)
|
||||
chooseMax = chooseMaxWith mappend
|
||||
|
||||
-- | If either of the values represents a non-error, the result is a
|
||||
-- (possibly combined) non-error. If both values represent an error, an error
|
||||
|
@ -100,7 +100,7 @@ chooseMaxWith :: (Monoid a) => (b -> b -> b)
|
|||
-> Either a b
|
||||
-> Either a b
|
||||
chooseMaxWith (><) (Right a) (Right b) = Right $ a >< b
|
||||
chooseMaxWith _ (Left a) (Left b) = Left $ a <> b
|
||||
chooseMaxWith _ (Left a) (Left b) = Left $ a `mappend` b
|
||||
chooseMaxWith _ (Right a) _ = Right a
|
||||
chooseMaxWith _ _ (Right b) = Right b
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2015 Martin Linnemann <theCodingMarlin@googlemail.com>
|
||||
|
||||
|
@ -31,6 +32,7 @@ typesafe Haskell namespace identifiers and unsafe "real world" namespaces.
|
|||
|
||||
module Text.Pandoc.Readers.Odt.Generic.Namespaces where
|
||||
|
||||
import Prelude
|
||||
import qualified Data.Map as M
|
||||
|
||||
--
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2015 Martin Linnemann <theCodingMarlin@googlemail.com>
|
||||
|
||||
|
@ -30,6 +31,7 @@ A map of values to sets of values.
|
|||
|
||||
module Text.Pandoc.Readers.Odt.Generic.SetMap where
|
||||
|
||||
import Prelude
|
||||
import qualified Data.Map as M
|
||||
import qualified Data.Set as S
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
|
||||
|
||||
{-# LANGUAGE TypeOperators #-}
|
||||
|
@ -51,6 +52,7 @@ module Text.Pandoc.Readers.Odt.Generic.Utils
|
|||
, composition
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Category (Category, (<<<), (>>>))
|
||||
import qualified Control.Category as Cat (id)
|
||||
import Control.Monad (msum)
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE Arrows #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
{-# LANGUAGE GADTs #-}
|
||||
|
@ -67,6 +68,7 @@ module Text.Pandoc.Readers.Odt.Generic.XMLConverter
|
|||
, matchContent
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Applicative hiding ( liftA, liftA2 )
|
||||
import Control.Monad ( MonadPlus )
|
||||
import Control.Arrow
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-
|
||||
Copyright (C) 2015 Martin Linnemann <theCodingMarlin@googlemail.com>
|
||||
|
||||
|
@ -31,6 +32,7 @@ Namespaces used in odt files.
|
|||
module Text.Pandoc.Readers.Odt.Namespaces ( Namespace (..)
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Data.List (isPrefixOf)
|
||||
import qualified Data.Map as M (empty, insert)
|
||||
import Data.Maybe (fromMaybe, listToMaybe)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
{-# LANGUAGE NoImplicitPrelude #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
{-# LANGUAGE Arrows #-}
|
||||
|
||||
{-# LANGUAGE RecordWildCards #-}
|
||||
{-# LANGUAGE TupleSections #-}
|
||||
|
||||
|
@ -57,6 +58,7 @@ module Text.Pandoc.Readers.Odt.StyleReader
|
|||
, readStylesAt
|
||||
) where
|
||||
|
||||
import Prelude
|
||||
import Control.Applicative hiding (liftA, liftA2, liftA3)
|
||||
import Control.Arrow
|
||||
|
||||
|
@ -80,7 +82,6 @@ import Text.Pandoc.Readers.Odt.Generic.XMLConverter
|
|||
import Text.Pandoc.Readers.Odt.Base
|
||||
import Text.Pandoc.Readers.Odt.Namespaces
|
||||
|
||||
|
||||
readStylesAt :: XML.Element -> Fallible Styles
|
||||
readStylesAt e = runConverter' readAllStyles mempty e
|
||||
|
||||
|
@@ -183,13 +184,14 @@ data Styles = Styles
deriving ( Show )

-- Styles from a monoid under union
instance Monoid Styles where
mempty = Styles M.empty M.empty M.empty
mappend (Styles sBn1 dSm1 lsBn1)
(Styles sBn2 dSm2 lsBn2)
instance Semigroup Styles where
(Styles sBn1 dSm1 lsBn1) <> (Styles sBn2 dSm2 lsBn2)
= Styles (M.union sBn1 sBn2)
(M.union dSm1 dSm2)
(M.union lsBn1 lsBn2)
instance Monoid Styles where
mempty = Styles M.empty M.empty M.empty
mappend = (<>)

-- Not all families from the specifications are implemented, only those we need.
-- But there are none that are not mentioned here.
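As an illustration (not part of the patch itself): the hunk above follows the usual GHC 8.4 migration pattern, in which the combining logic moves into a Semigroup instance and mappend merely delegates to (<>). The same pattern on a small invented type (NameMap is hypothetical, not pandoc code):

import qualified Data.Map as M
import Data.Semigroup (Semigroup (..))  -- redundant on base >= 4.11, harmless otherwise

newtype NameMap = NameMap (M.Map String String)

instance Semigroup NameMap where
  NameMap a <> NameMap b = NameMap (M.union a b)  -- left-biased union, as in Styles

instance Monoid NameMap where
  mempty  = NameMap M.empty
  mappend = (<>)                                  -- delegate to the Semigroup instance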
|
Some files were not shown because too many files have changed in this diff.