Merge branch 'next' into create-pull-request/patch-1607975269

Vincent Prouillet 2020-12-14 20:54:24 +01:00
commit e1af2c940d
82 changed files with 1849 additions and 662 deletions


@ -1,5 +1,23 @@
# Changelog # Changelog
## 0.13.0 (unreleased)
- Enable HTML minification
- Support `output_dir` in `config.toml`
- Allow sections to be drafted
- Allow specifying default language in filenames
- Render emoji in Markdown content if the `render_emoji` option is enabled
- Enable YouTube privacy mode for the YouTube shortcode
- Add language as class to the `<code>` block
- Add bibtex to `load_data`
- Add a `[markdown]` section to `config.toml` to configure rendering
- Add `highlight_code` and `highlight_theme` to a `[markdown]` section in `config.toml`
- Add `external_links_target_blank`, `external_links_no_follow` and `external_links_no_referrer`
- Add a `smart_punctuation` option in the `[markdown]` section in `config.toml` to turn elements like dots and dashes
into their typographic forms
- Add iteration count variable `nth` for shortcodes to know how many times a shortcode has been invoked in a given
content
## 0.12.2 (2020-09-28) ## 0.12.2 (2020-09-28)
- Fix `zola serve` being broken on reload - Fix `zola serve` being broken on reload
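The `[markdown]` table referenced throughout these entries groups the new rendering options. A minimal illustrative sketch, assuming the `config` crate from this repository is in scope and reusing the field names introduced in this release (the site values are placeholders):

    use config::Config;

    fn main() {
        // Hypothetical config.toml exercising several of the new 0.13.0 options.
        let raw = r#"
    title = "My site"
    base_url = "https://replace-this-with-your-url.com"
    output_dir = "docs"

    [markdown]
    highlight_code = true
    render_emoji = true
    smart_punctuation = true
    external_links_target_blank = true
    "#;
        let config = Config::parse(raw).expect("valid config");
        assert_eq!(config.output_dir, "docs");
        assert!(config.markdown.render_emoji);
        assert!(config.markdown.smart_punctuation);
    }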

Cargo.lock (generated): 760 changed lines

File diff suppressed because it is too large


@ -1,6 +1,6 @@
[package] [package]
name = "zola" name = "zola"
version = "0.12.2" version = "0.13.0"
authors = ["Vincent Prouillet <hello@vincentprouillet.com>"] authors = ["Vincent Prouillet <hello@vincentprouillet.com>"]
edition = "2018" edition = "2018"
license = "MIT" license = "MIT"


@ -0,0 +1,71 @@
use serde_derive::{Deserialize, Serialize};
pub const DEFAULT_HIGHLIGHT_THEME: &str = "base16-ocean-dark";
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct Markdown {
/// Whether to highlight all code blocks found in markdown files. Defaults to false
pub highlight_code: bool,
/// Which themes to use for code highlighting. See Readme for supported themes
/// Defaults to "base16-ocean-dark"
pub highlight_theme: String,
/// Whether to render emoji aliases (e.g.: :smile: => 😄) in the markdown files
pub render_emoji: bool,
/// Whether external links are to be opened in a new tab
/// If this is true, a `rel="noopener"` will always automatically be added for security reasons
pub external_links_target_blank: bool,
/// Whether to set rel="nofollow" for all external links
pub external_links_no_follow: bool,
/// Whether to set rel="noreferrer" for all external links
pub external_links_no_referrer: bool,
/// Whether smart punctuation is enabled (changing quotes, dashes, dots etc in their typographic form)
pub smart_punctuation: bool,
}
impl Markdown {
pub fn has_external_link_tweaks(&self) -> bool {
self.external_links_target_blank
|| self.external_links_no_follow
|| self.external_links_no_referrer
}
pub fn construct_external_link_tag(&self, url: &str, title: &str) -> String {
let mut rel_opts = Vec::new();
let mut target = "".to_owned();
let title = if title == "" { "".to_owned() } else { format!("title=\"{}\" ", title) };
if self.external_links_target_blank {
// Security risk otherwise
rel_opts.push("noopener");
target = "target=\"_blank\" ".to_owned();
}
if self.external_links_no_follow {
rel_opts.push("nofollow");
}
if self.external_links_no_referrer {
rel_opts.push("noreferrer");
}
let rel = if rel_opts.is_empty() {
"".to_owned()
} else {
format!("rel=\"{}\" ", rel_opts.join(" "))
};
format!("<a {}{}{}href=\"{}\">", rel, target, title, url)
}
}
impl Default for Markdown {
fn default() -> Markdown {
Markdown {
highlight_code: false,
highlight_theme: DEFAULT_HIGHLIGHT_THEME.to_owned(),
render_emoji: false,
external_links_target_blank: false,
external_links_no_follow: false,
external_links_no_referrer: false,
smart_punctuation: false,
}
}
}
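As a quick illustration of the external-link handling above, a short sketch relying only on the Markdown struct defined in this file, showing what `construct_external_link_tag` produces when `external_links_target_blank` and `external_links_no_follow` are both enabled:

    // Sketch only: builds on the Markdown struct above.
    let md = Markdown {
        external_links_target_blank: true,
        external_links_no_follow: true,
        ..Markdown::default()
    };
    assert!(md.has_external_link_tweaks());
    assert_eq!(
        md.construct_external_link_tag("https://example.com", ""),
        r#"<a rel="noopener nofollow" target="_blank" href="https://example.com">"#
    );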


@ -1,5 +1,6 @@
pub mod languages; pub mod languages;
pub mod link_checker; pub mod link_checker;
pub mod markup;
pub mod search; pub mod search;
pub mod slugify; pub mod slugify;
pub mod taxonomies; pub mod taxonomies;
@ -96,6 +97,8 @@ pub struct Config {
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are need
pub extra_syntax_set: Option<SyntaxSet>, pub extra_syntax_set: Option<SyntaxSet>,
pub output_dir: String,
pub link_checker: link_checker::LinkChecker, pub link_checker: link_checker::LinkChecker,
/// The setup for which slugification strategies to use for paths, taxonomies and anchors /// The setup for which slugification strategies to use for paths, taxonomies and anchors
@ -104,6 +107,9 @@ pub struct Config {
/// The search config, telling what to include in the search index /// The search config, telling what to include in the search index
pub search: search::Search, pub search: search::Search,
/// The config for the Markdown rendering: syntax highlighting and everything
pub markdown: markup::Markdown,
/// All user params set in [extra] in the config /// All user params set in [extra] in the config
pub extra: HashMap<String, Toml>, pub extra: HashMap<String, Toml>,
} }
@ -153,8 +159,9 @@ impl Config {
} }
} }
-        // TODO: re-enable once it's a bit more tested
-        config.minify_html = false;
+        if config.highlight_code {
+            println!("`highlight_code` has been moved to a [markdown] section. Top level `highlight_code` and `highlight_theme` will stop working in 0.14.");
+        }
Ok(config) Ok(config)
} }
@ -170,6 +177,30 @@ impl Config {
Config::parse(&content) Config::parse(&content)
} }
/// Temporary, while we have the settings in 2 places
/// TODO: remove me in 0.14
pub fn highlight_code(&self) -> bool {
if !self.highlight_code && !self.markdown.highlight_code {
return false;
}
if self.highlight_code {
true
} else {
self.markdown.highlight_code
}
}
/// Temporary, while we have the settings in 2 places
/// TODO: remove me in 0.14
pub fn highlight_theme(&self) -> &str {
if self.highlight_theme != markup::DEFAULT_HIGHLIGHT_THEME {
&self.highlight_theme
} else {
&self.markdown.highlight_theme
}
}
/// Attempt to load any extra syntax found in the extra syntaxes of the config /// Attempt to load any extra syntax found in the extra syntaxes of the config
pub fn load_extra_syntaxes(&mut self, base_path: &Path) -> Result<()> { pub fn load_extra_syntaxes(&mut self, base_path: &Path) -> Result<()> {
if self.extra_syntaxes.is_empty() { if self.extra_syntaxes.is_empty() {
@ -333,9 +364,11 @@ impl Default for Config {
translations: HashMap::new(), translations: HashMap::new(),
extra_syntaxes: Vec::new(), extra_syntaxes: Vec::new(),
extra_syntax_set: None, extra_syntax_set: None,
output_dir: "public".to_string(),
link_checker: link_checker::LinkChecker::default(), link_checker: link_checker::LinkChecker::default(),
slugify: slugify::Slugify::default(), slugify: slugify::Slugify::default(),
search: search::Search::default(), search: search::Search::default(),
markdown: markup::Markdown::default(),
extra: HashMap::new(), extra: HashMap::new(),
} }
} }
@ -654,4 +687,27 @@ bar = "baz"
// We expect an error here // We expect an error here
assert_eq!(false, config.add_theme_extra(&theme).is_ok()); assert_eq!(false, config.add_theme_extra(&theme).is_ok());
} }
#[test]
fn default_output_dir() {
let config = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"
"#;
let config = Config::parse(config).unwrap();
assert_eq!(config.output_dir, "public".to_string());
}
#[test]
fn can_add_output_dir() {
let config = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"
output_dir = "docs"
"#;
let config = Config::parse(config).unwrap();
assert_eq!(config.output_dir, "docs".to_string());
}
} }
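Because `highlight_code` and `highlight_theme` temporarily live both at the top level and under `[markdown]`, here is a brief sketch, assuming the `config` crate is in scope, of how the fallback getters above resolve the two locations:

    let mut config = Config::default();
    config.markdown.highlight_code = true;
    // The [markdown] value is used when the legacy top-level flag is left unset.
    assert!(config.highlight_code());

    // A non-default legacy top-level theme still wins during the deprecation window.
    config.highlight_theme = "base16-ocean-light".to_string();
    assert_eq!(config.highlight_theme(), "base16-ocean-light");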


@ -9,9 +9,14 @@ tera = "1"
chrono = "0.4" chrono = "0.4"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
serde_yaml = "0.8"
toml = "0.5" toml = "0.5"
regex = "1" regex = "1"
lazy_static = "1" lazy_static = "1"
errors = { path = "../errors" } errors = { path = "../errors" }
utils = { path = "../utils" } utils = { path = "../utils" }
[dev-dependencies]
test-case = "1.0"


@ -3,7 +3,9 @@ use serde_derive::{Deserialize, Serialize};
use errors::{bail, Error, Result}; use errors::{bail, Error, Result};
use regex::Regex; use regex::Regex;
use serde_yaml;
use std::path::Path; use std::path::Path;
use toml;
mod page; mod page;
mod section; mod section;
@ -12,8 +14,31 @@ pub use page::PageFrontMatter;
pub use section::SectionFrontMatter; pub use section::SectionFrontMatter;
lazy_static! { lazy_static! {
-    static ref PAGE_RE: Regex =
+    static ref TOML_RE: Regex =
         Regex::new(r"^[[:space:]]*\+\+\+(\r?\n(?s).*?(?-s))\+\+\+\r?\n?((?s).*(?-s))$").unwrap();
static ref YAML_RE: Regex =
Regex::new(r"^[[:space:]]*---(\r?\n(?s).*?(?-s))---\r?\n?((?s).*(?-s))$").unwrap();
}
pub enum RawFrontMatter<'a> {
Toml(&'a str),
Yaml(&'a str),
}
impl RawFrontMatter<'_> {
fn deserialize<T>(&self) -> Result<T>
where
T: serde::de::DeserializeOwned,
{
let f: T = match self {
RawFrontMatter::Toml(s) => toml::from_str(s)?,
RawFrontMatter::Yaml(s) => match serde_yaml::from_str(s) {
Ok(d) => d,
Err(e) => bail!(format!("YAML deserialize error: {:?}", e)),
},
};
Ok(f)
}
} }
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
@ -37,20 +62,30 @@ pub enum InsertAnchor {
/// Split a file between the front matter and its content /// Split a file between the front matter and its content
/// Will return an error if the front matter wasn't found /// Will return an error if the front matter wasn't found
-fn split_content<'c>(file_path: &Path, content: &'c str) -> Result<(&'c str, &'c str)> {
+fn split_content<'c>(file_path: &Path, content: &'c str) -> Result<(RawFrontMatter<'c>, &'c str)> {
-    if !PAGE_RE.is_match(content) {
+    let (re, is_toml) = if TOML_RE.is_match(content) {
+        (&TOML_RE as &Regex, true)
+    } else if YAML_RE.is_match(content) {
+        (&YAML_RE as &Regex, false)
+    } else {
         bail!(
-            "Couldn't find front matter in `{}`. Did you forget to add `+++`?",
+            "Couldn't find front matter in `{}`. Did you forget to add `+++` or `---`?",
             file_path.to_string_lossy()
         );
-    }
+    };
     // 2. extract the front matter and the content
-    let caps = PAGE_RE.captures(content).unwrap();
+    let caps = re.captures(content).unwrap();
     // caps[0] is the full match
     // caps[1] => front matter
     // caps[2] => content
-    Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()))
+    let front_matter = caps.get(1).unwrap().as_str();
+    let content = caps.get(2).unwrap().as_str();
+    if is_toml {
+        Ok((RawFrontMatter::Toml(front_matter), content))
+    } else {
+        Ok((RawFrontMatter::Yaml(front_matter), content))
+    }
 }
/// Split a file between the front matter and its content. /// Split a file between the front matter and its content.
@ -88,71 +123,125 @@ pub fn split_page_content<'c>(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::path::Path; use std::path::Path;
use test_case::test_case;
use super::{split_page_content, split_section_content}; use super::{split_page_content, split_section_content};
#[test] #[test_case(r#"
fn can_split_page_content_valid() {
let content = r#"
+++ +++
title = "Title" title = "Title"
description = "hey there" description = "hey there"
date = 2002-10-12 date = 2002-10-12
+++ +++
Hello Hello
"#; "#; "toml")]
#[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
---
Hello
"#; "yaml")]
fn can_split_page_content_valid(content: &str) {
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap(); let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
assert_eq!(content, "Hello\n"); assert_eq!(content, "Hello\n");
assert_eq!(front_matter.title.unwrap(), "Title"); assert_eq!(front_matter.title.unwrap(), "Title");
} }
#[test] #[test_case(r#"
fn can_split_section_content_valid() {
let content = r#"
+++ +++
paginate_by = 10 paginate_by = 10
+++ +++
Hello Hello
"#; "#; "toml")]
#[test_case(r#"
---
paginate_by: 10
---
Hello
"#; "yaml")]
fn can_split_section_content_valid(content: &str) {
let (front_matter, content) = split_section_content(Path::new(""), content).unwrap(); let (front_matter, content) = split_section_content(Path::new(""), content).unwrap();
assert_eq!(content, "Hello\n"); assert_eq!(content, "Hello\n");
assert!(front_matter.is_paginated()); assert!(front_matter.is_paginated());
} }
#[test] #[test_case(r#"
fn can_split_content_with_only_frontmatter_valid() {
let content = r#"
+++ +++
title = "Title" title = "Title"
description = "hey there" description = "hey there"
date = 2002-10-12 date = 2002-10-12
+++"#; +++"#; "toml")]
#[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
---"#; "yaml")]
fn can_split_content_with_only_frontmatter_valid(content: &str) {
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap(); let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
assert_eq!(content, ""); assert_eq!(content, "");
assert_eq!(front_matter.title.unwrap(), "Title"); assert_eq!(front_matter.title.unwrap(), "Title");
} }
#[test] #[test_case(r#"
fn can_split_content_lazily() {
let content = r#"
+++ +++
title = "Title" title = "Title"
description = "hey there" description = "hey there"
date = 2002-10-02T15:00:00Z date = 2002-10-02T15:00:00Z
+++ +++
+++"#; +++"#, "+++"; "toml with pluses in content")]
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap(); #[test_case(r#"
assert_eq!(content, "+++");
assert_eq!(front_matter.title.unwrap(), "Title");
}
#[test]
fn errors_if_cannot_locate_frontmatter() {
let content = r#"
+++ +++
title = "Title" title = "Title"
description = "hey there" description = "hey there"
date = 2002-10-12"#; date = 2002-10-02T15:00:00Z
+++
---"#, "---"; "toml with minuses in content")]
#[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-02T15:00:00Z
---
+++"#, "+++"; "yaml with pluses in content")]
#[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-02T15:00:00Z
---
---"#, "---"; "yaml with minuses in content")]
fn can_split_content_lazily(content: &str, expected: &str) {
let (front_matter, content) = split_page_content(Path::new(""), content).unwrap();
assert_eq!(content, expected);
assert_eq!(front_matter.title.unwrap(), "Title");
}
#[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12"#; "toml")]
#[test_case(r#"
+++
title = "Title"
description = "hey there"
date = 2002-10-12
---"#; "toml unmatched")]
#[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12"#; "yaml")]
#[test_case(r#"
---
title: Title
description: hey there
date: 2002-10-12
+++"#; "yaml unmatched")]
fn errors_if_cannot_locate_frontmatter(content: &str) {
let res = split_page_content(Path::new(""), content); let res = split_page_content(Path::new(""), content);
assert!(res.is_err()); assert!(res.is_err());
} }


@ -7,6 +7,8 @@ use tera::{Map, Value};
use errors::{bail, Result}; use errors::{bail, Result};
use utils::de::{fix_toml_dates, from_toml_datetime}; use utils::de::{fix_toml_dates, from_toml_datetime};
use crate::RawFrontMatter;
/// The front matter of every page /// The front matter of every page
#[derive(Debug, Clone, PartialEq, Deserialize)] #[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(default)] #[serde(default)]
@ -69,11 +71,8 @@ fn parse_datetime(d: &str) -> Option<NaiveDateTime> {
} }
impl PageFrontMatter { impl PageFrontMatter {
-    pub fn parse(toml: &str) -> Result<PageFrontMatter> {
-        let mut f: PageFrontMatter = match toml::from_str(toml) {
-            Ok(d) => d,
-            Err(e) => bail!(e),
-        };
+    pub fn parse(raw: &RawFrontMatter) -> Result<PageFrontMatter> {
+        let mut f: PageFrontMatter = raw.deserialize()?;
if let Some(ref slug) = f.slug { if let Some(ref slug) = f.slug {
if slug == "" { if slug == "" {
@ -140,21 +139,27 @@ impl Default for PageFrontMatter {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::PageFrontMatter; use super::PageFrontMatter;
use super::RawFrontMatter;
use tera::to_value; use tera::to_value;
use test_case::test_case;
#[test] #[test_case(&RawFrontMatter::Toml(r#" "#); "toml")]
fn can_have_empty_front_matter() { #[test_case(&RawFrontMatter::Toml(r#" "#); "yaml")]
let content = r#" "#; fn can_have_empty_front_matter(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
println!("{:?}", res); println!("{:?}", res);
assert!(res.is_ok()); assert!(res.is_ok());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_valid_front_matter() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" "#); "toml")]
description = "hey there""#; #[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
"#); "yaml")]
fn can_parse_valid_front_matter(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_ok()); assert!(res.is_ok());
let res = res.unwrap(); let res = res.unwrap();
@ -162,183 +167,281 @@ mod tests {
assert_eq!(res.description.unwrap(), "hey there".to_string()) assert_eq!(res.description.unwrap(), "hey there".to_string())
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"title = |\n"#); "toml")]
fn errors_with_invalid_front_matter() { #[test_case(&RawFrontMatter::Yaml(r#"title: |\n"#); "yaml")]
let content = r#"title = 1\n"#; fn errors_with_invalid_front_matter(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_err()); assert!(res.is_err());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn errors_on_present_but_empty_slug() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" slug = ""
description = "hey there" "#); "toml")]
slug = """#; #[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
slug: ""
"#); "yaml")]
fn errors_on_present_but_empty_slug(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_err()); assert!(res.is_err());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn errors_on_present_but_empty_path() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" path = ""
description = "hey there" "#); "toml")]
path = """#; #[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
path: ""
"#); "yaml")]
fn errors_on_present_but_empty_path(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_err()); assert!(res.is_err());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_date_yyyy_mm_dd() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2016-10-10
description = "hey there" "#); "toml")]
date = 2016-10-10 #[test_case(&RawFrontMatter::Yaml(r#"
"#; title: Hello
description: hey there
date: 2016-10-10
"#); "yaml")]
fn can_parse_date_yyyy_mm_dd(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap(); let res = PageFrontMatter::parse(content).unwrap();
assert!(res.datetime.is_some()); assert!(res.datetime.is_some());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_date_rfc3339() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002-10-02T15:00:00Z
description = "hey there" "#); "toml")]
date = 2002-10-02T15:00:00Z #[test_case(&RawFrontMatter::Yaml(r#"
"#; title: Hello
description: hey there
date: 2002-10-02T15:00:00Z
"#); "yaml")]
fn can_parse_date_rfc3339(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap(); let res = PageFrontMatter::parse(content).unwrap();
assert!(res.datetime.is_some()); assert!(res.datetime.is_some());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_date_rfc3339_without_timezone() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002-10-02T15:00:00
description = "hey there" "#); "toml")]
date = 2002-10-02T15:00:00 #[test_case(&RawFrontMatter::Yaml(r#"
"#; title: Hello
description: hey there
date: 2002-10-02T15:00:00
"#); "yaml")]
fn can_parse_date_rfc3339_without_timezone(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap(); let res = PageFrontMatter::parse(content).unwrap();
assert!(res.datetime.is_some()); assert!(res.datetime.is_some());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_date_rfc3339_with_space() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002-10-02 15:00:00+02:00
description = "hey there" "#); "toml")]
date = 2002-10-02 15:00:00+02:00 #[test_case(&RawFrontMatter::Yaml(r#"
"#; title: Hello
description: hey there
date: 2002-10-02 15:00:00+02:00
"#); "yaml")]
fn can_parse_date_rfc3339_with_space(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap(); let res = PageFrontMatter::parse(content).unwrap();
assert!(res.datetime.is_some()); assert!(res.datetime.is_some());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_date_rfc3339_with_space_without_timezone() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002-10-02 15:00:00
description = "hey there" "#); "toml")]
date = 2002-10-02 15:00:00 #[test_case(&RawFrontMatter::Yaml(r#"
"#; title: Hello
description: hey there
date: 2002-10-02 15:00:00
"#); "yaml")]
fn can_parse_date_rfc3339_with_space_without_timezone(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap(); let res = PageFrontMatter::parse(content).unwrap();
assert!(res.datetime.is_some()); assert!(res.datetime.is_some());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_date_rfc3339_with_microseconds() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002-10-02T15:00:00.123456Z
description = "hey there" "#); "toml")]
date = 2002-10-02T15:00:00.123456Z #[test_case(&RawFrontMatter::Yaml(r#"
"#; title: Hello
description: hey there
date: 2002-10-02T15:00:00.123456Z
"#); "yaml")]
fn can_parse_date_rfc3339_with_microseconds(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap(); let res = PageFrontMatter::parse(content).unwrap();
assert!(res.datetime.is_some()); assert!(res.datetime.is_some());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn cannot_parse_random_date_format() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002/10/12
description = "hey there" "#); "toml")]
date = 2002/10/12"#; #[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
date: 2002/10/12
"#); "yaml")]
fn cannot_parse_random_date_format(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_err()); assert!(res.is_err());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn cannot_parse_invalid_date_format() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = 2002-14-01
description = "hey there" "#); "toml")]
date = 2002-14-01"#; #[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
date: 2002-14-01
"#); "yaml")]
fn cannot_parse_invalid_date_format(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_err()); assert!(res.is_err());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn cannot_parse_date_as_string() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello" date = "2016-10-10"
description = "hey there" "#); "toml")]
date = "2002-14-01""#; #[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
date: "2016-10-10"
"#); "yaml")]
fn can_parse_valid_date_as_string(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content).unwrap();
assert!(res.date.is_some());
}
#[test_case(&RawFrontMatter::Toml(r#"
title = "Hello"
description = "hey there"
date = "2002-14-01"
"#); "toml")]
#[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
date: "2002-14-01"
"#); "yaml")]
fn cannot_parse_invalid_date_as_string(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
assert!(res.is_err()); assert!(res.is_err());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_dates_in_extra() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello"
description = "hey there"
[extra] [extra]
some-date = 2002-14-01"#; some-date = 2002-14-01
"#); "toml")]
#[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
extra:
some-date: 2002-14-01
"#); "yaml")]
fn can_parse_dates_in_extra(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
println!("{:?}", res); println!("{:?}", res);
assert!(res.is_ok()); assert!(res.is_ok());
assert_eq!(res.unwrap().extra["some-date"], to_value("2002-14-01").unwrap()); assert_eq!(res.unwrap().extra["some-date"], to_value("2002-14-01").unwrap());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_nested_dates_in_extra() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello"
description = "hey there"
[extra.something] [extra.something]
some-date = 2002-14-01"#; some-date = 2002-14-01
"#); "toml")]
#[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
extra:
something:
some-date: 2002-14-01
"#); "yaml")]
fn can_parse_nested_dates_in_extra(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
println!("{:?}", res); println!("{:?}", res);
assert!(res.is_ok()); assert!(res.is_ok());
assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-14-01").unwrap()); assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-14-01").unwrap());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_fully_nested_dates_in_extra() { title = "Hello"
let content = r#" description = "hey there"
title = "Hello"
description = "hey there"
[extra] [extra]
date_example = 2020-05-04 date_example = 2020-05-04
[[extra.questions]] [[extra.questions]]
date = 2020-05-03 date = 2020-05-03
name = "Who is the prime minister of Uganda?""#; name = "Who is the prime minister of Uganda?"
"#); "toml")]
#[test_case(&RawFrontMatter::Yaml(r#"
title: Hello
description: hey there
extra:
date_example: 2020-05-04
questions:
- date: 2020-05-03
name: "Who is the prime minister of Uganda?"
"#); "yaml")]
fn can_parse_fully_nested_dates_in_extra(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
println!("{:?}", res); println!("{:?}", res);
assert!(res.is_ok()); assert!(res.is_ok());
assert_eq!(res.unwrap().extra["questions"][0]["date"], to_value("2020-05-03").unwrap()); assert_eq!(res.unwrap().extra["questions"][0]["date"], to_value("2020-05-03").unwrap());
} }
#[test] #[test_case(&RawFrontMatter::Toml(r#"
fn can_parse_taxonomies() {
let content = r#"
title = "Hello World" title = "Hello World"
[taxonomies] [taxonomies]
tags = ["Rust", "JavaScript"] tags = ["Rust", "JavaScript"]
categories = ["Dev"] categories = ["Dev"]
"#; "#); "toml")]
#[test_case(&RawFrontMatter::Yaml(r#"
title: Hello World
taxonomies:
tags:
- Rust
- JavaScript
categories:
- Dev
"#); "yaml")]
fn can_parse_taxonomies(content: &RawFrontMatter) {
let res = PageFrontMatter::parse(content); let res = PageFrontMatter::parse(content);
println!("{:?}", res); println!("{:?}", res);
assert!(res.is_ok()); assert!(res.is_ok());


@ -2,9 +2,11 @@ use serde_derive::{Deserialize, Serialize};
use tera::{Map, Value}; use tera::{Map, Value};
use super::{InsertAnchor, SortBy}; use super::{InsertAnchor, SortBy};
use errors::{bail, Result}; use errors::Result;
use utils::de::fix_toml_dates; use utils::de::fix_toml_dates;
use crate::RawFrontMatter;
static DEFAULT_PAGINATE_PATH: &str = "page"; static DEFAULT_PAGINATE_PATH: &str = "page";
/// The front matter of every section /// The front matter of every section
@ -22,6 +24,8 @@ pub struct SectionFrontMatter {
/// Higher values means it will be at the end. Defaults to `0` /// Higher values means it will be at the end. Defaults to `0`
#[serde(skip_serializing)] #[serde(skip_serializing)]
pub weight: usize, pub weight: usize,
/// whether the section is a draft
pub draft: bool,
/// Optional template, if we want to specify which template to render for that section /// Optional template, if we want to specify which template to render for that section
#[serde(skip_serializing)] #[serde(skip_serializing)]
pub template: Option<String>, pub template: Option<String>,
@ -71,11 +75,8 @@ pub struct SectionFrontMatter {
} }
impl SectionFrontMatter { impl SectionFrontMatter {
-    pub fn parse(toml: &str) -> Result<SectionFrontMatter> {
-        let mut f: SectionFrontMatter = match toml::from_str(toml) {
-            Ok(d) => d,
-            Err(e) => bail!(e),
-        };
+    pub fn parse(raw: &RawFrontMatter) -> Result<SectionFrontMatter> {
+        let mut f: SectionFrontMatter = raw.deserialize()?;
f.extra = match fix_toml_dates(f.extra) { f.extra = match fix_toml_dates(f.extra) {
Value::Object(o) => o, Value::Object(o) => o,
@ -114,6 +115,7 @@ impl Default for SectionFrontMatter {
aliases: Vec::new(), aliases: Vec::new(),
generate_feed: false, generate_feed: false,
extra: Map::new(), extra: Map::new(),
draft: false,
} }
} }
} }
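The new `draft` flag on sections can come from either front-matter flavour. A hedged sketch, mirroring how `RawFrontMatter` and `SectionFrontMatter::parse` are used in the tests elsewhere in this commit:

    let toml = RawFrontMatter::Toml(r#"draft = true"#);
    let yaml = RawFrontMatter::Yaml(r#"draft: true"#);
    assert!(SectionFrontMatter::parse(&toml).unwrap().draft);
    assert!(SectionFrontMatter::parse(&yaml).unwrap().draft);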


@ -129,6 +129,11 @@ impl FileInfo {
// We can document that // We can document that
let mut parts: Vec<String> = self.name.splitn(2, '.').map(|s| s.to_string()).collect(); let mut parts: Vec<String> = self.name.splitn(2, '.').map(|s| s.to_string()).collect();
// If language code is same as default language, go for default
if config.default_language == parts[1].as_str() {
return Ok(config.default_language.clone());
}
// The language code is not present in the config: typo or the user forgot to add it to the // The language code is not present in the config: typo or the user forgot to add it to the
// config // config
if !config.languages_codes().contains(&parts[1].as_ref()) { if !config.languages_codes().contains(&parts[1].as_ref()) {
@ -189,6 +194,19 @@ mod tests {
assert_eq!(res.unwrap(), "fr"); assert_eq!(res.unwrap(), "fr");
} }
#[test]
fn can_find_valid_language_with_default_locale() {
let mut config = Config::default();
config.languages.push(Language { code: String::from("fr"), feed: false, search: false });
let mut file = FileInfo::new_page(
&Path::new("/home/vincent/code/site/content/posts/tutorials/python.en.md"),
&PathBuf::new(),
);
let res = file.find_language(&config);
assert!(res.is_ok());
assert_eq!(res.unwrap(), config.default_language);
}
#[test] #[test]
fn can_find_valid_language_in_page_with_assets() { fn can_find_valid_language_in_page_with_assets() {
let mut config = Config::default(); let mut config = Config::default();


@ -112,7 +112,7 @@ impl Library {
subsections subsections
// Using the original filename to work for multi-lingual sections // Using the original filename to work for multi-lingual sections
.entry(grand_parent.join(&section.file.filename)) .entry(grand_parent.join(&section.file.filename))
.or_insert_with(|| vec![]) .or_insert_with(Vec::new)
.push(section.file.path.clone()); .push(section.file.path.clone());
} }
@ -157,7 +157,7 @@ impl Library {
parent_is_transparent = section.meta.transparent; parent_is_transparent = section.meta.transparent;
} }
page.ancestors = page.ancestors =
ancestors.get(&parent_section_path).cloned().unwrap_or_else(|| vec![]); ancestors.get(&parent_section_path).cloned().unwrap_or_else(Vec::new);
// Don't forget to push the actual parent // Don't forget to push the actual parent
page.ancestors.push(*section_key); page.ancestors.push(*section_key);
@ -201,8 +201,7 @@ impl Library {
children.sort_by(|a, b| sections_weight[a].cmp(&sections_weight[b])); children.sort_by(|a, b| sections_weight[a].cmp(&sections_weight[b]));
section.subsections = children; section.subsections = children;
} }
section.ancestors = section.ancestors = ancestors.get(&section.file.path).cloned().unwrap_or_else(Vec::new);
ancestors.get(&section.file.path).cloned().unwrap_or_else(|| vec![]);
} }
} }


@ -237,7 +237,7 @@ pub fn find_taxonomies(config: &Config, library: &Library) -> Result<Vec<Taxonom
.get_mut(&taxo_key) .get_mut(&taxo_key)
.unwrap() .unwrap()
.entry(term.to_string()) .entry(term.to_string())
.or_insert_with(|| vec![]) .or_insert_with(Vec::new)
.push(key); .push(key);
} }
} else { } else {


@ -16,4 +16,4 @@ default-features = false
features = ["blocking", "rustls-tls"] features = ["blocking", "rustls-tls"]
[dev-dependencies] [dev-dependencies]
mockito = "0.27" mockito = "0.28"


@ -15,6 +15,7 @@ pest = "2"
pest_derive = "2" pest_derive = "2"
regex = "1" regex = "1"
lazy_static = "1" lazy_static = "1"
gh-emoji = "1.0"
errors = { path = "../errors" } errors = { path = "../errors" }
front_matter = { path = "../front_matter" } front_matter = { path = "../front_matter" }


@ -17,12 +17,12 @@ Lorem markdownum litora, care ponto nomina, et ut aspicit gelidas sui et
purpureo genuit. Tamen colla venientis [delphina](http://nil-sol.com/ecquis) purpureo genuit. Tamen colla venientis [delphina](http://nil-sol.com/ecquis)
Tusci et temptata citaeque curam isto ubi vult vulnere reppulit. Tusci et temptata citaeque curam isto ubi vult vulnere reppulit.
- Seque vidit flendoque de quodam - :one: Seque vidit flendoque de quodam
- Dabit minimos deiecto caputque noctis pluma - :two: Dabit minimos deiecto caputque noctis pluma
- Leti coniunx est Helicen - :three: Leti coniunx est Helicen
- Illius pulvereumque Icare inpositos - :four: Illius pulvereumque Icare inpositos
- Vivunt pereo pluvio tot ramos Olenios gelidis - :five: Vivunt pereo pluvio tot ramos Olenios gelidis
- Quater teretes natura inde - :six: Quater teretes natura inde
### A subsection ### A subsection
@ -35,7 +35,7 @@ granum captantur potuisse Minervae, frugum.
> Clivo sub inprovisoque nostrum minus fama est, discordia patrem petebat precatur > Clivo sub inprovisoque nostrum minus fama est, discordia patrem petebat precatur
absumitur, poena per sit. Foramina *tamen cupidine* memor supplex tollentes absumitur, poena per sit. Foramina *tamen cupidine* memor supplex tollentes
dictum unam orbem, Anubis caecae. Viderat formosior tegebat satis, Aethiopasque dictum unam orbem, Anubis caecae. Viderat formosior tegebat satis, Aethiopasque
sit submisso coniuge tristis ubi! sit submisso coniuge tristis ubi! :exclamation:
## Praeceps Corinthus totidem quem crus vultum cape ## Praeceps Corinthus totidem quem crus vultum cape
@ -68,7 +68,7 @@ And a shortcode:
### Another subsection ### Another subsection
Gotta make the toc do a little bit of work Gotta make the toc do a little bit of work
# A big title # A big title :fire:
- hello - hello
- world - world
@ -96,7 +96,7 @@ fn bench_render_content_without_highlighting(b: &mut test::Bencher) {
tera.add_raw_template("shortcodes/youtube.html", "{{id}}").unwrap(); tera.add_raw_template("shortcodes/youtube.html", "{{id}}").unwrap();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = false; config.markdown.highlight_code = false;
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| render_content(CONTENT, &context).unwrap()); b.iter(|| render_content(CONTENT, &context).unwrap());
} }
@ -106,7 +106,7 @@ fn bench_render_content_no_shortcode(b: &mut test::Bencher) {
let tera = Tera::default(); let tera = Tera::default();
let content2 = CONTENT.replace(r#"{{ youtube(id="my_youtube_id") }}"#, ""); let content2 = CONTENT.replace(r#"{{ youtube(id="my_youtube_id") }}"#, "");
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = false; config.markdown.highlight_code = false;
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
@ -123,3 +123,16 @@ fn bench_render_shortcodes_one_present(b: &mut test::Bencher) {
b.iter(|| render_shortcodes(CONTENT, &context)); b.iter(|| render_shortcodes(CONTENT, &context));
} }
#[bench]
fn bench_render_content_no_shortcode_with_emoji(b: &mut test::Bencher) {
let tera = Tera::default();
let content2 = CONTENT.replace(r#"{{ youtube(id="my_youtube_id") }}"#, "");
let mut config = Config::default();
config.markdown.highlight_code = false;
config.markdown.render_emoji = true;
let permalinks_ctx = HashMap::new();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| render_content(&content2, &context).unwrap());
}


@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::collections::HashMap; use std::collections::HashMap;
use config::Config; use config::Config;
@ -7,11 +8,11 @@ use tera::{Context, Tera};
/// All the information from the zola site that is needed to render HTML from markdown /// All the information from the zola site that is needed to render HTML from markdown
#[derive(Debug)] #[derive(Debug)]
pub struct RenderContext<'a> { pub struct RenderContext<'a> {
pub tera: &'a Tera, pub tera: Cow<'a, Tera>,
pub config: &'a Config, pub config: &'a Config,
pub tera_context: Context, pub tera_context: Context,
pub current_page_permalink: &'a str, pub current_page_permalink: &'a str,
pub permalinks: &'a HashMap<String, String>, pub permalinks: Cow<'a, HashMap<String, String>>,
pub insert_anchor: InsertAnchor, pub insert_anchor: InsertAnchor,
} }
@ -25,13 +26,25 @@ impl<'a> RenderContext<'a> {
) -> RenderContext<'a> { ) -> RenderContext<'a> {
let mut tera_context = Context::new(); let mut tera_context = Context::new();
tera_context.insert("config", config); tera_context.insert("config", config);
RenderContext { Self {
tera, tera: Cow::Borrowed(tera),
tera_context, tera_context,
current_page_permalink, current_page_permalink,
permalinks, permalinks: Cow::Borrowed(permalinks),
insert_anchor, insert_anchor,
config, config,
} }
} }
// In use in the markdown filter
pub fn from_config(config: &'a Config) -> RenderContext<'a> {
Self {
tera: Cow::Owned(Tera::default()),
tera_context: Context::new(),
current_page_permalink: "",
permalinks: Cow::Owned(HashMap::new()),
insert_anchor: InsertAnchor::None,
config,
}
}
} }
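`from_config` exists so callers such as the markdown Tera filter mentioned above can build a rendering context from a `Config` alone. A minimal sketch, assuming `config::Config` and the rendering crate's `RenderContext` are in scope:

    let config = Config::default();
    let context = RenderContext::from_config(&config);
    // No permalinks, no shortcode templates, no current page: just enough to render markdown.
    assert_eq!(context.current_page_permalink, "");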


@ -13,7 +13,6 @@ use utils::slugs::slugify_anchors;
use utils::vec::InsertMany; use utils::vec::InsertMany;
use self::cmark::{Event, LinkType, Options, Parser, Tag}; use self::cmark::{Event, LinkType, Options, Parser, Tag};
use pulldown_cmark::CodeBlockKind;
mod codeblock; mod codeblock;
mod fence; mod fence;
@ -101,17 +100,12 @@ fn fix_link(
return Ok(link.to_string()); return Ok(link.to_string());
} }
// TODO: remove me in a few versions when people have upgraded
if link.starts_with("./") && link.contains(".md") {
println!("It looks like the link `{}` is using the previous syntax for internal links: start with @/ instead", link);
}
// A few situations here: // A few situations here:
// - it could be a relative link (starting with `@/`) // - it could be a relative link (starting with `@/`)
// - it could be a link to a co-located asset // - it could be a link to a co-located asset
// - it could be a normal link // - it could be a normal link
let result = if link.starts_with("@/") { let result = if link.starts_with("@/") {
match resolve_internal_link(&link, context.permalinks) { match resolve_internal_link(&link, &context.permalinks) {
Ok(resolved) => { Ok(resolved) => {
if resolved.anchor.is_some() { if resolved.anchor.is_some() {
internal_links_with_anchors internal_links_with_anchors
@ -168,6 +162,10 @@ fn get_heading_refs(events: &[Event]) -> Vec<HeadingRef> {
} }
pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Rendered> { pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Rendered> {
lazy_static! {
static ref EMOJI_REPLACER: gh_emoji::Replacer = gh_emoji::Replacer::new();
}
// the rendered html // the rendered html
let mut html = String::with_capacity(content.len()); let mut html = String::with_capacity(content.len());
// Set while parsing // Set while parsing
@ -188,6 +186,10 @@ pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Render
opts.insert(Options::ENABLE_STRIKETHROUGH); opts.insert(Options::ENABLE_STRIKETHROUGH);
opts.insert(Options::ENABLE_TASKLISTS); opts.insert(Options::ENABLE_TASKLISTS);
if context.config.markdown.smart_punctuation {
opts.insert(Options::ENABLE_SMART_PUNCTUATION);
}
{ {
let mut events = Parser::new_ext(content, opts) let mut events = Parser::new_ext(content, opts)
.map(|event| { .map(|event| {
@ -197,20 +199,35 @@ pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Render
if let Some(ref mut code_block) = highlighter { if let Some(ref mut code_block) = highlighter {
let html = code_block.highlight(&text); let html = code_block.highlight(&text);
Event::Html(html.into()) Event::Html(html.into())
} else if context.config.markdown.render_emoji {
let processed_text = EMOJI_REPLACER.replace_all(&text);
Event::Text(processed_text.to_string().into())
} else { } else {
// Business as usual // Business as usual
Event::Text(text) Event::Text(text)
} }
} }
                 Event::Start(Tag::CodeBlock(ref kind)) => {
-                    if !context.config.highlight_code {
+                    let language = match kind {
+                        cmark::CodeBlockKind::Fenced(fence_info) => {
+                            let fence_info = fence::FenceSettings::new(fence_info);
+                            fence_info.language
+                        }
+                        _ => None,
+                    };
+                    if !context.config.highlight_code() {
+                        if let Some(lang) = language {
+                            let html = format!(r#"<pre><code class="language-{}">"#, lang);
+                            return Event::Html(html.into());
+                        }
                         return Event::Html("<pre><code>".into());
                     }
-                    let theme = &THEME_SET.themes[&context.config.highlight_theme];
+                    let theme = &THEME_SET.themes[context.config.highlight_theme()];
                     match kind {
-                        CodeBlockKind::Indented => (),
-                        CodeBlockKind::Fenced(fence_info) => {
+                        cmark::CodeBlockKind::Indented => (),
+                        cmark::CodeBlockKind::Fenced(fence_info) => {
// This selects the background color the same way that // This selects the background color the same way that
// start_coloured_html_snippet does // start_coloured_html_snippet does
let color = theme let color = theme
@ -227,11 +244,17 @@ pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Render
}; };
let snippet = start_highlighted_html_snippet(theme); let snippet = start_highlighted_html_snippet(theme);
let mut html = snippet.0; let mut html = snippet.0;
html.push_str("<code>"); if let Some(lang) = language {
html.push_str(r#"<code class="language-"#);
html.push_str(lang);
html.push_str(r#"">"#);
} else {
html.push_str("<code>");
}
Event::Html(html.into()) Event::Html(html.into())
} }
Event::End(Tag::CodeBlock(_)) => { Event::End(Tag::CodeBlock(_)) => {
if !context.config.highlight_code { if !context.config.highlight_code() {
return Event::Html("</code></pre>\n".into()); return Event::Html("</code></pre>\n".into());
} }
// reset highlight and close the code block // reset highlight and close the code block
@ -264,29 +287,42 @@ pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Render
return Event::Html("".into()); return Event::Html("".into());
} }
}; };
-                    Event::Start(Tag::Link(link_type, fixed_link.into(), title))
+                    if is_external_link(&link)
+                        && context.config.markdown.has_external_link_tweaks()
+                    {
+                        let mut escaped = String::new();
+                        // write_str can fail but here there are no reasons it should (afaik?)
+                        cmark::escape::escape_href(&mut escaped, &link)
+                            .expect("Could not write to buffer");
+                        Event::Html(
+                            context
+                                .config
+                                .markdown
+                                .construct_external_link_tag(&escaped, &title)
+                                .into(),
+                        )
+                    } else {
+                        Event::Start(Tag::Link(link_type, fixed_link.into(), title))
+                    }
} }
Event::Html(ref markup) => { Event::Html(ref markup) => {
if markup.contains("<!-- more -->") { if markup.contains("<!-- more -->") {
has_summary = true; has_summary = true;
Event::Html(CONTINUE_READING.into()) Event::Html(CONTINUE_READING.into())
-                    } else {
-                        if in_html_block && markup.contains("</pre>") {
-                            in_html_block = false;
-                            Event::Html(markup.replacen("</pre>", "", 1).into())
-                        } else if markup.contains("pre data-shortcode") {
-                            in_html_block = true;
-                            let m = markup.replacen("<pre data-shortcode>", "", 1);
-                            if m.contains("</pre>") {
-                                in_html_block = false;
-                                Event::Html(m.replacen("</pre>", "", 1).into())
-                            } else {
-                                Event::Html(m.into())
-                            }
-                        } else {
-                            event
-                        }
-                    }
+                    } else if in_html_block && markup.contains("</pre>") {
+                        in_html_block = false;
+                        Event::Html(markup.replacen("</pre>", "", 1).into())
+                    } else if markup.contains("pre data-shortcode") {
+                        in_html_block = true;
+                        let m = markup.replacen("<pre data-shortcode>", "", 1);
+                        if m.contains("</pre>") {
+                            in_html_block = false;
+                            Event::Html(m.replacen("</pre>", "", 1).into())
+                        } else {
+                            Event::Html(m.into())
+                        }
+                    } else {
+                        event
+                    }
} }
_ => event, _ => event,
@ -348,7 +384,7 @@ pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<Render
let anchor_link = utils::templates::render_template( let anchor_link = utils::templates::render_template(
&ANCHOR_LINK_TEMPLATE, &ANCHOR_LINK_TEMPLATE,
context.tera, &context.tera,
c, c,
&None, &None,
) )
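To show the emoji path above end to end, a hedged sketch written in the style of the existing rendering tests; the crate paths and the test name are assumptions:

    use std::collections::HashMap;

    use config::Config;
    use front_matter::InsertAnchor;
    use rendering::{render_content, RenderContext};
    use tera::Tera;

    #[test]
    fn renders_emoji_aliases_when_enabled() {
        let tera_ctx = Tera::default();
        let permalinks_ctx = HashMap::new();
        let mut config = Config::default();
        config.markdown.render_emoji = true;
        let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
        let res = render_content("Hello :smile:", &context).unwrap();
        assert!(res.body.contains("😄"));
    }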


@ -3,6 +3,7 @@ use pest::iterators::Pair;
use pest::Parser; use pest::Parser;
use pest_derive::Parser; use pest_derive::Parser;
use regex::Regex; use regex::Regex;
use std::collections::HashMap;
use tera::{to_value, Context, Map, Value}; use tera::{to_value, Context, Map, Value};
use crate::context::RenderContext; use crate::context::RenderContext;
@ -102,6 +103,7 @@ fn render_shortcode(
name: &str, name: &str,
args: &Map<String, Value>, args: &Map<String, Value>,
context: &RenderContext, context: &RenderContext,
invocation_count: u32,
body: Option<&str>, body: Option<&str>,
) -> Result<String> { ) -> Result<String> {
let mut tera_context = Context::new(); let mut tera_context = Context::new();
@ -112,6 +114,7 @@ fn render_shortcode(
// Trimming right to avoid most shortcodes with bodies ending up with a HTML new line // Trimming right to avoid most shortcodes with bodies ending up with a HTML new line
tera_context.insert("body", b.trim_end()); tera_context.insert("body", b.trim_end());
} }
tera_context.insert("nth", &invocation_count);
tera_context.extend(context.tera_context.clone()); tera_context.extend(context.tera_context.clone());
let mut template_name = format!("shortcodes/{}.md", name); let mut template_name = format!("shortcodes/{}.md", name);
@ -139,6 +142,12 @@ fn render_shortcode(
pub fn render_shortcodes(content: &str, context: &RenderContext) -> Result<String> { pub fn render_shortcodes(content: &str, context: &RenderContext) -> Result<String> {
let mut res = String::with_capacity(content.len()); let mut res = String::with_capacity(content.len());
let mut invocation_map: HashMap<String, u32> = HashMap::new();
let mut get_invocation_count = |name: &str| {
let invocation_number = invocation_map.entry(String::from(name)).or_insert(0);
*invocation_number += 1;
*invocation_number
};
let mut pairs = match ContentParser::parse(Rule::page, content) { let mut pairs = match ContentParser::parse(Rule::page, content) {
Ok(p) => p, Ok(p) => p,
@ -184,7 +193,13 @@ pub fn render_shortcodes(content: &str, context: &RenderContext) -> Result<Strin
Rule::text => res.push_str(p.as_span().as_str()), Rule::text => res.push_str(p.as_span().as_str()),
Rule::inline_shortcode => { Rule::inline_shortcode => {
let (name, args) = parse_shortcode_call(p); let (name, args) = parse_shortcode_call(p);
-                    res.push_str(&render_shortcode(&name, &args, context, None)?);
+                    res.push_str(&render_shortcode(
+                        &name,
+                        &args,
+                        context,
+                        get_invocation_count(&name),
+                        None,
+                    )?);
} }
Rule::shortcode_with_body => { Rule::shortcode_with_body => {
let mut inner = p.into_inner(); let mut inner = p.into_inner();
@ -192,7 +207,13 @@ pub fn render_shortcodes(content: &str, context: &RenderContext) -> Result<Strin
// we don't care about the closing tag // we don't care about the closing tag
let (name, args) = parse_shortcode_call(inner.next().unwrap()); let (name, args) = parse_shortcode_call(inner.next().unwrap());
let body = inner.next().unwrap().as_span().as_str(); let body = inner.next().unwrap().as_span().as_str();
-                    res.push_str(&render_shortcode(&name, &args, context, Some(body))?);
+                    res.push_str(&render_shortcode(
+                        &name,
+                        &args,
+                        context,
+                        get_invocation_count(&name),
+                        Some(body),
+                    )?);
} }
Rule::ignored_inline_shortcode => { Rule::ignored_inline_shortcode => {
res.push_str( res.push_str(

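The `nth` counter threaded through `render_shortcode` above is exposed to shortcode templates as a Tera variable. A hedged sketch of using it, in the style of the existing shortcode tests; the `counter` shortcode, its template, and the exact exported path of `render_shortcodes` are assumptions:

    let mut tera = Tera::default();
    // Hypothetical shortcode template that just prints its invocation number.
    tera.add_raw_template("shortcodes/counter.md", "{{ nth }}").unwrap();
    let config = Config::default();
    let permalinks = HashMap::new();
    let context = RenderContext::new(&tera, &config, "", &permalinks, InsertAnchor::None);
    let res = render_shortcodes("{{ counter() }} then {{ counter() }}", &context).unwrap();
    assert!(res.contains('1') && res.contains('2'));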

@ -37,7 +37,7 @@ fn hl_lines_simple() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -66,7 +66,7 @@ fn hl_lines_in_middle() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -95,7 +95,7 @@ fn hl_lines_all() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -122,7 +122,7 @@ fn hl_lines_start_from_one() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -150,7 +150,7 @@ fn hl_lines_start_from_zero() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -178,7 +178,7 @@ fn hl_lines_end() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -206,7 +206,7 @@ fn hl_lines_end_out_of_bounds() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -234,7 +234,7 @@ fn hl_lines_overlap() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -261,7 +261,7 @@ fn hl_lines_multiple() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -289,7 +289,7 @@ fn hl_lines_extra_spaces() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -317,7 +317,7 @@ fn hl_lines_int_and_range() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -346,7 +346,7 @@ fn hl_lines_single_line_range() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"
@ -375,7 +375,7 @@ fn hl_lines_reverse_range() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content( let res = render_content(
r#" r#"


@ -23,7 +23,7 @@ fn doesnt_highlight_code_block_with_highlighting_off() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = false; config.markdown.highlight_code = false;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```\n$ gutenberg server\n```", &context).unwrap(); let res = render_content("```\n$ gutenberg server\n```", &context).unwrap();
assert_eq!(res.body, "<pre><code>$ gutenberg server\n</code></pre>\n"); assert_eq!(res.body, "<pre><code>$ gutenberg server\n</code></pre>\n");
@ -34,7 +34,7 @@ fn can_highlight_code_block_no_lang() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```\n$ gutenberg server\n$ ping\n```", &context).unwrap(); let res = render_content("```\n$ gutenberg server\n$ ping\n```", &context).unwrap();
assert_eq!( assert_eq!(
@ -48,12 +48,12 @@ fn can_highlight_code_block_with_lang() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```python\nlist.append(1)\n```", &context).unwrap(); let res = render_content("```python\nlist.append(1)\n```", &context).unwrap();
assert_eq!( assert_eq!(
res.body, res.body,
"<pre style=\"background-color:#2b303b;\">\n<code><span style=\"color:#c0c5ce;\">list.</span><span style=\"color:#bf616a;\">append</span><span style=\"color:#c0c5ce;\">(</span><span style=\"color:#d08770;\">1</span><span style=\"color:#c0c5ce;\">)\n</span></code></pre>" "<pre style=\"background-color:#2b303b;\">\n<code class=\"language-python\"><span style=\"color:#c0c5ce;\">list.</span><span style=\"color:#bf616a;\">append</span><span style=\"color:#c0c5ce;\">(</span><span style=\"color:#d08770;\">1</span><span style=\"color:#c0c5ce;\">)\n</span></code></pre>"
); );
} }
@ -62,13 +62,13 @@ fn can_higlight_code_block_with_unknown_lang() {
let tera_ctx = Tera::default(); let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = true; config.markdown.highlight_code = true;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```yolo\nlist.append(1)\n```", &context).unwrap(); let res = render_content("```yolo\nlist.append(1)\n```", &context).unwrap();
// defaults to plain text // defaults to plain text
assert_eq!( assert_eq!(
res.body, res.body,
"<pre style=\"background-color:#2b303b;\">\n<code><span style=\"color:#c0c5ce;\">list.append(1)\n</span></code></pre>" "<pre style=\"background-color:#2b303b;\">\n<code class=\"language-yolo\"><span style=\"color:#c0c5ce;\">list.append(1)\n</span></code></pre>"
); );
} }
@ -87,7 +87,9 @@ Hello
) )
.unwrap(); .unwrap();
assert!(res.body.contains("<p>Hello</p>\n<div >")); assert!(res.body.contains("<p>Hello</p>\n<div >"));
assert!(res.body.contains(r#"<iframe src="https://www.youtube.com/embed/ub36ffWAqgQ""#)); assert!(res
.body
.contains(r#"<iframe src="https://www.youtube-nocookie.com/embed/ub36ffWAqgQ""#));
} }
#[test] #[test]
@ -99,7 +101,7 @@ fn can_render_shortcode_with_markdown_char_in_args_name() {
for i in input { for i in input {
let res = let res =
render_content(&format!("{{{{ youtube(id=\"hey\", {}=1) }}}}", i), &context).unwrap(); render_content(&format!("{{{{ youtube(id=\"hey\", {}=1) }}}}", i), &context).unwrap();
assert!(res.body.contains(r#"<iframe src="https://www.youtube.com/embed/hey""#)); assert!(res.body.contains(r#"<iframe src="https://www.youtube-nocookie.com/embed/hey""#));
} }
} }
@ -119,7 +121,7 @@ fn can_render_shortcode_with_markdown_char_in_args_value() {
let res = render_content(&format!("{{{{ youtube(id=\"{}\") }}}}", i), &context).unwrap(); let res = render_content(&format!("{{{{ youtube(id=\"{}\") }}}}", i), &context).unwrap();
assert!(res assert!(res
.body .body
.contains(&format!(r#"<iframe src="https://www.youtube.com/embed/{}""#, i))); .contains(&format!(r#"<iframe src="https://www.youtube-nocookie.com/embed/{}""#, i)));
} }
} }
@ -232,10 +234,12 @@ Hello
) )
.unwrap(); .unwrap();
assert!(res.body.contains("<p>Hello</p>\n<div >")); assert!(res.body.contains("<p>Hello</p>\n<div >"));
assert!(res.body.contains(r#"<iframe src="https://www.youtube.com/embed/ub36ffWAqgQ""#));
assert!(res assert!(res
.body .body
.contains(r#"<iframe src="https://www.youtube.com/embed/ub36ffWAqgQ?autoplay=1""#)); .contains(r#"<iframe src="https://www.youtube-nocookie.com/embed/ub36ffWAqgQ""#));
assert!(res.body.contains(
r#"<iframe src="https://www.youtube-nocookie.com/embed/ub36ffWAqgQ?autoplay=1""#
));
assert!(res.body.contains(r#"<iframe src="https://www.streamable.com/e/c0ic""#)); assert!(res.body.contains(r#"<iframe src="https://www.streamable.com/e/c0ic""#));
assert!(res.body.contains(r#"//player.vimeo.com/video/210073083""#)); assert!(res.body.contains(r#"//player.vimeo.com/video/210073083""#));
} }
@ -244,7 +248,7 @@ Hello
fn doesnt_render_ignored_shortcodes() { fn doesnt_render_ignored_shortcodes() {
let permalinks_ctx = HashMap::new(); let permalinks_ctx = HashMap::new();
let mut config = Config::default(); let mut config = Config::default();
config.highlight_code = false; config.markdown.highlight_code = false;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None); let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"```{{/* youtube(id="w7Ft2ymGmfc") */}}```"#, &context).unwrap(); let res = render_content(r#"```{{/* youtube(id="w7Ft2ymGmfc") */}}```"#, &context).unwrap();
assert_eq!(res.body, "<p><code>{{ youtube(id=&quot;w7Ft2ymGmfc&quot;) }}</code></p>\n"); assert_eq!(res.body, "<p><code>{{ youtube(id=&quot;w7Ft2ymGmfc&quot;) }}</code></p>\n");
@ -1004,7 +1008,6 @@ fn can_render_commented_out_shortcodes_fine() {
assert_eq!(res.body, expected); assert_eq!(res.body, expected);
} }
// https://zola.discourse.group/t/zola-12-issue-with-continue-reading/590/7 // https://zola.discourse.group/t/zola-12-issue-with-continue-reading/590/7
#[test] #[test]
fn can_render_read_more_after_shortcode() { fn can_render_read_more_after_shortcode() {
@ -1036,3 +1039,120 @@ Again more text"#;
let res = render_content(markdown_string, &context).unwrap(); let res = render_content(markdown_string, &context).unwrap();
assert_eq!(res.body, expected); assert_eq!(res.body, expected);
} }
#[test]
fn can_render_emoji_alias() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.markdown.render_emoji = true;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("Hello, World! :smile:", &context).unwrap();
assert_eq!(res.body, "<p>Hello, World! 😄</p>\n");
}
#[test]
fn emoji_aliases_are_ignored_when_disabled_in_config() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("Hello, World! :smile:", &context).unwrap();
assert_eq!(res.body, "<p>Hello, World! :smile:</p>\n");
}
#[test]
fn invocation_count_increments_in_shortcode() {
let permalinks_ctx = HashMap::new();
let mut tera = Tera::default();
tera.extend(&ZOLA_TERA).unwrap();
let shortcode_template_a = r#"<p>a: {{ nth }}</p>"#;
let shortcode_template_b = r#"<p>b: {{ nth }}</p>"#;
let markdown_string = r#"{{ a() }}
{{ b() }}
{{ a() }}
{{ b() }}
"#;
let expected = r#"<p>a: 1</p>
<p>b: 1</p>
<p>a: 2</p>
<p>b: 2</p>
"#;
tera.add_raw_template("shortcodes/a.html", shortcode_template_a).unwrap();
tera.add_raw_template("shortcodes/b.html", shortcode_template_b).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(markdown_string, &context).unwrap();
assert_eq!(res.body, expected);
}
#[test]
fn basic_external_links_unchanged() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("<https://google.com>", &context).unwrap();
assert_eq!(res.body, "<p><a href=\"https://google.com\">https://google.com</a></p>\n");
}
#[test]
fn can_set_target_blank_for_external_link() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.markdown.external_links_target_blank = true;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("<https://google.com>", &context).unwrap();
assert_eq!(res.body, "<p><a rel=\"noopener\" target=\"_blank\" href=\"https://google.com\">https://google.com</a></p>\n");
}
#[test]
fn can_set_nofollow_for_external_link() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.markdown.external_links_no_follow = true;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
// Testing href escaping while we're there
let res = render_content("<https://google.com/éllo>", &context).unwrap();
assert_eq!(
res.body,
"<p><a rel=\"nofollow\" href=\"https://google.com/%C3%A9llo\">https://google.com/éllo</a></p>\n"
);
}
#[test]
fn can_set_noreferrer_for_external_link() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.markdown.external_links_no_referrer = true;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("<https://google.com>", &context).unwrap();
assert_eq!(
res.body,
"<p><a rel=\"noreferrer\" href=\"https://google.com\">https://google.com</a></p>\n"
);
}
#[test]
fn can_set_all_options_for_external_link() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.markdown.external_links_target_blank = true;
config.markdown.external_links_no_follow = true;
config.markdown.external_links_no_referrer = true;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("<https://google.com>", &context).unwrap();
assert_eq!(res.body, "<p><a rel=\"noopener nofollow noreferrer\" target=\"_blank\" href=\"https://google.com\">https://google.com</a></p>\n");
}
#[test]
fn can_use_smart_punctuation() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.markdown.smart_punctuation = true;
let context = RenderContext::new(&ZOLA_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"This -- is "it"..."#, &context).unwrap();
assert_eq!(res.body, "<p>This is “it”…</p>\n");
}

View file

@ -1,6 +1,6 @@
/** /**
* elasticlunr - http://weixsong.github.io * elasticlunr - http://weixsong.github.io
* Lightweight full-text search engine in Javascript for browser search and offline search. - 0.9.5 * Lightweight full-text search engine in Javascript for browser search and offline search. - 0.9.6
* *
* Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Oliver Nightingale
* Copyright (C) 2017 Wei Song * Copyright (C) 2017 Wei Song

View file

@ -8,6 +8,7 @@ include = ["src/**/*"]
[dependencies] [dependencies]
tera = "1" tera = "1"
glob = "0.3" glob = "0.3"
walkdir = "2"
minify-html = "0.3.8" minify-html = "0.3.8"
rayon = "1" rayon = "1"
serde = "1" serde = "1"
@ -15,6 +16,7 @@ serde_derive = "1"
sass-rs = "0.2" sass-rs = "0.2"
lazy_static = "1.1" lazy_static = "1.1"
relative-path = "1" relative-path = "1"
slotmap = "0.4"
errors = { path = "../errors" } errors = { path = "../errors" }
config = { path = "../config" } config = { path = "../config" }

View file

@ -71,7 +71,7 @@ fn bench_render_paginated(b: &mut test::Bencher) {
let section = library.sections_values()[0]; let section = library.sections_values()[0];
let paginator = Paginator::from_section(&section, &library); let paginator = Paginator::from_section(&section, &library);
b.iter(|| site.render_paginated(public, &paginator)); b.iter(|| site.render_paginated(Vec::new(), &paginator));
} }
#[bench] #[bench]

View file

@ -9,11 +9,11 @@ use std::fs::remove_dir_all;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, RwLock}; use std::sync::{Arc, Mutex, RwLock};
use glob::glob;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use minify_html::{with_friendly_error, Cfg}; use minify_html::{with_friendly_error, Cfg};
use rayon::prelude::*; use rayon::prelude::*;
use tera::{Context, Tera}; use tera::{Context, Tera};
use walkdir::{DirEntry, WalkDir};
use config::{get_config, Config}; use config::{get_config, Config};
use errors::{bail, Error, Result}; use errors::{bail, Error, Result};
@ -85,7 +85,7 @@ impl Site {
let static_path = path.join("static"); let static_path = path.join("static");
let imageproc = let imageproc =
imageproc::Processor::new(content_path.clone(), &static_path, &config.base_url); imageproc::Processor::new(content_path.clone(), &static_path, &config.base_url);
let output_path = path.join("public"); let output_path = path.join(config.output_dir.clone());
let site = Site { let site = Site {
base_path: path.to_path_buf(), base_path: path.to_path_buf(),
@ -166,72 +166,107 @@ impl Site {
/// out of them /// out of them
pub fn load(&mut self) -> Result<()> { pub fn load(&mut self) -> Result<()> {
let base_path = self.base_path.to_string_lossy().replace("\\", "/"); let base_path = self.base_path.to_string_lossy().replace("\\", "/");
let content_glob = format!("{}/{}", base_path, "content/**/*.md");
let (section_entries, page_entries): (Vec<_>, Vec<_>) = glob(&content_glob)
.expect("Invalid glob")
.filter_map(|e| e.ok())
.filter(|e| !e.as_path().file_name().unwrap().to_str().unwrap().starts_with('.'))
.partition(|entry| {
entry.as_path().file_name().unwrap().to_str().unwrap().starts_with("_index.")
});
self.library = Arc::new(RwLock::new(Library::new(
page_entries.len(),
section_entries.len(),
self.config.is_multilingual(),
)));
let sections = {
let config = &self.config;
section_entries
.into_par_iter()
.map(|entry| {
let path = entry.as_path();
Section::from_file(path, config, &self.base_path)
})
.collect::<Vec<_>>()
};
let pages = {
let config = &self.config;
page_entries
.into_par_iter()
.filter(|entry| match &config.ignored_content_globset {
Some(gs) => !gs.is_match(entry.as_path()),
None => true,
})
.map(|entry| {
let path = entry.as_path();
Page::from_file(path, config, &self.base_path)
})
.collect::<Vec<_>>()
};
// Kinda duplicated code for add_section/add_page but necessary to do it that
// way because of the borrow checker
for section in sections {
let s = section?;
self.add_section(s, false)?;
}
self.create_default_index_sections()?;
self.library = Arc::new(RwLock::new(Library::new(0, 0, self.config.is_multilingual())));
let mut pages_insert_anchors = HashMap::new(); let mut pages_insert_anchors = HashMap::new();
for page in pages {
let p = page?; // not the most elegant loop, but this is necessary to use skip_current_dir
// Should draft pages be ignored? // which we can only decide to use after we've deserialised the section
if p.meta.draft && !self.include_drafts { // so it's kinda necessary
let mut dir_walker = WalkDir::new(format!("{}/{}", base_path, "content/")).into_iter();
loop {
let entry: DirEntry = match dir_walker.next() {
None => break,
Some(Err(_)) => continue,
Some(Ok(entry)) => entry,
};
let path = entry.path();
let file_name = match path.file_name() {
None => continue,
Some(name) => name.to_str().unwrap(),
};
// ignore excluded content
match &self.config.ignored_content_globset {
Some(gs) => {
if gs.is_match(path) {
continue;
}
}
None => (),
}
// we process a section when we encounter the dir
// so we can process it before any of the pages
// therefore we should skip the actual file to avoid duplication
if file_name.starts_with("_index.") {
continue; continue;
} }
pages_insert_anchors.insert(
p.file.path.clone(), // skip hidden files and non md files
self.find_parent_section_insert_anchor(&p.file.parent.clone(), &p.lang), if !path.is_dir() && (!file_name.ends_with(".md") || file_name.starts_with('.')) {
); continue;
self.add_page(p, false)?; }
// is it a section or not?
if path.is_dir() {
// if we are processing a section we have to collect
// index files for all languages and process them simultaneously
// before any of the pages
let index_files = WalkDir::new(&path)
.max_depth(1)
.into_iter()
.filter_map(|e| match e {
Err(_) => None,
Ok(f) => {
let path_str = f.path().file_name().unwrap().to_str().unwrap();
if f.path().is_file()
&& path_str.starts_with("_index.")
&& path_str.ends_with(".md")
{
Some(f)
} else {
None
}
}
})
.collect::<Vec<DirEntry>>();
for index_file in index_files {
let section = match Section::from_file(
index_file.path(),
&self.config,
&self.base_path,
) {
Err(_) => continue,
Ok(sec) => sec,
};
// if the section is drafted we can skip the entire dir
if section.meta.draft && !self.include_drafts {
dir_walker.skip_current_dir();
continue;
}
self.add_section(section, false)?;
}
} else {
let page = Page::from_file(path, &self.config, &self.base_path)
.expect("error deserialising page");
// should we skip drafts?
if page.meta.draft && !self.include_drafts {
continue;
}
pages_insert_anchors.insert(
page.file.path.clone(),
self.find_parent_section_insert_anchor(&page.file.parent.clone(), &page.lang),
);
self.add_page(page, false)?;
}
} }
self.create_default_index_sections()?;
{ {
let library = self.library.read().unwrap(); let library = self.library.read().unwrap();

View file

@ -5,7 +5,7 @@ use tera::Tera;
use crate::Site; use crate::Site;
use config::Config; use config::Config;
use errors::{bail, Error, Result}; use errors::{bail, Error, Result};
use templates::{global_fns, ZOLA_TERA}; use templates::{filters, global_fns, ZOLA_TERA};
use utils::templates::rewrite_theme_paths; use utils::templates::rewrite_theme_paths;
pub fn load_tera(path: &Path, config: &Config) -> Result<Tera> { pub fn load_tera(path: &Path, config: &Config) -> Result<Tera> {
@ -50,6 +50,8 @@ pub fn load_tera(path: &Path, config: &Config) -> Result<Tera> {
/// Adds global fns that are to be available to shortcodes while rendering markdown /// Adds global fns that are to be available to shortcodes while rendering markdown
pub fn register_early_global_fns(site: &mut Site) { pub fn register_early_global_fns(site: &mut Site) {
site.tera.register_filter("markdown", filters::MarkdownFilter::new(site.config.clone()));
site.tera.register_function( site.tera.register_function(
"get_url", "get_url",
global_fns::GetUrl::new( global_fns::GetUrl::new(

View file

@ -177,6 +177,9 @@ fn can_build_site_without_live_reload() {
assert!(file_exists!(public, "nested_sass/sass.css")); assert!(file_exists!(public, "nested_sass/sass.css"));
assert!(file_exists!(public, "nested_sass/scss.css")); assert!(file_exists!(public, "nested_sass/scss.css"));
assert!(!file_exists!(public, "secret_section/index.html"));
assert!(!file_exists!(public, "secret_section/page.html"));
assert!(!file_exists!(public, "secret_section/secret_sub_section/hello.html"));
// no live reload code // no live reload code
assert_eq!( assert_eq!(
file_contains!(public, "index.html", "/livereload.js?port=1112&amp;mindelay=10"), file_contains!(public, "index.html", "/livereload.js?port=1112&amp;mindelay=10"),
@ -210,7 +213,7 @@ fn can_build_site_without_live_reload() {
#[test] #[test]
fn can_build_site_with_live_reload_and_drafts() { fn can_build_site_with_live_reload_and_drafts() {
let (_, _tmp_dir, public) = build_site_with_setup("test_site", |mut site| { let (site, _tmp_dir, public) = build_site_with_setup("test_site", |mut site| {
site.enable_live_reload(1000); site.enable_live_reload(1000);
site.include_drafts(); site.include_drafts();
(site, true) (site, true)
@ -254,6 +257,15 @@ fn can_build_site_with_live_reload_and_drafts() {
// Drafts are included // Drafts are included
assert!(file_exists!(public, "posts/draft/index.html")); assert!(file_exists!(public, "posts/draft/index.html"));
assert!(file_contains!(public, "sitemap.xml", "draft")); assert!(file_contains!(public, "sitemap.xml", "draft"));
// drafted sections are included
let library = site.library.read().unwrap();
assert_eq!(library.sections().len(), 14);
assert!(file_exists!(public, "secret_section/index.html"));
assert!(file_exists!(public, "secret_section/draft-page/index.html"));
assert!(file_exists!(public, "secret_section/page/index.html"));
assert!(file_exists!(public, "secret_section/secret_sub_section/hello/index.html"));
} }
#[test] #[test]

View file

@ -6,22 +6,23 @@ edition = "2018"
[dependencies] [dependencies]
tera = "1" tera = "1"
base64 = "0.12" base64 = "0.13"
lazy_static = "1" lazy_static = "1"
pulldown-cmark = { version = "0.8", default-features = false }
toml = "0.5" toml = "0.5"
csv = "1" csv = "1"
image = "0.23" image = "0.23"
serde_json = "1.0" serde_json = "1.0"
sha2 = "0.9" sha2 = "0.9"
url = "2" url = "2"
nom-bibtex = "0.3"
svg_metadata = "0.4.1"
errors = { path = "../errors" } errors = { path = "../errors" }
utils = { path = "../utils" } utils = { path = "../utils" }
library = { path = "../library" } library = { path = "../library" }
config = { path = "../config" } config = { path = "../config" }
imageproc = { path = "../imageproc" } imageproc = { path = "../imageproc" }
svg_metadata = "0.4.1" rendering = { path = "../rendering" }
[dependencies.reqwest] [dependencies.reqwest]
version = "0.10" version = "0.10"
@ -29,4 +30,4 @@ default-features = false
features = ["blocking", "rustls-tls"] features = ["blocking", "rustls-tls"]
[dev-dependencies] [dev-dependencies]
mockito = "0.27" mockito = "0.28"

View file

@ -1,2 +1,3 @@
User-agent: * User-agent: *
Allow: /
Sitemap: {{ get_url(path="sitemap.xml") }} Sitemap: {{ get_url(path="sitemap.xml") }}

View file

@ -1,3 +1,3 @@
<div {% if class %}class="{{class}}"{% endif %}> <div {% if class %}class="{{class}}"{% endif %}>
<iframe src="https://www.youtube.com/embed/{{id}}{% if autoplay %}?autoplay=1{% endif %}" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe> <iframe src="https://www.youtube-nocookie.com/embed/{{id}}{% if autoplay %}?autoplay=1{% endif %}" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>
</div> </div>

View file

@ -2,38 +2,44 @@ use std::collections::HashMap;
use std::hash::BuildHasher; use std::hash::BuildHasher;
use base64::{decode, encode}; use base64::{decode, encode};
use pulldown_cmark as cmark; use config::Config;
use tera::{to_value, try_get_value, Result as TeraResult, Value}; use rendering::{render_content, RenderContext};
use tera::{to_value, try_get_value, Filter as TeraFilter, Result as TeraResult, Value};
pub fn markdown<S: BuildHasher>( #[derive(Debug)]
value: &Value, pub struct MarkdownFilter {
args: &HashMap<String, Value, S>, config: Config,
) -> TeraResult<Value> { }
let s = try_get_value!("markdown", "value", String, value);
let inline = match args.get("inline") {
Some(val) => try_get_value!("markdown", "inline", bool, val),
None => false,
};
let mut opts = cmark::Options::empty(); impl MarkdownFilter {
opts.insert(cmark::Options::ENABLE_TABLES); pub fn new(config: Config) -> Self {
opts.insert(cmark::Options::ENABLE_FOOTNOTES); Self { config }
opts.insert(cmark::Options::ENABLE_STRIKETHROUGH);
opts.insert(cmark::Options::ENABLE_TASKLISTS);
let mut html = String::new();
let parser = cmark::Parser::new_ext(&s, opts);
cmark::html::push_html(&mut html, parser);
if inline {
html = html
.trim_start_matches("<p>")
// pulldown_cmark finishes a paragraph with `</p>\n`
.trim_end_matches("</p>\n")
.to_string();
} }
}
Ok(to_value(&html).unwrap()) impl TeraFilter for MarkdownFilter {
fn filter(&self, value: &Value, args: &HashMap<String, Value>) -> TeraResult<Value> {
let context = RenderContext::from_config(&self.config);
let s = try_get_value!("markdown", "value", String, value);
let inline = match args.get("inline") {
Some(val) => try_get_value!("markdown", "inline", bool, val),
None => false,
};
let mut html = match render_content(&s, &context) {
Ok(res) => res.body,
Err(e) => return Err(format!("Failed to render markdown filter: {:?}", e).into()),
};
if inline {
html = html
.trim_start_matches("<p>")
// pulldown_cmark finishes a paragraph with `</p>\n`
.trim_end_matches("</p>\n")
.to_string();
}
Ok(to_value(&html).unwrap())
}
} }
pub fn base64_encode<S: BuildHasher>( pub fn base64_encode<S: BuildHasher>(
@ -56,22 +62,24 @@ pub fn base64_decode<S: BuildHasher>(
mod tests { mod tests {
use std::collections::HashMap; use std::collections::HashMap;
use tera::to_value; use tera::{to_value, Filter};
use super::{base64_decode, base64_encode, markdown}; use super::{base64_decode, base64_encode, MarkdownFilter};
use config::Config;
#[test] #[test]
fn markdown_filter() { fn markdown_filter() {
let result = markdown(&to_value(&"# Hey").unwrap(), &HashMap::new()); let result = MarkdownFilter::new(Config::default())
.filter(&to_value(&"# Hey").unwrap(), &HashMap::new());
assert!(result.is_ok()); assert!(result.is_ok());
assert_eq!(result.unwrap(), to_value(&"<h1>Hey</h1>\n").unwrap()); assert_eq!(result.unwrap(), to_value(&"<h1 id=\"hey\">Hey</h1>\n").unwrap());
} }
#[test] #[test]
fn markdown_filter_inline() { fn markdown_filter_inline() {
let mut args = HashMap::new(); let mut args = HashMap::new();
args.insert("inline".to_string(), to_value(true).unwrap()); args.insert("inline".to_string(), to_value(true).unwrap());
let result = markdown( let result = MarkdownFilter::new(Config::default()).filter(
&to_value(&"Using `map`, `filter`, and `fold` instead of `for`").unwrap(), &to_value(&"Using `map`, `filter`, and `fold` instead of `for`").unwrap(),
&args, &args,
); );
@ -84,7 +92,7 @@ mod tests {
fn markdown_filter_inline_tables() { fn markdown_filter_inline_tables() {
let mut args = HashMap::new(); let mut args = HashMap::new();
args.insert("inline".to_string(), to_value(true).unwrap()); args.insert("inline".to_string(), to_value(true).unwrap());
let result = markdown( let result = MarkdownFilter::new(Config::default()).filter(
&to_value( &to_value(
&r#" &r#"
|id|author_id| timestamp_created|title |content | |id|author_id| timestamp_created|title |content |
@ -100,6 +108,26 @@ mod tests {
assert!(result.unwrap().as_str().unwrap().contains("<table>")); assert!(result.unwrap().as_str().unwrap().contains("<table>"));
} }
#[test]
fn markdown_filter_use_config_options() {
let mut config = Config::default();
config.markdown.highlight_code = true;
config.markdown.smart_punctuation = true;
config.markdown.render_emoji = true;
config.markdown.external_links_target_blank = true;
let md = "Hello <https://google.com> :smile: ...";
let result =
MarkdownFilter::new(config.clone()).filter(&to_value(&md).unwrap(), &HashMap::new());
assert!(result.is_ok());
assert_eq!(result.unwrap(), to_value(&"<p>Hello <a rel=\"noopener\" target=\"_blank\" href=\"https://google.com\">https://google.com</a> 😄 …</p>\n").unwrap());
let md = "```py\ni=0\n```";
let result = MarkdownFilter::new(config).filter(&to_value(&md).unwrap(), &HashMap::new());
assert!(result.is_ok());
assert!(result.unwrap().as_str().unwrap().contains("<pre style"));
}
#[test] #[test]
fn base64_encode_filter() { fn base64_encode_filter() {
// from https://tools.ietf.org/html/rfc4648#section-10 // from https://tools.ietf.org/html/rfc4648#section-10

View file

@ -28,6 +28,7 @@ enum OutputFormat {
Toml, Toml,
Json, Json,
Csv, Csv,
Bibtex,
Plain, Plain,
} }
@ -51,6 +52,7 @@ impl FromStr for OutputFormat {
"toml" => Ok(OutputFormat::Toml), "toml" => Ok(OutputFormat::Toml),
"csv" => Ok(OutputFormat::Csv), "csv" => Ok(OutputFormat::Csv),
"json" => Ok(OutputFormat::Json), "json" => Ok(OutputFormat::Json),
"bibtex" => Ok(OutputFormat::Bibtex),
"plain" => Ok(OutputFormat::Plain), "plain" => Ok(OutputFormat::Plain),
format => Err(format!("Unknown output format {}", format).into()), format => Err(format!("Unknown output format {}", format).into()),
} }
@ -63,6 +65,7 @@ impl OutputFormat {
OutputFormat::Json => "application/json", OutputFormat::Json => "application/json",
OutputFormat::Csv => "text/csv", OutputFormat::Csv => "text/csv",
OutputFormat::Toml => "application/toml", OutputFormat::Toml => "application/toml",
OutputFormat::Bibtex => "application/x-bibtex",
OutputFormat::Plain => "text/plain", OutputFormat::Plain => "text/plain",
}) })
} }
@ -148,7 +151,7 @@ fn get_output_format_from_args(
let format_arg = optional_arg!( let format_arg = optional_arg!(
String, String,
args.get("format"), args.get("format"),
"`load_data`: `format` needs to be an argument with a string value, being one of the supported `load_data` file types (csv, json, toml, plain)" "`load_data`: `format` needs to be an argument with a string value, being one of the supported `load_data` file types (csv, json, toml, bibtex, plain)"
); );
if let Some(format) = format_arg { if let Some(format) = format_arg {
@ -165,11 +168,11 @@ fn get_output_format_from_args(
}; };
// Always default to Plain if we don't know what it is // Always default to Plain if we don't know what it is
OutputFormat::from_str(from_extension).or_else(|_| Ok(OutputFormat::Plain)) OutputFormat::from_str(from_extension).or(Ok(OutputFormat::Plain))
} }
/// A Tera function to load data from a file or from a URL /// A Tera function to load data from a file or from a URL
/// Currently the supported formats are json, toml, csv and plain text /// Currently the supported formats are json, toml, csv, bibtex and plain text
#[derive(Debug)] #[derive(Debug)]
pub struct LoadData { pub struct LoadData {
base_path: PathBuf, base_path: PathBuf,
@ -223,6 +226,7 @@ impl TeraFn for LoadData {
OutputFormat::Toml => load_toml(data), OutputFormat::Toml => load_toml(data),
OutputFormat::Csv => load_csv(data), OutputFormat::Csv => load_csv(data),
OutputFormat::Json => load_json(data), OutputFormat::Json => load_json(data),
OutputFormat::Bibtex => load_bibtex(data),
OutputFormat::Plain => to_value(data).map_err(|e| e.into()), OutputFormat::Plain => to_value(data).map_err(|e| e.into()),
}; };
@ -252,6 +256,47 @@ fn load_toml(toml_data: String) -> Result<Value> {
} }
} }
/// Parse a BIBTEX string and convert it to a Tera Value
fn load_bibtex(bibtex_data: String) -> Result<Value> {
let bibtex_model = nom_bibtex::Bibtex::parse(&bibtex_data).map_err(|e| format!("{:?}", e))?;
let mut bibtex_map = Map::new();
let preambles_array =
bibtex_model.preambles().iter().map(|v| Value::String(v.to_string())).collect();
bibtex_map.insert(String::from("preambles"), Value::Array(preambles_array));
let comments_array =
bibtex_model.comments().iter().map(|v| Value::String(v.to_string())).collect();
bibtex_map.insert(String::from("comments"), Value::Array(comments_array));
let mut variables_map = Map::new();
for (key, val) in bibtex_model.variables() {
variables_map.insert(key.to_string(), Value::String(val.to_string()));
}
bibtex_map.insert(String::from("variables"), Value::Object(variables_map));
let bibliographies_array = bibtex_model
.bibliographies()
.iter()
.map(|b| {
let mut m = Map::new();
m.insert(String::from("entry_type"), Value::String(b.entry_type().to_string()));
m.insert(String::from("citation_key"), Value::String(b.citation_key().to_string()));
let mut tags = Map::new();
for (key, val) in b.tags() {
tags.insert(key.to_lowercase().to_string(), Value::String(val.to_string()));
}
m.insert(String::from("tags"), Value::Object(tags));
Value::Object(m)
})
.collect();
bibtex_map.insert(String::from("bibliographies"), Value::Array(bibliographies_array));
let bibtex_value: Value = Value::Object(bibtex_map);
to_value(bibtex_value).map_err(|err| err.into())
}
/// Parse a CSV string and convert it to a Tera Value /// Parse a CSV string and convert it to a Tera Value
/// ///
/// An example csv file `example.csv` could be: /// An example csv file `example.csv` could be:

View file

@ -39,7 +39,7 @@ impl TeraFn for Trans {
let term = self let term = self
.config .config
.get_translation(lang, key) .get_translation(lang, key)
.map_err(|e| Error::chain("Failed to retreive term translation", e))?; .map_err(|e| Error::chain("Failed to retrieve term translation", e))?;
Ok(to_value(term).unwrap()) Ok(to_value(term).unwrap())
} }
@ -331,7 +331,7 @@ impl GetTaxonomyUrl {
} }
taxonomies.insert(format!("{}-{}", taxo.kind.name, taxo.kind.lang), items); taxonomies.insert(format!("{}-{}", taxo.kind.name, taxo.kind.lang), items);
} }
Self { taxonomies, default_lang: default_lang.to_string(), slugify: slugify } Self { taxonomies, default_lang: default_lang.to_string(), slugify }
} }
} }
impl TeraFn for GetTaxonomyUrl { impl TeraFn for GetTaxonomyUrl {
@ -735,7 +735,7 @@ title = "A title"
let config = Config::parse(TRANS_CONFIG).unwrap(); let config = Config::parse(TRANS_CONFIG).unwrap();
let error = Trans::new(config).call(&args).unwrap_err(); let error = Trans::new(config).call(&args).unwrap_err();
assert_eq!("Failed to retreive term translation", format!("{}", error)); assert_eq!("Failed to retrieve term translation", format!("{}", error));
} }
#[test] #[test]
@ -746,7 +746,7 @@ title = "A title"
let config = Config::parse(TRANS_CONFIG).unwrap(); let config = Config::parse(TRANS_CONFIG).unwrap();
let error = Trans::new(config).call(&args).unwrap_err(); let error = Trans::new(config).call(&args).unwrap_err();
assert_eq!("Failed to retreive term translation", format!("{}", error)); assert_eq!("Failed to retrieve term translation", format!("{}", error));
} }
#[test] #[test]

View file

@ -36,7 +36,6 @@ lazy_static! {
("internal/alias.html", include_str!("builtins/internal/alias.html")), ("internal/alias.html", include_str!("builtins/internal/alias.html")),
]) ])
.unwrap(); .unwrap();
tera.register_filter("markdown", filters::markdown);
tera.register_filter("base64_encode", filters::base64_encode); tera.register_filter("base64_encode", filters::base64_encode);
tera.register_filter("base64_decode", filters::base64_decode); tera.register_filter("base64_decode", filters::base64_decode);
tera tera

View file

@ -1,12 +1,32 @@
use serde::{Deserialize, Deserializer}; use serde::{Deserialize, Deserializer};
use serde_derive::Deserialize;
use tera::{Map, Value}; use tera::{Map, Value};
/// Used as an attribute when we want to convert from TOML to a string date /// Used as an attribute when we want to convert from TOML to a string date
/// If a TOML datetime isn't present, it will accept a string and push it through
/// TOML's date time parser to ensure only valid dates are accepted.
/// Inspired by this proposal: https://github.com/alexcrichton/toml-rs/issues/269
pub fn from_toml_datetime<'de, D>(deserializer: D) -> Result<Option<String>, D::Error> pub fn from_toml_datetime<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
{ {
toml::value::Datetime::deserialize(deserializer).map(|s| Some(s.to_string())) use serde::de::Error;
use std::str::FromStr;
#[derive(Deserialize)]
#[serde(untagged)]
enum MaybeDatetime {
Datetime(toml::value::Datetime),
String(String),
}
match MaybeDatetime::deserialize(deserializer)? {
MaybeDatetime::Datetime(d) => Ok(Some(d.to_string())),
MaybeDatetime::String(s) => match toml::value::Datetime::from_str(&s) {
Ok(d) => Ok(Some(d.to_string())),
Err(e) => Err(D::Error::custom(e)),
},
}
} }
/// Returns key/value for a converted date from TOML. /// Returns key/value for a converted date from TOML.

View file

@ -20,8 +20,8 @@ pub fn is_path_in_directory(parent: &Path, path: &Path) -> Result<bool> {
/// Create a file with the content given /// Create a file with the content given
pub fn create_file(path: &Path, content: &str) -> Result<()> { pub fn create_file(path: &Path, content: &str) -> Result<()> {
let mut file = let mut file = File::create(&path)
File::create(&path).map_err(|e| Error::chain(format!("Failed to create {:?}", path), e))?; .map_err(|e| Error::chain(format!("Failed to create file {}", path.display()), e))?;
file.write_all(content.as_bytes())?; file.write_all(content.as_bytes())?;
Ok(()) Ok(())
} }
@ -62,7 +62,7 @@ pub fn read_file(path: &Path) -> Result<String> {
/// Return the content of a file, with error handling added. /// Return the content of a file, with error handling added.
/// The default error message is overwritten by the message given. /// The default error message is overwritten by the message given.
/// That means it is allocation 2 strings, oh well /// That means it is allocating 2 strings, oh well
pub fn read_file_with_error(path: &Path, message: &str) -> Result<String> { pub fn read_file_with_error(path: &Path, message: &str) -> Result<String> {
let res = read_file(&path); let res = read_file(&path);
if res.is_ok() { if res.is_ok() {
@ -101,7 +101,9 @@ pub fn copy_file(src: &Path, dest: &PathBuf, base_path: &PathBuf, hard_link: boo
let target_path = dest.join(relative_path); let target_path = dest.join(relative_path);
if let Some(parent_directory) = target_path.parent() { if let Some(parent_directory) = target_path.parent() {
create_dir_all(parent_directory)?; create_dir_all(parent_directory).map_err(|e| {
Error::chain(format!("Was not able to create folder {}", parent_directory.display()), e)
})?;
} }
copy_file_if_needed(src, &target_path, hard_link) copy_file_if_needed(src, &target_path, hard_link)
@ -113,7 +115,9 @@ pub fn copy_file(src: &Path, dest: &PathBuf, base_path: &PathBuf, hard_link: boo
/// 3. Its filesize is identical to that of the src file. /// 3. Its filesize is identical to that of the src file.
pub fn copy_file_if_needed(src: &Path, dest: &PathBuf, hard_link: bool) -> Result<()> { pub fn copy_file_if_needed(src: &Path, dest: &PathBuf, hard_link: bool) -> Result<()> {
if let Some(parent_directory) = dest.parent() { if let Some(parent_directory) = dest.parent() {
create_dir_all(parent_directory)?; create_dir_all(parent_directory).map_err(|e| {
Error::chain(format!("Was not able to create folder {}", parent_directory.display()), e)
})?;
} }
if hard_link { if hard_link {
@ -125,11 +129,25 @@ pub fn copy_file_if_needed(src: &Path, dest: &PathBuf, hard_link: bool) -> Resul
let target_metadata = metadata(&dest)?; let target_metadata = metadata(&dest)?;
let target_mtime = FileTime::from_last_modification_time(&target_metadata); let target_mtime = FileTime::from_last_modification_time(&target_metadata);
if !(src_mtime == target_mtime && src_metadata.len() == target_metadata.len()) { if !(src_mtime == target_mtime && src_metadata.len() == target_metadata.len()) {
copy(src, &dest)?; copy(src, &dest).map_err(|e| {
Error::chain(
format!(
"Was not able to copy file {} to {}",
src.display(),
dest.display()
),
e,
)
})?;
set_file_mtime(&dest, src_mtime)?; set_file_mtime(&dest, src_mtime)?;
} }
} else { } else {
copy(src, &dest)?; copy(src, &dest).map_err(|e| {
Error::chain(
format!("Was not able to copy file {} to {}", src.display(), dest.display()),
e,
)
})?;
set_file_mtime(&dest, src_mtime)?; set_file_mtime(&dest, src_mtime)?;
} }
} }
@ -146,7 +164,16 @@ pub fn copy_directory(src: &PathBuf, dest: &PathBuf, hard_link: bool) -> Result<
create_directory(&target_path)?; create_directory(&target_path)?;
} }
} else { } else {
copy_file(entry.path(), dest, src, hard_link)?; copy_file(entry.path(), dest, src, hard_link).map_err(|e| {
Error::chain(
format!(
"Was not able to copy file {} to {}",
entry.path().display(),
dest.display()
),
e,
)
})?;
} }
} }
Ok(()) Ok(())

View file

@ -33,8 +33,8 @@ uses the filename to detect the language:
- `content/an-article.md`: this will be the default language - `content/an-article.md`: this will be the default language
- `content/an-article.fr.md`: this will be in French - `content/an-article.fr.md`: this will be in French
If the language code in the filename does not correspond to one of the languages configured, If the language code in the filename does not correspond to one of the languages or
an error will be shown. the default language configured, an error will be shown.
If your default language has an `_index.md` in a directory, you will need to add an `_index.{code}.md` If your default language has an `_index.md` in a directory, you will need to add an `_index.{code}.md`
file with the desired front-matter options as there is no language fallback. file with the desired front-matter options as there is no language fallback.
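As a minimal sketch, the matching `config.toml` for the example above might declare the default language and the additional French language like this (the exact options available on each language entry depend on your Zola version):
```toml
# Language used for files without a language code, e.g. `an-article.md`
default_language = "en"

# Additional languages; `an-article.fr.md` will be matched to `code = "fr"`
languages = [
    {code = "fr"},
]
```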

View file

@ -79,6 +79,9 @@ by triple pluses (`+++`).
Although none of the front matter variables are mandatory, the opening and closing `+++` are required. Although none of the front matter variables are mandatory, the opening and closing `+++` are required.
Note that even though the use of TOML is encouraged, YAML front matter is also supported to ease porting
legacy content. In this case the embedded metadata must be enclosed by triple minuses (`---`).
Here is an example page with all the available variables. The values provided below are the Here is an example page with all the available variables. The values provided below are the
default values. default values.

View file

@ -18,6 +18,9 @@ Any non-Markdown file in a section directory is added to the `assets` collection
[content overview](@/documentation/content/overview.md#asset-colocation). These files are then available in the [content overview](@/documentation/content/overview.md#asset-colocation). These files are then available in the
Markdown file using relative links. Markdown file using relative links.
## Drafting
Just like pages, sections can be drafted by setting the `draft` option in their front matter; by default they are not. When a section is drafted, its descendants (pages, subsections and assets) will not be processed unless the `--drafts` flag is passed. Note that even pages that are not drafted themselves will not be processed if one of their parent sections is drafted.
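For example, a drafted section could use a minimal `_index.md` front matter like the following sketch; any of the usual section variables can be added alongside it:
```toml
+++
title = "Work in progress"
# This section and everything under it is skipped unless `--drafts` is passed
draft = true
+++
```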
## Front matter ## Front matter
The `_index.md` file within a directory defines the content and metadata for that section. To set The `_index.md` file within a directory defines the content and metadata for that section. To set
@ -30,6 +33,9 @@ to your templates through the `section.content` variable.
Although none of the front matter variables are mandatory, the opening and closing `+++` are required. Although none of the front matter variables are mandatory, the opening and closing `+++` are required.
Note that even though the use of TOML is encouraged, YAML front matter is also supported to ease porting
legacy content. In this case the embedded metadata must be enclosed by triple minuses (`---`).
Here is an example `_index.md` with all the available variables. The values provided below are the Here is an example `_index.md` with all the available variables. The values provided below are the
default values. default values.
@ -39,6 +45,9 @@ title = ""
description = "" description = ""
# A draft section is only loaded if the `--drafts` flag is passed to `zola build`, `zola serve` or `zola check`.
draft = false
# Used to sort pages by "date", "weight" or "none". See below for more information. # Used to sort pages by "date", "weight" or "none". See below for more information.
sort_by = "none" sort_by = "none"

View file

@ -134,6 +134,24 @@ If you want to have some content that looks like a shortcode but not have Zola t
you will need to escape it by using `{%/*` and `*/%}` instead of `{%` and `%}`. You won't need to escape you will need to escape it by using `{%/*` and `*/%}` instead of `{%` and `%}`. You won't need to escape
anything else until the closing tag. anything else until the closing tag.
### Invocation Count
Every shortcode's context includes a variable named `nth` that tracks how many times that particular shortcode has
been invoked in a given Markdown file. Given a shortcode `true_statement.html` template:
```jinja2
<p id="number{{ nth }}">{{ value }} is equal to {{ nth }}.</p>
```
It could be used in our Markdown as follows:
```md
{{/* true_statement(value=1) */}}
{{/* true_statement(value=2) */}}
```
This is useful when implementing custom markup for features such as sidenotes or end notes.
## Built-in shortcodes ## Built-in shortcodes
Zola comes with a few built-in shortcodes. If you want to override a default shortcode template, Zola comes with a few built-in shortcodes. If you want to override a default shortcode template,

View file

@ -3,7 +3,62 @@ title = "Taxonomies"
weight = 90 weight = 90
+++ +++
Zola has built-in support for taxonomies. Zola has built-in support for taxonomies. Taxonomies are a way for users to group content according to user-defined categories.
## Definitions
- Taxonomy: A category that can be used to group content
- Term: A specific group within a taxonomy
- Value: A piece of content that can be associated with a term
## Example: a movie website
Imagine that you want to make a website to display information about various movies. In that case you could use the following taxonomies:
- Director
- Genres
- Awards
- Release year
Then, at build time, Zola can create pages for each taxonomy listing all of its known terms, as well as a page for each term in a taxonomy, listing all of the pieces of content associated with that term.
Imagine again we have the following movies:
```
- Shape of water <--- Value
- Director <--- Taxonomy
- Guillermo Del Toro <--- Term
- Genres <--- Taxonomy
- Thriller <--- Term
- Drama <--- Term
- Awards <--- Taxonomy
- Golden globe <--- Term
- Academy award <--- Term
- BAFTA <--- Term
- Release year <--- Taxonomy
- 2017 <--- Term
- The Room <--- Value
- Director <--- Taxonomy
- Tommy Wiseau <--- Term
- Genres <--- Taxonomy
- Romance <--- Term
- Drama <--- Term
- Release Year <--- Taxonomy
- 2003 <--- Term
- Bright <--- Value
- Director <--- Taxonomy
- David Ayer <--- Term
- Genres <--- Taxonomy
- Fantasy <--- Term
- Action <--- Term
- Awards <--- Taxonomy
- California on Location Awards <--- Term
- Release Year <--- Taxonomy
- 2017 <--- Term
```
In this example the page for `Release year` would include links to pages for both 2003 and 2017, where the page for 2017 would list both Shape of Water and Bright.
## Configuration ## Configuration
@ -23,16 +78,30 @@ Insert into the configuration file (config.toml):
**Example 1:** (one language) **Example 1:** (one language)
```toml ```toml
taxonomies = [ name = "categories", rss = true ] taxonomies = [
{ name = "director", feed = true},
{ name = "genres", feed = true},
{ name = "awards", feed = true},
{ name = "release-year", feed = true},
]
``` ```
**Example 2:** (multilingual site) **Example 2:** (multilingual site)
```toml ```toml
taxonomies = [ taxonomies = [
{name = "tags", lang = "fr"}, {name = "director", feed = true, lang = "fr"},
{name = "tags", lang = "eo"}, {name = "director", feed = true, lang = "eo"},
{name = "tags", lang = "en"}, {name = "director", feed = true, lang = "en"},
{name = "genres", feed = true, lang = "fr"},
{name = "genres", feed = true, lang = "eo"},
{name = "genres", feed = true, lang = "en"},
{name = "awards", feed = true, lang = "fr"},
{name = "awards", feed = true, lang = "eo"},
{name = "awards", feed = true, lang = "en"},
{name = "release-year", feed = true, lang = "fr"},
{name = "release-year", feed = true, lang = "eo"},
{name = "release-year", feed = true, lang = "en"},
] ]
``` ```
@ -44,11 +113,13 @@ Once the configuration is done, you can then set taxonomies in your content and
```toml ```toml
+++ +++
title = "Writing a static-site generator in Rust" title = "Shape of water"
date = 2019-08-15 date = 2019-08-15 # date of the post, not the movie
[taxonomies] [taxonomies]
tags = ["rust", "web"] director=["Guillermo Del Toro"]
categories = ["programming"] genres=["Thriller","Drama"]
awards=["Golden Globe", "Academy award", "BAFTA"]
release-year = ["2017"]
+++ +++
``` ```

View file

@ -36,13 +36,6 @@ default_language = "en"
# The site theme to use. # The site theme to use.
theme = "" theme = ""
# When set to "true", all code blocks are highlighted.
highlight_code = false
# The theme to use for code highlighting.
# See below for list of allowed values.
highlight_theme = "base16-ocean-dark"
# When set to "true", a feed is automatically generated. # When set to "true", a feed is automatically generated.
generate_feed = false generate_feed = false
@ -86,6 +79,9 @@ languages = []
# Sass files in theme directories are always compiled. # Sass files in theme directories are always compiled.
compile_sass = false compile_sass = false
# When set to "true", the generated HTML files are minified.
minify_html = false
# A list of glob patterns specifying asset files to ignore when the content # A list of glob patterns specifying asset files to ignore when the content
# directory is processed. Defaults to none, which means that all asset files are # directory is processed. Defaults to none, which means that all asset files are
# copied over to the `public` directory. # copied over to the `public` directory.
@ -96,6 +92,36 @@ ignored_content = []
# A list of directories used to search for additional `.sublime-syntax` files. # A list of directories used to search for additional `.sublime-syntax` files.
extra_syntaxes = [] extra_syntaxes = []
# You can override the default output directory `public` by setting another value.
# output_dir = "docs"
# Configuration of the Markdown rendering
[markdown]
# When set to "true", all code blocks are highlighted.
highlight_code = false
# The theme to use for code highlighting.
# See below for list of allowed values.
highlight_theme = "base16-ocean-dark"
# When set to "true", emoji aliases translated to their corresponding
# Unicode emoji equivalent in the rendered Markdown files. (e.g.: :smile: => 😄)
render_emoji = false
# Whether external links are to be opened in a new tab
# If this is true, a `rel="noopener"` will always automatically be added for security reasons
external_links_target_blank = false
# Whether to set rel="nofollow" for all external links
external_links_no_follow = false
# Whether to set rel="noreferrer" for all external links
external_links_no_referrer = false
# Whether smart punctuation is enabled (changing quotes, dashes, dots into their typographic forms)
# For example, `...` into `…`, `"quote"` into `“curly”`, etc.
smart_punctuation = false
# Configuration of the link checker. # Configuration of the link checker.
[link_checker] [link_checker]
# Skip link checking for external URLs that start with these prefixes # Skip link checking for external URLs that start with these prefixes

View file

@ -109,7 +109,7 @@ If you only need the metadata of the section, you can pass `metadata_only=true`
{% set section = get_section(path="blog/_index.md", metadata_only=true) %} {% set section = get_section(path="blog/_index.md", metadata_only=true) %}
``` ```
### ` get_url` ### `get_url`
Gets the permalink for the given path. Gets the permalink for the given path.
If the path starts with `@/`, it will be treated as an internal If the path starts with `@/`, it will be treated as an internal
link like the ones used in Markdown, starting from the root `content` directory. link like the ones used in Markdown, starting from the root `content` directory.
@ -146,7 +146,7 @@ In the case of non-internal links, you can also add a cachebust of the format `?
by passing `cachebust=true` to the `get_url` function. by passing `cachebust=true` to the `get_url` function.
### 'get_file_hash` ### `get_file_hash`
Gets the hash digest for a static file. Supported hashes are SHA-256, SHA-384 (default) and SHA-512. Requires `path`. The `sha_type` key is optional and must be one of 256, 384 or 512. Gets the hash digest for a static file. Supported hashes are SHA-256, SHA-384 (default) and SHA-512. Requires `path`. The `sha_type` key is optional and must be one of 256, 384 or 512.
@ -202,7 +202,7 @@ items: Array<TaxonomyTerm>;
See the [Taxonomies documentation](@/documentation/templates/taxonomies.md) for a full documentation of those types. See the [Taxonomies documentation](@/documentation/templates/taxonomies.md) for a full documentation of those types.
### `load_data` ### `load_data`
Loads data from a file or URL. Supported file types include *toml*, *json* and *csv*. Loads data from a file or URL. Supported file types include *toml*, *json*, *csv* and *bibtex*.
Any other file type will be loaded as plain text. Any other file type will be loaded as plain text.
The `path` argument specifies the path to the data file relative to your base directory, where your `config.toml` is. The `path` argument specifies the path to the data file relative to your base directory, where your `config.toml` is.
@ -213,7 +213,7 @@ As a security precaution, if this file is outside the main site directory, your
``` ```
The optional `format` argument allows you to specify and override which data type is contained The optional `format` argument allows you to specify and override which data type is contained
within the file specified in the `path` argument. Valid entries are `toml`, `json`, `csv` within the file specified in the `path` argument. Valid entries are `toml`, `json`, `csv`, `bibtex`
or `plain`. If the `format` argument isn't specified, then the path extension is used. or `plain`. If the `format` argument isn't specified, then the path extension is used.
```jinja2 ```jinja2
@ -251,6 +251,58 @@ template:
} }
``` ```
The `bibtex` format loads data into a structure matching the format used by the
[nom-bibtex crate](https://crates.io/crates/nom-bibtex). The following is an example of data
in bibtex format:
```
@preamble{"A bibtex preamble" # " this is."}
@Comment{
Here is a comment.
}
Another comment!
@string(name = "Vincent Prouillet")
@string(github = "https://github.com/getzola/zola")
@misc {my_citation_key,
author= name,
title = "Zola",
note = "github: " # github
} }
```
The following is the json-equivalent format of the produced bibtex data structure:
```json
{
"preambles": ["A bibtex preamble this is."],
"comments": ["Here is a comment.", "Another comment!"],
"variables": {
"name": "Vincent Prouillet",
"github": "https://github.com/getzola/zola"
},
"bibliographies": [
{
"entry_type": "misc",
"citation_key": "my_citation_key",
"tags": {
"author": "Vincent Prouillet",
"title": "Zola",
"note": "github: https://github.com/getzola/zola"
}
}
]
}
```
Finally, the bibtex data can be accessed from the template as follows:
```jinja2
{% set tags = data.bibliographies[0].tags %}
This was generated using {{ tags.title }}, authored by {{ tags.author }}.
```
#### Remote content #### Remote content
Instead of using a file, you can load data from a remote URL. This can be done by specifying a `url` parameter Instead of using a file, you can load data from a remote URL. This can be done by specifying a `url` parameter

View file

@ -11,4 +11,6 @@ and the default is what most sites want:
```jinja2 ```jinja2
User-agent: * User-agent: *
Allow: /
Sitemap: {{ get_url(path="sitemap.xml") }}
``` ```

Binary files not shown.

View file

@ -1,6 +0,0 @@
+++
template = "themes.html"
sort_by = "date"
+++

Binary files not shown.

View file

@@ -44,7 +44,6 @@ pub fn build_cli() -> App<'static, 'static> {
Arg::with_name("output_dir")
.short("o")
.long("output-dir")
-.default_value("public")
.takes_value(true)
.help("Outputs the generated site in the given path"),
Arg::with_name("drafts")
@@ -68,7 +67,6 @@ pub fn build_cli() -> App<'static, 'static> {
Arg::with_name("output_dir")
.short("o")
.long("output-dir")
-.default_value("public")
.takes_value(true)
.help("Outputs the generated site in the given path"),
Arg::with_name("base_url")

View file

@@ -9,11 +9,13 @@ pub fn build(
root_dir: &Path,
config_file: &Path,
base_url: Option<&str>,
-output_dir: &Path,
+output_dir: Option<&Path>,
include_drafts: bool,
) -> Result<()> {
let mut site = Site::new(root_dir, config_file)?;
-site.set_output_path(output_dir);
+if let Some(output_dir) = output_dir {
+site.set_output_path(output_dir);
+}
if let Some(b) = base_url {
site.set_base_url(b.to_string());
}
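Together with the CLI change above (dropping the hard-coded `public` default), the effect is that an explicit `--output-dir` wins and anything else falls back to what the configuration resolves to. A rough, self-contained sketch of that precedence; the helper name and the plain-string config value are illustrative, not Zola's actual internals:

```rust
use std::path::PathBuf;

// Illustrative only: pick the output directory from an optional CLI flag,
// falling back to the value coming from config.toml (itself defaulting to "public").
fn resolve_output_dir(cli_value: Option<&str>, config_value: &str) -> PathBuf {
    cli_value.map(PathBuf::from).unwrap_or_else(|| PathBuf::from(config_value))
}

fn main() {
    assert_eq!(resolve_output_dir(None, "public"), PathBuf::from("public"));
    assert_eq!(resolve_output_dir(Some("dist"), "public"), PathBuf::from("dist"));
}
```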

View file

@@ -15,13 +15,14 @@ base_url = "%BASE_URL%"
# Whether to automatically compile all Sass files in the sass directory
compile_sass = %COMPILE_SASS%
+# Whether to build a search index to be used later on by a JavaScript library
+build_search_index = %SEARCH%
+[markdown]
# Whether to do syntax highlighting
# Theme can be customised by setting the `highlight_theme` variable to a theme supported by Zola
highlight_code = %HIGHLIGHT%
-# Whether to build a search index to be used later on by a JavaScript library
-build_search_index = %SEARCH%
[extra]
# Put all your custom variables here
"#;

View file

@@ -22,11 +22,11 @@
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use std::fs::{read_dir, remove_dir_all};
+use std::net::{SocketAddrV4, TcpListener};
use std::path::{Path, PathBuf};
use std::sync::mpsc::channel;
use std::thread;
use std::time::{Duration, Instant};
-use std::net::{SocketAddrV4, TcpListener};
use hyper::header;
use hyper::service::{make_service_fn, service_fn};
@@ -173,7 +173,7 @@ fn create_new_site(
root_dir: &Path,
interface: &str,
interface_port: u16,
-output_dir: &Path,
+output_dir: Option<&Path>,
base_url: &str,
config_file: &Path,
include_drafts: bool,
@@ -192,7 +192,9 @@ fn create_new_site(
site.enable_serve_mode();
site.set_base_url(base_url);
-site.set_output_path(output_dir);
+if let Some(output_dir) = output_dir {
+site.set_output_path(output_dir);
+}
if include_drafts {
site.include_drafts();
}
@@ -212,7 +214,7 @@ pub fn serve(
root_dir: &Path,
interface: &str,
interface_port: u16,
-output_dir: &Path,
+output_dir: Option<&Path>,
base_url: &str,
config_file: &Path,
watch_only: bool,
@@ -236,7 +238,7 @@ pub fn serve(
// Stop right there if we can't bind to the address
let bind_address: SocketAddrV4 = address.parse().unwrap();
if (TcpListener::bind(&bind_address)).is_err() {
-return Err(format!("Cannot start server on address {}.", address))?;
+return Err(format!("Cannot start server on address {}.", address).into());
}
// An array of (path, bool, bool) where the path should be watched for changes, and the boolean value
@@ -277,7 +279,7 @@ pub fn serve(
let ws_port = site.live_reload;
let ws_address = format!("{}:{}", interface, ws_port.unwrap());
-let output_path = Path::new(output_dir).to_path_buf();
+let output_path = site.output_path.clone();
// output path is going to need to be moved later on, so clone it for the
// http closure to avoid contention.
@@ -440,10 +442,7 @@ pub fn serve(
loop {
match rx.recv() {
Ok(event) => {
-let can_do_fast_reload = match event {
-Remove(_) => false,
-_ => true,
-};
+let can_do_fast_reload = !matches!(event, Remove(_));
match event {
// Intellij does weird things on edit, chmod is there to count those changes
@@ -503,10 +502,8 @@ pub fn serve(
site = s;
}
}
-} else {
-if let Some(s) = recreate_site() {
-site = s;
-}
+} else if let Some(s) = recreate_site() {
+site = s;
}
}
(ChangeKind::Templates, partial_path) => {
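The `can_do_fast_reload` cleanup above leans on the `matches!` macro. A tiny standalone sketch of the equivalence, using a stand-in enum rather than the real watcher event type:

```rust
// Stand-in for the watcher's event type; only here to illustrate the rewrite.
enum Event {
    Remove(String),
    Write(String),
}

fn can_do_fast_reload(event: &Event) -> bool {
    // Equivalent to the old `match event { Remove(_) => false, _ => true }`.
    !matches!(event, Event::Remove(_))
}

fn main() {
    assert!(!can_do_fast_reload(&Event::Remove("index.md".into())));
    assert!(can_do_fast_reload(&Event::Write("index.md".into())));
}
```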

View file

@@ -1,5 +1,5 @@
use std::env;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
use std::time::Instant;
use utils::net::{get_available_port, port_is_available};
@@ -37,12 +37,12 @@ fn main() {
("build", Some(matches)) => {
console::info("Building site...");
let start = Instant::now();
-let output_dir = PathBuf::from(matches.value_of("output_dir").unwrap());
+let output_dir = matches.value_of("output_dir").map(|output_dir| Path::new(output_dir));
match cmd::build(
&root_dir,
&config_file,
matches.value_of("base_url"),
-&output_dir,
+output_dir,
matches.is_present("drafts"),
) {
Ok(()) => console::report_elapsed_time(start),
@@ -80,14 +80,14 @@ fn main() {
::std::process::exit(1);
}
}
-let output_dir = PathBuf::from(matches.value_of("output_dir").unwrap());
+let output_dir = matches.value_of("output_dir").map(|output_dir| Path::new(output_dir));
let base_url = matches.value_of("base_url").unwrap();
console::info("Building site...");
match cmd::serve(
&root_dir,
interface,
port,
-&output_dir,
+output_dir,
base_url,
&config_file,
watch_only,
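In isolation, the `Option<&Path>` conversion in these two call sites is just clap's `value_of` (an `Option<&str>` in clap 2.x) mapped through `Path::new`. A condensed sketch:

```rust
use std::path::Path;

fn main() {
    // Pretend this came from `matches.value_of("output_dir")`.
    let cli_value: Option<&str> = Some("public_dev");

    // Mirrors `matches.value_of("output_dir").map(|output_dir| Path::new(output_dir))`.
    let output_dir: Option<&Path> = cli_value.map(Path::new);

    match output_dir {
        Some(dir) => println!("building into {}", dir.display()),
        None => println!("falling back to the value from config.toml"),
    }
}
```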

View file

@@ -0,0 +1,4 @@
+++
title="Drafted section"
draft=true
+++

View file

@@ -0,0 +1,4 @@
+++
title="drafted page in drafted section"
draft=true
+++

View file

@@ -0,0 +1,3 @@
+++
title="non draft page"
+++

View file

@@ -0,0 +1,3 @@
+++
title="subsection of a secret section"
+++

View file

@@ -0,0 +1,3 @@
+++
title="Is anyone ever going to read this?"
+++