Merge pull request #290 from Keats/next

Next version
Vincent Prouillet 2018-08-03 21:34:03 +02:00 committed by GitHub
commit 8fcb4c3ae6
112 changed files with 4430 additions and 1858 deletions

View file

@ -21,7 +21,7 @@ matrix:
rust: nightly
# The earliest stable Rust version that works
- env: TARGET=x86_64-unknown-linux-gnu
rust: 1.23.0
rust: 1.27.0
before_install: set -e

View file

@ -1,5 +1,29 @@
# Changelog
## 0.4.0 (unreleased)
### Breaking
- Taxonomies have been rewritten from scratch to allow custom ones with RSS and pagination
- `order` sorting has been removed in favour of only having `weight`
- `page.next/page.previous` have been renamed to `page.later/page.earlier` and `page.heavier/page.lighter` depending on the sort method
### Others
- Fix `serve` not working with the config flag
- Websocket port on `live` will now get the first available port instead of a fixed one
- Rewrite markdown rendering to fix all known issues with shortcodes
- Add array arguments to shortcodes and allow single-quote/backtick strings
- Co-located assets are now permalinks
- Words are now counted using Unicode rather than whitespace
- Aliases can now point directly to specific HTML files
- Add `year`, `month` and `day` variables to pages with a date
- Fix panic when live reloading a change on a file without extensions
- Add image resizing support
- Add a 404 template
- Enable the preserve-order feature of Tera
- Add an external link checker
- Add `get_taxonomy` global function to return the full taxonomy
## 0.3.4 (2018-06-22)
- `cargo update` as some dependencies didn't compile with the current Rust version

Cargo.lock (generated, 1882 changed lines)

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
[package]
name = "gutenberg"
version = "0.3.4"
version = "0.4.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
license = "MIT"
readme = "README.md"
@ -24,9 +24,7 @@ term-painter = "0.2"
# Used in init to ensure the url given as base_url is a valid one
url = "1.5"
# Below is for the serve cmd
staticfile = "0.5"
iron = "0.6"
mount = "0.4"
actix-web = { version = "0.7", default-features = false, features = [] }
notify = "4"
ws = "0.7"
ctrlc = "3"
@ -53,4 +51,6 @@ members = [
"components/templates",
"components/utils",
"components/search",
"components/imageproc",
"components/link_checker",
]

View file

@ -14,21 +14,24 @@ in the `docs/content` folder of the repository.
| Single binary | ✔ | ✔ | ✔ | ✕ |
| Language | Rust | Rust | Go | Python |
| Syntax highlighting | ✔ | ✔ | ✔ | ✔ |
| Sass compilation | ✔ | | ✔ | ✔ |
| Assets co-location | ✔ | ✔ | ✔ | ✔ |
| i18n | ✕ | ✕ | ✔ | ✔ |
| Image processing | ✕ | ✕ | ✔ | ✔ |
| Image processing | ✔ | ✕ | ✔ | ✔ |
| Sane template engine | ✔ | ✔ | ✕✕✕ | ✔ |
| Themes | ✔ | ✕ | ✔ | ✔ |
| Shortcodes | ✔ | ✕ | ✔ | ✔ |
| Internal links | ✔ | ✕ | ✔ | ✔ |
| Link checker | ✔ | ✕ | ✕ | ✔ |
| Table of contents | ✔ | ✕ | ✔ | ✔ |
| Automatic header anchors | ✔ | ✕ | ✔ | ✔ |
| Aliases | ✔ | ✕ | ✔ | ✔ |
| Pagination | ✔ | ✕ | ✔ | ✔ |
| Custom taxonomies | ✕ | ✕ | ✔ | ✕ |
| Custom taxonomies | ✔ | ✕ | ✔ | ✕ |
| Search | ✔ | ✕ | ✕ | ✔ |
| Data files | ✕ | ✔ | ✔ | ✕ |
| LiveReload | ✔ | ✕ | ✔ | ✔ |
| Netlify support | ✔ | ✕ | ✔ | ✕ |
Supported content formats:
@ -38,7 +41,8 @@ Supported content formats:
- Pelican: reStructuredText, markdown, asciidoc, org-mode, whatever-you-want
Note that many features of Pelican are coming from plugins, which might be tricky
to use because of version mismatch or lacking documentation.
to use because of version mismatch or lacking documentation. Netlify supports Python
and Pipenv but you still need to install your dependencies manually.
## Contributing
As the documentation site is automatically built on commits to master, all development
@ -75,7 +79,7 @@ You can check for any updates to the current packages by running:
$ git submodule update --remote --merge
```
And finally from the root of the components/rendering crate run the following command:
And finally from the root of the components/highlighting crate run the following command:
```bash
$ cargo run --example generate_sublime synpack ../../sublime_syntaxes ../../sublime_syntaxes/newlines.packdump ../../sublime_syntaxes/nonewlines.packdump

View file

@ -10,7 +10,7 @@ environment:
matrix:
- target: x86_64-pc-windows-msvc
RUST_VERSION: 1.25.0
RUST_VERSION: 1.27.0
- target: x86_64-pc-windows-msvc
RUST_VERSION: stable

View file

@ -12,7 +12,7 @@ use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use toml::{Value as Toml};
use toml::Value as Toml;
use chrono::Utc;
use globset::{Glob, GlobSet, GlobSetBuilder};
@ -28,6 +28,40 @@ use theme::Theme;
static DEFAULT_BASE_URL: &'static str = "http://a-website.com";
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct Taxonomy {
/// The name used in the URL, usually the plural
pub name: String,
/// If this is set, the list of pages for each individual taxonomy term will be paginated
/// by this much
pub paginate_by: Option<usize>,
pub paginate_path: Option<String>,
/// Whether to generate an RSS feed for each taxonomy term, defaults to false
pub rss: bool,
}
impl Taxonomy {
pub fn is_paginated(&self) -> bool {
if let Some(paginate_by) = self.paginate_by {
paginate_by > 0
} else {
false
}
}
}
impl Default for Taxonomy {
fn default() -> Taxonomy {
Taxonomy {
name: String::new(),
paginate_by: None,
paginate_path: None,
rss: false,
}
}
}
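
For orientation, here is a minimal standalone sketch (not part of the diff) of how an entry for the new `taxonomies` config field could be written in `config.toml` and deserialized through serde, assuming the derives shown above; the struct is re-declared locally and all values are illustrative.

```rust
extern crate toml;
#[macro_use]
extern crate serde_derive;

// Local re-declaration of the Taxonomy struct from this diff, kept minimal.
#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct Taxonomy {
    name: String,
    paginate_by: Option<usize>,
    paginate_path: Option<String>,
    rss: bool,
}

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct PartialConfig {
    // Matches the `taxonomies: Vec<Taxonomy>` field added to `Config` below.
    taxonomies: Vec<Taxonomy>,
}

fn main() {
    // Hypothetical config.toml excerpt.
    let raw = r#"
        [[taxonomies]]
        name = "tags"
        paginate_by = 5
        rss = true

        [[taxonomies]]
        name = "categories"
    "#;
    let config: PartialConfig = toml::from_str(raw).expect("valid TOML");
    assert_eq!(config.taxonomies.len(), 2);
    assert_eq!(config.taxonomies[0].name, "tags");
    assert!(config.taxonomies[0].rss);
    assert!(config.taxonomies[1].paginate_by.is_none());
}
```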
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
@ -56,10 +90,8 @@ pub struct Config {
pub generate_rss: bool,
/// The number of articles to include in the RSS feed. Defaults to 10_000
pub rss_limit: usize,
/// Whether to generate tags and individual tag pages if some pages have them. Defaults to true
pub generate_tags_pages: bool,
/// Whether to generate categories and individual tag categories if some pages have them. Defaults to true
pub generate_categories_pages: bool,
pub taxonomies: Vec<Taxonomy>,
/// Whether to compile the `sass` directory and output the css files into the static folder
pub compile_sass: bool,
@ -72,6 +104,9 @@ pub struct Config {
#[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed
pub ignored_content_globset: Option<GlobSet>,
/// Whether to check all external links for validity
pub check_external_links: bool,
/// All user params set in [extra] in the config
pub extra: HashMap<String, Toml>,
@ -191,9 +226,9 @@ impl Default for Config {
default_language: "en".to_string(),
generate_rss: false,
rss_limit: 10_000,
generate_tags_pages: true,
generate_categories_pages: true,
taxonomies: Vec::new(),
compile_sass: false,
check_external_links: false,
build_search_index: false,
ignored_content: Vec::new(),
ignored_content_globset: None,

View file

@ -3,7 +3,7 @@ use std::fs::File;
use std::io::prelude::*;
use std::path::PathBuf;
use toml::{Value as Toml};
use toml::Value as Toml;
use errors::{Result, ResultExt};

View file

@ -8,6 +8,7 @@ tera = "0.11"
serde = "1"
slug = "0.1"
rayon = "1"
chrono = "0.4"
errors = { path = "../errors" }
config = { path = "../config" }
@ -16,6 +17,6 @@ rendering = { path = "../rendering" }
front_matter = { path = "../front_matter" }
[dev-dependencies]
tempdir = "0.3"
tempfile = "3"
toml = "0.4"
globset = "0.4"

View file

@ -11,7 +11,7 @@ use std::collections::HashMap;
use config::Config;
use tera::Tera;
use front_matter::{SortBy, InsertAnchor};
use content::{Page, sort_pages, populate_previous_and_next_pages};
use content::{Page, sort_pages, populate_siblings};
fn create_pages(number: usize, sort_by: SortBy) -> Vec<Page> {
@ -23,8 +23,8 @@ fn create_pages(number: usize, sort_by: SortBy) -> Vec<Page> {
for i in 0..number {
let mut page = Page::default();
match sort_by {
SortBy::Weight => { page.meta.weight = Some(i); },
SortBy::Order => { page.meta.order = Some(i); },
SortBy::Weight => { page.meta.weight = Some(i); }
SortBy::Order => { page.meta.order = Some(i); }
_ => (),
};
page.raw_content = r#"
@ -128,17 +128,17 @@ fn bench_sorting_order(b: &mut test::Bencher) {
}
#[bench]
fn bench_populate_previous_and_next_pages(b: &mut test::Bencher) {
fn bench_populate_siblings(b: &mut test::Bencher) {
let pages = create_pages(250, SortBy::Order);
let (sorted_pages, _) = sort_pages(pages, SortBy::Order);
b.iter(|| populate_previous_and_next_pages(&sorted_pages.clone()));
b.iter(|| populate_siblings(&sorted_pages.clone()));
}
#[bench]
fn bench_page_render_html(b: &mut test::Bencher) {
let pages = create_pages(10, SortBy::Order);
let (mut sorted_pages, _) = sort_pages(pages, SortBy::Order);
sorted_pages = populate_previous_and_next_pages(&sorted_pages);
sorted_pages = populate_siblings(&sorted_pages);
let config = Config::default();
let mut tera = Tera::default();

View file

@ -2,6 +2,7 @@ extern crate tera;
extern crate slug;
extern crate serde;
extern crate rayon;
extern crate chrono;
extern crate errors;
extern crate config;
@ -10,7 +11,7 @@ extern crate rendering;
extern crate utils;
#[cfg(test)]
extern crate tempdir;
extern crate tempfile;
#[cfg(test)]
extern crate toml;
#[cfg(test)]
@ -25,4 +26,4 @@ mod sorting;
pub use file_info::FileInfo;
pub use page::Page;
pub use section::Section;
pub use sorting::{sort_pages, populate_previous_and_next_pages};
pub use sorting::{sort_pages, populate_siblings};

View file

@ -3,7 +3,7 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use chrono::Datelike;
use tera::{Tera, Context as TeraContext};
use serde::ser::{SerializeStruct, self};
use slug::slugify;
@ -14,7 +14,7 @@ use utils::fs::{read_file, find_related_assets};
use utils::site::get_reading_analytics;
use utils::templates::render_template;
use front_matter::{PageFrontMatter, InsertAnchor, split_page_content};
use rendering::{Context, Header, markdown_to_html};
use rendering::{RenderContext, Header, render_content};
use file_info::FileInfo;
@ -44,10 +44,14 @@ pub struct Page {
/// When <!-- more --> is found in the text, will take the content up to that part
/// as summary
pub summary: Option<String>,
/// The previous page, by whatever sorting is used for the index/section
pub previous: Option<Box<Page>>,
/// The next page, by whatever sorting is used for the index/section
pub next: Option<Box<Page>>,
/// The earlier page, for pages sorted by date
pub earlier: Option<Box<Page>>,
/// The later page, for pages sorted by date
pub later: Option<Box<Page>>,
/// The lighter page, for pages sorted by weight
pub lighter: Option<Box<Page>>,
/// The heavier page, for pages sorted by weight
pub heavier: Option<Box<Page>>,
/// Toc made from the headers of the markdown file
pub toc: Vec<Header>,
}
@ -68,8 +72,10 @@ impl Page {
components: vec![],
permalink: "".to_string(),
summary: None,
previous: None,
next: None,
earlier: None,
later: None,
lighter: None,
heavier: None,
toc: vec![],
}
}
@ -156,27 +162,32 @@ impl Page {
}
Ok(page)
}
/// We need access to all pages url to render links relative to content
/// so that can't happen at the same time as parsing
pub fn render_markdown(&mut self, permalinks: &HashMap<String, String>, tera: &Tera, config: &Config, anchor_insert: InsertAnchor) -> Result<()> {
let context = Context::new(
let mut context = RenderContext::new(
tera,
config.highlight_code,
config.highlight_theme.clone(),
config,
&self.permalink,
permalinks,
anchor_insert
anchor_insert,
);
let res = markdown_to_html(&self.raw_content.replacen("<!-- more -->", "<a name=\"continue-reading\"></a>", 1), &context)?;
context.tera_context.add("page", self);
let res = render_content(
&self.raw_content.replacen("<!-- more -->", "<a name=\"continue-reading\"></a>", 1),
&context,
).chain_err(|| format!("Failed to render content of {}", self.file.path.display()))?;
self.content = res.0;
self.toc = res.1;
if self.raw_content.contains("<!-- more -->") {
self.summary = Some({
let summary = self.raw_content.splitn(2, "<!-- more -->").collect::<Vec<&str>>()[0];
markdown_to_html(summary, &context)?.0
render_content(summary, &context)
.chain_err(|| format!("Failed to render content of {}", self.file.path.display()))?.0
})
}
@ -199,6 +210,15 @@ impl Page {
render_template(&tpl_name, tera, &context, &config.theme)
.chain_err(|| format!("Failed to render page '{}'", self.file.path.display()))
}
/// Creates a vectors of asset URLs.
fn serialize_assets(&self) -> Vec<String> {
self.assets.iter()
.filter_map(|asset| asset.file_name())
.filter_map(|filename| filename.to_str())
.map(|filename| self.path.clone() + filename)
.collect()
}
}
impl Default for Page {
@ -214,8 +234,10 @@ impl Default for Page {
components: vec![],
permalink: "".to_string(),
summary: None,
previous: None,
next: None,
earlier: None,
later: None,
lighter: None,
heavier: None,
toc: vec![],
}
}
@ -223,26 +245,39 @@ impl Default for Page {
impl ser::Serialize for Page {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error> where S: ser::Serializer {
let mut state = serializer.serialize_struct("page", 18)?;
let mut state = serializer.serialize_struct("page", 20)?;
state.serialize_field("content", &self.content)?;
state.serialize_field("title", &self.meta.title)?;
state.serialize_field("description", &self.meta.description)?;
state.serialize_field("date", &self.meta.date)?;
if let Some(chrono_datetime) = self.meta.date() {
let d = chrono_datetime.date();
state.serialize_field("year", &d.year())?;
state.serialize_field("month", &d.month())?;
state.serialize_field("day", &d.day())?;
} else {
state.serialize_field::<Option<usize>>("year", &None)?;
state.serialize_field::<Option<usize>>("month", &None)?;
state.serialize_field::<Option<usize>>("day", &None)?;
}
state.serialize_field("slug", &self.slug)?;
state.serialize_field("path", &self.path)?;
state.serialize_field("components", &self.components)?;
state.serialize_field("permalink", &self.permalink)?;
state.serialize_field("summary", &self.summary)?;
state.serialize_field("tags", &self.meta.tags)?;
state.serialize_field("category", &self.meta.category)?;
state.serialize_field("taxonomies", &self.meta.taxonomies)?;
state.serialize_field("extra", &self.meta.extra)?;
let (word_count, reading_time) = get_reading_analytics(&self.raw_content);
state.serialize_field("word_count", &word_count)?;
state.serialize_field("reading_time", &reading_time)?;
state.serialize_field("previous", &self.previous)?;
state.serialize_field("next", &self.next)?;
state.serialize_field("earlier", &self.earlier)?;
state.serialize_field("later", &self.later)?;
state.serialize_field("lighter", &self.lighter)?;
state.serialize_field("heavier", &self.heavier)?;
state.serialize_field("toc", &self.toc)?;
state.serialize_field("draft", &self.is_draft())?;
let assets = self.serialize_assets();
state.serialize_field("assets", &assets)?;
state.end()
}
}
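
As a standalone illustration of the new `year`/`month`/`day` fields serialized above, the same chrono accessors can be exercised on their own; the date value is arbitrary.

```rust
extern crate chrono;

use chrono::{Datelike, NaiveDate};

fn main() {
    // Mirrors the decomposition done in the serializer above.
    let date = NaiveDate::from_ymd(2018, 8, 3);
    assert_eq!(date.year(), 2018);
    assert_eq!(date.month(), 8);
    assert_eq!(date.day(), 3);
}
```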
@ -255,7 +290,7 @@ mod tests {
use std::path::Path;
use tera::Tera;
use tempdir::TempDir;
use tempfile::tempdir;
use globset::{Glob, GlobSetBuilder};
use config::Config;
@ -387,7 +422,7 @@ Hello world
#[test]
fn page_with_assets_gets_right_info() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let path = tmp_dir.path();
create_dir(&path.join("content")).expect("create content temp dir");
create_dir(&path.join("content").join("posts")).expect("create posts temp dir");
@ -401,7 +436,7 @@ Hello world
let res = Page::from_file(
nested_path.join("index.md").as_path(),
&Config::default()
&Config::default(),
);
assert!(res.is_ok());
let page = res.unwrap();
@ -413,7 +448,7 @@ Hello world
#[test]
fn page_with_assets_and_slug_overrides_path() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let path = tmp_dir.path();
create_dir(&path.join("content")).expect("create content temp dir");
create_dir(&path.join("content").join("posts")).expect("create posts temp dir");
@ -427,7 +462,7 @@ Hello world
let res = Page::from_file(
nested_path.join("index.md").as_path(),
&Config::default()
&Config::default(),
);
assert!(res.is_ok());
let page = res.unwrap();
@ -439,7 +474,7 @@ Hello world
#[test]
fn page_with_ignored_assets_filters_out_correct_files() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let path = tmp_dir.path();
create_dir(&path.join("content")).expect("create content temp dir");
create_dir(&path.join("content").join("posts")).expect("create posts temp dir");
@ -458,7 +493,7 @@ Hello world
let res = Page::from_file(
nested_path.join("index.md").as_path(),
&config
&config,
);
assert!(res.is_ok());

View file

@ -11,7 +11,7 @@ use errors::{Result, ResultExt};
use utils::fs::read_file;
use utils::templates::render_template;
use utils::site::get_reading_analytics;
use rendering::{Context, Header, markdown_to_html};
use rendering::{RenderContext, Header, render_content};
use page::Page;
use file_info::FileInfo;
@ -91,22 +91,25 @@ impl Section {
return "index.html".to_string();
}
"section.html".to_string()
},
}
}
}
/// We need access to all pages url to render links relative to content
/// so that can't happen at the same time as parsing
pub fn render_markdown(&mut self, permalinks: &HashMap<String, String>, tera: &Tera, config: &Config) -> Result<()> {
let context = Context::new(
let mut context = RenderContext::new(
tera,
config.highlight_code,
config.highlight_theme.clone(),
config,
&self.permalink,
permalinks,
self.meta.insert_anchor_links,
);
let res = markdown_to_html(&self.raw_content, &context)?;
context.tera_context.add("section", self);
let res = render_content(&self.raw_content, &context)
.chain_err(|| format!("Failed to render content of {}", self.file.path.display()))?;
self.content = res.0;
self.toc = res.1;
Ok(())

View file

@ -7,7 +7,7 @@ use front_matter::SortBy;
/// Sort pages by the given criteria
///
/// Any pages that doesn't have a the required field when the sorting method is other than none
/// Any pages that doesn't have a required field when the sorting method is other than none
/// will be ignored.
pub fn sort_pages(pages: Vec<Page>, sort_by: SortBy) -> (Vec<Page>, Vec<Page>) {
if sort_by == SortBy::None {
@ -19,7 +19,6 @@ pub fn sort_pages(pages: Vec<Page>, sort_by: SortBy) -> (Vec<Page>, Vec<Page>) {
.partition(|page| {
match sort_by {
SortBy::Date => page.meta.date.is_some(),
SortBy::Order => page.meta.order.is_some(),
SortBy::Weight => page.meta.weight.is_some(),
_ => unreachable!()
}
@ -35,17 +34,7 @@ pub fn sort_pages(pages: Vec<Page>, sort_by: SortBy) -> (Vec<Page>, Vec<Page>) {
ord
}
})
},
SortBy::Order => {
can_be_sorted.par_sort_unstable_by(|a, b| {
let ord = b.meta.order().cmp(&a.meta.order());
if ord == Ordering::Equal {
a.permalink.cmp(&b.permalink)
} else {
ord
}
})
},
SortBy::Weight => {
can_be_sorted.par_sort_unstable_by(|a, b| {
let ord = a.meta.weight().cmp(&b.meta.weight());
@ -55,7 +44,7 @@ pub fn sort_pages(pages: Vec<Page>, sort_by: SortBy) -> (Vec<Page>, Vec<Page>) {
ord
}
})
},
}
_ => unreachable!()
};
@ -64,7 +53,7 @@ pub fn sort_pages(pages: Vec<Page>, sort_by: SortBy) -> (Vec<Page>, Vec<Page>) {
/// Horribly inefficient way to set previous and next on each pages that skips drafts
/// So many clones
pub fn populate_previous_and_next_pages(input: &[Page]) -> Vec<Page> {
pub fn populate_siblings(input: &[Page], sort_by: SortBy) -> Vec<Page> {
let mut res = Vec::with_capacity(input.len());
// The input is already sorted
@ -91,9 +80,20 @@ pub fn populate_previous_and_next_pages(input: &[Page]) -> Vec<Page> {
// Remove prev/next otherwise we serialise the whole thing...
let mut next_page = input[j].clone();
next_page.previous = None;
next_page.next = None;
new_page.next = Some(Box::new(next_page));
match sort_by {
SortBy::Weight => {
next_page.lighter = None;
next_page.heavier = None;
new_page.lighter = Some(Box::new(next_page));
}
SortBy::Date => {
next_page.earlier = None;
next_page.later = None;
new_page.later = Some(Box::new(next_page));
}
SortBy::None => ()
}
break;
}
}
@ -113,9 +113,19 @@ pub fn populate_previous_and_next_pages(input: &[Page]) -> Vec<Page> {
// Remove prev/next otherwise we serialise the whole thing...
let mut previous_page = input[j].clone();
previous_page.previous = None;
previous_page.next = None;
new_page.previous = Some(Box::new(previous_page));
match sort_by {
SortBy::Weight => {
previous_page.lighter = None;
previous_page.heavier = None;
new_page.heavier = Some(Box::new(previous_page));
}
SortBy::Date => {
previous_page.earlier = None;
previous_page.later = None;
new_page.earlier = Some(Box::new(previous_page));
}
SortBy::None => {}
}
break;
}
}
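
Taken together, the sorting API is now a two-step flow; a rough sketch, assuming the crate-internal `Page`, `SortBy`, `sort_pages` and `populate_siblings` are in scope:

```rust
// Sketch only: sort by the chosen method, then link siblings with the same
// method so the matching fields (heavier/lighter or earlier/later) are set.
fn sort_and_link(pages: Vec<Page>) -> Vec<Page> {
    let (sorted, _cannot_be_sorted) = sort_pages(pages, SortBy::Weight);
    populate_siblings(&sorted, SortBy::Weight)
}
```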
@ -129,7 +139,7 @@ pub fn populate_previous_and_next_pages(input: &[Page]) -> Vec<Page> {
mod tests {
use front_matter::{PageFrontMatter, SortBy};
use page::Page;
use super::{sort_pages, populate_previous_and_next_pages};
use super::{sort_pages, populate_siblings};
fn create_page_with_date(date: &str) -> Page {
let mut front_matter = PageFrontMatter::default();
@ -137,22 +147,6 @@ mod tests {
Page::new("content/hello.md", front_matter)
}
fn create_page_with_order(order: usize, filename: &str) -> Page {
let mut front_matter = PageFrontMatter::default();
front_matter.order = Some(order);
let mut p = Page::new("content/".to_string() + filename, front_matter);
// Faking a permalink to test sorting with equal order
p.permalink = filename.to_string();
p
}
fn create_draft_page_with_order(order: usize) -> Page {
let mut front_matter = PageFrontMatter::default();
front_matter.order = Some(order);
front_matter.draft = true;
Page::new("content/hello.md", front_matter)
}
fn create_page_with_weight(weight: usize) -> Page {
let mut front_matter = PageFrontMatter::default();
front_matter.weight = Some(weight);
@ -173,37 +167,6 @@ mod tests {
assert_eq!(pages[2].clone().meta.date.unwrap().to_string(), "2017-01-01");
}
#[test]
fn can_sort_by_order() {
let input = vec![
create_page_with_order(2, "hello.md"),
create_page_with_order(3, "hello2.md"),
create_page_with_order(1, "hello3.md"),
];
let (pages, _) = sort_pages(input, SortBy::Order);
// Should be sorted by order
assert_eq!(pages[0].clone().meta.order.unwrap(), 3);
assert_eq!(pages[1].clone().meta.order.unwrap(), 2);
assert_eq!(pages[2].clone().meta.order.unwrap(), 1);
}
#[test]
fn can_sort_by_order_uses_permalink_to_break_ties() {
let input = vec![
create_page_with_order(3, "b.md"),
create_page_with_order(3, "a.md"),
create_page_with_order(3, "c.md"),
];
let (pages, _) = sort_pages(input, SortBy::Order);
// Should be sorted by order
assert_eq!(pages[0].clone().meta.order.unwrap(), 3);
assert_eq!(pages[0].clone().permalink, "a.md");
assert_eq!(pages[1].clone().meta.order.unwrap(), 3);
assert_eq!(pages[1].clone().permalink, "b.md");
assert_eq!(pages[2].clone().meta.order.unwrap(), 3);
assert_eq!(pages[2].clone().permalink, "c.md");
}
#[test]
fn can_sort_by_weight() {
let input = vec![
@ -221,80 +184,48 @@ mod tests {
#[test]
fn can_sort_by_none() {
let input = vec![
create_page_with_order(2, "a.md"),
create_page_with_order(3, "a.md"),
create_page_with_order(1, "a.md"),
create_page_with_weight(2),
create_page_with_weight(3),
create_page_with_weight(1),
];
let (pages, _) = sort_pages(input, SortBy::None);
// Should be sorted by date
assert_eq!(pages[0].clone().meta.order.unwrap(), 2);
assert_eq!(pages[1].clone().meta.order.unwrap(), 3);
assert_eq!(pages[2].clone().meta.order.unwrap(), 1);
assert_eq!(pages[0].clone().meta.weight.unwrap(), 2);
assert_eq!(pages[1].clone().meta.weight.unwrap(), 3);
assert_eq!(pages[2].clone().meta.weight.unwrap(), 1);
}
#[test]
fn ignore_page_with_missing_field() {
let input = vec![
create_page_with_order(2, "a.md"),
create_page_with_order(3, "a.md"),
create_page_with_weight(2),
create_page_with_weight(3),
create_page_with_date("2019-01-01"),
];
let (pages, unsorted) = sort_pages(input, SortBy::Order);
let (pages, unsorted) = sort_pages(input, SortBy::Weight);
assert_eq!(pages.len(), 2);
assert_eq!(unsorted.len(), 1);
}
#[test]
fn can_populate_previous_and_next_pages() {
fn can_populate_siblings() {
let input = vec![
create_page_with_order(1, "a.md"),
create_page_with_order(2, "b.md"),
create_page_with_order(3, "a.md"),
create_page_with_weight(1),
create_page_with_weight(2),
create_page_with_weight(3),
];
let pages = populate_previous_and_next_pages(&input);
let pages = populate_siblings(&input, SortBy::Weight);
assert!(pages[0].clone().next.is_none());
assert!(pages[0].clone().previous.is_some());
assert_eq!(pages[0].clone().previous.unwrap().meta.order.unwrap(), 2);
assert!(pages[0].clone().lighter.is_none());
assert!(pages[0].clone().heavier.is_some());
assert_eq!(pages[0].clone().heavier.unwrap().meta.weight.unwrap(), 2);
assert!(pages[1].clone().next.is_some());
assert!(pages[1].clone().previous.is_some());
assert_eq!(pages[1].clone().previous.unwrap().meta.order.unwrap(), 3);
assert_eq!(pages[1].clone().next.unwrap().meta.order.unwrap(), 1);
assert!(pages[1].clone().heavier.is_some());
assert!(pages[1].clone().lighter.is_some());
assert_eq!(pages[1].clone().lighter.unwrap().meta.weight.unwrap(), 1);
assert_eq!(pages[1].clone().heavier.unwrap().meta.weight.unwrap(), 3);
assert!(pages[2].clone().next.is_some());
assert!(pages[2].clone().previous.is_none());
assert_eq!(pages[2].clone().next.unwrap().meta.order.unwrap(), 2);
}
#[test]
fn can_populate_previous_and_next_pages_skip_drafts() {
let input = vec![
create_draft_page_with_order(0),
create_page_with_order(1, "a.md"),
create_page_with_order(2, "b.md"),
create_page_with_order(3, "c.md"),
create_draft_page_with_order(4),
];
let pages = populate_previous_and_next_pages(&input);
assert!(pages[0].clone().next.is_none());
assert!(pages[0].clone().previous.is_none());
assert!(pages[1].clone().next.is_none());
assert!(pages[1].clone().previous.is_some());
assert_eq!(pages[1].clone().previous.unwrap().meta.order.unwrap(), 2);
assert!(pages[2].clone().next.is_some());
assert!(pages[2].clone().previous.is_some());
assert_eq!(pages[2].clone().previous.unwrap().meta.order.unwrap(), 3);
assert_eq!(pages[2].clone().next.unwrap().meta.order.unwrap(), 1);
assert!(pages[3].clone().next.is_some());
assert!(pages[3].clone().previous.is_none());
assert_eq!(pages[3].clone().next.unwrap().meta.order.unwrap(), 2);
assert!(pages[4].clone().next.is_none());
assert!(pages[4].clone().previous.is_none());
assert!(pages[2].clone().lighter.is_some());
assert!(pages[2].clone().heavier.is_none());
assert_eq!(pages[2].clone().lighter.unwrap().meta.weight.unwrap(), 2);
}
}

View file

@ -4,6 +4,7 @@ version = "0.1.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
[dependencies]
error-chain = "0.11"
error-chain = "0.12"
tera = "0.11"
toml = "0.4"
image = "0.19.0"

View file

@ -1,9 +1,10 @@
#![allow(unused_doc_comment)]
#![allow(unused_doc_comments)]
#[macro_use]
extern crate error_chain;
extern crate tera;
extern crate toml;
extern crate image;
error_chain! {
errors {}
@ -15,6 +16,7 @@ error_chain! {
foreign_links {
Io(::std::io::Error);
Toml(toml::de::Error);
Image(image::ImageError);
}
}
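
A short sketch (not in the diff) of what the new `Image` foreign link buys callers: `?` can now convert `image::ImageError` into this crate's error type. The path is illustrative.

```rust
// Assumes the error_chain-generated `Result`/`Error` defined above.
fn load(path: &str) -> Result<image::DynamicImage> {
    // `image::open` fails with `image::ImageError`, which `?` converts
    // through the `Image(...)` foreign link.
    Ok(image::open(path)?)
}
```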

View file

@ -12,5 +12,4 @@ toml = "0.4"
regex = "1"
lazy_static = "1"
errors = { path = "../errors" }

View file

@ -30,8 +30,6 @@ lazy_static! {
pub enum SortBy {
/// Most recent to oldest
Date,
/// Lower order comes last
Order,
/// Lower weight comes first
Weight,
/// No sorting
@ -151,5 +149,4 @@ date = 2002-10-12"#;
let res = split_page_content(Path::new(""), content);
assert!(res.is_err());
}
}

View file

@ -1,4 +1,5 @@
use std::result::{Result as StdResult};
use std::collections::HashMap;
use std::result::Result as StdResult;
use chrono::prelude::*;
use tera::{Map, Value};
@ -21,7 +22,7 @@ fn from_toml_datetime<'de, D>(deserializer: D) -> StdResult<Option<String>, D::E
fn convert_toml_date(table: Map<String, Value>) -> Value {
let mut new = Map::new();
for (k, v) in table.into_iter() {
for (k, v) in table {
if k == "$__toml_private_datetime" {
return v;
}
@ -34,7 +35,7 @@ fn convert_toml_date(table: Map<String, Value>) -> Value {
return Value::Object(new);
}
new.insert(k, convert_toml_date(o));
},
}
_ => { new.insert(k, v); }
}
}
@ -51,8 +52,8 @@ fn fix_toml_dates(table: Map<String, Value>) -> Value {
match value {
Value::Object(mut o) => {
new.insert(key, convert_toml_date(o));
},
_ => { new.insert(key, value); },
}
_ => { new.insert(key, value); }
}
}
@ -80,10 +81,7 @@ pub struct PageFrontMatter {
/// otherwise is set after parsing front matter and sections
/// Can't be an empty string if present
pub path: Option<String>,
/// Tags, not to be confused with categories
pub tags: Option<Vec<String>>,
/// Only one category allowed. Can't be an empty string if present
pub category: Option<String>,
pub taxonomies: HashMap<String, Vec<String>>,
/// Integer to use to order content. Lowest is at the bottom, highest first
pub order: Option<usize>,
/// Integer to use to order content. Highest is at the bottom, lowest first
@ -122,12 +120,6 @@ impl PageFrontMatter {
}
}
if let Some(ref category) = f.category {
if category == "" {
bail!("`category` can't be empty if present")
}
}
f.extra = match fix_toml_dates(f.extra) {
Value::Object(o) => o,
_ => unreachable!("Got something other than a table in page extra"),
@ -155,13 +147,6 @@ impl PageFrontMatter {
pub fn weight(&self) -> usize {
self.weight.unwrap()
}
pub fn has_tags(&self) -> bool {
match self.tags {
Some(ref t) => !t.is_empty(),
None => false
}
}
}
impl Default for PageFrontMatter {
@ -173,8 +158,7 @@ impl Default for PageFrontMatter {
draft: false,
slug: None,
path: None,
tags: None,
category: None,
taxonomies: HashMap::new(),
order: None,
weight: None,
aliases: Vec::new(),
@ -211,21 +195,6 @@ mod tests {
assert_eq!(res.description.unwrap(), "hey there".to_string())
}
#[test]
fn can_parse_tags() {
let content = r#"
title = "Hello"
description = "hey there"
slug = "hello-world"
tags = ["rust", "html"]"#;
let res = PageFrontMatter::parse(content);
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(res.title.unwrap(), "Hello".to_string());
assert_eq!(res.slug.unwrap(), "hello-world".to_string());
assert_eq!(res.tags.unwrap(), ["rust".to_string(), "html".to_string()]);
}
#[test]
fn errors_with_invalid_front_matter() {
@ -234,17 +203,6 @@ mod tests {
assert!(res.is_err());
}
#[test]
fn errors_on_non_string_tag() {
let content = r#"
title = "Hello"
description = "hey there"
slug = "hello-world"
tags = ["rust", 1]"#;
let res = PageFrontMatter::parse(content);
assert!(res.is_err());
}
#[test]
fn errors_on_present_but_empty_slug() {
let content = r#"
@ -344,4 +302,21 @@ mod tests {
assert!(res.is_ok());
assert_eq!(res.unwrap().extra["something"]["some-date"], to_value("2002-14-01").unwrap());
}
#[test]
fn can_parse_taxonomies() {
let content = r#"
title = "Hello World"
[taxonomies]
tags = ["Rust", "JavaScript"]
categories = ["Dev"]
"#;
let res = PageFrontMatter::parse(content);
println!("{:?}", res);
assert!(res.is_ok());
let res2 = res.unwrap();
assert_eq!(res2.taxonomies["categories"], vec!["Dev"]);
assert_eq!(res2.taxonomies["tags"], vec!["Rust", "JavaScript"]);
}
}

View file

@ -4,7 +4,8 @@ extern crate syntect;
use syntect::dumps::from_binary;
use syntect::parsing::SyntaxSet;
use syntect::highlighting::ThemeSet;
use syntect::highlighting::{ThemeSet, Theme};
use syntect::easy::HighlightLines;
thread_local! {
pub static SYNTAX_SET: SyntaxSet = {
@ -17,3 +18,15 @@ thread_local!{
lazy_static! {
pub static ref THEME_SET: ThemeSet = from_binary(include_bytes!("../../../sublime_themes/all.themedump"));
}
pub fn get_highlighter<'a>(theme: &'a Theme, info: &str) -> HighlightLines<'a> {
SYNTAX_SET.with(|ss| {
let syntax = info
.split(' ')
.next()
.and_then(|lang| ss.find_syntax_by_token(lang))
.unwrap_or_else(|| ss.find_syntax_plain_text());
HighlightLines::new(syntax, theme)
})
}
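
A rough usage sketch for the new helper, assuming these items are exported to the rest of the workspace; the theme key matches the project's default `base16-ocean-dark` theme and the highlighted snippet is arbitrary.

```rust
use syntect::util::as_24_bit_terminal_escaped;

fn demo() {
    // Theme lookup and language token are assumptions for illustration.
    let theme = &THEME_SET.themes["base16-ocean-dark"];
    let mut highlighter = get_highlighter(theme, "rust");
    let regions = highlighter.highlight("fn main() { println!(\"hi\"); }");
    println!("{}", as_24_bit_terminal_escaped(&regions, true));
}
```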

View file

@ -0,0 +1,14 @@
[package]
name = "imageproc"
version = "0.1.0"
authors = ["Vojtěch Král <vojtech@kral.hk>"]
[dependencies]
lazy_static = "1"
regex = "1.0"
tera = "0.11"
image = "0.19"
rayon = "1"
errors = { path = "../errors" }
utils = { path = "../utils" }

View file

@ -0,0 +1,384 @@
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate image;
extern crate rayon;
extern crate utils;
extern crate errors;
use std::path::{Path, PathBuf};
use std::hash::{Hash, Hasher};
use std::collections::HashMap;
use std::collections::hash_map::Entry as HEntry;
use std::collections::hash_map::DefaultHasher;
use std::fs::{self, File};
use regex::Regex;
use image::{GenericImage, FilterType};
use image::jpeg::JPEGEncoder;
use rayon::prelude::*;
use utils::fs as ufs;
use errors::{Result, ResultExt};
static RESIZED_SUBDIR: &'static str = "_processed_images";
lazy_static! {
pub static ref RESIZED_FILENAME: Regex = Regex::new(r#"([0-9a-f]{16})([0-9a-f]{2})[.]jpg"#).unwrap();
}
/// Describes the precise kind of a resize operation
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResizeOp {
/// A simple scale operation that doesn't take aspect ratio into account
Scale(u32, u32),
/// Scales the image to a specified width with height computed such
/// that aspect ratio is preserved
FitWidth(u32),
/// Scales the image to a specified height with width computed such
/// that aspect ratio is preserved
FitHeight(u32),
/// Scales the image such that it fits within the specified width and
/// height preserving aspect ratio.
/// Either dimension may end up being smaller, but never larger than specified.
Fit(u32, u32),
/// Scales the image such that it fills the specified width and height.
/// Output will always have the exact dimensions specified.
/// The part of the image that doesn't fit in the thumbnail due to differing
/// aspect ratio will be cropped away, if any.
Fill(u32, u32),
}
impl ResizeOp {
pub fn from_args(op: &str, width: Option<u32>, height: Option<u32>) -> Result<ResizeOp> {
use ResizeOp::*;
// Validate args:
match op {
"fit_width" => if width.is_none() {
return Err("op=\"fit_width\" requires a `width` argument".to_string().into());
},
"fit_height" => if height.is_none() {
return Err("op=\"fit_height\" requires a `height` argument".to_string().into());
},
"scale" | "fit" | "fill" => if width.is_none() || height.is_none() {
return Err(format!("op={} requires a `width` and `height` argument", op).into());
},
_ => return Err(format!("Invalid image resize operation: {}", op).into())
};
Ok(match op {
"scale" => Scale(width.unwrap(), height.unwrap()),
"fit_width" => FitWidth(width.unwrap()),
"fit_height" => FitHeight(height.unwrap()),
"fit" => Fit(width.unwrap(), height.unwrap()),
"fill" => Fill(width.unwrap(), height.unwrap()),
_ => unreachable!(),
})
}
pub fn width(self) -> Option<u32> {
use ResizeOp::*;
match self {
Scale(w, _) => Some(w),
FitWidth(w) => Some(w),
FitHeight(_) => None,
Fit(w, _) => Some(w),
Fill(w, _) => Some(w),
}
}
pub fn height(self) -> Option<u32> {
use ResizeOp::*;
match self {
Scale(_, h) => Some(h),
FitWidth(_) => None,
FitHeight(h) => Some(h),
Fit(_, h) => Some(h),
Fill(_, h) => Some(h),
}
}
}
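
A quick sketch of how the argument validation above behaves; all values are illustrative.

```rust
fn demo() {
    // `fit_width` only needs a width...
    let op = ResizeOp::from_args("fit_width", Some(300), None).unwrap();
    assert_eq!(op, ResizeOp::FitWidth(300));
    assert_eq!(op.width(), Some(300));
    assert_eq!(op.height(), None);

    // ...while `fill` needs both dimensions, and unknown ops are rejected.
    assert!(ResizeOp::from_args("fill", Some(300), None).is_err());
    assert!(ResizeOp::from_args("sepia", Some(300), Some(200)).is_err());
}
```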
impl From<ResizeOp> for u8 {
fn from(op: ResizeOp) -> u8 {
use ResizeOp::*;
match op {
Scale(_, _) => 1,
FitWidth(_) => 2,
FitHeight(_) => 3,
Fit(_, _) => 4,
Fill(_, _) => 5,
}
}
}
impl Hash for ResizeOp {
fn hash<H: Hasher>(&self, hasher: &mut H) {
hasher.write_u8(u8::from(*self));
if let Some(w) = self.width() { hasher.write_u32(w); }
if let Some(h) = self.height() { hasher.write_u32(h); }
}
}
/// Holds all data needed to perform a resize operation
#[derive(Debug, PartialEq, Eq)]
pub struct ImageOp {
source: String,
op: ResizeOp,
quality: u8,
/// Hash of the above parameters
hash: u64,
/// If there is a hash collision with another ImageOp, this contains a sequential ID > 1
/// identifying the collision in the order as encountered (which is essentially random).
/// Therefore, ImageOps with collisions (ie. collision_id > 0) are always considered out of date.
/// Note that this is very unlikely to happen in practice
collision_id: u32,
}
impl ImageOp {
pub fn new(source: String, op: ResizeOp, quality: u8) -> ImageOp {
let mut hasher = DefaultHasher::new();
hasher.write(source.as_ref());
op.hash(&mut hasher);
hasher.write_u8(quality);
let hash = hasher.finish();
ImageOp { source, op, quality, hash, collision_id: 0 }
}
pub fn from_args(
source: String,
op: &str,
width: Option<u32>,
height: Option<u32>,
quality: u8,
) -> Result<ImageOp> {
let op = ResizeOp::from_args(op, width, height)?;
Ok(Self::new(source, op, quality))
}
fn perform(&self, content_path: &Path, target_path: &Path) -> Result<()> {
use ResizeOp::*;
let src_path = content_path.join(&self.source);
if !ufs::file_stale(&src_path, target_path) {
return Ok(());
}
let mut img = image::open(&src_path)?;
let (img_w, img_h) = img.dimensions();
const RESIZE_FILTER: FilterType = FilterType::Gaussian;
const RATIO_EPSILLION: f32 = 0.1;
let img = match self.op {
Scale(w, h) => img.resize_exact(w, h, RESIZE_FILTER),
FitWidth(w) => img.resize(w, u32::max_value(), RESIZE_FILTER),
FitHeight(h) => img.resize(u32::max_value(), h, RESIZE_FILTER),
Fit(w, h) => img.resize(w, h, RESIZE_FILTER),
Fill(w, h) => {
let factor_w = img_w as f32 / w as f32;
let factor_h = img_h as f32 / h as f32;
if (factor_w - factor_h).abs() <= RATIO_EPSILLION {
// If the horizontal and vertical factor is very similar,
// that means the aspect is similar enough that there's not much point
// in cropping, so just perform a simple scale in this case.
img.resize_exact(w, h, RESIZE_FILTER)
} else {
// We perform the fill such that a crop is performed first
// and then resize_exact can be used, which should be cheaper than
// resizing and then cropping (smaller number of pixels to resize).
let (crop_w, crop_h) = if factor_w < factor_h {
(img_w, (factor_w * h as f32).round() as u32)
} else {
((factor_h * w as f32).round() as u32, img_h)
};
let (offset_w, offset_h) = if factor_w < factor_h {
(0, (img_h - crop_h) / 2)
} else {
((img_w - crop_w) / 2, 0)
};
img.crop(offset_w, offset_h, crop_w, crop_h)
.resize_exact(w, h, RESIZE_FILTER)
}
}
};
let mut f = File::create(target_path)?;
let mut enc = JPEGEncoder::new_with_quality(&mut f, self.quality);
let (img_w, img_h) = img.dimensions();
enc.encode(&img.raw_pixels(), img_w, img_h, img.color())?;
Ok(())
}
}
/// A structure into which image operations can be enqueued and then performed.
/// All output is written in a subdirectory in `static_path`,
/// taking care of file stale status based on timestamps and possible hash collisions.
#[derive(Debug)]
pub struct Processor {
content_path: PathBuf,
resized_path: PathBuf,
resized_url: String,
/// A map of ImageOps by their stored hash.
/// Note that this cannot be a HashSet, because a HashSet handles collisions transparently
/// and we need to be aware of and handle collisions ourselves.
img_ops: HashMap<u64, ImageOp>,
/// Hash collisions go here:
img_ops_collisions: Vec<ImageOp>,
}
impl Processor {
pub fn new(content_path: PathBuf, static_path: &Path, base_url: &str) -> Processor {
Processor {
content_path,
resized_path: static_path.join(RESIZED_SUBDIR),
resized_url: Self::resized_url(base_url),
img_ops: HashMap::new(),
img_ops_collisions: Vec::new(),
}
}
fn resized_url(base_url: &str) -> String {
if base_url.ends_with('/') {
format!("{}{}", base_url, RESIZED_SUBDIR)
} else {
format!("{}/{}", base_url, RESIZED_SUBDIR)
}
}
pub fn set_base_url(&mut self, base_url: &str) {
self.resized_url = Self::resized_url(base_url);
}
pub fn source_exists(&self, source: &str) -> bool {
self.content_path.join(source).exists()
}
pub fn num_img_ops(&self) -> usize {
self.img_ops.len() + self.img_ops_collisions.len()
}
fn insert_with_collisions(&mut self, mut img_op: ImageOp) -> u32 {
match self.img_ops.entry(img_op.hash) {
HEntry::Occupied(entry) => if *entry.get() == img_op { return 0; },
HEntry::Vacant(entry) => {
entry.insert(img_op);
return 0;
}
}
// If we get here, that means a hash collision.
// This is detected when there is an ImageOp with the same hash in the `img_ops`
// map but which is not equal to this one.
// To deal with this, all collisions get a (random) sequential ID number.
// First try to look up this ImageOp in `img_ops_collisions`, maybe we've
// already seen the same ImageOp.
// At the same time, count IDs to figure out the next free one.
// Start with the ID of 2, because we'll need to use 1 for the ImageOp
// already present in the map:
let mut collision_id = 2;
for op in self.img_ops_collisions.iter().filter(|op| op.hash == img_op.hash) {
if *op == img_op {
// This is a colliding ImageOp, but we've already seen an equal one
// (not just by hash, but by content too), so just return its ID:
return collision_id;
} else {
collision_id += 1;
}
}
// If we get here, that means this is a new colliding ImageOp and
// `collision_id` is the next free ID
if collision_id == 2 {
// This is the first collision found with this hash, update the ID
// of the matching ImageOp in the map.
self.img_ops.get_mut(&img_op.hash).unwrap().collision_id = 1;
}
img_op.collision_id = collision_id;
self.img_ops_collisions.push(img_op);
collision_id
}
fn op_filename(hash: u64, collision_id: u32) -> String {
// Please keep this in sync with RESIZED_FILENAME
assert!(collision_id < 256, "Unexpectedly large number of collisions: {}", collision_id);
format!("{:016x}{:02x}.jpg", hash, collision_id)
}
fn op_url(&self, hash: u64, collision_id: u32) -> String {
format!("{}/{}", &self.resized_url, Self::op_filename(hash, collision_id))
}
pub fn insert(&mut self, img_op: ImageOp) -> String {
let hash = img_op.hash;
let collision_id = self.insert_with_collisions(img_op);
self.op_url(hash, collision_id)
}
pub fn prune(&self) -> Result<()> {
// Do not create folders if they don't exist
if !self.resized_path.exists() {
return Ok(());
}
ufs::ensure_directory_exists(&self.resized_path)?;
let entries = fs::read_dir(&self.resized_path)?;
for entry in entries {
let entry_path = entry?.path();
if entry_path.is_file() {
let filename = entry_path.file_name().unwrap().to_string_lossy();
if let Some(capts) = RESIZED_FILENAME.captures(filename.as_ref()) {
let hash = u64::from_str_radix(capts.get(1).unwrap().as_str(), 16).unwrap();
let collision_id = u32::from_str_radix(
capts.get(2).unwrap().as_str(), 16,
).unwrap();
if collision_id > 0 || !self.img_ops.contains_key(&hash) {
fs::remove_file(&entry_path)?;
}
}
}
}
Ok(())
}
pub fn do_process(&mut self) -> Result<()> {
if !self.img_ops.is_empty() {
ufs::ensure_directory_exists(&self.resized_path)?;
}
self.img_ops.par_iter().map(|(hash, op)| {
let target = self.resized_path.join(Self::op_filename(*hash, op.collision_id));
op.perform(&self.content_path, &target)
.chain_err(|| format!("Failed to process image: {}", op.source))
})
.fold(|| Ok(()), Result::and)
.reduce(|| Ok(()), Result::and)
}
}
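
Put together, the intended flow looks roughly like this; a sketch, not code from the diff, with paths and parameters chosen for illustration.

```rust
fn demo(mut processor: Processor) -> Result<()> {
    // Queue a resize and get back the URL the page can link to right away.
    let op = ImageOp::from_args("images/cat.jpg".to_string(), "fill", Some(320), Some(240), 75)?;
    let url = processor.insert(op);
    println!("thumbnail will live at {}", url);

    // Drop stale outputs, then perform every queued resize in parallel.
    processor.prune()?;
    processor.do_process()
}
```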
/// Looks at file's extension and returns whether it's a supported image format
pub fn file_is_img<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().extension().and_then(|s| s.to_str()).map(|ext| {
match ext.to_lowercase().as_str() {
"jpg" | "jpeg" => true,
"png" => true,
"gif" => true,
"bmp" => true,
_ => false,
}
}).unwrap_or(false)
}

View file

@ -0,0 +1,8 @@
[package]
name = "link_checker"
version = "0.1.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
[dependencies]
reqwest = "0.8"
lazy_static = "1"

View file

@ -0,0 +1,88 @@
extern crate reqwest;
#[macro_use]
extern crate lazy_static;
use std::collections::HashMap;
use std::error::Error;
use std::sync::{Arc, RwLock};
use reqwest::StatusCode;
#[derive(Clone, Debug, PartialEq)]
pub struct LinkResult {
pub code: Option<StatusCode>,
/// Error message, set when the HTTP request failed before it could get an HTTP status code
pub error: Option<String>,
}
impl LinkResult {
pub fn is_valid(&self) -> bool {
if self.error.is_some() {
return false;
}
if let Some(c) = self.code {
return c.is_success();
}
true
}
pub fn message(&self) -> String {
if let Some(ref e) = self.error {
return e.clone();
}
if let Some(c) = self.code {
return format!("{}", c);
}
"Unknown error".to_string()
}
}
lazy_static! {
// Keep history of link checks so a rebuild doesn't have to check again
static ref LINKS: Arc<RwLock<HashMap<String, LinkResult>>> = Arc::new(RwLock::new(HashMap::new()));
}
pub fn check_url(url: &str) -> LinkResult {
{
let guard = LINKS.read().unwrap();
if let Some(res) = guard.get(url) {
return res.clone();
}
}
// Need to actually do the link checking
let res = match reqwest::get(url) {
Ok(response) => LinkResult { code: Some(response.status()), error: None },
Err(e) => LinkResult { code: None, error: Some(e.description().to_string()) },
};
LINKS.write().unwrap().insert(url.to_string(), res.clone());
return res;
}
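
For illustration, how a caller in the site builder might consume this; the URL is arbitrary.

```rust
fn report_broken(url: &str) {
    // Repeated calls for the same URL are served from the cache above.
    let res = check_url(url);
    if !res.is_valid() {
        eprintln!("Broken link {}: {}", url, res.message());
    }
}
```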
#[cfg(test)]
mod tests {
use super::{LINKS, check_url};
#[test]
fn can_validate_ok_links() {
let url = "https://google.com";
let res = check_url(url);
assert!(res.is_valid());
assert!(LINKS.read().unwrap().get(url).is_some());
let res = check_url(url);
assert!(res.is_valid());
}
#[test]
fn can_fail_404_links() {
let res = check_url("https://google.comys");
assert_eq!(res.is_valid(), false);
assert!(res.code.is_none());
assert!(res.error.is_some());
}
}

View file

@ -12,6 +12,7 @@ errors = { path = "../errors" }
config = { path = "../config" }
content = { path = "../content" }
utils = { path = "../utils" }
taxonomies = { path = "../taxonomies" }
[dev-dependencies]
front_matter = { path = "../front_matter" }

View file

@ -6,6 +6,7 @@ extern crate errors;
extern crate config;
extern crate content;
extern crate utils;
extern crate taxonomies;
#[cfg(test)]
extern crate front_matter;
@ -18,6 +19,14 @@ use errors::{Result, ResultExt};
use config::Config;
use content::{Page, Section};
use utils::templates::render_template;
use taxonomies::{Taxonomy, TaxonomyItem};
#[derive(Clone, Debug, PartialEq)]
enum PaginationRoot<'a> {
Section(&'a Section),
Taxonomy(&'a Taxonomy),
}
/// A list of all the pages in the paginator with their index and links
@ -30,7 +39,7 @@ pub struct Pager<'a> {
/// Path to that page
path: String,
/// All pages for the pager
pages: Vec<&'a Page>
pages: Vec<&'a Page>,
}
impl<'a> Pager<'a> {
@ -63,22 +72,62 @@ pub struct Paginator<'a> {
pub pagers: Vec<Pager<'a>>,
/// How many content pages on a paginated page at max
paginate_by: usize,
/// The section struct we're building the paginator for
section: &'a Section,
/// The thing we are creating the paginator for: section or taxonomy
root: PaginationRoot<'a>,
// Those below can be obtained from the root but it would make the code more complex than needed
pub permalink: String,
path: String,
pub paginate_path: String,
is_index: bool,
}
impl<'a> Paginator<'a> {
/// Create a new paginator
/// Create a new paginator from a section
/// It will always at least create one pager (the first) even if there are no pages to paginate
pub fn new(all_pages: &'a [Page], section: &'a Section) -> Paginator<'a> {
pub fn from_section(all_pages: &'a [Page], section: &'a Section) -> Paginator<'a> {
let paginate_by = section.meta.paginate_by.unwrap();
let mut paginator = Paginator {
all_pages,
pagers: vec![],
paginate_by,
root: PaginationRoot::Section(section),
permalink: section.permalink.clone(),
path: section.path.clone(),
paginate_path: section.meta.paginate_path.clone(),
is_index: section.is_index(),
};
paginator.fill_pagers();
paginator
}
/// Create a new paginator from a taxonomy
/// It will always at least create one pager (the first) even if there are no pages to paginate
pub fn from_taxonomy(taxonomy: &'a Taxonomy, item: &'a TaxonomyItem) -> Paginator<'a> {
let paginate_by = taxonomy.kind.paginate_by.unwrap();
let mut paginator = Paginator {
all_pages: &item.pages,
pagers: vec![],
paginate_by,
root: PaginationRoot::Taxonomy(taxonomy),
permalink: item.permalink.clone(),
path: format!("{}/{}", taxonomy.kind.name, item.slug),
paginate_path: taxonomy.kind.paginate_path.clone().unwrap_or_else(|| "pages".to_string()),
is_index: false,
};
paginator.fill_pagers();
paginator
}
fn fill_pagers(&mut self) {
let mut pages = vec![];
let mut current_page = vec![];
for page in all_pages {
for page in self.all_pages {
current_page.push(page);
if current_page.len() == paginate_by {
if current_page.len() == self.paginate_by {
pages.push(current_page);
current_page = vec![];
}
@ -91,17 +140,23 @@ impl<'a> Paginator<'a> {
for (index, page) in pages.iter().enumerate() {
// First page has no pagination path
if index == 0 {
pagers.push(Pager::new(1, page.clone(), section.permalink.clone(), section.path.clone()));
pagers.push(Pager::new(1, page.clone(), self.permalink.clone(), self.path.clone()));
continue;
}
let page_path = format!("{}/{}/", section.meta.paginate_path, index + 1);
let permalink = format!("{}{}", section.permalink, page_path);
let pager_path = if section.is_index() {
let page_path = format!("{}/{}/", self.paginate_path, index + 1);
let permalink = format!("{}{}", self.permalink, page_path);
let pager_path = if self.is_index {
page_path
} else {
format!("{}{}", section.path, page_path)
if self.path.ends_with("/") {
format!("{}{}", self.path, page_path)
} else {
format!("{}/{}", self.path, page_path)
}
};
pagers.push(Pager::new(
index + 1,
page.clone(),
@ -112,15 +167,10 @@ impl<'a> Paginator<'a> {
// We always have the index one at least
if pagers.is_empty() {
pagers.push(Pager::new(1, vec![], section.permalink.clone(), section.path.clone()));
pagers.push(Pager::new(1, vec![], self.permalink.clone(), self.path.clone()));
}
Paginator {
all_pages,
pagers,
paginate_by,
section,
}
self.pagers = pagers;
}
pub fn build_paginator_context(&self, current_pager: &Pager) -> HashMap<&str, Value> {
@ -130,14 +180,14 @@ impl<'a> Paginator<'a> {
// Global variables
paginator.insert("paginate_by", to_value(self.paginate_by).unwrap());
paginator.insert("first", to_value(&self.section.permalink).unwrap());
paginator.insert("first", to_value(&self.permalink).unwrap());
let last_pager = &self.pagers[self.pagers.len() - 1];
paginator.insert("last", to_value(&last_pager.permalink).unwrap());
paginator.insert(
"pagers",
to_value(
&self.pagers.iter().map(|p| p.clone_without_pages()).collect::<Vec<_>>()
).unwrap()
).unwrap(),
);
// Variables for this specific page
@ -163,13 +213,22 @@ impl<'a> Paginator<'a> {
pub fn render_pager(&self, pager: &Pager, config: &Config, tera: &Tera) -> Result<String> {
let mut context = Context::new();
context.add("config", &config);
context.add("section", self.section);
let template_name = match self.root {
PaginationRoot::Section(s) => {
context.add("section", &s);
s.get_template_name()
}
PaginationRoot::Taxonomy(t) => {
context.add("taxonomy", &t.kind);
format!("{}/single.html", t.kind.name)
}
};
context.add("current_url", &pager.permalink);
context.add("current_path", &pager.path);
context.add("paginator", &self.build_paginator_context(pager));
render_template(&self.section.get_template_name(), tera, &context, &config.theme)
.chain_err(|| format!("Failed to render pager {} of section '{}'", pager.index, self.section.file.path.display()))
render_template(&template_name, tera, &context, &config.theme)
.chain_err(|| format!("Failed to render pager {}", pager.index))
}
}
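
A sketch of how the taxonomy path might be driven end to end (config, Tera instance and taxonomy data assumed in scope; mirrors the test below):

```rust
// Sketch only: build a paginator for one taxonomy term and render its pagers.
fn demo(taxonomy: &Taxonomy, item: &TaxonomyItem, config: &Config, tera: &Tera) -> Result<()> {
    let paginator = Paginator::from_taxonomy(taxonomy, item);
    for pager in &paginator.pagers {
        let html = paginator.render_pager(pager, config, tera)?;
        println!("rendered pager {} ({} bytes)", pager.index, html.len());
    }
    Ok(())
}
```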
@ -179,6 +238,8 @@ mod tests {
use front_matter::SectionFrontMatter;
use content::{Page, Section};
use config::Taxonomy as TaxonomyConfig;
use taxonomies::{Taxonomy, TaxonomyItem};
use super::Paginator;
@ -205,7 +266,7 @@ mod tests {
Page::default(),
];
let section = create_section(false);
let paginator = Paginator::new(pages.as_slice(), &section);
let paginator = Paginator::from_section(pages.as_slice(), &section);
assert_eq!(paginator.pagers.len(), 2);
assert_eq!(paginator.pagers[0].index, 1);
@ -227,7 +288,7 @@ mod tests {
Page::default(),
];
let section = create_section(true);
let paginator = Paginator::new(pages.as_slice(), &section);
let paginator = Paginator::from_section(pages.as_slice(), &section);
assert_eq!(paginator.pagers.len(), 2);
assert_eq!(paginator.pagers[0].index, 1);
@ -249,7 +310,7 @@ mod tests {
Page::default(),
];
let section = create_section(false);
let paginator = Paginator::new(pages.as_slice(), &section);
let paginator = Paginator::from_section(pages.as_slice(), &section);
assert_eq!(paginator.pagers.len(), 2);
let context = paginator.build_paginator_context(&paginator.pagers[0]);
@ -268,4 +329,37 @@ mod tests {
assert_eq!(context["previous"], to_value("https://vincent.is/posts/").unwrap());
assert_eq!(context["current_index"], to_value(2).unwrap());
}
#[test]
fn test_can_create_paginator_for_taxonomy() {
let pages = vec![
Page::default(),
Page::default(),
Page::default(),
];
let taxonomy_def = TaxonomyConfig {
name: "tags".to_string(),
paginate_by: Some(2),
..TaxonomyConfig::default()
};
let taxonomy_item = TaxonomyItem {
name: "Something".to_string(),
slug: "something".to_string(),
permalink: "https://vincent.is/tags/something/".to_string(),
pages,
};
let taxonomy = Taxonomy { kind: taxonomy_def, items: vec![taxonomy_item.clone()] };
let paginator = Paginator::from_taxonomy(&taxonomy, &taxonomy_item);
assert_eq!(paginator.pagers.len(), 2);
assert_eq!(paginator.pagers[0].index, 1);
assert_eq!(paginator.pagers[0].pages.len(), 2);
assert_eq!(paginator.pagers[0].permalink, "https://vincent.is/tags/something/");
assert_eq!(paginator.pagers[0].path, "tags/something");
assert_eq!(paginator.pagers[1].index, 2);
assert_eq!(paginator.pagers[1].pages.len(), 1);
assert_eq!(paginator.pagers[1].permalink, "https://vincent.is/tags/something/pages/2/");
assert_eq!(paginator.pagers[1].path, "tags/something/pages/2/");
}
}

View file

@ -11,5 +11,5 @@ content = { path = "../content" }
site = { path = "../site" }
[dev-dependencies]
tempdir = "0.3"
tempfile = "3"
fs_extra = "1.1"

View file

@ -16,7 +16,7 @@ use front_matter::{PageFrontMatter, SectionFrontMatter};
pub fn find_parent_section<'a>(site: &'a Site, page: &Page) -> Option<&'a Section> {
for section in site.sections.values() {
if section.is_child_page(&page.file.path) {
return Some(section)
return Some(section);
}
}
@ -26,10 +26,8 @@ pub fn find_parent_section<'a>(site: &'a Site, page: &Page) -> Option<&'a Sectio
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PageChangesNeeded {
/// Editing `tags`
Tags,
/// Editing `categories`
Categories,
/// Editing `taxonomies`
Taxonomies,
/// Editing `date`, `order` or `weight`
Sort,
/// Editing anything causes a re-render of the page
@ -85,12 +83,8 @@ fn find_section_front_matter_changes(current: &SectionFrontMatter, new: &Section
fn find_page_front_matter_changes(current: &PageFrontMatter, other: &PageFrontMatter) -> Vec<PageChangesNeeded> {
let mut changes_needed = vec![];
if current.tags != other.tags {
changes_needed.push(PageChangesNeeded::Tags);
}
if current.category != other.category {
changes_needed.push(PageChangesNeeded::Categories);
if current.taxonomies != other.taxonomies {
changes_needed.push(PageChangesNeeded::Taxonomies);
}
if current.date != other.date || current.order != other.order || current.weight != other.weight {
@ -117,8 +111,8 @@ fn delete_element(site: &mut Site, path: &Path, is_section: bool) -> Result<()>
if let Some(p) = site.pages.remove(path) {
site.permalinks.remove(&p.file.relative);
if p.meta.has_tags() || p.meta.category.is_some() {
site.populate_tags_and_categories();
if !p.meta.taxonomies.is_empty() {
site.populate_taxonomies()?;
}
// if there is a parent section, we will need to re-render it
@ -155,18 +149,18 @@ fn handle_section_editing(site: &mut Site, path: &Path) -> Result<()> {
SectionChangesNeeded::Sort => {
site.sort_sections_pages(Some(path));
site.register_tera_global_fns();
},
}
SectionChangesNeeded::Render => site.render_section(&site.sections[path], false)?,
SectionChangesNeeded::RenderWithPages => site.render_section(&site.sections[path], true)?,
// not a common enough operation to make it worth optimizing
SectionChangesNeeded::Delete => {
site.populate_sections();
site.build()?;
},
}
};
}
return Ok(());
},
}
// New section, only render that one
None => {
site.populate_sections();
@ -208,27 +202,15 @@ fn handle_page_editing(site: &mut Site, path: &Path) -> Result<()> {
}
// Front matter changed
let mut taxonomies_populated = false;
let mut sections_populated = false;
for changes in find_page_front_matter_changes(&site.pages[path].meta, &prev.meta) {
// Sort always comes first if present so the rendering will be fine
match changes {
PageChangesNeeded::Tags => {
if !taxonomies_populated {
site.populate_tags_and_categories();
taxonomies_populated = true;
}
PageChangesNeeded::Taxonomies => {
site.populate_taxonomies()?;
site.register_tera_global_fns();
site.render_tags()?;
},
PageChangesNeeded::Categories => {
if !taxonomies_populated {
site.populate_tags_and_categories();
taxonomies_populated = true;
site.render_taxonomies()?;
}
site.register_tera_global_fns();
site.render_categories()?;
},
PageChangesNeeded::Sort => {
let section_path = match find_parent_section(site, &site.pages[path]) {
Some(s) => s.file.path.clone(),
@ -241,7 +223,7 @@ fn handle_page_editing(site: &mut Site, path: &Path) -> Result<()> {
site.sort_sections_pages(Some(&section_path));
site.register_tera_global_fns();
site.render_index()?;
},
}
PageChangesNeeded::Render => {
if !sections_populated {
site.populate_sections();
@ -250,15 +232,15 @@ fn handle_page_editing(site: &mut Site, path: &Path) -> Result<()> {
site.register_tera_global_fns();
render_parent_section!(site, path);
site.render_page(&site.pages[path])?;
},
}
};
}
Ok(())
},
}
// It's a new page!
None => {
site.populate_sections();
site.populate_tags_and_categories();
site.populate_taxonomies()?;
site.register_tera_global_fns();
// No need to optimise that yet, we can revisit if it becomes an issue
site.build()
@ -322,14 +304,13 @@ pub fn after_template_change(site: &mut Site, path: &Path) -> Result<()> {
match filename {
"sitemap.xml" => site.render_sitemap(),
"rss.xml" => site.render_rss_feed(),
"rss.xml" => site.render_rss_feed(None, None),
"robots.txt" => site.render_robots(),
"categories.html" | "category.html" => site.render_categories(),
"tags.html" | "tag.html" => site.render_tags(),
"single.html" | "list.html" => site.render_taxonomies(),
"page.html" => {
site.render_sections()?;
site.render_orphan_pages()
},
}
"section.html" => site.render_sections(),
// Either the index or some unknown template changed
// We can't really know what this change affects so rebuild all
@ -345,40 +326,38 @@ pub fn after_template_change(site: &mut Site, path: &Path) -> Result<()> {
site.populate_sections();
site.render_sections()?;
site.render_orphan_pages()?;
site.render_categories()?;
site.render_tags()
},
site.render_taxonomies()
}
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use front_matter::{PageFrontMatter, SectionFrontMatter, SortBy};
use super::{
find_page_front_matter_changes, find_section_front_matter_changes,
PageChangesNeeded, SectionChangesNeeded
PageChangesNeeded, SectionChangesNeeded,
};
#[test]
fn can_find_tag_changes_in_page_frontmatter() {
let new = PageFrontMatter { tags: Some(vec!["a tag".to_string()]), ..PageFrontMatter::default() };
fn can_find_taxonomy_changes_in_page_frontmatter() {
let mut taxonomies = HashMap::new();
taxonomies.insert("tags".to_string(), vec!["a tag".to_string()]);
let new = PageFrontMatter { taxonomies, ..PageFrontMatter::default() };
let changes = find_page_front_matter_changes(&PageFrontMatter::default(), &new);
assert_eq!(changes, vec![PageChangesNeeded::Tags, PageChangesNeeded::Render]);
}
#[test]
fn can_find_category_changes_in_page_frontmatter() {
let current = PageFrontMatter { category: Some("a category".to_string()), ..PageFrontMatter::default() };
let changes = find_page_front_matter_changes(&current, &PageFrontMatter::default());
assert_eq!(changes, vec![PageChangesNeeded::Categories, PageChangesNeeded::Render]);
assert_eq!(changes, vec![PageChangesNeeded::Taxonomies, PageChangesNeeded::Render]);
}
#[test]
fn can_find_multiple_changes_in_page_frontmatter() {
let current = PageFrontMatter { category: Some("a category".to_string()), order: Some(1), ..PageFrontMatter::default() };
let mut taxonomies = HashMap::new();
taxonomies.insert("categories".to_string(), vec!["a category".to_string()]);
let current = PageFrontMatter { taxonomies, order: Some(1), ..PageFrontMatter::default() };
let changes = find_page_front_matter_changes(&current, &PageFrontMatter::default());
assert_eq!(changes, vec![PageChangesNeeded::Categories, PageChangesNeeded::Sort, PageChangesNeeded::Render]);
assert_eq!(changes, vec![PageChangesNeeded::Taxonomies, PageChangesNeeded::Sort, PageChangesNeeded::Render]);
}
#[test]

View file

@ -1,6 +1,6 @@
extern crate rebuild;
extern crate site;
extern crate tempdir;
extern crate tempfile;
extern crate fs_extra;
use std::env;
@ -8,7 +8,7 @@ use std::fs::{remove_dir_all, File};
use std::io::prelude::*;
use fs_extra::dir;
use tempdir::TempDir;
use tempfile::tempdir;
use site::Site;
use rebuild::after_content_change;
@ -74,12 +74,12 @@ macro_rules! file_contains {
#[test]
fn can_rebuild_after_simple_change_to_page_content() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let (site_path, mut site) = load_and_build_site!(tmp_dir);
let file_path = edit_file!(site_path, "content/rebuild/first.md", br#"
+++
title = "first"
order = 1
weight = 1
date = 2017-01-01
+++
@ -92,12 +92,12 @@ Some content"#);
#[test]
fn can_rebuild_after_title_change_page_global_func_usage() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let (site_path, mut site) = load_and_build_site!(tmp_dir);
let file_path = edit_file!(site_path, "content/rebuild/first.md", br#"
+++
title = "Premier"
order = 10
weight = 10
date = 2017-01-01
+++
@ -110,17 +110,17 @@ date = 2017-01-01
#[test]
fn can_rebuild_after_sort_change_in_section() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let (site_path, mut site) = load_and_build_site!(tmp_dir);
let file_path = edit_file!(site_path, "content/rebuild/_index.md", br#"
+++
paginate_by = 1
sort_by = "order"
sort_by = "weight"
template = "rebuild.html"
+++
"#);
let res = after_content_change(&mut site, &file_path);
assert!(res.is_ok());
assert!(file_contains!(site_path, "public/rebuild/index.html", "<h1>second</h1><h1>first</h1>"));
assert!(file_contains!(site_path, "public/rebuild/index.html", "<h1>first</h1><h1>second</h1>"));
}

View file

@ -4,19 +4,22 @@ version = "0.1.0"
authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
[dependencies]
tera = "0.11"
regex = "1"
lazy_static = "1"
tera = { version = "0.11", features = ["preserve_order"] }
syntect = "2"
pulldown-cmark = "0"
slug = "0.1"
serde = "1"
serde_derive = "1"
pest = "1"
pest_derive = "1"
errors = { path = "../errors" }
front_matter = { path = "../front_matter" }
highlighting = { path = "../highlighting"}
utils = { path = "../utils" }
config = { path = "../config" }
link_checker = { path = "../link_checker" }
[dev-dependencies]
templates = { path = "../templates" }

View file

@ -3,13 +3,15 @@ extern crate test;
extern crate tera;
extern crate rendering;
extern crate config;
extern crate front_matter;
use std::collections::HashMap;
use tera::Tera;
use rendering::{Context, markdown_to_html};
use rendering::{RenderContext, render_content, render_shortcodes};
use front_matter::InsertAnchor;
use config::Config;
static CONTENT: &'static str = r#"
# Modus cognitius profanam ne duae virtutis mundi
@ -84,17 +86,46 @@ if __name__ == "__main__":
"#;
#[bench]
fn bench_markdown_to_html_with_highlighting(b: &mut test::Bencher) {
let tera_ctx = Tera::default();
fn bench_render_content_with_highlighting(b: &mut test::Bencher) {
let mut tera = Tera::default();
tera.add_raw_template("shortcodes/youtube.html", "{{id}}").unwrap();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| markdown_to_html(CONTENT, &context));
let config = Config::default();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| render_content(CONTENT, &context).unwrap());
}
#[bench]
fn bench_markdown_to_html_without_highlighting(b: &mut test::Bencher) {
let tera_ctx = Tera::default();
fn bench_render_content_without_highlighting(b: &mut test::Bencher) {
let mut tera = Tera::default();
tera.add_raw_template("shortcodes/youtube.html", "{{id}}").unwrap();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, false, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| markdown_to_html(CONTENT, &context));
let mut config = Config::default();
config.highlight_code = false;
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| render_content(CONTENT, &context).unwrap());
}
#[bench]
fn bench_render_content_no_shortcode(b: &mut test::Bencher) {
let tera = Tera::default();
let content2 = CONTENT.replace(r#"{{ youtube(id="my_youtube_id") }}"#, "");
let mut config = Config::default();
config.highlight_code = false;
let permalinks_ctx = HashMap::new();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| render_content(&content2, &context).unwrap());
}
#[bench]
fn bench_render_shortcodes_one_present(b: &mut test::Bencher) {
let mut tera = Tera::default();
tera.add_raw_template("shortcodes/youtube.html", "{{id}}").unwrap();
let config = Config::default();
let permalinks_ctx = HashMap::new();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
b.iter(|| render_shortcodes(CONTENT, &context));
}

View file

@ -0,0 +1,72 @@
// Partly taken from Tera
whitespace = _{ " " | "\t" | "\r" | "\n" }
/// LITERALS
int = @{ "-" ? ~ ("0" | '1'..'9' ~ '0'..'9' * ) }
float = @{
"-" ? ~
(
"0" ~ "." ~ '0'..'9' + |
'1'..'9' ~ '0'..'9' * ~ "." ~ '0'..'9' +
)
}
// matches anything between 2 double quotes
double_quoted_string = @{ "\"" ~ (!("\"") ~ any)* ~ "\""}
// matches anything between 2 single quotes
single_quoted_string = @{ "\'" ~ (!("\'") ~ any)* ~ "\'"}
// matches anything between 2 backquotes/backticks
backquoted_quoted_string = @{ "`" ~ (!("`") ~ any)* ~ "`"}
string = @{
double_quoted_string |
single_quoted_string |
backquoted_quoted_string
}
boolean = { "true" | "false" }
literal = { boolean | string | float | int }
array = { "[" ~ (literal ~ ",")* ~ literal? ~ "]"}
/// Idents
all_chars = _{'a'..'z' | 'A'..'Z' | "_" | '0'..'9'}
ident = @{
('a'..'z' | 'A'..'Z' | "_") ~
all_chars*
}
/// Now specific to Gutenberg
// shortcode is abbreviated to sc to keep things short
kwarg = { ident ~ "=" ~ (literal | array) }
kwargs = _{ kwarg ~ ("," ~ kwarg )* }
sc_def = _{ ident ~ "(" ~ kwargs* ~ ")" }
inline_shortcode = !{ "{{" ~ sc_def ~ "}}" }
ignored_inline_shortcode = !{ "{{/*" ~ sc_def ~ "*/}}" }
sc_body_start = !{ "{%" ~ sc_def ~ "%}" }
sc_body_end = !{ "{%" ~ "end" ~ "%}" }
ignored_sc_body_start = !{ "{%/*" ~ sc_def ~ "*/%}" }
ignored_sc_body_end = !{ "{%/*" ~ "end" ~ "*/%}" }
shortcode_with_body = !{ sc_body_start ~ text_in_body_sc ~ sc_body_end }
ignored_shortcode_with_body = !{ ignored_sc_body_start ~ text_in_ignored_body_sc ~ ignored_sc_body_end }
text_in_body_sc = ${ (!(sc_body_end) ~ any)+ }
text_in_ignored_body_sc = ${ (!(ignored_sc_body_end) ~ any)+ }
text = ${ (!(inline_shortcode | ignored_inline_shortcode | sc_body_start | ignored_sc_body_start) ~ any)+ }
content = _{
ignored_inline_shortcode |
inline_shortcode |
ignored_shortcode_with_body |
shortcode_with_body |
text
}
page = ${ soi ~ content* ~ eoi }
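For reference, a minimal sketch of how this grammar gets exercised, using the `ContentParser`/`Rule` types derived from it in the shortcode module further down in this diff; the inputs mirror the forms covered by the lexer tests and are illustrative only:
// Each of these should lex as a complete `page` under the grammar above.
let inputs = vec![
    r#"{{ youtube(id="dQw4w9WgXcQ", autoplay=true) }}"#,
    "{{/* youtube() */}}",
    "{% quote(author='Keats') %}A body{% end %}",
    "Plain text with no shortcode at all",
];
for i in inputs {
    assert!(ContentParser::parse(Rule::page, i).is_ok());
}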

View file

@ -1,41 +1,38 @@
use std::collections::HashMap;
use tera::Tera;
use tera::{Tera, Context};
use front_matter::InsertAnchor;
use config::Config;
/// All the information from the gutenberg site that is needed to render HTML from markdown
#[derive(Debug)]
pub struct Context<'a> {
pub struct RenderContext<'a> {
pub tera: &'a Tera,
pub highlight_code: bool,
pub highlight_theme: String,
pub current_page_permalink: String,
pub config: &'a Config,
pub tera_context: Context,
pub current_page_permalink: &'a str,
pub permalinks: &'a HashMap<String, String>,
pub insert_anchor: InsertAnchor,
}
impl<'a> Context<'a> {
impl<'a> RenderContext<'a> {
pub fn new(
tera: &'a Tera,
highlight_code: bool,
highlight_theme: String,
current_page_permalink: &str,
config: &'a Config,
current_page_permalink: &'a str,
permalinks: &'a HashMap<String, String>,
insert_anchor: InsertAnchor,
) -> Context<'a> {
Context {
) -> RenderContext<'a> {
let mut tera_context = Context::new();
tera_context.insert("config", config);
RenderContext {
tera,
current_page_permalink: current_page_permalink.to_string(),
tera_context,
current_page_permalink,
permalinks,
insert_anchor,
highlight_code,
highlight_theme,
config,
}
}
pub fn should_insert_anchor(&self) -> bool {
self.insert_anchor != InsertAnchor::None
}
}
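For reference, a minimal sketch of how the new `RenderContext` is built; the empty permalinks map and empty permalink string are placeholders, mirroring the test setups later in this diff:
let tera = Tera::default();
let config = Config::default();
let permalinks: HashMap<String, String> = HashMap::new();
let context = RenderContext::new(&tera, &config, "", &permalinks, InsertAnchor::None);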

View file

@ -1,6 +1,3 @@
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate tera;
extern crate syntect;
extern crate pulldown_cmark;
@ -8,20 +5,39 @@ extern crate slug;
#[macro_use]
extern crate serde_derive;
extern crate serde;
extern crate pest;
#[macro_use]
extern crate pest_derive;
#[macro_use]
extern crate errors;
extern crate front_matter;
extern crate highlighting;
extern crate utils;
extern crate config;
extern crate link_checker;
#[cfg(test)]
extern crate templates;
mod context;
mod markdown;
mod short_code;
mod table_of_contents;
mod shortcode;
pub use context::Context;
pub use markdown::markdown_to_html;
use errors::Result;
use markdown::markdown_to_html;
pub use table_of_contents::Header;
pub use shortcode::render_shortcodes;
pub use context::RenderContext;
pub fn render_content(content: &str, context: &RenderContext) -> Result<(String, Vec<Header>)> {
// Don't do anything if there is nothing like a shortcode in the content
if content.contains("{{") || content.contains("{%") {
let rendered = render_shortcodes(content, context)?;
return markdown_to_html(&rendered, context);
}
markdown_to_html(&content, context)
}
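A short usage sketch for the new `render_content` entry point, assuming a `RenderContext` built as in the sketch above; the expected output matches the `can_add_id_to_headers` test further down:
let (html, _toc) = render_content("# Hello", &context).unwrap();
assert_eq!(html, "<h1 id=\"hello\">Hello</h1>\n");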

View file

@ -8,46 +8,11 @@ use syntect::html::{start_coloured_html_snippet, styles_to_coloured_html, Includ
use errors::Result;
use utils::site::resolve_internal_link;
use context::Context;
use highlighting::{SYNTAX_SET, THEME_SET};
use short_code::{SHORTCODE_RE, ShortCode, parse_shortcode, render_simple_shortcode};
use highlighting::{get_highlighter, THEME_SET};
use link_checker::check_url;
use table_of_contents::{TempHeader, Header, make_table_of_contents};
pub fn markdown_to_html(content: &str, context: &Context) -> Result<(String, Vec<Header>)> {
// We try to be smart about highlighting code as it can be time-consuming
// If the global config disables it, then we do nothing. However,
// if we see a code block in the content, we assume that this page needs
// to be highlighted. It could potentially have false positives if the content
// has ``` in it, but that seems fairly unlikely
let should_highlight = if context.highlight_code {
content.contains("```")
} else {
false
};
// Set while parsing
let mut error = None;
let mut highlighter: Option<HighlightLines> = None;
// the markdown parser will send several Text events if a markdown character
// is present in it, for example `hello_test` will be split in 2: hello and _test.
// Since we can use those chars in shortcode arguments, we need to collect
// the full shortcode somehow first
let mut current_shortcode = String::new();
let mut shortcode_block = None;
// shortcodes live outside of paragraph so we need to ensure we don't close
// a paragraph that has already been closed
let mut added_shortcode = false;
// Don't transform things that look like shortcodes in code blocks
let mut in_code_block = false;
// If we get text in a header, we need to insert the id and an anchor
let mut in_header = false;
// pulldown_cmark can send several text events for a title if there are markdown
// specific characters like `!` in them. We only want to insert the anchor the first time
let mut header_created = false;
let mut anchors: Vec<String> = vec![];
// the rendered html
let mut html = String::new();
use context::RenderContext;
// We might have cases where the slug is already present in our list of anchor
// for example an article could have several titles named Example
@ -66,26 +31,39 @@ pub fn markdown_to_html(content: &str, context: &Context) -> Result<(String, Vec
find_anchor(anchors, name, level + 1)
}
fn is_colocated_asset_link(link: &str) -> bool {
!link.contains("/") // http://, ftp://, ../ etc
&& !link.starts_with("mailto:")
}
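Illustrative examples (not part of the diff) of what this helper treats as a co-located asset link:
assert!(is_colocated_asset_link("image.jpg"));               // no slash and not mailto: -> co-located
assert!(!is_colocated_asset_link("../image.jpg"));           // contains a slash
assert!(!is_colocated_asset_link("https://example.com/a.jpg"));
assert!(!is_colocated_asset_link("mailto:me@example.com"));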
pub fn markdown_to_html(content: &str, context: &RenderContext) -> Result<(String, Vec<Header>)> {
// the rendered html
let mut html = String::with_capacity(content.len());
// Set while parsing
let mut error = None;
let mut highlighter: Option<HighlightLines> = None;
// If we get text in a header, we need to insert the id and an anchor
let mut in_header = false;
// pulldown_cmark can send several text events for a title if there are markdown
// specific characters like `!` in them. We only want to insert the anchor the first time
let mut header_created = false;
let mut anchors: Vec<String> = vec![];
let mut headers = vec![];
// Defaults to a 0 level so not a real header
// It should be an Option ideally but not worth the hassle to update
let mut temp_header = TempHeader::default();
let mut clear_shortcode_block = false;
let mut opts = Options::empty();
opts.insert(OPTION_ENABLE_TABLES);
opts.insert(OPTION_ENABLE_FOOTNOTES);
{
let parser = Parser::new_ext(content, opts).map(|event| {
if clear_shortcode_block {
clear_shortcode_block = false;
shortcode_block = None;
}
match event {
Event::Text(mut text) => {
Event::Text(text) => {
// Header first
if in_header {
if header_created {
@ -95,10 +73,10 @@ pub fn markdown_to_html(content: &str, context: &Context) -> Result<(String, Vec
let id = find_anchor(&anchors, slugify(&text), 0);
anchors.push(id.clone());
// update the header and add it to the list
temp_header.id = id.clone();
temp_header.permalink = format!("{}#{}", context.current_page_permalink, id);
temp_header.id = id;
// += as we might have some <code> or other things already there
temp_header.title += &text;
temp_header.permalink = format!("{}#{}", context.current_page_permalink, id);
header_created = true;
return Event::Html(Owned(String::new()));
}
@ -110,190 +88,132 @@ pub fn markdown_to_html(content: &str, context: &Context) -> Result<(String, Vec
return Event::Html(Owned(html));
}
if in_code_block {
return Event::Text(text);
}
// Are we in the middle of a shortcode that somehow got cut off
// by the markdown parser?
if current_shortcode.is_empty() {
if text.starts_with("{{") && !text.ends_with("}}") {
current_shortcode += &text;
} else if text.starts_with("{%") && !text.ends_with("%}") {
current_shortcode += &text;
}
} else {
current_shortcode += &text;
}
if current_shortcode.ends_with("}}") || current_shortcode.ends_with("%}") {
text = Owned(current_shortcode.clone());
current_shortcode = String::new();
}
// Shortcode without body
if shortcode_block.is_none() && text.starts_with("{{") && text.ends_with("}}") && SHORTCODE_RE.is_match(&text) {
let (name, args) = parse_shortcode(&text);
added_shortcode = true;
match render_simple_shortcode(context.tera, &name, &args) {
// Make before and after cleaning up of extra <p> / </p> tags more parallel.
// Or, in other words:
// TERRIBLE HORRIBLE NO GOOD VERY BAD HACK
Ok(s) => return Event::Html(Owned(format!("</p>{}<p>", s))),
Err(e) => {
error = Some(e);
return Event::Html(Owned(String::new()));
}
}
}
// Shortcode with a body
if shortcode_block.is_none() && text.starts_with("{%") && text.ends_with("%}") {
if SHORTCODE_RE.is_match(&text) {
let (name, args) = parse_shortcode(&text);
shortcode_block = Some(ShortCode::new(&name, args));
}
// Don't return anything
return Event::Text(Owned(String::new()));
}
// If we have some text while in a shortcode, it's either the body
// or the end tag
if shortcode_block.is_some() {
if let Some(ref mut shortcode) = shortcode_block {
if text.trim() == "{% end %}" {
added_shortcode = true;
clear_shortcode_block = true;
match shortcode.render(context.tera) {
Ok(s) => return Event::Html(Owned(format!("</p>{}", s))),
Err(e) => {
error = Some(e);
return Event::Html(Owned(String::new()));
}
}
} else {
shortcode.append(&text);
return Event::Html(Owned(String::new()));
}
}
}
// Business as usual
Event::Text(text)
},
Event::Start(Tag::CodeBlock(ref info)) => {
in_code_block = true;
if !should_highlight {
return Event::Html(Owned("<pre><code>".to_owned()));
}
let theme = &THEME_SET.themes[&context.highlight_theme];
highlighter = SYNTAX_SET.with(|ss| {
let syntax = info
.split(' ')
.next()
.and_then(|lang| ss.find_syntax_by_token(lang))
.unwrap_or_else(|| ss.find_syntax_plain_text());
Some(HighlightLines::new(syntax, theme))
});
Event::Start(Tag::CodeBlock(ref info)) => {
if !context.config.highlight_code {
return Event::Html(Owned("<pre><code>".to_string()));
}
let theme = &THEME_SET.themes[&context.config.highlight_theme];
highlighter = Some(get_highlighter(&theme, info));
let snippet = start_coloured_html_snippet(theme);
Event::Html(Owned(snippet))
},
}
Event::End(Tag::CodeBlock(_)) => {
in_code_block = false;
if !should_highlight{
return Event::Html(Owned("</code></pre>\n".to_owned()))
if !context.config.highlight_code {
return Event::Html(Owned("</code></pre>\n".to_string()));
}
// reset highlight and close the code block
highlighter = None;
Event::Html(Owned("</pre>".to_owned()))
},
// Need to handle relative links
Event::Start(Tag::Link(ref link, ref title)) => {
if in_header {
return Event::Html(Owned("".to_owned()));
Event::Html(Owned("</pre>".to_string()))
}
if link.starts_with("./") {
match resolve_internal_link(link, context.permalinks) {
Ok(url) => {
return Event::Start(Tag::Link(Owned(url), title.clone()));
},
Err(_) => {
error = Some(format!("Relative link {} not found.", link).into());
return Event::Html(Owned("".to_string()));
}
};
Event::Start(Tag::Image(src, title)) => {
if is_colocated_asset_link(&src) {
return Event::Start(
Tag::Image(
Owned(format!("{}{}", context.current_page_permalink, src)),
title,
)
);
}
Event::Start(Tag::Link(link.clone(), title.clone()))
},
Event::Start(Tag::Image(src, title))
}
Event::Start(Tag::Link(link, title)) => {
// A few situations here:
// - it could be a relative link (starting with `./`)
// - it could be a link to a co-located asset
// - it could be a normal link
// - any of those can be in a header or not: if it's in a header
// we need to append the rendered HTML to the header string instead of emitting an event
let fixed_link = if link.starts_with("./") {
match resolve_internal_link(&link, context.permalinks) {
Ok(url) => url,
Err(_) => {
error = Some(format!("Relative link {} not found.", link).into());
return Event::Html(Owned(String::new()));
}
}
} else if is_colocated_asset_link(&link) {
format!("{}{}", context.current_page_permalink, link)
} else {
if context.config.check_external_links && !link.starts_with('#') {
let res = check_url(&link);
if res.is_valid() {
link.to_string()
} else {
error = Some(
format!("Link {} is not valid: {}", link, res.message()).into()
);
String::new()
}
} else {
link.to_string()
}
};
if in_header {
let html = if title.is_empty() {
format!("<a href=\"{}\">", fixed_link)
} else {
format!("<a href=\"{}\" title=\"{}\">", fixed_link, title)
};
temp_header.push(&html);
return Event::Html(Owned(String::new()));
}
Event::Start(Tag::Link(Owned(fixed_link), title))
}
Event::End(Tag::Link(_, _)) => {
if in_header {
return Event::Html(Owned("".to_owned()));
temp_header.push("</a>");
return Event::Html(Owned(String::new()));
}
event
}
// need to know when we are in a code block to disable shortcodes in them
Event::Start(Tag::Code) => {
in_code_block = true;
if in_header {
temp_header.push("<code>");
return Event::Html(Owned(String::new()));
}
event
},
}
Event::End(Tag::Code) => {
in_code_block = false;
if in_header {
temp_header.push("</code>");
return Event::Html(Owned(String::new()));
}
event
},
}
Event::Start(Tag::Header(num)) => {
in_header = true;
temp_header = TempHeader::new(num);
Event::Html(Owned(String::new()))
},
}
Event::End(Tag::Header(_)) => {
// End of a header, reset all the things and return the stringified version of the header
// End of a header, reset all the things and return the stringified
// version of the header
in_header = false;
header_created = false;
let val = temp_header.to_string(context);
let val = temp_header.to_string(context.tera, context.insert_anchor);
headers.push(temp_header.clone());
temp_header = TempHeader::default();
Event::Html(Owned(val))
},
// If we added shortcodes, don't close a paragraph since there's none
Event::End(Tag::Paragraph) => {
if added_shortcode {
added_shortcode = false;
return Event::Html(Owned("".to_owned()));
}
event
},
// Ignore softbreaks inside shortcodes
Event::SoftBreak => {
if shortcode_block.is_some() {
return Event::Html(Owned("".to_owned()));
_ => event,
}
event
},
_ => {
// println!("event = {:?}", event);
event
},
}});
});
cmark::html::push_html(&mut html, parser);
}
if !current_shortcode.is_empty() {
return Err(format!("A shortcode was not closed properly:\n{:?}", current_shortcode).into());
}
match error {
Some(e) => Err(e),
None => Ok((html.replace("<p></p>", "").replace("</p></p>", "</p>"), make_table_of_contents(&headers))),
None => Ok((
html.replace("<p></p>", "").replace("</p></p>", "</p>"),
make_table_of_contents(&headers)
)),
}
}

View file

@ -1,190 +0,0 @@
use std::collections::HashMap;
use regex::Regex;
use tera::{Tera, Context, Value, to_value};
use errors::{Result, ResultExt};
lazy_static!{
// Does this look like a shortcode?
pub static ref SHORTCODE_RE: Regex = Regex::new(
r#"\{(?:%|\{)\s+(\w+?)\((\w+?="?(?:.|\n)+?"?)?\)\s+(?:%|\})\}"#
).unwrap();
// Parse the shortcode args with capture groups named after their type
pub static ref SHORTCODE_ARGS_RE: Regex = Regex::new(
r#"(?P<name>\w+)=\s*((?P<str>".*?")|(?P<float>[-+]?[0-9]+\.[0-9]+)|(?P<int>[-+]?[0-9]+)|(?P<bool>true|false))"#
).unwrap();
}
/// A shortcode that has a body
/// Called by having some content like {% ... %} body {% end %}
/// We need the struct to hold the data while we're processing the markdown
#[derive(Debug)]
pub struct ShortCode {
name: String,
args: HashMap<String, Value>,
body: String,
}
impl ShortCode {
pub fn new(name: &str, args: HashMap<String, Value>) -> ShortCode {
ShortCode {
name: name.to_string(),
args,
body: String::new(),
}
}
pub fn append(&mut self, text: &str) {
self.body.push_str(text)
}
pub fn render(&self, tera: &Tera) -> Result<String> {
let mut context = Context::new();
for (key, value) in &self.args {
context.add(key, value);
}
context.add("body", &self.body);
let tpl_name = format!("shortcodes/{}.html", self.name);
tera.render(&tpl_name, &context)
.chain_err(|| format!("Failed to render {} shortcode", self.name))
}
}
/// Parse a shortcode without a body
pub fn parse_shortcode(input: &str) -> (String, HashMap<String, Value>) {
let mut args = HashMap::new();
let caps = SHORTCODE_RE.captures(input).unwrap();
// caps[0] is the full match
let name = &caps[1];
if let Some(arg_list) = caps.get(2) {
for arg_cap in SHORTCODE_ARGS_RE.captures_iter(arg_list.as_str()) {
let arg_name = arg_cap["name"].trim().to_string();
if let Some(arg_val) = arg_cap.name("str") {
args.insert(arg_name, to_value(arg_val.as_str().replace("\"", "")).unwrap());
continue;
}
if let Some(arg_val) = arg_cap.name("int") {
args.insert(arg_name, to_value(arg_val.as_str().parse::<i64>().unwrap()).unwrap());
continue;
}
if let Some(arg_val) = arg_cap.name("float") {
args.insert(arg_name, to_value(arg_val.as_str().parse::<f64>().unwrap()).unwrap());
continue;
}
if let Some(arg_val) = arg_cap.name("bool") {
args.insert(arg_name, to_value(arg_val.as_str() == "true").unwrap());
continue;
}
}
}
(name.to_string(), args)
}
/// Renders a shortcode or return an error
pub fn render_simple_shortcode(tera: &Tera, name: &str, args: &HashMap<String, Value>) -> Result<String> {
let mut context = Context::new();
for (key, value) in args.iter() {
context.add(key, value);
}
let tpl_name = format!("shortcodes/{}.html", name);
tera.render(&tpl_name, &context).chain_err(|| format!("Failed to render {} shortcode", name))
}
#[cfg(test)]
mod tests {
use super::{parse_shortcode, SHORTCODE_RE};
#[test]
fn can_match_all_kinds_of_shortcode() {
let inputs = vec![
"{{ basic() }}",
"{{ basic(ho=1) }}",
"{{ basic(ho=\"hey\") }}",
"{{ basic(ho=\"hey_underscore\") }}",
"{{ basic(ho=\"hey-dash\") }}",
"{% basic(ho=\"hey-dash\") %}",
"{% basic(ho=\"hey_underscore\") %}",
"{% basic() %}",
"{% quo_te(author=\"Bob\") %}",
"{{ quo_te(author=\"Bob\") }}",
// https://github.com/Keats/gutenberg/issues/229
r#"{{ youtube(id="dQw4w9WgXcQ",
autoplay=true) }}"#,
];
for i in inputs {
println!("{}", i);
assert!(SHORTCODE_RE.is_match(i));
}
}
// https://github.com/Keats/gutenberg/issues/228
#[test]
fn doesnt_panic_on_invalid_shortcode() {
let (name, args) = parse_shortcode(r#"{{ youtube(id="dQw4w9WgXcQ", autoplay) }}"#);
assert_eq!(name, "youtube");
assert_eq!(args["id"], "dQw4w9WgXcQ");
assert!(args.get("autoplay").is_none());
}
#[test]
fn can_parse_simple_shortcode_no_arg() {
let (name, args) = parse_shortcode(r#"{{ basic() }}"#);
assert_eq!(name, "basic");
assert!(args.is_empty());
}
#[test]
fn can_parse_simple_shortcode_one_arg() {
let (name, args) = parse_shortcode(r#"{{ youtube(id="w7Ft2ymGmfc") }}"#);
assert_eq!(name, "youtube");
assert_eq!(args["id"], "w7Ft2ymGmfc");
}
#[test]
fn can_parse_simple_shortcode_several_arg() {
let (name, args) = parse_shortcode(r#"{{ youtube(id="w7Ft2ymGmfc", autoplay=true) }}"#);
assert_eq!(name, "youtube");
assert_eq!(args["id"], "w7Ft2ymGmfc");
assert_eq!(args["autoplay"], true);
}
#[test]
fn can_parse_block_shortcode_several_arg() {
let (name, args) = parse_shortcode(r#"{% youtube(id="w7Ft2ymGmfc", autoplay=true) %}"#);
assert_eq!(name, "youtube");
assert_eq!(args["id"], "w7Ft2ymGmfc");
assert_eq!(args["autoplay"], true);
}
#[test]
fn can_parse_shortcode_number() {
let (name, args) = parse_shortcode(r#"{% test(int=42, float=42.0, autoplay=false) %}"#);
assert_eq!(name, "test");
assert_eq!(args["int"], 42);
assert_eq!(args["float"], 42.0);
assert_eq!(args["autoplay"], false);
}
// https://github.com/Keats/gutenberg/issues/249
#[test]
fn can_parse_shortcode_with_comma_in_it() {
let (name, args) = parse_shortcode(
r#"{% quote(author="C++ Standard Core Language Defect Reports and Accepted Issues, Revision 82, delete and user-written deallocation function", href="http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#348") %}"#
);
assert_eq!(name, "quote");
assert_eq!(args["author"], "C++ Standard Core Language Defect Reports and Accepted Issues, Revision 82, delete and user-written deallocation function");
assert_eq!(args["href"], "http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#348");
}
}

View file

@ -0,0 +1,362 @@
use pest::Parser;
use pest::iterators::Pair;
use tera::{Map, Context, Value, to_value};
use errors::{Result, ResultExt};
use ::context::RenderContext;
// This include forces recompiling this source file if the grammar file changes.
// Uncomment it when doing changes to the .pest file
const _GRAMMAR: &str = include_str!("content.pest");
#[derive(Parser)]
#[grammar = "content.pest"]
pub struct ContentParser;
fn replace_string_markers(input: &str) -> String {
match input.chars().next().unwrap() {
'"' => input.replace('"', "").to_string(),
'\'' => input.replace('\'', "").to_string(),
'`' => input.replace('`', "").to_string(),
_ => unreachable!("How did you even get there"),
}
}
fn parse_literal(pair: Pair<Rule>) -> Value {
let mut val = None;
for p in pair.into_inner() {
match p.as_rule() {
Rule::boolean => match p.as_str() {
"true" => val = Some(Value::Bool(true)),
"false" => val = Some(Value::Bool(false)),
_ => unreachable!(),
},
Rule::string => val = Some(Value::String(replace_string_markers(p.as_str()))),
Rule::float => {
val = Some(to_value(p.as_str().parse::<f64>().unwrap()).unwrap());
}
Rule::int => {
val = Some(to_value(p.as_str().parse::<i64>().unwrap()).unwrap());
}
_ => unreachable!("Unknown literal: {:?}", p)
};
}
val.unwrap()
}
/// Returns (shortcode_name, kwargs)
fn parse_shortcode_call(pair: Pair<Rule>) -> (String, Map<String, Value>) {
let mut name = None;
let mut args = Map::new();
for p in pair.into_inner() {
match p.as_rule() {
Rule::ident => { name = Some(p.into_span().as_str().to_string()); }
Rule::kwarg => {
let mut arg_name = None;
let mut arg_val = None;
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::ident => { arg_name = Some(p2.into_span().as_str().to_string()); }
Rule::literal => { arg_val = Some(parse_literal(p2)); }
Rule::array => {
let mut vals = vec![];
for p3 in p2.into_inner() {
match p3.as_rule() {
Rule::literal => vals.push(parse_literal(p3)),
_ => unreachable!("Got something other than literal in an array: {:?}", p3),
}
}
arg_val = Some(Value::Array(vals));
}
_ => unreachable!("Got something unexpected in a kwarg: {:?}", p2),
}
}
args.insert(arg_name.unwrap(), arg_val.unwrap());
}
_ => unreachable!("Got something unexpected in a shortcode: {:?}", p)
}
}
(name.unwrap(), args)
}
fn render_shortcode(name: String, args: Map<String, Value>, context: &RenderContext, body: Option<&str>) -> Result<String> {
let mut tera_context = Context::new();
for (key, value) in args.iter() {
tera_context.insert(key, value);
}
if let Some(ref b) = body {
// Trimming right to avoid most shortcodes with bodies ending up with an HTML newline
tera_context.insert("body", b.trim_right());
}
tera_context.extend(context.tera_context.clone());
let tpl_name = format!("shortcodes/{}.html", name);
let res = context.tera
.render(&tpl_name, &tera_context)
.chain_err(|| format!("Failed to render {} shortcode", name))?;
// We trim the left of every line of the rendered shortcode so that an indented
// shortcode isn't accidentally treated as a code block because of 4 spaces of left padding
Ok(res.lines().map(|s| s.trim_left()).collect())
}
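A rough sketch of what `render_shortcode` produces for a body shortcode, assuming a `RenderContext` named `context` whose Tera instance has a hypothetical `shortcodes/quote.html` template containing just `<blockquote>{{ body }}</blockquote>`:
let html = render_shortcode("quote".to_string(), Map::new(), &context, Some("Hey!")).unwrap();
assert_eq!(html, "<blockquote>Hey!</blockquote>");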
pub fn render_shortcodes(content: &str, context: &RenderContext) -> Result<String> {
let mut res = String::with_capacity(content.len());
let mut pairs = match ContentParser::parse(Rule::page, content) {
Ok(p) => p,
Err(e) => {
let fancy_e = e.renamed_rules(|rule| {
match *rule {
Rule::int => "an integer".to_string(),
Rule::float => "a float".to_string(),
Rule::string => "a string".to_string(),
Rule::literal => "a literal (int, float, string, bool)".to_string(),
Rule::array => "an array".to_string(),
Rule::kwarg => "a keyword argument".to_string(),
Rule::ident => "an identifier".to_string(),
Rule::inline_shortcode => "an inline shortcode".to_string(),
Rule::ignored_inline_shortcode => "an ignored inline shortcode".to_string(),
Rule::sc_body_start => "the start of a shortcode".to_string(),
Rule::ignored_sc_body_start => "the start of an ignored shortcode".to_string(),
Rule::text => "some text".to_string(),
_ => format!("TODO error: {:?}", rule).to_string(),
}
});
bail!("{}", fancy_e);
}
};
// We have at least a `page` pair
for p in pairs.next().unwrap().into_inner() {
match p.as_rule() {
Rule::text | Rule::text_in_ignored_body_sc | Rule::text_in_body_sc => res.push_str(p.into_span().as_str()),
Rule::inline_shortcode => {
let (name, args) = parse_shortcode_call(p);
res.push_str(&render_shortcode(name, args, context, None)?);
}
Rule::shortcode_with_body => {
let mut inner = p.into_inner();
// 3 items in inner: call, body, end
// we don't care about the closing tag
let (name, args) = parse_shortcode_call(inner.next().unwrap());
let body = inner.next().unwrap().into_span().as_str();
res.push_str(&render_shortcode(name, args, context, Some(body))?);
}
Rule::ignored_inline_shortcode => {
res.push_str(
&p.into_span().as_str()
.replacen("{{/*", "{{", 1)
.replacen("*/}}", "}}", 1)
);
}
Rule::ignored_shortcode_with_body => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::ignored_sc_body_start | Rule::ignored_sc_body_end => {
res.push_str(
&p2.into_span().as_str()
.replacen("{%/*", "{%", 1)
.replacen("*/%}", "%}", 1)
);
}
Rule::text_in_ignored_body_sc => res.push_str(p2.into_span().as_str()),
_ => unreachable!("Got something weird in an ignored shortcode: {:?}", p2),
}
}
}
_ => unreachable!("unexpected page rule: {:?}", p.as_rule()),
}
}
Ok(res)
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use tera::Tera;
use config::Config;
use front_matter::InsertAnchor;
use super::*;
macro_rules! assert_lex_rule {
($rule: expr, $input: expr) => {
let res = ContentParser::parse($rule, $input);
println!("{:?}", $input);
println!("{:#?}", res);
if res.is_err() {
println!("{}", res.unwrap_err());
panic!();
}
assert!(res.is_ok());
assert_eq!(res.unwrap().last().unwrap().into_span().end(), $input.len());
};
}
fn render_shortcodes(code: &str, tera: &Tera) -> String {
let config = Config::default();
let permalinks = HashMap::new();
let context = RenderContext::new(&tera, &config, "", &permalinks, InsertAnchor::None);
super::render_shortcodes(code, &context).unwrap()
}
#[test]
fn lex_text() {
let inputs = vec!["Hello world", "HEllo \n world", "Hello 1 2 true false 'hey'"];
for i in inputs {
assert_lex_rule!(Rule::text, i);
}
}
#[test]
fn lex_inline_shortcode() {
let inputs = vec![
"{{ youtube() }}",
"{{ youtube(id=1, autoplay=true, url='hey') }}",
"{{ youtube(id=1, \nautoplay=true, url='hey') }}",
];
for i in inputs {
assert_lex_rule!(Rule::inline_shortcode, i);
}
}
#[test]
fn lex_inline_ignored_shortcode() {
let inputs = vec![
"{{/* youtube() */}}",
"{{/* youtube(id=1, autoplay=true, url='hey') */}}",
"{{/* youtube(id=1, \nautoplay=true, \nurl='hey') */}}",
];
for i in inputs {
assert_lex_rule!(Rule::ignored_inline_shortcode, i);
}
}
#[test]
fn lex_shortcode_with_body() {
let inputs = vec![
r#"{% youtube() %}
Some text
{% end %}"#,
r#"{% youtube(id=1,
autoplay=true, url='hey') %}
Some text
{% end %}"#,
];
for i in inputs {
assert_lex_rule!(Rule::shortcode_with_body, i);
}
}
#[test]
fn lex_ignored_shortcode_with_body() {
let inputs = vec![
r#"{%/* youtube() */%}
Some text
{%/* end */%}"#,
r#"{%/* youtube(id=1,
autoplay=true, url='hey') */%}
Some text
{%/* end */%}"#,
];
for i in inputs {
assert_lex_rule!(Rule::ignored_shortcode_with_body, i);
}
}
#[test]
fn lex_page() {
let inputs = vec![
"Some text and a shortcode `{{/* youtube() */}}`",
"{{ youtube(id=1, autoplay=true, url='hey') }}",
"{{ youtube(id=1, \nautoplay=true, url='hey') }} that's it",
r#"
This is a test
{% hello() %}
Body {{ var }}
{% end %}
"#
];
for i in inputs {
assert_lex_rule!(Rule::page, i);
}
}
#[test]
fn does_nothing_with_no_shortcodes() {
let res = render_shortcodes("Hello World", &Tera::default());
assert_eq!(res, "Hello World");
}
#[test]
fn can_unignore_inline_shortcode() {
let res = render_shortcodes("Hello World {{/* youtube() */}}", &Tera::default());
assert_eq!(res, "Hello World {{ youtube() }}");
}
#[test]
fn can_unignore_shortcode_with_body() {
let res = render_shortcodes(r#"
Hello World
{%/* youtube() */%}Some body {{ hello() }}{%/* end */%}"#, &Tera::default());
assert_eq!(res, "\nHello World\n{% youtube() %}Some body {{ hello() }}{% end %}");
}
#[test]
fn can_parse_shortcode_arguments() {
let inputs = vec![
("{{ youtube() }}", "youtube", Map::new()),
(
"{{ youtube(id=1, autoplay=true, hello='salut', float=1.2) }}",
"youtube",
{
let mut m = Map::new();
m.insert("id".to_string(), to_value(1).unwrap());
m.insert("autoplay".to_string(), to_value(true).unwrap());
m.insert("hello".to_string(), to_value("salut").unwrap());
m.insert("float".to_string(), to_value(1.2).unwrap());
m
}
),
(
"{{ gallery(photos=['something', 'else'], fullscreen=true) }}",
"gallery",
{
let mut m = Map::new();
m.insert("photos".to_string(), to_value(["something", "else"]).unwrap());
m.insert("fullscreen".to_string(), to_value(true).unwrap());
m
}
),
];
for (i, n, a) in inputs {
let mut res = ContentParser::parse(Rule::inline_shortcode, i).unwrap();
let (name, args) = parse_shortcode_call(res.next().unwrap());
assert_eq!(name, n);
assert_eq!(args, a);
}
}
#[test]
fn can_render_inline_shortcodes() {
let mut tera = Tera::default();
tera.add_raw_template("shortcodes/youtube.html", "Hello {{id}}").unwrap();
let res = render_shortcodes("Inline {{ youtube(id=1) }}.", &tera);
assert_eq!(res, "Inline Hello 1.");
}
#[test]
fn can_render_shortcodes_with_body() {
let mut tera = Tera::default();
tera.add_raw_template("shortcodes/youtube.html", "{{body}}").unwrap();
let res = render_shortcodes("Body\n {% youtube() %}Hey!{% end %}", &tera);
assert_eq!(res, "Body\n Hey!");
}
}

View file

@ -1,8 +1,6 @@
use tera::{Context as TeraContext};
use tera::{Tera, Context as TeraContext};
use front_matter::InsertAnchor;
use context::Context;
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct Header {
@ -50,16 +48,16 @@ impl TempHeader {
}
/// Transform all the information we have about this header into the HTML string for it
pub fn to_string(&self, context: &Context) -> String {
let anchor_link = if context.should_insert_anchor() {
pub fn to_string(&self, tera: &Tera, insert_anchor: InsertAnchor) -> String {
let anchor_link = if insert_anchor != InsertAnchor::None {
let mut c = TeraContext::new();
c.add("id", &self.id);
context.tera.render("anchor-link.html", &c).unwrap()
tera.render("anchor-link.html", &c).unwrap()
} else {
String::new()
};
match context.insert_anchor {
match insert_anchor {
InsertAnchor::None => format!("<h{lvl} id=\"{id}\">{t}</h{lvl}>\n", lvl = self.level, t = self.title, id = self.id),
InsertAnchor::Left => format!("<h{lvl} id=\"{id}\">{a}{t}</h{lvl}>\n", lvl = self.level, a = anchor_link, t = self.title, id = self.id),
InsertAnchor::Right => format!("<h{lvl} id=\"{id}\">{t}{a}</h{lvl}>\n", lvl = self.level, a = anchor_link, t = self.title, id = self.id),

View file

@ -2,22 +2,25 @@ extern crate tera;
extern crate front_matter;
extern crate templates;
extern crate rendering;
extern crate config;
use std::collections::HashMap;
use tera::Tera;
use config::Config;
use front_matter::InsertAnchor;
use templates::GUTENBERG_TERA;
use rendering::{Context, markdown_to_html};
use rendering::{RenderContext, render_content};
#[test]
fn can_do_markdown_to_html_simple() {
fn can_do_render_content_simple() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("hello", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("hello", &context).unwrap();
assert_eq!(res.0, "<p>hello</p>\n");
}
@ -25,9 +28,10 @@ fn can_do_markdown_to_html_simple() {
fn doesnt_highlight_code_block_with_highlighting_off() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let mut context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
context.highlight_code = false;
let res = markdown_to_html("```\n$ gutenberg server\n```", &context).unwrap();
let mut config = Config::default();
config.highlight_code = false;
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```\n$ gutenberg server\n```", &context).unwrap();
assert_eq!(
res.0,
"<pre><code>$ gutenberg server\n</code></pre>\n"
@ -38,8 +42,9 @@ fn doesnt_highlight_code_block_with_highlighting_off() {
fn can_highlight_code_block_no_lang() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("```\n$ gutenberg server\n$ ping\n```", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```\n$ gutenberg server\n$ ping\n```", &context).unwrap();
assert_eq!(
res.0,
"<pre style=\"background-color:#2b303b\">\n<span style=\"background-color:#2b303b;color:#c0c5ce;\">$ gutenberg server\n</span><span style=\"background-color:#2b303b;color:#c0c5ce;\">$ ping\n</span></pre>"
@ -50,8 +55,9 @@ fn can_highlight_code_block_no_lang() {
fn can_highlight_code_block_with_lang() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("```python\nlist.append(1)\n```", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```python\nlist.append(1)\n```", &context).unwrap();
assert_eq!(
res.0,
"<pre style=\"background-color:#2b303b\">\n<span style=\"background-color:#2b303b;color:#c0c5ce;\">list.</span><span style=\"background-color:#2b303b;color:#bf616a;\">append</span><span style=\"background-color:#2b303b;color:#c0c5ce;\">(</span><span style=\"background-color:#2b303b;color:#d08770;\">1</span><span style=\"background-color:#2b303b;color:#c0c5ce;\">)\n</span></pre>"
@ -62,8 +68,9 @@ fn can_highlight_code_block_with_lang() {
fn can_higlight_code_block_with_unknown_lang() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("```yolo\nlist.append(1)\n```", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("```yolo\nlist.append(1)\n```", &context).unwrap();
// defaults to plain text
assert_eq!(
res.0,
@ -74,8 +81,9 @@ fn can_higlight_code_block_with_unknown_lang() {
#[test]
fn can_render_shortcode() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(r#"
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"
Hello
{{ youtube(id="ub36ffWAqgQ") }}
@ -87,7 +95,8 @@ Hello
#[test]
fn can_render_shortcode_with_markdown_char_in_args_name() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let input = vec![
"name",
"na_me",
@ -95,7 +104,7 @@ fn can_render_shortcode_with_markdown_char_in_args_name() {
"n1",
];
for i in input {
let res = markdown_to_html(&format!("{{{{ youtube(id=\"hey\", {}=1) }}}}", i), &context).unwrap();
let res = render_content(&format!("{{{{ youtube(id=\"hey\", {}=1) }}}}", i), &context).unwrap();
assert!(res.0.contains(r#"<iframe src="https://www.youtube.com/embed/hey""#));
}
}
@ -103,7 +112,8 @@ fn can_render_shortcode_with_markdown_char_in_args_name() {
#[test]
fn can_render_shortcode_with_markdown_char_in_args_value() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let input = vec![
"ub36ffWAqgQ-hey",
"ub36ffWAqgQ_hey",
@ -112,7 +122,7 @@ fn can_render_shortcode_with_markdown_char_in_args_value() {
"ub36ffWAqgQ#hey",
];
for i in input {
let res = markdown_to_html(&format!("{{{{ youtube(id=\"{}\") }}}}", i), &context).unwrap();
let res = render_content(&format!("{{{{ youtube(id=\"{}\") }}}}", i), &context).unwrap();
assert!(res.0.contains(&format!(r#"<iframe src="https://www.youtube.com/embed/{}""#, i)));
}
}
@ -126,12 +136,13 @@ fn can_render_body_shortcode_with_markdown_char_in_name() {
"quo_te",
"qu_o_te",
];
let config = Config::default();
for i in input {
tera.add_raw_template(&format!("shortcodes/{}.html", i), "<blockquote>{{ body }} - {{ author}}</blockquote>").unwrap();
let context = Context::new(&tera, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(&format!("{{% {}(author=\"Bob\") %}}\nhey\n{{% end %}}", i), &context).unwrap();
let res = render_content(&format!("{{% {}(author=\"Bob\") %}}\nhey\n{{% end %}}", i), &context).unwrap();
println!("{:?}", res);
assert!(res.0.contains("<blockquote>hey - Bob</blockquote>"));
}
@ -157,9 +168,10 @@ Here is another paragraph.
";
tera.add_raw_template(&format!("shortcodes/{}.html", "figure"), shortcode).unwrap();
let context = Context::new(&tera, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let config = Config::default();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(markdown_string, &context).unwrap();
let res = render_content(markdown_string, &context).unwrap();
println!("{:?}", res);
assert_eq!(res.0, expected);
}
@ -189,9 +201,10 @@ Here is another paragraph.
";
tera.add_raw_template(&format!("shortcodes/{}.html", "figure"), shortcode).unwrap();
let context = Context::new(&tera, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let config = Config::default();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(markdown_string, &context).unwrap();
let res = render_content(markdown_string, &context).unwrap();
println!("{:?}", res);
assert_eq!(res.0, expected);
}
@ -199,8 +212,9 @@ Here is another paragraph.
#[test]
fn can_render_several_shortcode_in_row() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(r#"
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"
Hello
{{ youtube(id="ub36ffWAqgQ") }}
@ -222,18 +236,12 @@ Hello
}
#[test]
fn errors_if_unterminated_shortcode() {
fn doesnt_render_ignored_shortcodes() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(r#"{{ youtube(id="w7Ft2ym_a"#, &context);
assert!(res.is_err());
}
#[test]
fn doesnt_render_shortcode_in_code_block() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(r#"```{{ youtube(id="w7Ft2ymGmfc") }}```"#, &context).unwrap();
let mut config = Config::default();
config.highlight_code = false;
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"```{{/* youtube(id="w7Ft2ymGmfc") */}}```"#, &context).unwrap();
assert_eq!(res.0, "<p><code>{{ youtube(id=&quot;w7Ft2ymGmfc&quot;) }}</code></p>\n");
}
@ -243,23 +251,25 @@ fn can_render_shortcode_with_body() {
tera.extend(&GUTENBERG_TERA).unwrap();
tera.add_raw_template("shortcodes/quote.html", "<blockquote>{{ body }} - {{ author }}</blockquote>").unwrap();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let config = Config::default();
let context = RenderContext::new(&tera, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(r#"
let res = render_content(r#"
Hello
{% quote(author="Keats") %}
A quote
{% end %}
"#, &context).unwrap();
assert_eq!(res.0, "<p>Hello\n</p><blockquote>A quote - Keats</blockquote>");
assert_eq!(res.0, "<p>Hello</p>\n<blockquote>A quote - Keats</blockquote>\n");
}
#[test]
fn errors_rendering_unknown_shortcode() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("{{ hello(flash=true) }}", &context);
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("{{ hello(flash=true) }}", &context);
assert!(res.is_err());
}
@ -268,8 +278,9 @@ fn can_make_valid_relative_link() {
let mut permalinks = HashMap::new();
permalinks.insert("pages/about.md".to_string(), "https://vincent.is/about".to_string());
let tera_ctx = Tera::default();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks, InsertAnchor::None);
let res = markdown_to_html(
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks, InsertAnchor::None);
let res = render_content(
r#"[rel link](./pages/about.md), [abs link](https://vincent.is/about)"#,
&context
).unwrap();
@ -284,8 +295,9 @@ fn can_make_relative_links_with_anchors() {
let mut permalinks = HashMap::new();
permalinks.insert("pages/about.md".to_string(), "https://vincent.is/about".to_string());
let tera_ctx = Tera::default();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks, InsertAnchor::None);
let res = markdown_to_html(r#"[rel link](./pages/about.md#cv)"#, &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks, InsertAnchor::None);
let res = render_content(r#"[rel link](./pages/about.md#cv)"#, &context).unwrap();
assert!(
res.0.contains(r#"<p><a href="https://vincent.is/about#cv">rel link</a></p>"#)
@ -296,8 +308,9 @@ fn can_make_relative_links_with_anchors() {
fn errors_relative_link_inexistant() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("[rel link](./pages/about.md)", &context);
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("[rel link](./pages/about.md)", &context);
assert!(res.is_err());
}
@ -305,8 +318,9 @@ fn errors_relative_link_inexistant() {
fn can_add_id_to_headers() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html(r#"# Hello"#, &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"# Hello"#, &context).unwrap();
assert_eq!(res.0, "<h1 id=\"hello\">Hello</h1>\n");
}
@ -314,16 +328,18 @@ fn can_add_id_to_headers() {
fn can_add_id_to_headers_same_slug() {
let tera_ctx = Tera::default();
let permalinks_ctx = HashMap::new();
let context = Context::new(&tera_ctx, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("# Hello\n# Hello", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("# Hello\n# Hello", &context).unwrap();
assert_eq!(res.0, "<h1 id=\"hello\">Hello</h1>\n<h1 id=\"hello-1\">Hello</h1>\n");
}
#[test]
fn can_insert_anchor_left() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::Left);
let res = markdown_to_html("# Hello", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::Left);
let res = render_content("# Hello", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"hello\"><a class=\"gutenberg-anchor\" href=\"#hello\" aria-label=\"Anchor link for: hello\">🔗</a>\nHello</h1>\n"
@ -333,8 +349,9 @@ fn can_insert_anchor_left() {
#[test]
fn can_insert_anchor_right() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::Right);
let res = markdown_to_html("# Hello", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::Right);
let res = render_content("# Hello", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"hello\">Hello<a class=\"gutenberg-anchor\" href=\"#hello\" aria-label=\"Anchor link for: hello\">🔗</a>\n</h1>\n"
@ -345,8 +362,9 @@ fn can_insert_anchor_right() {
#[test]
fn can_insert_anchor_with_exclamation_mark() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::Left);
let res = markdown_to_html("# Hello!", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::Left);
let res = render_content("# Hello!", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"hello\"><a class=\"gutenberg-anchor\" href=\"#hello\" aria-label=\"Anchor link for: hello\">🔗</a>\nHello!</h1>\n"
@ -357,19 +375,21 @@ fn can_insert_anchor_with_exclamation_mark() {
#[test]
fn can_insert_anchor_with_link() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::Left);
let res = markdown_to_html("## [](#xresources)Xresources", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::Left);
let res = render_content("## [Rust](https://rust-lang.org)", &context).unwrap();
assert_eq!(
res.0,
"<h2 id=\"xresources\"><a class=\"gutenberg-anchor\" href=\"#xresources\" aria-label=\"Anchor link for: xresources\">🔗</a>\nXresources</h2>\n"
"<h2 id=\"rust\"><a class=\"gutenberg-anchor\" href=\"#rust\" aria-label=\"Anchor link for: rust\">🔗</a>\n<a href=\"https://rust-lang.org\">Rust</a></h2>\n"
);
}
#[test]
fn can_insert_anchor_with_other_special_chars() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::Left);
let res = markdown_to_html("# Hello*_()", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::Left);
let res = render_content("# Hello*_()", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"hello\"><a class=\"gutenberg-anchor\" href=\"#hello\" aria-label=\"Anchor link for: hello\">🔗</a>\nHello*_()</h1>\n"
@ -379,16 +399,16 @@ fn can_insert_anchor_with_other_special_chars() {
#[test]
fn can_make_toc() {
let permalinks_ctx = HashMap::new();
let context = Context::new(
let config = Config::default();
let context = RenderContext::new(
&GUTENBERG_TERA,
true,
"base16-ocean-dark".to_string(),
&config,
"https://mysite.com/something",
&permalinks_ctx,
InsertAnchor::Left
);
let res = markdown_to_html(r#"
let res = render_content(r#"
# Header 1
## Header 2
@ -408,8 +428,9 @@ fn can_make_toc() {
#[test]
fn can_understand_backtick_in_titles() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("# `Hello`", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("# `Hello`", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"hello\"><code>Hello</code></h1>\n"
@ -419,10 +440,125 @@ fn can_understand_backtick_in_titles() {
#[test]
fn can_understand_backtick_in_paragraphs() {
let permalinks_ctx = HashMap::new();
let context = Context::new(&GUTENBERG_TERA, true, "base16-ocean-dark".to_string(), "", &permalinks_ctx, InsertAnchor::None);
let res = markdown_to_html("Hello `world`", &context).unwrap();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("Hello `world`", &context).unwrap();
assert_eq!(
res.0,
"<p>Hello <code>world</code></p>\n"
);
}
// https://github.com/Keats/gutenberg/issues/297
#[test]
fn can_understand_links_in_header() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("# [Rust](https://rust-lang.org)", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"rust\"><a href=\"https://rust-lang.org\">Rust</a></h1>\n"
);
}
#[test]
fn can_understand_link_with_title_in_header() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "", &permalinks_ctx, InsertAnchor::None);
let res = render_content("# [Rust](https://rust-lang.org \"Rust homepage\")", &context).unwrap();
assert_eq!(
res.0,
"<h1 id=\"rust\"><a href=\"https://rust-lang.org\" title=\"Rust homepage\">Rust</a></h1>\n"
);
}
#[test]
fn can_make_valid_relative_link_in_header() {
let mut permalinks = HashMap::new();
permalinks.insert("pages/about.md".to_string(), "https://vincent.is/about/".to_string());
let tera_ctx = Tera::default();
let config = Config::default();
let context = RenderContext::new(&tera_ctx, &config, "", &permalinks, InsertAnchor::None);
let res = render_content(
r#" # [rel link](./pages/about.md)"#,
&context
).unwrap();
assert_eq!(
res.0,
"<h1 id=\"rel-link\"><a href=\"https://vincent.is/about/\">rel link</a></h1>\n"
);
}
#[test]
fn can_make_permalinks_with_colocated_assets_for_link() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "https://vincent.is/about/", &permalinks_ctx, InsertAnchor::None);
let res = render_content("[an image](image.jpg)", &context).unwrap();
assert_eq!(
res.0,
"<p><a href=\"https://vincent.is/about/image.jpg\">an image</a></p>\n"
);
}
#[test]
fn can_make_permalinks_with_colocated_assets_for_image() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "https://vincent.is/about/", &permalinks_ctx, InsertAnchor::None);
let res = render_content("![alt text](image.jpg)", &context).unwrap();
assert_eq!(
res.0,
"<p><img src=\"https://vincent.is/about/image.jpg\" alt=\"alt text\" /></p>\n"
);
}
#[test]
fn markdown_doesnt_wrap_html_in_paragraph() {
let permalinks_ctx = HashMap::new();
let config = Config::default();
let context = RenderContext::new(&GUTENBERG_TERA, &config, "https://vincent.is/about/", &permalinks_ctx, InsertAnchor::None);
let res = render_content(r#"
Some text
<h1>Helo</h1>
<div>
<a href="mobx-flow.png">
<img src="mobx-flow.png" alt="MobX flow">
</a>
</div>
"#, &context).unwrap();
assert_eq!(
res.0,
"<p>Some text</p>\n<h1>Helo</h1>\n<div>\n<a href=\"mobx-flow.png\">\n <img src=\"mobx-flow.png\" alt=\"MobX flow\">\n </a>\n</div>\n"
);
}
#[test]
fn can_validate_valid_external_links() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.check_external_links = true;
let context = RenderContext::new(&GUTENBERG_TERA, &config, "https://vincent.is/about/", &permalinks_ctx, InsertAnchor::None);
let res = render_content("[a link](http://google.com)", &context).unwrap();
assert_eq!(
res.0,
"<p><a href=\"http://google.com\">a link</a></p>\n"
);
}
#[test]
fn can_show_error_message_for_invalid_external_links() {
let permalinks_ctx = HashMap::new();
let mut config = Config::default();
config.check_external_links = true;
let context = RenderContext::new(&GUTENBERG_TERA, &config, "https://vincent.is/about/", &permalinks_ctx, InsertAnchor::None);
let res = render_content("[a link](http://google.comy)", &context);
assert!(res.is_err());
let err = res.unwrap_err();
assert!(err.description().contains("Link http://google.comy is not valid"));
}

View file

@ -20,6 +20,7 @@ pagination = { path = "../pagination" }
taxonomies = { path = "../taxonomies" }
content = { path = "../content" }
search = { path = "../search" }
imageproc = { path = "../imageproc" }
[dev-dependencies]
tempdir = "0.3"
tempfile = "3"

View file

@ -3,12 +3,12 @@
#![feature(test)]
extern crate test;
extern crate site;
extern crate tempdir;
extern crate tempfile;
use std::env;
use site::Site;
use tempdir::TempDir;
use tempfile::tempdir;
#[bench]
@ -17,7 +17,7 @@ fn bench_rendering_small_blog(b: &mut test::Bencher) {
path.push("benches");
path.push("small-blog");
let mut site = Site::new(&path, "config.toml").unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.load().unwrap();
@ -31,7 +31,7 @@ fn bench_rendering_medium_blog(b: &mut test::Bencher) {
path.push("benches");
path.push("medium-blog");
let mut site = Site::new(&path, "config.toml").unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.load().unwrap();
@ -45,7 +45,7 @@ fn bench_rendering_medium_blog(b: &mut test::Bencher) {
// path.push("benches");
// path.push("big-blog");
// let mut site = Site::new(&path, "config.toml").unwrap();
// let tmp_dir = TempDir::new("example").expect("create temp dir");
// let tmp_dir = tempdir().expect("create temp dir");
// let public = &tmp_dir.path().join("public");
// site.set_output_path(&public);
// site.load().unwrap();
@ -59,7 +59,7 @@ fn bench_rendering_small_kb(b: &mut test::Bencher) {
path.push("benches");
path.push("small-kb");
let mut site = Site::new(&path, "config.toml").unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.load().unwrap();
@ -73,7 +73,7 @@ fn bench_rendering_medium_kb(b: &mut test::Bencher) {
path.push("benches");
path.push("medium-kb");
let mut site = Site::new(&path, "config.toml").unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.load().unwrap();

View file

@ -1,11 +1,11 @@
#![feature(test)]
extern crate test;
extern crate site;
extern crate tempdir;
extern crate tempfile;
use std::env;
use tempdir::TempDir;
use tempfile::tempdir;
use site::Site;

View file

@ -16,14 +16,16 @@ extern crate pagination;
extern crate taxonomies;
extern crate content;
extern crate search;
extern crate imageproc;
#[cfg(test)]
extern crate tempdir;
extern crate tempfile;
use std::collections::HashMap;
use std::fs::{create_dir_all, remove_dir_all, copy};
use std::mem;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use glob::glob;
use tera::{Tera, Context};
@ -33,10 +35,11 @@ use errors::{Result, ResultExt};
use config::{Config, get_config};
use utils::fs::{create_file, copy_directory, create_directory, ensure_directory_exists};
use utils::templates::{render_template, rewrite_theme_paths};
use content::{Page, Section, populate_previous_and_next_pages, sort_pages};
use utils::net::get_available_port;
use content::{Page, Section, populate_siblings, sort_pages};
use templates::{GUTENBERG_TERA, global_fns, render_redirect_template};
use front_matter::{SortBy, InsertAnchor};
use taxonomies::Taxonomy;
use taxonomies::{Taxonomy, find_taxonomies};
use pagination::Paginator;
use rayon::prelude::*;
@ -65,11 +68,13 @@ pub struct Site {
pub pages: HashMap<PathBuf, Page>,
pub sections: HashMap<PathBuf, Section>,
pub tera: Tera,
live_reload: bool,
imageproc: Arc<Mutex<imageproc::Processor>>,
// the live reload port to be used if there is one
pub live_reload: Option<u16>,
pub output_path: PathBuf,
content_path: PathBuf,
pub static_path: PathBuf,
pub tags: Option<Taxonomy>,
pub categories: Option<Taxonomy>,
pub taxonomies: Vec<Taxonomy>,
/// A map of all .md files (section and pages) and their permalink
/// We need that if there are relative links in the content that need to be resolved
pub permalinks: HashMap<String, String>,
@ -107,17 +112,22 @@ impl Site {
// the `extend` above already does it but hey
tera.build_inheritance_chains()?;
let content_path = path.join("content");
let static_path = path.join("static");
let imageproc = imageproc::Processor::new(content_path.clone(), &static_path, &config.base_url);
let site = Site {
base_path: path.to_path_buf(),
config,
tera,
pages: HashMap::new(),
sections: HashMap::new(),
live_reload: false,
imageproc: Arc::new(Mutex::new(imageproc)),
live_reload: None,
output_path: path.join("public"),
static_path: path.join("static"),
tags: None,
categories: None,
content_path,
static_path,
taxonomies: Vec::new(),
permalinks: HashMap::new(),
};
@ -126,12 +136,11 @@ impl Site {
/// The index section is ALWAYS at that path
pub fn index_section_path(&self) -> PathBuf {
self.base_path.join("content").join("_index.md")
self.content_path.join("_index.md")
}
/// What the function name says
pub fn enable_live_reload(&mut self) {
self.live_reload = true;
self.live_reload = get_available_port();
}
/// Get all the orphan (== without section) pages in the site
@ -152,6 +161,12 @@ impl Site {
orphans
}
pub fn set_base_url(&mut self, base_url: String) {
let mut imageproc = self.imageproc.lock().unwrap();
imageproc.set_base_url(&base_url);
self.config.base_url = base_url;
}
pub fn set_output_path<P: AsRef<Path>>(&mut self, path: P) {
self.output_path = path.as_ref().to_path_buf();
}
@ -216,7 +231,7 @@ impl Site {
if !self.sections.contains_key(&index_path) {
let mut index_section = Section::default();
index_section.permalink = self.config.make_permalink("");
index_section.file.parent = self.base_path.join("content");
index_section.file.parent = self.content_path.clone();
index_section.file.relative = "_index.md".to_string();
self.sections.insert(index_path, index_section);
}
@ -228,10 +243,10 @@ impl Site {
self.add_page(p, false)?;
}
self.register_early_global_fns();
self.render_markdown()?;
self.populate_sections();
self.populate_tags_and_categories();
self.populate_taxonomies()?;
self.register_tera_global_fns();
Ok(())
@ -269,17 +284,27 @@ impl Site {
Ok(())
}
/// Adds global fns that are to be available to shortcodes while rendering markdown
pub fn register_early_global_fns(&mut self) {
self.tera.register_global_function(
"get_url", global_fns::make_get_url(self.permalinks.clone(), self.config.clone()),
);
self.tera.register_global_function(
"resize_image", global_fns::make_resize_image(self.imageproc.clone()),
);
}
pub fn register_tera_global_fns(&mut self) {
self.tera.register_global_function("trans", global_fns::make_trans(self.config.clone()));
self.tera.register_global_function("get_page", global_fns::make_get_page(&self.pages));
self.tera.register_global_function("get_section", global_fns::make_get_section(&self.sections));
self.tera.register_global_function(
"get_taxonomy_url",
global_fns::make_get_taxonomy_url(self.tags.clone(), self.categories.clone())
"get_taxonomy",
global_fns::make_get_taxonomy(self.taxonomies.clone()),
);
self.tera.register_global_function(
"get_url",
global_fns::make_get_url(self.permalinks.clone(), self.config.clone())
"get_taxonomy_url",
global_fns::make_get_taxonomy_url(self.taxonomies.clone()),
);
}
@ -380,43 +405,36 @@ impl Site {
}
let pages = mem::replace(&mut section.pages, vec![]);
let (sorted_pages, cannot_be_sorted_pages) = sort_pages(pages, section.meta.sort_by);
section.pages = populate_previous_and_next_pages(&sorted_pages);
section.pages = populate_siblings(&sorted_pages, section.meta.sort_by);
section.ignored_pages = cannot_be_sorted_pages;
}
}
/// Find all the tags and categories if it's asked in the config
pub fn populate_tags_and_categories(&mut self) {
let generate_tags_pages = self.config.generate_tags_pages;
let generate_categories_pages = self.config.generate_categories_pages;
if !generate_tags_pages && !generate_categories_pages {
return;
pub fn populate_taxonomies(&mut self) -> Result<()> {
if self.config.taxonomies.is_empty() {
return Ok(());
}
// TODO: can we pass a reference?
let (tags, categories) = Taxonomy::find_tags_and_categories(
self.taxonomies = find_taxonomies(
&self.config,
self.pages
.values()
.filter(|p| !p.is_draft())
.cloned()
.collect::<Vec<_>>()
.as_slice()
);
if generate_tags_pages {
self.tags = Some(tags);
}
if generate_categories_pages {
self.categories = Some(categories);
}
.as_slice(),
)?;
Ok(())
}
/// Inject live reload script tag if in live reload mode
fn inject_livereload(&self, html: String) -> String {
if self.live_reload {
if let Some(port) = self.live_reload {
return html.replace(
"</body>",
r#"<script src="/livereload.js?port=1112&mindelay=10"></script></body>"#
&format!(r#"<script src="/livereload.js?port={}&mindelay=10"></script></body>"#, port),
);
}
@ -429,7 +447,7 @@ impl Site {
if let Some(ref theme) = self.config.theme {
copy_directory(
&self.base_path.join("themes").join(theme).join("static"),
&self.output_path
&self.output_path,
)?;
}
// We're fine with missing static folders
@ -440,6 +458,17 @@ impl Site {
Ok(())
}
pub fn num_img_ops(&self) -> usize {
let imageproc = self.imageproc.lock().unwrap();
imageproc.num_img_ops()
}
pub fn process_images(&self) -> Result<()> {
let mut imageproc = self.imageproc.lock().unwrap();
imageproc.prune()?;
imageproc.do_process()
}
/// Deletes the `public` directory if it exists
pub fn clean(&self) -> Result<()> {
if self.output_path.exists() {
@ -490,13 +519,11 @@ impl Site {
self.render_orphan_pages()?;
self.render_sitemap()?;
if self.config.generate_rss {
self.render_rss_feed()?;
self.render_rss_feed(None, None)?;
}
self.render_404()?;
self.render_robots()?;
// `render_categories` and `render_tags` will check whether the config allows
// them to render or not
self.render_categories()?;
self.render_tags()?;
self.render_taxonomies()?;
if let Some(ref theme) = self.config.theme {
let theme_path = self.base_path.join("themes").join(theme);
@ -509,6 +536,7 @@ impl Site {
self.compile_sass(&self.base_path)?;
}
self.process_images()?;
self.copy_static_directories()?;
if self.config.build_search_index {
@ -599,41 +627,57 @@ impl Site {
for page in self.pages.values() {
for alias in &page.meta.aliases {
let mut output_path = self.output_path.to_path_buf();
for component in alias.split('/') {
let mut split = alias.split('/').collect::<Vec<_>>();
// If the alias ends with an html file name, use that instead of mapping
// as a path containing an `index.html`
let page_name = match split.pop() {
Some(part) if part.ends_with(".html") => part,
Some(part) => {
split.push(part);
"index.html"
}
None => "index.html"
};
for component in split {
output_path.push(&component);
if !output_path.exists() {
create_directory(&output_path)?;
}
}
create_file(&output_path.join("index.html"), &render_redirect_template(&page.permalink, &self.tera)?)?;
create_file(&output_path.join(page_name), &render_redirect_template(&page.permalink, &self.tera)?)?;
}
}
Ok(())
}
/// Renders 404.html
pub fn render_404(&self) -> Result<()> {
ensure_directory_exists(&self.output_path)?;
let mut context = Context::new();
context.insert("config", &self.config);
create_file(
&self.output_path.join("404.html"),
&render_template("404.html", &self.tera, &context, &self.config.theme)?,
)
}
/// Renders robots.txt
pub fn render_robots(&self) -> Result<()> {
ensure_directory_exists(&self.output_path)?;
create_file(
&self.output_path.join("robots.txt"),
&render_template("robots.txt", &self.tera, &Context::new(), &self.config.theme)?
&render_template("robots.txt", &self.tera, &Context::new(), &self.config.theme)?,
)
}
/// Renders all categories and the single category pages if there are some
pub fn render_categories(&self) -> Result<()> {
if let Some(ref categories) = self.categories {
self.render_taxonomy(categories)?;
}
Ok(())
}
/// Renders all tags and the single tag pages if there are some
pub fn render_tags(&self) -> Result<()> {
if let Some(ref tags) = self.tags {
self.render_taxonomy(tags)?;
/// Renders all taxonomies with at least one non-draft post
pub fn render_taxonomies(&self) -> Result<()> {
// TODO: make parallel?
for taxonomy in &self.taxonomies {
self.render_taxonomy(taxonomy)?;
}
Ok(())
@ -641,12 +685,12 @@ impl Site {
fn render_taxonomy(&self, taxonomy: &Taxonomy) -> Result<()> {
if taxonomy.items.is_empty() {
return Ok(())
return Ok(());
}
ensure_directory_exists(&self.output_path)?;
let output_path = self.output_path.join(&taxonomy.get_list_name());
let list_output = taxonomy.render_list(&self.tera, &self.config)?;
let output_path = self.output_path.join(&taxonomy.kind.name);
let list_output = taxonomy.render_all_terms(&self.tera, &self.config)?;
create_directory(&output_path)?;
create_file(&output_path.join("index.html"), &self.inject_livereload(list_output))?;
@ -654,12 +698,25 @@ impl Site {
.items
.par_iter()
.map(|item| {
let single_output = taxonomy.render_single_item(item, &self.tera, &self.config)?;
create_directory(&output_path.join(&item.slug))?;
if taxonomy.kind.rss {
// TODO: can we get rid of `clone()`?
self.render_rss_feed(
Some(item.pages.clone()),
Some(&PathBuf::from(format!("{}/{}", taxonomy.kind.name, item.slug))),
)?;
}
if taxonomy.kind.is_paginated() {
self.render_paginated(&output_path, &Paginator::from_taxonomy(&taxonomy, item))
} else {
let single_output = taxonomy.render_term(item, &self.tera, &self.config)?;
let path = output_path.join(&item.slug);
create_directory(&path)?;
create_file(
&output_path.join(&item.slug).join("index.html"),
&self.inject_livereload(single_output)
&path.join("index.html"),
&self.inject_livereload(single_output),
)
}
})
.fold(|| Ok(()), Result::and)
.reduce(|| Ok(()), Result::and)
@ -692,31 +749,19 @@ impl Site {
sections.sort_by(|a, b| a.permalink.cmp(&b.permalink));
context.add("sections", &sections);
let mut categories = vec![];
if let Some(ref c) = self.categories {
let name = c.get_list_name();
categories.push(SitemapEntry::new(self.config.make_permalink(&name), None));
for item in &c.items {
categories.push(
SitemapEntry::new(self.config.make_permalink(&format!("{}/{}", &name, item.slug)), None),
);
let mut taxonomies = vec![];
for taxonomy in &self.taxonomies {
let name = &taxonomy.kind.name;
let mut terms = vec![];
terms.push(SitemapEntry::new(self.config.make_permalink(name), None));
for item in &taxonomy.items {
terms.push(SitemapEntry::new(self.config.make_permalink(&format!("{}/{}", &name, item.slug)), None));
}
terms.sort_by(|a, b| a.permalink.cmp(&b.permalink));
taxonomies.push(terms);
}
categories.sort_by(|a, b| a.permalink.cmp(&b.permalink));
context.add("categories", &categories);
context.add("taxonomies", &taxonomies);
let mut tags = vec![];
if let Some(ref t) = self.tags {
let name = t.get_list_name();
tags.push(SitemapEntry::new(self.config.make_permalink(&name), None));
for item in &t.items {
tags.push(
SitemapEntry::new(self.config.make_permalink(&format!("{}/{}", &name, item.slug)), None),
);
}
}
tags.sort_by(|a, b| a.permalink.cmp(&b.permalink));
context.add("tags", &tags);
context.add("config", &self.config);
let sitemap = &render_template("sitemap.xml", &self.tera, &context, &self.config.theme)?;
@ -726,14 +771,20 @@ impl Site {
Ok(())
}
pub fn render_rss_feed(&self) -> Result<()> {
/// Renders an RSS feed for the given pages at the given path.
/// If both arguments are `None`, it renders the RSS feed for the whole
/// site at the root folder.
pub fn render_rss_feed(&self, all_pages: Option<Vec<Page>>, base_path: Option<&PathBuf>) -> Result<()> {
ensure_directory_exists(&self.output_path)?;
let mut context = Context::new();
let pages = self.pages.values()
let pages = all_pages
// TODO: avoid that cloned().
// It requires having `sort_pages` take references of Page
.unwrap_or_else(|| self.pages.values().cloned().collect::<Vec<_>>())
.into_iter()
.filter(|p| p.meta.date.is_some() && !p.is_draft())
.cloned()
.collect::<Vec<Page>>();
.collect::<Vec<_>>();
// Don't generate a RSS feed if none of the pages has a date
if pages.is_empty() {
@ -742,20 +793,32 @@ impl Site {
let (sorted_pages, _) = sort_pages(pages, SortBy::Date);
context.add("last_build_date", &sorted_pages[0].meta.date.clone().map(|d| d.to_string()));
// limit to the last n elements)
// limit to the last n elements
context.add("pages", &sorted_pages.iter().take(self.config.rss_limit).collect::<Vec<_>>());
context.add("config", &self.config);
let rss_feed_url = if self.config.base_url.ends_with('/') {
format!("{}{}", self.config.base_url, "rss.xml")
let rss_feed_url = if let Some(ref base) = base_path {
self.config.make_permalink(&base.join("rss.xml").to_string_lossy().replace('\\', "/"))
} else {
format!("{}/{}", self.config.base_url, "rss.xml")
self.config.make_permalink("rss.xml")
};
context.add("feed_url", &rss_feed_url);
let feed = &render_template("rss.xml", &self.tera, &context, &self.config.theme)?;
if let Some(ref base) = base_path {
let mut output_path = self.output_path.clone().to_path_buf();
for component in base.components() {
output_path.push(component);
if !output_path.exists() {
create_directory(&output_path)?;
}
}
create_file(&output_path.join("rss.xml"), feed)?;
} else {
create_file(&self.output_path.join("rss.xml"), feed)?;
}
Ok(())
}
@ -794,7 +857,7 @@ impl Site {
}
if section.meta.is_paginated() {
self.render_paginated(&output_path, section)?;
self.render_paginated(&output_path, &Paginator::from_section(&section.pages, section))?;
} else {
let output = section.render_html(&self.tera, &self.config)?;
create_file(&output_path.join("index.html"), &self.inject_livereload(output))?;
@ -806,8 +869,8 @@ impl Site {
/// Used only on reload
pub fn render_index(&self) -> Result<()> {
self.render_section(
&self.sections[&self.base_path.join("content").join("_index.md")],
false
&self.sections[&self.content_path.join("_index.md")],
false,
)
}
@ -834,11 +897,10 @@ impl Site {
}
/// Renders a list of pages when the section/index is wanting pagination.
pub fn render_paginated(&self, output_path: &Path, section: &Section) -> Result<()> {
pub fn render_paginated(&self, output_path: &Path, paginator: &Paginator) -> Result<()> {
ensure_directory_exists(&self.output_path)?;
let paginator = Paginator::new(&section.pages, section);
let folder_path = output_path.join(&section.meta.paginate_path);
let folder_path = output_path.join(&paginator.paginate_path);
create_directory(&folder_path)?;
paginator
@ -853,7 +915,7 @@ impl Site {
create_file(&page_path.join("index.html"), &self.inject_livereload(output))?;
} else {
create_file(&output_path.join("index.html"), &self.inject_livereload(output))?;
create_file(&page_path.join("index.html"), &render_redirect_template(&section.permalink, &self.tera)?)?;
create_file(&page_path.join("index.html"), &render_redirect_template(&paginator.permalink, &self.tera)?)?;
}
Ok(())
})

View file

@ -1,12 +1,13 @@
extern crate site;
extern crate tempdir;
extern crate tempfile;
use std::collections::HashMap;
use std::env;
use std::path::Path;
use std::fs::File;
use std::io::prelude::*;
use tempdir::TempDir;
use tempfile::tempdir;
use site::Site;
@ -95,7 +96,7 @@ fn can_build_site_without_live_reload() {
path.push("test_site");
let mut site = Site::new(&path, "config.toml").unwrap();
site.load().unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
@ -128,6 +129,10 @@ fn can_build_site_without_live_reload() {
assert!(file_exists!(public, "an-old-url/old-page/index.html"));
assert!(file_contains!(public, "an-old-url/old-page/index.html", "something-else"));
// html aliases work
assert!(file_exists!(public, "an-old-url/an-old-alias.html"));
assert!(file_contains!(public, "an-old-url/an-old-alias.html", "something-else"));
// redirect_to works
assert!(file_exists!(public, "posts/tutorials/devops/index.html"));
assert!(file_contains!(public, "posts/tutorials/devops/index.html", "docker"));
@ -168,7 +173,7 @@ fn can_build_site_with_live_reload() {
path.push("test_site");
let mut site = Site::new(&path, "config.toml").unwrap();
site.load().unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.enable_live_reload();
@ -197,7 +202,7 @@ fn can_build_site_with_live_reload() {
assert_eq!(file_exists!(public, "tags/index.html"), false);
// no live reload code
assert!(file_contains!(public, "index.html", "/livereload.js?port=1112&mindelay=10"));
assert!(file_contains!(public, "index.html", "/livereload.js"));
// the summary anchor link has been created
assert!(file_contains!(public, "posts/python/index.html", r#"<a name="continue-reading"></a>"#));
@ -205,28 +210,27 @@ fn can_build_site_with_live_reload() {
}
#[test]
fn can_build_site_with_categories() {
fn can_build_site_with_taxonomies() {
let mut path = env::current_dir().unwrap().parent().unwrap().parent().unwrap().to_path_buf();
path.push("test_site");
let mut site = Site::new(&path, "config.toml").unwrap();
site.config.generate_categories_pages = true;
site.load().unwrap();
for (i, page) in site.pages.values_mut().enumerate() {
page.meta.category = if i % 2 == 0 {
Some("A".to_string())
} else {
Some("B".to_string())
page.meta.taxonomies = {
let mut taxonomies = HashMap::new();
taxonomies.insert("categories".to_string(), vec![if i % 2 == 0 { "A" } else { "B" }.to_string()]);
taxonomies
};
}
site.populate_tags_and_categories();
let tmp_dir = TempDir::new("example").expect("create temp dir");
site.populate_taxonomies().unwrap();
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
assert!(Path::new(&public).exists());
assert_eq!(site.categories.unwrap().len(), 2);
assert_eq!(site.taxonomies.len(), 1);
assert!(file_exists!(public, "index.html"));
assert!(file_exists!(public, "sitemap.xml"));
@ -242,12 +246,13 @@ fn can_build_site_with_categories() {
assert!(file_exists!(public, "posts/tutorials/index.html"));
assert!(file_exists!(public, "posts/tutorials/devops/index.html"));
assert!(file_exists!(public, "posts/tutorials/programming/index.html"));
// TODO: add assertion for syntax highlighting
// Categories are there
assert!(file_exists!(public, "categories/index.html"));
assert!(file_exists!(public, "categories/a/index.html"));
assert!(file_exists!(public, "categories/b/index.html"));
assert!(file_exists!(public, "categories/a/rss.xml"));
assert!(file_contains!(public, "categories/a/rss.xml", "https://replace-this-with-your-url.com/categories/a/rss.xml"));
// Extending from a theme works
assert!(file_contains!(public, "categories/a/index.html", "EXTENDED"));
// Tags aren't
@ -258,58 +263,6 @@ fn can_build_site_with_categories() {
assert!(file_contains!(public, "sitemap.xml", "<loc>https://replace-this-with-your-url.com/categories/a/</loc>"));
}
#[test]
fn can_build_site_with_tags() {
let mut path = env::current_dir().unwrap().parent().unwrap().parent().unwrap().to_path_buf();
path.push("test_site");
let mut site = Site::new(&path, "config.toml").unwrap();
site.config.generate_tags_pages = true;
site.load().unwrap();
for (i, page) in site.pages.values_mut().enumerate() {
page.meta.tags = if i % 2 == 0 {
Some(vec!["tag1".to_string(), "tag2".to_string()])
} else {
Some(vec!["tag with space".to_string()])
};
}
site.populate_tags_and_categories();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
assert!(Path::new(&public).exists());
assert_eq!(site.tags.unwrap().len(), 3);
assert!(file_exists!(public, "index.html"));
assert!(file_exists!(public, "sitemap.xml"));
assert!(file_exists!(public, "robots.txt"));
assert!(file_exists!(public, "a-fixed-url/index.html"));
assert!(file_exists!(public, "posts/python/index.html"));
assert!(file_exists!(public, "posts/tutorials/devops/nix/index.html"));
assert!(file_exists!(public, "posts/with-assets/index.html"));
// Sections
assert!(file_exists!(public, "posts/index.html"));
assert!(file_exists!(public, "posts/tutorials/index.html"));
assert!(file_exists!(public, "posts/tutorials/devops/index.html"));
assert!(file_exists!(public, "posts/tutorials/programming/index.html"));
// TODO: add assertion for syntax highlighting
// Tags are there
assert!(file_exists!(public, "tags/index.html"));
assert!(file_exists!(public, "tags/tag1/index.html"));
assert!(file_exists!(public, "tags/tag2/index.html"));
assert!(file_exists!(public, "tags/tag-with-space/index.html"));
// Categories aren't
assert_eq!(file_exists!(public, "categories/index.html"), false);
// Tags are in the sitemap
assert!(file_contains!(public, "sitemap.xml", "<loc>https://replace-this-with-your-url.com/tags/</loc>"));
assert!(file_contains!(public, "sitemap.xml", "<loc>https://replace-this-with-your-url.com/tags/tag-with-space/</loc>"));
}
#[test]
fn can_build_site_and_insert_anchor_links() {
let mut path = env::current_dir().unwrap().parent().unwrap().parent().unwrap().to_path_buf();
@ -317,7 +270,7 @@ fn can_build_site_and_insert_anchor_links() {
let mut site = Site::new(&path, "config.toml").unwrap();
site.load().unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
@ -340,7 +293,7 @@ fn can_build_site_with_pagination_for_section() {
section.meta.paginate_by = Some(2);
section.meta.template = Some("section_paginated.html".to_string());
}
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
@ -397,7 +350,7 @@ fn can_build_site_with_pagination_for_index() {
index.meta.paginate_by = Some(2);
index.meta.template = Some("index_paginated.html".to_string());
}
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
@ -437,7 +390,7 @@ fn can_build_rss_feed() {
path.push("test_site");
let mut site = Site::new(&path, "config.toml").unwrap();
site.load().unwrap();
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();
@ -458,7 +411,7 @@ fn can_build_search_index() {
let mut site = Site::new(&path, "config.toml").unwrap();
site.load().unwrap();
site.config.build_search_index = true;
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
let public = &tmp_dir.path().join("public");
site.set_output_path(&public);
site.build().unwrap();

View file

@ -3,6 +3,7 @@ extern crate serde_derive;
extern crate tera;
extern crate slug;
#[macro_use]
extern crate errors;
extern crate config;
extern crate content;
@ -14,19 +15,13 @@ use std::collections::HashMap;
use slug::slugify;
use tera::{Context, Tera};
use config::Config;
use config::{Config, Taxonomy as TaxonomyConfig};
use errors::{Result, ResultExt};
use content::{Page, sort_pages};
use front_matter::SortBy;
use utils::templates::render_template;
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum TaxonomyKind {
Tags,
Categories,
}
/// A tag or category
#[derive(Debug, Clone, Serialize, PartialEq)]
pub struct TaxonomyItem {
@ -37,15 +32,14 @@ pub struct TaxonomyItem {
}
impl TaxonomyItem {
pub fn new(name: &str, kind: TaxonomyKind, config: &Config, pages: Vec<Page>) -> TaxonomyItem {
pub fn new(name: &str, path: &str, config: &Config, pages: Vec<Page>) -> TaxonomyItem {
// Taxonomies are almost always used for blogs, so we filter by date;
// it's not like we can sort things across sections by anything other
// than dates
let (mut pages, ignored_pages) = sort_pages(pages, SortBy::Date);
let slug = slugify(name);
let permalink = {
let kind_path = if kind == TaxonomyKind::Tags { "tags" } else { "categories" };
config.make_permalink(&format!("/{}/{}", kind_path, slug))
config.make_permalink(&format!("/{}/{}", path, slug))
};
// We still append pages without dates at the end
@ -61,49 +55,19 @@ impl TaxonomyItem {
}
/// All the tags or categories
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct Taxonomy {
pub kind: TaxonomyKind,
pub kind: TaxonomyConfig,
// this vec is sorted by the count of item
pub items: Vec<TaxonomyItem>,
}
impl Taxonomy {
pub fn find_tags_and_categories(config: &Config, all_pages: &[Page]) -> (Taxonomy, Taxonomy) {
let mut tags = HashMap::new();
let mut categories = HashMap::new();
// Find all the tags/categories first
for page in all_pages {
if let Some(ref category) = page.meta.category {
categories
.entry(category.to_string())
.or_insert_with(|| vec![])
.push(page.clone());
}
if let Some(ref t) = page.meta.tags {
for tag in t {
tags
.entry(tag.to_string())
.or_insert_with(|| vec![])
.push(page.clone());
}
}
}
// Then make TaxonomyItem out of them, after sorting it
let tags_taxonomy = Taxonomy::new(TaxonomyKind::Tags, config, tags);
let categories_taxonomy = Taxonomy::new(TaxonomyKind::Categories, config, categories);
(tags_taxonomy, categories_taxonomy)
}
fn new(kind: TaxonomyKind, config: &Config, items: HashMap<String, Vec<Page>>) -> Taxonomy {
fn new(kind: TaxonomyConfig, config: &Config, items: HashMap<String, Vec<Page>>) -> Taxonomy {
let mut sorted_items = vec![];
for (name, pages) in &items {
for (name, pages) in items {
sorted_items.push(
TaxonomyItem::new(name, kind, config, pages.clone())
TaxonomyItem::new(&name, &kind.name, config, pages)
);
}
sorted_items.sort_by(|a, b| a.name.cmp(&b.name));
@ -122,69 +86,123 @@ impl Taxonomy {
self.len() == 0
}
pub fn get_single_item_name(&self) -> String {
match self.kind {
TaxonomyKind::Tags => "tag".to_string(),
TaxonomyKind::Categories => "category".to_string(),
}
}
pub fn get_list_name(&self) -> String {
match self.kind {
TaxonomyKind::Tags => "tags".to_string(),
TaxonomyKind::Categories => "categories".to_string(),
}
}
pub fn render_single_item(&self, item: &TaxonomyItem, tera: &Tera, config: &Config) -> Result<String> {
let name = self.get_single_item_name();
pub fn render_term(&self, item: &TaxonomyItem, tera: &Tera, config: &Config) -> Result<String> {
let mut context = Context::new();
context.add("config", config);
context.add(&name, item);
context.add("current_url", &config.make_permalink(&format!("{}/{}", name, item.slug)));
context.add("current_path", &format!("/{}/{}", name, item.slug));
context.add("term", item);
context.add("taxonomy", &self.kind);
context.add("current_url", &config.make_permalink(&format!("{}/{}", self.kind.name, item.slug)));
context.add("current_path", &format!("/{}/{}", self.kind.name, item.slug));
render_template(&format!("{}.html", name), tera, &context, &config.theme)
.chain_err(|| format!("Failed to render {} page.", name))
render_template(&format!("{}/single.html", self.kind.name), tera, &context, &config.theme)
.chain_err(|| format!("Failed to render single term {} page.", self.kind.name))
}
pub fn render_list(&self, tera: &Tera, config: &Config) -> Result<String> {
let name = self.get_list_name();
pub fn render_all_terms(&self, tera: &Tera, config: &Config) -> Result<String> {
let mut context = Context::new();
context.add("config", config);
context.add(&name, &self.items);
context.add("current_url", &config.make_permalink(&name));
context.add("current_path", &name);
context.add("terms", &self.items);
context.add("taxonomy", &self.kind);
context.add("current_url", &config.make_permalink(&self.kind.name));
context.add("current_path", &self.kind.name);
render_template(&format!("{}.html", name), tera, &context, &config.theme)
.chain_err(|| format!("Failed to render {} page.", name))
render_template(&format!("{}/list.html", self.kind.name), tera, &context, &config.theme)
.chain_err(|| format!("Failed to render a list of {} page.", self.kind.name))
}
}
pub fn find_taxonomies(config: &Config, all_pages: &[Page]) -> Result<Vec<Taxonomy>> {
let taxonomies_def = {
let mut m = HashMap::new();
for t in &config.taxonomies {
m.insert(t.name.clone(), t);
}
m
};
let mut all_taxonomies = HashMap::new();
// Find all the taxonomies first
for page in all_pages {
for (name, val) in &page.meta.taxonomies {
if taxonomies_def.contains_key(name) {
all_taxonomies
.entry(name)
.or_insert_with(|| HashMap::new());
for v in val {
all_taxonomies.get_mut(name)
.unwrap()
.entry(v.to_string())
.or_insert_with(|| vec![])
.push(page.clone());
}
} else {
bail!("Page `{}` has taxonomy `{}` which is not defined in config.toml", page.file.path.display(), name);
}
}
}
let mut taxonomies = vec![];
for (name, taxo) in all_taxonomies {
taxonomies.push(Taxonomy::new(taxonomies_def[name].clone(), config, taxo));
}
Ok(taxonomies)
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use config::Config;
use config::{Config, Taxonomy};
use content::Page;
#[test]
fn can_make_taxonomies() {
let config = Config::default();
let mut config = Config::default();
config.taxonomies = vec![
Taxonomy { name: "categories".to_string(), ..Taxonomy::default() },
Taxonomy { name: "tags".to_string(), ..Taxonomy::default() },
Taxonomy { name: "authors".to_string(), ..Taxonomy::default() },
];
let mut page1 = Page::default();
page1.meta.tags = Some(vec!["rust".to_string(), "db".to_string()]);
page1.meta.category = Some("Programming tutorials".to_string());
let mut taxo_page1 = HashMap::new();
taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
taxo_page1.insert("categories".to_string(), vec!["Programming tutorials".to_string()]);
page1.meta.taxonomies = taxo_page1;
let mut page2 = Page::default();
page2.meta.tags = Some(vec!["rust".to_string(), "js".to_string()]);
page2.meta.category = Some("Other".to_string());
let mut taxo_page2 = HashMap::new();
taxo_page2.insert("tags".to_string(), vec!["rust".to_string(), "js".to_string()]);
taxo_page2.insert("categories".to_string(), vec!["Other".to_string()]);
page2.meta.taxonomies = taxo_page2;
let mut page3 = Page::default();
page3.meta.tags = Some(vec!["js".to_string()]);
let mut taxo_page3 = HashMap::new();
taxo_page3.insert("tags".to_string(), vec!["js".to_string()]);
taxo_page3.insert("authors".to_string(), vec!["Vincent Prouillet".to_string()]);
page3.meta.taxonomies = taxo_page3;
let pages = vec![page1, page2, page3];
let (tags, categories) = Taxonomy::find_tags_and_categories(&config, &pages);
let taxonomies = find_taxonomies(&config, &pages).unwrap();
let (tags, categories, authors) = {
let mut t = None;
let mut c = None;
let mut a = None;
for x in taxonomies {
match x.kind.name.as_ref() {
"tags" => t = Some(x),
"categories" => c = Some(x),
"authors" => a = Some(x),
_ => unreachable!(),
}
}
(t.unwrap(), c.unwrap(), a.unwrap())
};
assert_eq!(tags.items.len(), 3);
assert_eq!(categories.items.len(), 2);
assert_eq!(authors.items.len(), 1);
assert_eq!(tags.items[0].name, "db");
assert_eq!(tags.items[0].slug, "db");
@ -211,4 +229,22 @@ mod tests {
assert_eq!(categories.items[1].permalink, "http://a-website.com/categories/programming-tutorials/");
assert_eq!(categories.items[1].pages.len(), 1);
}
#[test]
fn errors_on_unknown_taxonomy() {
let mut config = Config::default();
config.taxonomies = vec![
Taxonomy { name: "authors".to_string(), ..Taxonomy::default() },
];
let mut page1 = Page::default();
let mut taxo_page1 = HashMap::new();
taxo_page1.insert("tags".to_string(), vec!["rust".to_string(), "db".to_string()]);
page1.meta.taxonomies = taxo_page1;
let taxonomies = find_taxonomies(&config, &vec![page1]);
assert!(taxonomies.is_err());
let err = taxonomies.unwrap_err();
// no path as this is created by Default
assert_eq!(err.description(), "Page `` has taxonomy `tags` which is not defined in config.toml");
}
}

View file

@ -14,3 +14,4 @@ utils = { path = "../utils" }
content = { path = "../content" }
config = { path = "../config" }
taxonomies = { path = "../taxonomies" }
imageproc = { path = "../imageproc" }

View file

@ -0,0 +1,10 @@
<!doctype html>
<html>
<head>
<title>File Not Found: 404.</title>
</head>
<body>
<h1>Oops!</h1>
<h2>File Not Found: 404.</h2>
</body>
</html>

View file

@ -1,18 +1,18 @@
<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">
<channel>
<title>{{ config.title }}</title>
<link>{{ config.base_url }}</link>
<link>{{ config.base_url | safe }}</link>
<description>{{ config.description }}</description>
<generator>Gutenberg</generator>
<language>{{ config.default_language }}</language>
<atom:link href="{{ feed_url }}" rel="self" type="application/rss+xml"/>
<atom:link href="{{ feed_url | safe }}" rel="self" type="application/rss+xml"/>
<lastBuildDate>{{ last_build_date | date(format="%a, %d %b %Y %H:%M:%S %z") }}</lastBuildDate>
{% for page in pages %}
<item>
<title>{{ page.title }}</title>
<pubDate>{{ page.date | date(format="%a, %d %b %Y %H:%M:%S %z") }}</pubDate>
<link>{{ page.permalink }}</link>
<guid>{{ page.permalink }}</guid>
<link>{{ page.permalink | safe }}</link>
<guid>{{ page.permalink | safe }}</guid>
<description>{% if page.summary %}{{ page.summary }}{% else %}{{ page.content }}{% endif %}</description>
</item>
{% endfor %}

View file

@ -12,14 +12,11 @@
<loc>{{ section.permalink | safe }}</loc>
</url>
{% endfor %}
{% for category in categories %}
{% for taxonomy in taxonomies %}
{% for entry in taxonomy %}
<url>
<loc>{{ category.permalink | safe }}</loc>
<loc>{{ entry.permalink | safe }}</loc>
</url>
{% endfor %}
{% for tag in tags %}
<url>
<loc>{{ tag.permalink | safe }}</loc>
</url>
{% endfor %}
</urlset>

View file

@ -51,7 +51,7 @@ pub fn base64_decode(value: Value, _: HashMap<String, Value>) -> TeraResult<Valu
mod tests {
use std::collections::HashMap;
use tera::{to_value};
use tera::to_value;
use super::{markdown, base64_decode, base64_encode};

View file

@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::path::{PathBuf};
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use tera::{GlobalFn, Value, from_value, to_value, Result};
@ -7,29 +8,45 @@ use content::{Page, Section};
use config::Config;
use utils::site::resolve_internal_link;
use taxonomies::Taxonomy;
use imageproc;
macro_rules! required_string_arg {
($e: expr, $err: expr) => {
macro_rules! required_arg {
($ty: ty, $e: expr, $err: expr) => {
match $e {
Some(v) => match from_value::<String>(v.clone()) {
Some(v) => match from_value::<$ty>(v.clone()) {
Ok(u) => u,
Err(_) => return Err($err.into())
},
None => return Err($err.into())
}
};
}
macro_rules! optional_arg {
($ty: ty, $e: expr, $err: expr) => {
match $e {
Some(v) => match from_value::<$ty>(v.clone()) {
Ok(u) => Some(u),
Err(_) => return Err($err.into())
},
None => None
}
};
}
pub fn make_trans(config: Config) -> GlobalFn {
let translations_config = config.translations;
let default_lang = to_value(config.default_language).unwrap();
let default_lang = config.default_language.clone();
Box::new(move |args| -> Result<Value> {
let key = required_string_arg!(args.get("key"), "`trans` requires a `key` argument.");
let lang_arg = args.get("lang").unwrap_or(&default_lang).clone();
let lang = from_value::<String>(lang_arg).unwrap();
let key = required_arg!(String, args.get("key"), "`trans` requires a `key` argument.");
let lang = optional_arg!(
String,
args.get("lang"),
"`trans`: `lang` must be a string."
).unwrap_or(default_lang.clone());
let translations = &translations_config[lang.as_str()];
Ok(to_value(&translations[key.as_str()]).unwrap())
})
@ -43,7 +60,11 @@ pub fn make_get_page(all_pages: &HashMap<PathBuf, Page>) -> GlobalFn {
}
Box::new(move |args| -> Result<Value> {
let path = required_string_arg!(args.get("path"), "`get_page` requires a `path` argument with a string value");
let path = required_arg!(
String,
args.get("path"),
"`get_page` requires a `path` argument with a string value"
);
match pages.get(&path) {
Some(p) => Ok(to_value(p).unwrap()),
None => Err(format!("Page `{}` not found.", path).into())
@ -61,7 +82,11 @@ pub fn make_get_section(all_sections: &HashMap<PathBuf, Section>) -> GlobalFn {
}
Box::new(move |args| -> Result<Value> {
let path = required_string_arg!(args.get("path"), "`get_section` requires a `path` argument with a string value");
let path = required_arg!(
String,
args.get("path"),
"`get_section` requires a `path` argument with a string value"
);
//println!("Found {:#?}", sections.get(&path).unwrap().pages[0]);
match sections.get(&path) {
Some(p) => Ok(to_value(p).unwrap()),
@ -84,7 +109,11 @@ pub fn make_get_url(permalinks: HashMap<String, String>, config: Config) -> Glob
from_value::<bool>(c.clone()).unwrap_or(true)
});
let path = required_string_arg!(args.get("path"), "`get_url` requires a `path` argument with a string value");
let path = required_arg!(
String,
args.get("path"),
"`get_url` requires a `path` argument with a string value"
);
if path.starts_with("./") {
match resolve_internal_link(&path, &permalinks) {
Ok(url) => Ok(to_value(url).unwrap()),
@ -105,39 +134,123 @@ pub fn make_get_url(permalinks: HashMap<String, String>, config: Config) -> Glob
})
}
pub fn make_get_taxonomy_url(tags: Option<Taxonomy>, categories: Option<Taxonomy>) -> GlobalFn {
pub fn make_get_taxonomy(all_taxonomies: Vec<Taxonomy>) -> GlobalFn {
let mut taxonomies = HashMap::new();
for taxonomy in all_taxonomies {
taxonomies.insert(taxonomy.kind.name.clone(), taxonomy);
}
Box::new(move |args| -> Result<Value> {
let kind = required_string_arg!(args.get("kind"), "`get_taxonomy_url` requires a `kind` argument with a string value");
let name = required_string_arg!(args.get("name"), "`get_taxonomy_url` requires a `name` argument with a string value");
let container = match kind.as_ref() {
"tag" => &tags,
"category" => &categories,
_ => return Err("`get_taxonomy_url` can only get `tag` or `category` for the `kind` argument".into()),
let kind = required_arg!(
String,
args.get("kind"),
"`get_taxonomy` requires a `kind` argument with a string value"
);
let container = match taxonomies.get(&kind) {
Some(c) => c,
None => return Err(
format!("`get_taxonomy` received an unknown taxonomy as kind: {}", kind).into()
),
};
if let Some(ref c) = *container {
for item in &c.items {
return Ok(to_value(container).unwrap());
})
}
pub fn make_get_taxonomy_url(all_taxonomies: Vec<Taxonomy>) -> GlobalFn {
let mut taxonomies = HashMap::new();
for taxonomy in all_taxonomies {
taxonomies.insert(taxonomy.kind.name.clone(), taxonomy);
}
Box::new(move |args| -> Result<Value> {
let kind = required_arg!(
String,
args.get("kind"),
"`get_taxonomy_url` requires a `kind` argument with a string value"
);
let name = required_arg!(
String,
args.get("name"),
"`get_taxonomy_url` requires a `name` argument with a string value"
);
let container = match taxonomies.get(&kind) {
Some(c) => c,
None => return Err(
format!("`get_taxonomy_url` received an unknown taxonomy as kind: {}", kind).into()
)
};
for item in &container.items {
if item.name == name {
return Ok(to_value(item.permalink.clone()).unwrap());
}
}
bail!("`get_taxonomy_url`: couldn't find `{}` in `{}` taxonomy", name, kind);
} else {
bail!("`get_taxonomy_url` tried to get a taxonomy of kind `{}` but there isn't any", kind);
}
Err(
format!("`get_taxonomy_url`: couldn't find `{}` in `{}` taxonomy", name, kind).into()
)
})
}
pub fn make_resize_image(imageproc: Arc<Mutex<imageproc::Processor>>) -> GlobalFn {
static DEFAULT_OP: &'static str = "fill";
const DEFAULT_Q: u8 = 75;
Box::new(move |args| -> Result<Value> {
let path = required_arg!(
String,
args.get("path"),
"`resize_image` requires a `path` argument with a string value"
);
let width = optional_arg!(
u32,
args.get("width"),
"`resize_image`: `width` must be a non-negative integer"
);
let height = optional_arg!(
u32,
args.get("height"),
"`resize_image`: `height` must be a non-negative integer"
);
let op = optional_arg!(
String,
args.get("op"),
"`resize_image`: `op` must be a string"
).unwrap_or(DEFAULT_OP.to_string());
let quality = optional_arg!(
u8,
args.get("quality"),
"`resize_image`: `quality` must be a number"
).unwrap_or(DEFAULT_Q);
if quality == 0 || quality > 100 {
return Err("`resize_image`: `quality` must be in range 1-100".to_string().into());
}
let mut imageproc = imageproc.lock().unwrap();
if !imageproc.source_exists(&path) {
return Err(format!("`resize_image`: Cannot find path: {}", path).into());
}
let imageop = imageproc::ImageOp::from_args(path.clone(), &op, width, height, quality)
.map_err(|e| format!("`resize_image`: {}", e))?;
let url = imageproc.insert(imageop);
to_value(url).map_err(|err| err.into())
})
}
#[cfg(test)]
mod tests {
use super::{make_get_url, make_get_taxonomy_url, make_trans};
use super::{make_get_url, make_get_taxonomy, make_get_taxonomy_url, make_trans};
use std::collections::HashMap;
use tera::to_value;
use config::Config;
use taxonomies::{Taxonomy, TaxonomyKind, TaxonomyItem};
use config::{Config, Taxonomy as TaxonomyConfig};
use taxonomies::{Taxonomy, TaxonomyItem};
#[test]
@ -181,27 +294,53 @@ mod tests {
}
#[test]
fn can_get_tag_url() {
fn can_get_taxonomy() {
let taxo_config = TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() };
let tag = TaxonomyItem::new(
"Progamming",
TaxonomyKind::Tags,
"tags",
&Config::default(),
vec![],
);
let tags = Taxonomy {
kind: TaxonomyKind::Tags,
kind: taxo_config,
items: vec![tag],
};
let static_fn = make_get_taxonomy_url(Some(tags), None);
let static_fn = make_get_taxonomy(vec![tags.clone()]);
// can find it correctly
let mut args = HashMap::new();
args.insert("kind".to_string(), to_value("tag").unwrap());
args.insert("name".to_string(), to_value("Prog amming").unwrap());
assert_eq!(static_fn(args).unwrap(), "http://a-website.com/tags/prog-amming/");
args.insert("kind".to_string(), to_value("tags").unwrap());
assert_eq!(static_fn(args).unwrap(), to_value(&tags).unwrap());
// and errors if it can't find it
let mut args = HashMap::new();
args.insert("kind".to_string(), to_value("tag").unwrap());
args.insert("kind".to_string(), to_value("something-else").unwrap());
assert!(static_fn(args).is_err());
}
#[test]
fn can_get_taxonomy_url() {
let taxo_config = TaxonomyConfig { name: "tags".to_string(), ..TaxonomyConfig::default() };
let tag = TaxonomyItem::new(
"Programming",
"tags",
&Config::default(),
vec![],
);
let tags = Taxonomy {
kind: taxo_config,
items: vec![tag],
};
let static_fn = make_get_taxonomy_url(vec![tags.clone()]);
// can find it correctly
let mut args = HashMap::new();
args.insert("kind".to_string(), to_value("tags").unwrap());
args.insert("name".to_string(), to_value("Programming").unwrap());
assert_eq!(static_fn(args).unwrap(), to_value("http://a-website.com/tags/programming/").unwrap());
// and errors if it can't find it
let mut args = HashMap::new();
args.insert("kind".to_string(), to_value("tags").unwrap());
args.insert("name".to_string(), to_value("random").unwrap());
assert!(static_fn(args).is_err());
}

View file

@ -5,12 +5,12 @@ extern crate tera;
extern crate base64;
extern crate pulldown_cmark;
#[macro_use]
extern crate errors;
extern crate utils;
extern crate content;
extern crate config;
extern crate taxonomies;
extern crate imageproc;
pub mod filters;
pub mod global_fns;
@ -23,6 +23,7 @@ lazy_static! {
pub static ref GUTENBERG_TERA: Tera = {
let mut tera = Tera::default();
tera.add_raw_templates(vec![
("404.html", include_str!("builtins/404.html")),
("rss.xml", include_str!("builtins/rss.xml")),
("sitemap.xml", include_str!("builtins/sitemap.xml")),
("robots.txt", include_str!("builtins/robots.txt")),

View file

@ -6,8 +6,8 @@ authors = ["Vincent Prouillet <prouillet.vincent@gmail.com>"]
[dependencies]
errors = { path = "../errors" }
tera = "0.11"
unicode-segmentation = "1.2"
walkdir = "2"
[dev-dependencies]
tempdir = "0.3"
tempfile = "3"

View file

@ -93,17 +93,43 @@ pub fn copy_directory(src: &PathBuf, dest: &PathBuf) -> Result<()> {
Ok(())
}
/// Compares source and target files' timestamps and returns true if the source file
/// has been created _or_ updated after the target file has
pub fn file_stale<PS, PT>(p_source: PS, p_target: PT) -> bool where PS: AsRef<Path>, PT: AsRef<Path> {
let p_source = p_source.as_ref();
let p_target = p_target.as_ref();
if !p_target.exists() {
return true;
}
let get_time = |path: &Path| path.metadata().ok().and_then(|meta| {
Some(match (meta.created().ok(), meta.modified().ok()) {
(Some(tc), Some(tm)) => tc.max(tm),
(Some(tc), None) => tc,
(None, Some(tm)) => tm,
(None, None) => return None,
})
});
let time_source = get_time(p_source);
let time_target = get_time(p_target);
time_source.and_then(|ts| time_target.map(|tt| ts > tt)).unwrap_or(true)
}
#[cfg(test)]
mod tests {
use std::fs::File;
use tempdir::TempDir;
use tempfile::tempdir;
use super::{find_related_assets};
use super::find_related_assets;
#[test]
fn can_find_related_assets() {
let tmp_dir = TempDir::new("example").expect("create temp dir");
let tmp_dir = tempdir().expect("create temp dir");
File::create(tmp_dir.path().join("index.md")).unwrap();
File::create(tmp_dir.path().join("example.js")).unwrap();
File::create(tmp_dir.path().join("graph.jpg")).unwrap();

View file

@ -2,10 +2,12 @@
extern crate errors;
#[cfg(test)]
extern crate tempdir;
extern crate tempfile;
extern crate tera;
extern crate walkdir;
extern crate unicode_segmentation;
pub mod fs;
pub mod site;
pub mod templates;
pub mod net;

View file

@ -0,0 +1,14 @@
use std::net::TcpListener;
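/// Returns the first port in the 1000-9000 range that can be bound on 127.0.0.1, if any.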
pub fn get_available_port() -> Option<u16> {
(1000..9000)
.find(|port| port_is_available(*port))
}
fn port_is_available(port: u16) -> bool {
match TcpListener::bind(("127.0.0.1", port)) {
Ok(_) => true,
Err(_) => false,
}
}

View file

@ -1,11 +1,11 @@
use std::collections::HashMap;
use unicode_segmentation::UnicodeSegmentation;
use errors::Result;
/// Get word count and estimated reading time
pub fn get_reading_analytics(content: &str) -> (usize, usize) {
// Only works for Latin languages but good enough for a start
let word_count: usize = content.split_whitespace().count();
let word_count: usize = content.unicode_words().count();
// https://help.medium.com/hc/en-us/articles/214991667-Read-time
// 275 seems a bit too high though
@ -26,7 +26,7 @@ pub fn resolve_internal_link(link: &str, permalinks: &HashMap<String, String>) -
} else {
Ok(p.to_string())
}
},
}
None => bail!(format!("Relative link {} not found.", link)),
}
}

View file

@ -27,7 +27,7 @@ pub fn render_template(name: &str, tera: &Tera, context: &Context, theme: &Optio
.map_err(|e| e.into());
}
if let &Some(ref t) = theme {
if let Some(ref t) = *theme {
return tera
.render(&format!("{}/templates/{}", t, name), context)
.map_err(|e| e.into());
@ -37,13 +37,13 @@ pub fn render_template(name: &str, tera: &Tera, context: &Context, theme: &Optio
match name {
"index.html" | "section.html" => {
render_default_tpl!(name, "https://www.getgutenberg.io/documentation/templates/pages-sections/#section-variables")
},
}
"page.html" => {
render_default_tpl!(name, "https://www.getgutenberg.io/documentation/templates/pages-sections/#page-variables")
},
"tag.html" | "tags.html" | "category.html" | "categories.html" => {
render_default_tpl!(name, "https://www.getgutenberg.io/documentation/templates/tags-categories/")
},
}
"single.html" | "list.html" => {
render_default_tpl!(name, "https://www.getgutenberg.io/documentation/templates/taxonomies/")
}
_ => bail!("Tried to render `{}` but the template wasn't found", name)
}
}

View file

@ -7,6 +7,7 @@ highlight_code = true
insert_anchor_links = true
highlight_theme = "kronuz"
build_search_index = true
# check_external_links = true
[extra]
author = "Vincent Prouillet"

Binary image files added (not shown): 192 KiB, 204 KiB, 42 KiB, 250 KiB, 47 KiB.

View file

@ -0,0 +1,134 @@
+++
title = "Image processing"
weight = 120
+++
Gutenberg provides support for automatic image resizing through the built-in function `resize_image`,
which is available in template code as well as in shortcodes.
The function usage is as follows:
```jinja2
resize_image(path, width, height, op, quality)
```
### Arguments
- `path`: The path to the source image relative to the `content` directory in the [directory structure](./documentation/getting-started/directory-structure.md).
- `width` and `height`: The dimensions in pixels of the resized image. Usage depends on the `op` argument.
- `op`: Resize operation. This can be one of five choices: `"scale"`, `"fit_width"`, `"fit_height"`, `"fit"`, or `"fill"`.
What each of these does is explained below.
This argument is optional; the default value is `"fill"`.
- `quality`: JPEG quality of the resized image, in percent. This argument is optional; the default value is `75`.
### Image processing and return value
Gutenberg performs image processing during the build process and places the resized images in a subdirectory in the static files directory:
```
static/_processed_images/
```
Resized images are JPEGs. The filename of each resized image is a hash of the function arguments,
which means that once an image has been resized in a certain way, it will be stored in the above directory and will not
need to be resized again during subsequent builds (unless the image itself, the dimensions, or the other arguments have changed).
Therefore, if you have a large number of images, they only need to be resized once.
The function returns a full URL to the resized image.
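For example, the returned URL can be stored in a variable and used like any other value in a template. This is only a sketch: the image path, dimensions and quality below are illustrative.
```jinja2
{% set cover = resize_image(path="blog/first-post/cover.jpg", width=320, height=240, op="fill", quality=80) %}
<img src="{{ cover }}" />
```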
## Resize operations
The source for all examples is this 300 × 380 pixels image:
![gutenberg](gutenberg.jpg)
### **`"scale"`**
Simply scales the image to the specified dimensions (`width` & `height`) irrespective of the aspect ratio.
`resize_image(..., width=150, height=150, op="scale")`
{{ resize_image(path="documentation/content/image-processing/gutenberg.jpg", width=150, height=150, op="scale") }}
### **`"fit_width"`**
Resizes the image such that the resulting width is `width` and height is whatever will preserve the aspect ratio.
The `height` argument is not needed.
`resize_image(..., width=100, op="fit_width")`
{{ resize_image(path="documentation/content/image-processing/gutenberg.jpg", width=100, height=0, op="fit_width") }}
### **`"fit_height"`**
Resizes the image such that the resulting height is `height` and width is whatever will preserve the aspect ratio.
The `width` argument is not needed.
`resize_image(..., height=150, op="fit_height")`
{{ resize_image(path="documentation/content/image-processing/gutenberg.jpg", width=0, height=150, op="fit_height") }}
### **`"fit"`**
Like `"fit_width"` and `"fit_height"` combined.
Resizes the image such that the result fits within `width` and `height` while preserving the aspect ratio. This means that the resulting width and height
will be at most `width` and `height`, respectively, though one of them may be smaller so as to preserve the aspect ratio.
`resize_image(..., width=150, height=150, op="fit")`
{{ resize_image(path="documentation/content/image-processing/gutenberg.jpg", width=150, height=150, op="fit") }}
### **`"fill"`**
This is the default operation. It takes the image's center part with the same aspect ratio as the `width` & `height` given and resizes that
to `width` & `height`. This means that parts of the image that are outside of the resized aspect ratio are cropped away.
`resize_image(..., width=150, height=150, op="fill")`
{{ resize_image(path="documentation/content/image-processing/gutenberg.jpg", width=150, height=150, op="fill") }}
## Using `resize_image` in markdown via shortcodes
`resize_image` is a built-in Tera global function (see the [Templates](./documentation/templates/_index.md) chapter),
but it can be used in markdown, too, using [Shortcodes](./documentation/content/shortcodes.md).
The examples above were generated using a shortcode file named `resize_image.html` with this content:
```jinja2
<img src="{{ resize_image(path=path, width=width, height=height, op=op) }}" />
```
## Creating picture galleries
`resize_image()` can be used multiple times and/or in loops, as it is designed to handle this efficiently.
This can be used along with `assets` [page metadata](./documentation/templates/pages-sections.md) to create picture galleries.
The `assets` variable holds the paths to all assets in the directory of a page with resources
(see [Assets colocation](./documentation/content/overview.md#assets-colocation)): if you have files other than images, you
will need to filter them out in the loop first, as in the example below.
This can be used in shortcodes. For example, we can create a very simple HTML-only clickable
picture gallery with the following shortcode named `gallery.html`:
```jinja2
{% for asset in page.assets %}
{% if asset is ending_with(".jpg") %}
<a href="{{ get_url(path=asset) }}">
<img src="{{ resize_image(path=asset, width=240, height=180, op="fill") }}" />
</a>
&ensp;
{% endif %}
{% endfor %}
```
As you may have noticed, we did not specify an `op` argument, which means it will default to `"fill"`. Similarly, the JPEG quality will default to `75`.
To call it from a markdown file, simply do:
```jinja2
{{/* gallery() */}}
```
Here is the result:
{{ gallery() }}
<small>
Image attribution: example-01: Willi Heidelbach, example-02: Daniel Ullrich, others: public domain.
</small>

View file

@ -29,7 +29,8 @@ it is at the beginning of the file, surrounded by `+++` and uses TOML.
While none of the front-matter variables are mandatory, the opening and closing `+++` are required.
Here is an example page with all the variables available:
Here is an example page with all the variables available. The values provided below are the default
values.
```md
+++
@ -38,9 +39,16 @@ description = ""
# The date of the post.
# 2 formats are allowed: YYYY-MM-DD (2012-10-02) and RFC3339 (2002-10-02T15:00:00Z)
# Do not wrap dates in quotes, the line below only indicates that there is no default date
# Do not wrap dates in quotes, the line below only indicates that there is no default date.
# If the section variable `sort_by` is set to `date`, then any page that lacks a `date`
# will not be rendered.
date =
# The weight as defined in the Section page
# If the section variable `sort_by` is set to `weight`, then any page that lacks a `weight`
# will not be rendered.
weight = 0
# A draft page will not be present in prev/next pagination
draft = false
@ -54,18 +62,6 @@ slug = ""
# It should not start with a `/` and the slash will be removed if it does
path = ""
# An array of strings allowing you to group pages with them
tags = []
# An overarching category name for that page, allowing you to group pages with it
category = ""
# The order as defined in the Section page
order = 0
# The weight as defined in the Section page
weight = 0
# Use aliases if you are moving content but want to redirect previous URLs to the
# current one. This takes an array of path, not URLs.
aliases = []
@ -78,6 +74,11 @@ in_search_index = true
# Template to use to render this page
template = "page.html"
# The taxonomies for that page. The keys need to be the same as the taxonomy
# names configured in `config.toml` and the values an array of strings, e.g.
# tags = ["rust", "web"]
[taxonomies]
# Your own data
[extra]
+++

View file

@ -27,7 +27,8 @@ to your templates through the `section.content` variable.
While none of the front-matter variables are mandatory, the opening and closing `+++` are required.
Here is an example `_index.md` with all the variables available:
Here is an example `_index.md` with all the variables available. The values provided below are the
default values.
```md
@ -36,7 +37,7 @@ title = ""
description = ""
# Whether to sort by "date", "order", "weight" or "none". More on that below
# Whether to sort pages by "date", "weight", or "none". More on that below
sort_by = "none"
# Used by the parent section to order its subsections.
@ -107,16 +108,16 @@ create a list of links to the posts, a simple template might look like this:
This would iterate over the posts, and would do so in a specific order
based on the `sort_by` variable set in the `_index.md` page for the
containing section. The `sort_by` variable can be given three values: `date`,
`weight`, and `order`. If no `sort_by` method is set, the pages will be
sorted in a default order that is not guaranteed to correspond to any of the
explicit orders. The value of `sort_by` will also determine which pages
are stored in the `page.next` and `page.previous` variables. The effects of these values are explained below.
`weight`, and `none`. If no `sort_by` method is set, the pages will be
sorted in the `none` order, which is not intended to be used for sorted content.
Any page that is missing the data it needs to be sorted will be ignored and
won't be rendered. For example, if a page is missing the date variable and the
containing section sets `sort_by = "date"`, then that page will be ignored. The terminal will warn you if this is happening.
containing section sets `sort_by = "date"`, then that page will be ignored.
The terminal will warn you if this is happening.
If several pages have the same date/weight/order, their permalink will be used to break the tie following an alphabetical order.
If several pages have the same date/weight/order, their permalink will be used
to break the tie following an alphabetical order.
## Sorting Pages
The `sort_by` front-matter variable can have the following values:
@ -124,68 +125,33 @@ The `sort_by` front-matter variable can have the following values:
### `date`
This will sort all pages by their `date` field, from the most recent (at the
top of the list) to the oldest (at the bottom of the list). Each page will
get a `page.next` variable that points *down* the list (to the page just
older than the current page) and a `page.previous` variable that points up
the list (to the just newer page).
get `page.earlier` and `page.later` variables that contain the pages with
earlier and later dates, respectively.
### `weight`
This will sort all pages by their `weight` field, from lightest weight
(at the top of the list) to heaviest (at the bottom of the list). Each
page gets a `page.next` variable that points *up* the list (to the page that
is just lighter than the current page) and a `page.previous` variable that
points down the list (to the page that is just heavier than the current page).
page gets `page.lighter` and `page.heavier` variables that contain the
pages with lighter and heavier weights, respectively.
### `order`
This will sort all pages by their `order` field. Order is the opposite of weight; think of it as listing the order in which pages were posted, with the
oldest (first) at the bottom of the list. Each page also gets a
`page.next` variable that points *up* the list (to the page with a higher order
than the current page) and a `page.previous` variable that points down the list
(to the page just lower in order).
When iterating through pages, you may wish to use the Tera `reverse` filter,
which reverses the order of the pages. Thus, after using the `reverse` filter,
pages sorted by weight will be sorted from lightest (at the top) to heaviest
(at the bottom); pages sorted by date will be sorted from oldest (at the top)
to newest (at the bottom).
To make this a bit more concrete, let's play out the simple example raised
above. Imagine that we set the `weight` and `order` both to 1 in `Post_1`,
both to 2 in `Post_2` and both to 3 in `Post_3`. (In practice, there would
typically be no reason to set *both* `order` and `weight`).
If we then set `sort_by = "weight"` in the `blog/_index.md` file, we would
get the following order from a Tera for loop:
* Page_1 [`page.next = null`, `page.previous = Page_2`]
* Page_2 [`page.next = Page_1`, `page.previous = Page_3`]
* Page_3 [`page.next = Page_2`, `page.previous = null`]
If, however, we set the `sort_by` front-matter variable to `order`, we
would get:
* Page_3 [`page.next = null`, `page.previous = Page_2`]
* Page_2 [`page.next = Page_3`, `page.previous = Page_1`]
* Page_1 [`page.next = Page_2`, `page.previous = null`]
Note that the order is reversed but in *both* cases the `page.previous` is
pointing *up* the list, and `page.next` is pointing *down* the list. This
fits many common use cases, including when Gutenberg is used for a blog as
in this simple example.
However, Gutenberg is flexible enough to accommodate alternate use cases as
well. If you would prefer the `page.next` and `page.previous` variables
to point in the opposite direction, you can use Tera's `reverse` filter.
`reverse` causes the order to be reversed but does *not* alter the behaviour
of `next` and `previous`. Thus, combining `sort_by = "weight"` with `reverse`
gives you the same sequence as using `sort_by = "order"` but with `next`
and `previous` pointing in the other direction. By combining `sort_by` and
`reverse`, you can achieve any combination of sorting order and
`next`/`previous` values.
`reverse` has no effect on `page.later`/`page.earlier`/`page.heavier`/`page.lighter`.
## Sorting Subsections
Sorting sections is a bit less flexible but also much simpler. This is
because sections do not have `next` or `previous` values. Further, they can
only be sorted by `weight`—thus, the `sort_by` value in the containing section
has no impact at all on any subsections (only on pages).
Sorting sections is a bit less flexible: sections are always sorted by `weight`,
and do not have any variables that point to the next heavier/lighter sections.
Based on this, by default the lightest (lowest `weight`) subsections will be at
the top of the list and the heaviest (highest `weight`) will be at the bottom;
the `reverse` filter reverses this order.
**Note**: If the `weight` variable for your section is not set (or if it
**Note**: Unlike pages, permalinks will **not** be used to break ties between
equally weighted sections. Thus, if the `weight` variable for your section is not set (or if it
is set in a way that produces ties), then your sections will be sorted in
**random** order. Moreover, that order is determined at build time and will
change with each site rebuild. Thus, if there is any chance that you will

View file

@ -32,6 +32,9 @@ are in a `if` statement, we can assume they are optional.
That's it, Gutenberg will now recognise this template as a shortcode named `youtube` (the filename minus the `.html` extension).
The markdown renderer will wrap an inline HTML node like `<a>` or `<span>` into a paragraph. If you want to disable that,
simply wrap your shortcode in a `div`.
## Using shortcodes
There are two kinds of shortcodes:
@ -41,36 +44,42 @@ There are two kinds of shortcodes:
In both cases, their arguments must be named and they will all be passed to the template.
Any shortcodes in code blocks will be ignored.
Lastly, a shortcode name (and thus the corresponding `.html` file) as well as its argument names
can only contain numbers, letters and underscores, or in Regex terms the following: `[0-9A-Za-z_]`.
While theoretically an argument name could be a number, it will not be possible to use it in the template in that case.
Argument values can be of 4 types:
Argument values can be of 5 types:
- string: surrounded by double quotes `"..."`
- string: surrounded by double quotes, single quotes or backticks
- bool: `true` or `false`
- float: a number with a `.` in it
- integer: a number without a `.` in it
- array: an array of any kind of values, except arrays
Malformed values will be silently ignored.
Both types of shortcodes will also get either a `page` or a `section` variable, depending on where they are used, as well as a `config`
variable. These values overwrite any arguments passed to a shortcode, so shortcodes should not use arguments with the same names as
any of these.
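For example, a hypothetical `note.html` shortcode template could read those variables alongside its own `message` argument. This is only a sketch: the template name and argument are illustrative, and `page` is only available when the shortcode is used in a page.
```jinja2
<p class="note">
  {{ message }} (from "{{ page.title }}" on {{ config.title }})
</p>
```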
### Shortcodes without body
On a new line, call the shortcode as if it was a Tera function in a variable block. All the examples below are valid
Simply call the shortcode as if it was a Tera function in a variable block. All the examples below are valid
calls of the YouTube shortcode.
```md
Here is a YouTube video:
{{ youtube(id="dQw4w9WgXcQ") }}
{{/* youtube(id="dQw4w9WgXcQ") */}}
{{ youtube(id="dQw4w9WgXcQ", autoplay=true) }}
{{/* youtube(id="dQw4w9WgXcQ", autoplay=true) */}}
{{ youtube(id="dQw4w9WgXcQ", autoplay=true, class="youtube") }}
An inline {{/* youtube(id="dQw4w9WgXcQ", autoplay=true, class="youtube") */}} shortcode
```
Note that if you want some content that looks like a shortcode but you do not want Gutenberg to try to render it,
you will need to escape it by using `{{/*` and `*/}}` instead of `{{` and `}}`.
### Shortcodes with body
For example, let's imagine we have the following shortcode `quote.html` template:
@ -86,14 +95,18 @@ We could use it in our markup file like so:
```md
As someone said:
{% quote(author="Vincent") %}
{%/* quote(author="Vincent") */%}
A quote
{% end %}
{%/* end */%}
```
The body of the shortcode will be automatically passed down to the rendering context as the `body` variable and needs
to be on a new line.
If you want some content that looks like a shortcode but you do not want Gutenberg to try to render it,
you will need to escape it by using `{%/*` and `*/%}` instead of `{%` and `%}`. You won't need to escape
anything else until the closing tag.
## Built-in shortcodes
Gutenberg comes with a few built-in shortcodes. If you want to override a default shortcode template,
@ -112,11 +125,11 @@ The arguments are:
Usage example:
```md
{{ youtube(id="dQw4w9WgXcQ") }}
{{/* youtube(id="dQw4w9WgXcQ") */}}
{{ youtube(id="dQw4w9WgXcQ", autoplay=true) }}
{{/* youtube(id="dQw4w9WgXcQ", autoplay=true) */}}
{{ youtube(id="dQw4w9WgXcQ", autoplay=true, class="youtube") }}
{{/* youtube(id="dQw4w9WgXcQ", autoplay=true, class="youtube") */}}
```
Result example:
@ -134,9 +147,9 @@ The arguments are:
Usage example:
```md
{{ vimeo(id="124313553") }}
{{/* vimeo(id="124313553") */}}
{{ vimeo(id="124313553", class="vimeo") }}
{{/* vimeo(id="124313553", class="vimeo") */}}
```
Result example:
@ -154,17 +167,17 @@ The arguments are:
Usage example:
```md
{{ streamable(id="2zt0") }}
{{/* streamable(id="92ok4") */}}
{{ streamable(id="2zt0", class="streamble") }}
{{/* streamable(id="92ok4", class="streamble") */}}
```
Result example:
{{ streamable(id="2zt0") }}
{{ streamable(id="92ok4") }}
### Gist
Embed a [Github gist]().
Embed a [Github gist](https://gist.github.com).
The arguments are:
@ -175,9 +188,9 @@ The arguments are:
Usage example:
```md
{{ gist(url="https://gist.github.com/Keats/e5fb6aad409f28721c0ba14161644c57") }}
{{/* gist(url="https://gist.github.com/Keats/e5fb6aad409f28721c0ba14161644c57") */}}
{{ gist(url="https://gist.github.com/Keats/e5fb6aad409f28721c0ba14161644c57", class="gist") }}
{{/* gist(url="https://gist.github.com/Keats/e5fb6aad409f28721c0ba14161644c57", class="gist") */}}
```
Result example:

View file

@ -1,23 +0,0 @@
+++
title = "Tags & Categories"
weight = 90
+++
Gutenberg has built-in support for basic taxonomies: tags and categories.
Those taxonomies are automatically built across the whole site based on
the `tags` and `category` fields of the front-matter: you do not need to define
that a tag or a category exists. You have to set `generate_tags_pages` and/or
`generate_categories_pages` in your [config.toml](./documentation/getting-started/configuration.md).
The taxonomy pages will only be created if at least one item is found and
are available at the following paths:
```plain
$BASE_URL/tags/
$BASE_URL/tags/$TAG_SLUG
$BASE_URL/categories/
$BASE_URL/categories/$CATEGORY_SLUG
```
It is currently not possible to change those paths or to create custom taxonomies.

View file

@ -0,0 +1,36 @@
+++
title = "Taxonomies"
weight = 90
+++
Gutenberg has built-in support for taxonomies.
The first step is to define the taxonomies in your [config.toml](./documentation/getting-started/configuration.md).
A taxonomy has 4 variables:
- `name`: a required string that will be used in the URLs, usually the plural version (e.g. tags, categories, etc.)
- `paginate_by`: if this is set to a number, each term page will be paginated by this many items.
- `paginate_path`: if set, this will be the path used by the paginated pages and the page number will be appended after it.
For example, the default would be page/1.
- `rss`: if set to `true`, an RSS feed will be generated for each individual term.
Once this is done, you can then set taxonomies in your content and Gutenberg will pick
them up:
```toml
+++
...
[taxonomies]
tags = ["rust", "web"]
categories = ["programming"]
+++
```
The taxonomy pages will only be created if at least one non-draft page is found and
are available at the following paths:
```plain
$BASE_URL/$NAME/
$BASE_URL/$NAME/$SLUG
```

View file

@ -40,13 +40,15 @@ generate_rss = false
# The number of articles to include in the RSS feed
rss_limit = 20
# Whether to generate a tags page and individual
# tag pages for pages with tags
generate_tags_pages = false
# Whether to generate a categories page and individual
# category pages for pages with a category
generate_categories_pages = false
# The taxonomies to be rendered for that site and their configuration
# Example:
# taxonomies = [
# {name: "tags", rss: true}, # each tag will have its own RSS feed
# {name: "categories", paginate_by: 5}, # 5 items per page for a term
# {name: "authors"}, # Basic definition: no RSS or pagination
# ]
#
taxonomies = []
# Whether to compile the Sass files found in the `sass` directory
compile_sass = false
@ -55,6 +57,12 @@ compile_sass = false
# content for the `default_language`
build_search_index = false
# Go through every link in all content and check if the links are valid.
# If a link is invalid (404, 500, etc.), the build will error.
# Link checking can take a very long time if you have many links, so this should
# only be enabled once in a while to catch any dead links.
check_external_links = false
# A list of glob patterns specifying asset files to ignore when
# processing the content directory.
# Defaults to none, which means all asset files are copied over to the public folder.

View file

@ -0,0 +1,8 @@
+++
title = "404 error page"
weight = 80
+++
Gutenberg will look for a `404.html` file in the `templates` directory or
use the built-in one. The default template is very basic and gets a simple
variable in the context: the site `config`.
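A minimal custom `404.html` could therefore look something like the sketch below; it only relies on the `config` variable mentioned above, and the markup is purely illustrative.
```jinja2
<!DOCTYPE html>
<html>
  <body>
    <h1>Page not found</h1>
    <p><a href="{{ config.base_url }}">Back to {{ config.title }}</a></p>
  </body>
</html>
```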

View file

@ -0,0 +1,23 @@
+++
title = "Archive"
weight = 90
+++
Gutenberg doesn't have a built-in way to display an archive page, a page showing
all post titles ordered by year. However, this can be accomplished directly in the templates:
```jinja2
{% for year, posts in section.pages | group_by(attribute="year") %}
<h2>{{ year }}</h2>
<ul>
{% for post in posts %}
<li><a href="{{ post.permalink }}">{{ post.title }}</a></li>
{% endfor %}
</ul>
{% endfor %}
```
This snippet assumes that posts are sorted by date and that you want to display the archive
in a descending order. If you want to show articles in an ascending order, simply add a `reverse` filter
after the `group_by`.

View file

This can also be used to get the permalinks for static assets, for example if
we want to link to the file located at `static/css/app.css`:
```jinja2
{{ get_url(path="css/app.css") }}
{{/* get_url(path="css/app.css") */}}
```
For assets it is recommended that you pass `trailing_slash=false` to the `get_url` function. This prevents errors
when dealing with certain hosting providers. An example is:
```jinja2
{{ get_url(path="css/app.css", trailing_slash=false) }}
{{/* get_url(path="css/app.css", trailing_slash=false) */}}
```
In the case of non-internal links, you can also add a cachebust of the format `?t=1290192` at the end of a URL
@ -120,19 +120,30 @@ by passing `cachebust=true` to the `get_url` function.
### `get_taxonomy_url`
Gets the permalink for the tag or category given.
Gets the permalink for the taxonomy item found.
```jinja2
{% set url = get_taxonomy_url(kind="category", name=page.category) %}
{% set url = get_taxonomy_url(kind="categories", name=page.taxonomies.category) %}
```
The `name` will almost always come from a variable, but in case you want to set it manually,
the value should be the same as the one in the front-matter, not the slugified version.
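For example, to link every tag of a page to its term page, something like the following sketch could work; it assumes a `tags` taxonomy is configured and that the page sets some tags.
```jinja2
{% for tag in page.taxonomies.tags %}
  {% set url = get_taxonomy_url(kind="tags", name=tag) %}
  <a href="{{ url }}">{{ tag }}</a>
{% endfor %}
```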
### `get_taxonomy`
Gets the whole taxonomy of a specific kind.
```jinja2
{% set categories = get_taxonomy(kind="categories") %}
```
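The returned value appears to mirror the `Taxonomy` struct seen in the tests above, exposing the taxonomy configuration as `kind` and its terms as `items`; treat the exact field names in this sketch as an assumption.
```jinja2
{% set categories = get_taxonomy(kind="categories") %}
<ul>
{% for item in categories.items %}
  <li><a href="{{ item.permalink }}">{{ item.name }}</a></li>
{% endfor %}
</ul>
```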
### `trans`
Gets the translation of the given `key`, for the `default_language` or the language given.
```jinja2
{{ trans(key="title") }}
{{ trans(key="title", lang="fr") }}
{{/* trans(key="title") */}}
{{/* trans(key="title", lang="fr") */}}
```
### `resize_image`
Resizes an image file.
Please refer to [_Content / Image Processing_](./documentation/content/image-processing/index.md) for complete documentation.

View file

@ -32,11 +32,20 @@ extra: HashMap<String, Any>;
word_count: Number;
// Based on https://help.medium.com/hc/en-us/articles/214991667-Read-time
reading_time: Number;
// `previous` and `next` are only filled if the content can be sorted
previous: Page?;
next: Page?;
// `earlier` and `later` are only populated if the section variable `sort_by` is set to `date`
earlier: Page?;
later: Page?;
// `heavier` and `lighter` are only populated if the section variable `sort_by` is set to `weight`
heavier: Page?;
lighter: Page?;
// See the Table of contents section below for more details
toc: Array<Header>;
// year, month and day are only set if the page has a date; month and day are 1-indexed
year: Number?;
month: Number?;
day: Number?;
// Paths of colocated assets, relative to the content directory
assets: Array<String>;
```
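For example, a page template could use the date-based variables to build previous/next navigation. This is only a sketch; it assumes the containing section sets `sort_by = "date"`.
```jinja2
{% if page.later %}
  <a href="{{ page.later.permalink }}">Newer: {{ page.later.title }}</a>
{% endif %}
{% if page.earlier %}
  <a href="{{ page.earlier.permalink }}">Older: {{ page.earlier.title }}</a>
{% endif %}
```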
## Section variables
@ -63,7 +72,7 @@ extra: HashMap<String, Any>;
pages: Array<Pages>;
// Direct subsections to this section, sorted by subsections weight
subsections: Array<Section>;
// Naive word count, will not work for languages without whitespace
// Unicode word count
word_count: Number;
// Based on https://help.medium.com/hc/en-us/articles/214991667-Read-time
reading_time: Number;

View file

@ -3,9 +3,14 @@ title = "Pagination"
weight = 30
+++
Two things can get paginated: a section or a taxonomy term.
A paginated section gets the same `section` variable as a normal
[section page](./documentation/templates/pages-sections.md#section-variables).
In addition, a paginated section gets a `paginator` variable of the `Pager` type:
[section page](./documentation/templates/pages-sections.md#section-variables)
while a paginated taxonomy gets a `taxonomy` variable of type `TaxonomyConfig`, equivalent
to the taxonomy definition in the `config.toml`.
In addition, a paginated page gets a `paginator` variable of the `Pager` type:
```ts
// How many items per page

View file

@ -1,31 +0,0 @@
+++
title = "Tags & Categories"
weight = 40
+++
Tags and categories actually get the same data but with different variable names.
The default templates for those pages are the following:
- `tags.html`: list of tags, gets variable `tags` sorted alphabetically
- `tag.html`: individual tag, gets variable `tag`
- `categories.html`: list of categories, gets variable `categories` sorted alphabetically
- `category.html`: individual category, gets variable `category`
You can override any of those templates by putting one with the same name in the `templates` directory.
`tags` and `categories` both are an array of `TaxonomyItem` sorted alphabetically, while `tag` and `category`
are a `TaxonomyItem`.
A `TaxonomyItem` has the following fields:
```ts
name: String;
slug: String;
permalink: String;
pages: Array<Page>;
```
As `pages` can span many sections, the `pages` array is sorted by date.
Currently, there is no way to define different taxonomy templates per section, change
the path used for them or paginate them.

View file

@ -0,0 +1,51 @@
+++
title = "Taxonomies"
weight = 40
+++
Gutenberg will look up the following files in the `templates` directory:
- `$TAXONOMY_NAME/single.html`
- `$TAXONOMY_NAME/list.html`
First, a `TaxonomyTerm` has the following fields:
```ts
name: String;
slug: String;
permalink: String;
pages: Array<Page>;
```
## Non-paginated taxonomies
If a taxonomy is not paginated, the templates get the following variables:
### Single term (`single.html`)
```ts
// The site config
config: Config;
// The data of the taxonomy, from the config
taxonomy: TaxonomyConfig;
// The current full permalink for that page
current_url: String;
// The current path for that page
current_path: String;
// The current term being rendered
term: TaxonomyTerm;
```
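Using only these variables, a minimal `single.html` could look something like this sketch:
```jinja2
<h1>{{ taxonomy.name }}: {{ term.name }}</h1>
<ul>
{% for page in term.pages %}
  <li><a href="{{ page.permalink }}">{{ page.title }}</a></li>
{% endfor %}
</ul>
```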
### Taxonomy list (`list.html`)
```ts
// The site config
config: Config;
// The data of the taxonomy, from the config
taxonomy: TaxonomyConfig;
// The current full permalink for that page
current_url: String;
// The current path for that page
current_path: String;
// All terms for that taxonomy
terms: Array<TaxonomyTerm>;
```
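And a minimal `list.html` sketch built from the variables above:
```jinja2
<h1>All {{ taxonomy.name }}</h1>
<ul>
{% for term in terms %}
  <li><a href="{{ term.permalink }}">{{ term.name }}</a> ({{ term.pages | length }} pages)</li>
{% endfor %}
</ul>
```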
## Paginated taxonomies

Binary image files added (not shown): 4.5 KiB, 5.4 KiB, 10 KiB, 12 KiB, 4.4 KiB, 3.4 KiB, 4.4 KiB, 9.6 KiB, 5.5 KiB, 15 KiB.

View file

@ -0,0 +1,8 @@
{% for asset in page.assets %}
{% if asset is ending_with(".jpg") %}
<a href="{{ get_url(path=asset) }}">
<img src="{{ resize_image(path=asset, width=240, height=180, op="fill") }}" />
</a>
&ensp;
{% endif %}
{% endfor %}

View file

@ -0,0 +1 @@
<img src="{{ resize_image(path=path, width=width, height=height, op=op) }}" />

View file

@ -22,16 +22,16 @@
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use std::env;
use std::fs::remove_dir_all;
use std::path::Path;
use std::fs::{remove_dir_all, File};
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::sync::mpsc::channel;
use std::time::{Instant, Duration};
use std::thread;
use chrono::prelude::*;
use iron::{Iron, Request, IronResult, Response, status};
use mount::Mount;
use staticfile::Static;
use actix_web::{self, fs, http, server, App, HttpRequest, HttpResponse, Responder};
use actix_web::middleware::{Middleware, Started, Response};
use notify::{Watcher, RecursiveMode, watcher};
use ws::{WebSocket, Sender, Message};
use ctrlc;
@ -58,9 +58,36 @@ enum ChangeKind {
// errors
const LIVE_RELOAD: &'static str = include_str!("livereload.js");
struct NotFoundHandler {
rendered_template: PathBuf,
}
fn livereload_handler(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, LIVE_RELOAD.to_string())))
impl<S> Middleware<S> for NotFoundHandler {
fn start(&self, _req: &HttpRequest<S>) -> actix_web::Result<Started> {
Ok(Started::Done)
}
fn response(
&self,
_req: &HttpRequest<S>,
mut resp: HttpResponse,
) -> actix_web::Result<Response> {
if http::StatusCode::NOT_FOUND == resp.status() {
let mut fh = File::open(&self.rendered_template)?;
let mut buf: Vec<u8> = vec![];
let _ = fh.read_to_end(&mut buf)?;
resp.replace_body(buf);
resp.headers_mut().insert(
http::header::CONTENT_TYPE,
http::header::HeaderValue::from_static("text/html"),
);
}
Ok(Response::Done(resp))
}
}
fn livereload_handler(_: &HttpRequest) -> &'static str {
LIVE_RELOAD
}
fn rebuild_done_handling(broadcaster: &Sender, res: Result<()>, reload_path: &str) {
@ -86,13 +113,13 @@ fn create_new_site(interface: &str, port: &str, output_dir: &str, base_url: &str
let base_address = format!("{}:{}", base_url, port);
let address = format!("{}:{}", interface, port);
site.config.base_url = if site.config.base_url.ends_with('/') {
let base_url = if site.config.base_url.ends_with('/') {
format!("http://{}/", base_address)
} else {
format!("http://{}", base_address)
};
site.set_base_url(base_url);
site.set_output_path(output_dir);
site.load()?;
site.enable_live_reload();
@ -102,6 +129,24 @@ fn create_new_site(interface: &str, port: &str, output_dir: &str, base_url: &str
Ok((site, address))
}
/// Attempt to render `index.html` when a directory is requested.
///
/// The default "batteries included" mechanisms for actix to handle directory
/// listings rely on redirection which behaves oddly (the location headers
/// seem to use relative paths for some reason).
/// They also mean that the address in the browser will include the
/// `index.html` on a successful redirect (rare), which is unsightly.
///
/// Rather than deal with all of that, we can hijack a hook for presenting a
/// custom directory listing response and serve it up using their
/// `NamedFile` responder.
fn handle_directory<'a, 'b>(dir: &'a fs::Directory, req: &'b HttpRequest) -> io::Result<HttpResponse> {
let mut path = PathBuf::from(&dir.base);
path.push(&dir.path);
path.push("index.html");
fs::NamedFile::open(path)?.respond_to(req)
}
pub fn serve(interface: &str, port: &str, output_dir: &str, base_url: &str, config_file: &str) -> Result<()> {
let start = Instant::now();
let (mut site, address) = create_new_site(interface, port, output_dir, base_url, config_file)?;
@ -115,8 +160,8 @@ pub fn serve(interface: &str, port: &str, output_dir: &str, base_url: &str, conf
.chain_err(|| "Can't watch the `content` folder. Does it exist?")?;
watcher.watch("templates/", RecursiveMode::Recursive)
.chain_err(|| "Can't watch the `templates` folder. Does it exist?")?;
watcher.watch("config.toml", RecursiveMode::Recursive)
.chain_err(|| "Can't watch the `config.toml` file. Does it exist?")?;
watcher.watch(config_file, RecursiveMode::Recursive)
.chain_err(|| "Can't watch the `config` file. Does it exist?")?;
if Path::new("static").exists() {
watching_static = true;
@ -127,16 +172,32 @@ pub fn serve(interface: &str, port: &str, output_dir: &str, base_url: &str, conf
// Sass support is optional so don't make it an error to not have a sass folder
let _ = watcher.watch("sass/", RecursiveMode::Recursive);
let ws_address = format!("{}:{}", interface, "1112");
let ws_address = format!("{}:{}", interface, site.live_reload.unwrap());
let output_path = Path::new(output_dir).to_path_buf();
// output path is going to need to be moved later on, so clone it for the
// http closure to avoid contention.
let static_root = output_path.clone();
thread::spawn(move || {
let s = server::new(move || {
App::new()
.middleware(NotFoundHandler { rendered_template: static_root.join("404.html") })
.resource(r"/livereload.js", |r| r.f(livereload_handler))
// Start a webserver that serves the `output_dir` directory
let mut mount = Mount::new();
mount.mount("/", Static::new(Path::new(output_dir)));
mount.mount("/livereload.js", livereload_handler);
// Starts with a _ to not trigger the unused lint
// we need to assign to a variable otherwise it will block
let _iron = Iron::new(mount).http(address.as_str())
.chain_err(|| "Can't start the webserver")?;
.handler(
r"/",
fs::StaticFiles::new(&static_root)
.unwrap()
.show_files_listing()
.files_listing_renderer(handle_directory)
)
})
.bind(&address)
.expect("Can't start the webserver")
.shutdown_timeout(20);
println!("Web server is available at http://{}", &address);
s.run();
});
// The websocket for livereload
let ws_server = WebSocket::new(|output: Sender| {
@ -169,10 +230,9 @@ pub fn serve(interface: &str, port: &str, output_dir: &str, base_url: &str, conf
}
println!("Listening for changes in {}/{{{}}}", pwd, watchers.join(", "));
println!("Web server is available at http://{}", address);
println!("Press Ctrl+C to stop\n");
// Delete the output folder on ctrl+C
let output_path = Path::new(output_dir).to_path_buf();
ctrlc::set_handler(move || {
remove_dir_all(&output_path).expect("Failed to delete output directory");
::std::process::exit(0);
@ -253,7 +313,7 @@ fn is_temp_file(path: &Path) -> bool {
}
},
None => {
path.ends_with(".DS_STORE")
true
},
}
}
@ -263,7 +323,8 @@ fn is_temp_file(path: &Path) -> bool {
fn detect_change_kind(pwd: &str, path: &Path) -> (ChangeKind, String) {
let path_str = format!("{}", path.display())
.replace(pwd, "")
.replace("\\", "/");
.replace("\\", "");
let change_kind = if path_str.starts_with("/templates") {
ChangeKind::Templates
} else if path_str.starts_with("/content") {

View file

@ -27,10 +27,11 @@ pub fn error(message: &str) {
/// Display in the console the number of pages/sections in the site
pub fn notify_site_size(site: &Site) {
println!(
"-> Creating {} pages ({} orphan) and {} sections",
"-> Creating {} pages ({} orphan), {} sections, and processing {} images",
site.pages.len(),
site.get_all_orphan_pages().len(),
site.sections.len() - 1, // -1 since we do not count the index as a section
site.num_img_ops(),
);
}
@ -43,7 +44,7 @@ pub fn warn_about_ignored_pages(site: &Site) {
if !ignored_pages.is_empty() {
warn(&format!(
"{} page(s) ignored (missing date or order in a sorted section):",
"{} page(s) ignored (missing date or weight in a sorted section):",
ignored_pages.len()
));
for path in ignored_pages {

View file

@ -1,11 +1,9 @@
extern crate actix_web;
#[macro_use]
extern crate clap;
extern crate chrono;
extern crate term_painter;
extern crate staticfile;
extern crate iron;
extern crate mount;
extern crate notify;
extern crate term_painter;
extern crate url;
extern crate ws;
extern crate ctrlc;

Binary file not shown.

Binary file not shown.

View file

@ -1,7 +1,8 @@
title = "My site"
base_url = "https://staging.com"
highlight_code = true
theme = "sample"
[extra.author]
name = "Vincent Prouillet"

View file

@ -6,5 +6,9 @@ generate_rss = true
rss_limit = 2
theme = "sample"
taxonomies = [
{name = "categories", rss = true},
]
[extra.author]
name = "Vincent Prouillet"

View file

@ -3,7 +3,7 @@ title = "Fixed slug"
description = ""
slug = "something-else"
date = 2017-01-01
aliases = ["/an-old-url/old-page"]
aliases = ["/an-old-url/old-page", "/an-old-url/an-old-alias.html"]
+++
A simple page with a slug defined

View file

@ -1,6 +1,6 @@
+++
title = "DevOps"
sort_by = "order"
sort_by = "weight"
redirect_to = "posts/tutorials/devops/docker"
weight = 10
+++

Some files were not shown because too many files have changed in this diff.