2016-12-06 08:27:03 +00:00
|
|
|
/// A page, can be a blog post or a basic page
|
2017-03-06 11:58:31 +00:00
|
|
|
use std::cmp::Ordering;
|
2016-12-11 06:05:03 +00:00
|
|
|
use std::fs::File;
|
|
|
|
use std::io::prelude::*;
|
2016-12-13 06:22:24 +00:00
|
|
|
use std::path::Path;
|
2017-02-23 08:34:57 +00:00
|
|
|
use std::result::Result as StdResult;
|
2016-12-06 08:27:03 +00:00
|
|
|
|
2016-12-13 06:22:24 +00:00
|
|
|
|
|
|
|
use pulldown_cmark as cmark;
|
2016-12-06 08:27:03 +00:00
|
|
|
use regex::Regex;
|
2017-02-23 08:34:57 +00:00
|
|
|
use tera::{Tera, Context};
|
|
|
|
use serde::ser::{SerializeStruct, self};
|
2017-03-03 08:12:40 +00:00
|
|
|
use slug::slugify;
|
2016-12-06 08:27:03 +00:00
|
|
|
|
2016-12-11 06:05:03 +00:00
|
|
|
use errors::{Result, ResultExt};
|
2016-12-06 12:48:23 +00:00
|
|
|
use config::Config;
|
2017-02-23 08:34:57 +00:00
|
|
|
use front_matter::{FrontMatter};
|
2016-12-06 08:27:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
lazy_static! {
    // Matches a whole page file: an optional leading newline, a `+++`-fenced
    // front-matter section, then the body.
    // Capture 1 = the raw front matter, capture 2 = the markdown content.
    // `(?s)`/`(?-s)` toggle dot-matches-newline around each multi-line capture.
    static ref PAGE_RE: Regex = Regex::new(r"^\n?\+\+\+\n((?s).*(?-s))\+\+\+\n((?s).*(?-s))$").unwrap();
}
|
|
|
|
|
2016-12-06 11:53:14 +00:00
|
|
|
|
2017-02-23 08:34:57 +00:00
|
|
|
#[derive(Clone, Debug, PartialEq, Deserialize)]
pub struct Page {
    /// .md filepath, excluding the content/ bit
    #[serde(skip_serializing)]
    pub filepath: String,
    /// The name of the .md file, without its extension (set from `file_stem`)
    #[serde(skip_serializing)]
    pub filename: String,
    /// The directories above our .md file are called sections
    /// for example a file at content/kb/solutions/blabla.md will have 2 sections:
    /// `kb` and `solutions`
    #[serde(skip_serializing)]
    pub sections: Vec<String>,
    /// The actual content of the page, in markdown
    #[serde(skip_serializing)]
    pub raw_content: String,
    /// The HTML rendered of the page
    pub content: String,
    /// The front matter meta-data
    pub meta: FrontMatter,
    /// The previous page, by date
    pub previous: Option<Box<Page>>,
    /// The next page, by date
    pub next: Option<Box<Page>>,
}
|
|
|
|
|
|
|
|
|
2017-02-23 08:34:57 +00:00
|
|
|
impl Page {
|
|
|
|
pub fn new(meta: FrontMatter) -> Page {
|
2016-12-06 11:53:14 +00:00
|
|
|
Page {
|
2016-12-11 06:05:03 +00:00
|
|
|
filepath: "".to_string(),
|
2016-12-13 06:22:24 +00:00
|
|
|
filename: "".to_string(),
|
|
|
|
sections: vec![],
|
|
|
|
raw_content: "".to_string(),
|
2016-12-06 11:53:14 +00:00
|
|
|
content: "".to_string(),
|
2017-02-23 08:34:57 +00:00
|
|
|
meta: meta,
|
|
|
|
previous: None,
|
|
|
|
next: None,
|
2016-12-06 11:53:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-23 08:34:57 +00:00
|
|
|
/// Get the slug for the page.
|
|
|
|
/// First tries to find the slug in the meta and defaults to filename otherwise
|
|
|
|
pub fn get_slug(&self) -> String {
|
|
|
|
if let Some(ref slug) = self.meta.slug {
|
|
|
|
slug.to_string()
|
|
|
|
} else {
|
2017-03-03 08:12:40 +00:00
|
|
|
slugify(self.filename.clone())
|
2017-02-23 08:34:57 +00:00
|
|
|
}
|
|
|
|
}
|
2016-12-06 11:53:14 +00:00
|
|
|
|
2017-03-06 13:45:33 +00:00
|
|
|
/// Get the URL (without the base URL) to that page
|
2017-03-03 08:12:40 +00:00
|
|
|
pub fn get_url(&self) -> String {
|
|
|
|
if let Some(ref u) = self.meta.url {
|
|
|
|
return u.to_string();
|
|
|
|
}
|
|
|
|
|
|
|
|
if !self.sections.is_empty() {
|
|
|
|
return format!("/{}/{}", self.sections.join("/"), self.get_slug());
|
|
|
|
}
|
|
|
|
|
|
|
|
format!("/{}", self.get_slug())
|
|
|
|
}
|
|
|
|
|
2017-03-06 13:45:33 +00:00
|
|
|
// Get word count and estimated reading time
|
|
|
|
pub fn get_reading_analytics(&self) -> (usize, usize) {
|
|
|
|
// Only works for latin language but good enough for a start
|
|
|
|
let word_count: usize = self.raw_content.split_whitespace().count();
|
|
|
|
|
|
|
|
// https://help.medium.com/hc/en-us/articles/214991667-Read-time
|
|
|
|
// 275 seems a bit too high though
|
|
|
|
(word_count, (word_count / 200))
|
|
|
|
}
|
|
|
|
|
2016-12-06 08:27:03 +00:00
|
|
|
// Parse a page given the content of the .md file
// Files without front matter or with invalid front matter are considered
// erroneous
pub fn parse(filepath: &str, content: &str) -> Result<Page> {
    // 1. separate front matter from content
    if !PAGE_RE.is_match(content) {
        bail!("Couldn't find front matter in `{}`. Did you forget to add `+++`?", filepath);
    }

    // 2. extract the front matter and the content
    // `unwrap` is safe: `is_match` succeeded above so `captures` must too.
    let caps = PAGE_RE.captures(content).unwrap();
    // caps[0] is the full match
    let front_matter = &caps[1];
    let content = &caps[2];

    // 3. create our page, parse front matter and assign all of that
    let meta = FrontMatter::parse(&front_matter)
        .chain_err(|| format!("Error when parsing front matter of file `{}`", filepath))?;

    let mut page = Page::new(meta);
    page.filepath = filepath.to_string();
    page.raw_content = content.to_string();
    // Render the markdown body to HTML once, up front.
    page.content = {
        let mut html = String::new();
        let parser = cmark::Parser::new(&page.raw_content);
        cmark::html::push_html(&mut html, parser);
        html
    };

    // 4. Find sections
    // Pages with custom urls exists outside of sections
    // NOTE(review): when a custom url IS set, `filename` and `sections` are
    // left empty — get_slug() would slugify "" for such pages; get_url()
    // short-circuits on the custom url so this never surfaces there.
    if page.meta.url.is_none() {
        let path = Path::new(filepath);
        page.filename = path.file_stem().expect("Couldn't get filename").to_string_lossy().to_string();

        // find out if we have sections: every parent directory component is one
        for section in path.parent().unwrap().components() {
            page.sections.push(section.as_ref().to_string_lossy().to_string());
        }
    }

    Ok(page)
}
|
2016-12-06 11:53:14 +00:00
|
|
|
|
2016-12-13 06:22:24 +00:00
|
|
|
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Page> {
|
|
|
|
let path = path.as_ref();
|
|
|
|
|
2016-12-11 06:05:03 +00:00
|
|
|
let mut content = String::new();
|
|
|
|
File::open(path)
|
2016-12-13 06:22:24 +00:00
|
|
|
.chain_err(|| format!("Failed to open '{:?}'", path.display()))?
|
2016-12-11 06:05:03 +00:00
|
|
|
.read_to_string(&mut content)?;
|
2016-12-06 08:27:03 +00:00
|
|
|
|
2016-12-13 06:22:24 +00:00
|
|
|
// Remove the content string from name
|
|
|
|
// Maybe get a path as an arg instead and use strip_prefix?
|
2017-02-23 08:34:57 +00:00
|
|
|
Page::parse(&path.strip_prefix("content").unwrap().to_string_lossy(), &content)
|
2016-12-11 06:05:03 +00:00
|
|
|
}
|
2016-12-06 08:27:03 +00:00
|
|
|
|
2016-12-11 06:05:03 +00:00
|
|
|
fn get_layout_name(&self) -> String {
|
2017-02-23 08:34:57 +00:00
|
|
|
match self.meta.layout {
|
2016-12-11 06:05:03 +00:00
|
|
|
Some(ref l) => l.to_string(),
|
2017-02-23 08:34:57 +00:00
|
|
|
None => "page.html".to_string()
|
2016-12-06 11:53:14 +00:00
|
|
|
}
|
2016-12-06 08:27:03 +00:00
|
|
|
}
|
2016-12-06 12:48:23 +00:00
|
|
|
|
2017-03-06 13:45:33 +00:00
|
|
|
/// Renders the page using the default layout, unless specified in front-matter
|
2017-03-03 08:12:40 +00:00
|
|
|
pub fn render_html(&self, tera: &Tera, config: &Config) -> Result<String> {
|
2016-12-11 06:05:03 +00:00
|
|
|
let tpl = self.get_layout_name();
|
|
|
|
let mut context = Context::new();
|
|
|
|
context.add("site", config);
|
|
|
|
context.add("page", self);
|
2016-12-13 06:22:24 +00:00
|
|
|
|
2017-02-23 08:34:57 +00:00
|
|
|
tera.render(&tpl, &context)
|
2016-12-13 06:22:24 +00:00
|
|
|
.chain_err(|| "Error while rendering template")
|
2016-12-11 06:05:03 +00:00
|
|
|
}
|
2016-12-06 08:27:03 +00:00
|
|
|
}
|
|
|
|
|
2017-02-23 08:34:57 +00:00
|
|
|
// Manual Serialize impl: the serialized shape mixes `Page` fields, flattened
// front-matter fields, and values computed on the fly (slug, url, word count,
// reading time), so a derive cannot express it.
impl ser::Serialize for Page {
    fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error> where S: ser::Serializer {
        // 12 = the number of serialize_field calls below; keep in sync.
        let mut state = serializer.serialize_struct("page", 12)?;
        state.serialize_field("content", &self.content)?;
        state.serialize_field("title", &self.meta.title)?;
        state.serialize_field("description", &self.meta.description)?;
        state.serialize_field("date", &self.meta.date)?;
        // slug and url are derived, not stored fields.
        state.serialize_field("slug", &self.get_slug())?;
        state.serialize_field("url", &self.get_url())?;
        state.serialize_field("tags", &self.meta.tags)?;
        state.serialize_field("draft", &self.meta.draft)?;
        state.serialize_field("category", &self.meta.category)?;
        state.serialize_field("extra", &self.meta.extra)?;
        let (word_count, reading_time) = self.get_reading_analytics();
        state.serialize_field("word_count", &word_count)?;
        state.serialize_field("reading_time", &reading_time)?;
        state.end()
    }
}
|
2016-12-06 08:27:03 +00:00
|
|
|
|
2017-03-06 11:58:31 +00:00
|
|
|
impl PartialOrd for Page {
|
|
|
|
fn partial_cmp(&self, other: &Page) -> Option<Ordering> {
|
|
|
|
if self.meta.date.is_none() {
|
|
|
|
println!("No self data");
|
|
|
|
return Some(Ordering::Less);
|
|
|
|
}
|
|
|
|
|
|
|
|
if other.meta.date.is_none() {
|
|
|
|
println!("No other date");
|
|
|
|
return Some(Ordering::Greater);
|
|
|
|
}
|
|
|
|
|
|
|
|
let this_date = self.meta.parse_date().unwrap();
|
|
|
|
let other_date = other.meta.parse_date().unwrap();
|
|
|
|
|
|
|
|
if this_date > other_date {
|
|
|
|
return Some(Ordering::Less);
|
|
|
|
}
|
|
|
|
if this_date < other_date {
|
|
|
|
return Some(Ordering::Greater);
|
|
|
|
}
|
|
|
|
|
|
|
|
Some(Ordering::Equal)
|
|
|
|
}
|
2016-12-13 09:05:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-12-06 08:27:03 +00:00
|
|
|
#[cfg(test)]
mod tests {
    use super::{Page};

    // Happy path: front matter and body are split and the body is rendered.
    #[test]
    fn test_can_parse_a_valid_page() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
slug = "hello-world"
+++
Hello world"#;
        let res = Page::parse("post.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();

        assert_eq!(page.meta.title, "Hello".to_string());
        assert_eq!(page.meta.slug.unwrap(), "hello-world".to_string());
        assert_eq!(page.raw_content, "Hello world".to_string());
        assert_eq!(page.content, "<p>Hello world</p>\n".to_string());
    }

    // A single parent directory becomes a single section.
    #[test]
    fn test_can_find_one_parent_directory() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
slug = "hello-world"
+++
Hello world"#;
        let res = Page::parse("posts/intro.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();
        assert_eq!(page.sections, vec!["posts".to_string()]);
    }

    // Nested directories produce sections in path order.
    #[test]
    fn test_can_find_multiple_parent_directories() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
slug = "hello-world"
+++
Hello world"#;
        let res = Page::parse("posts/intro/start.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();
        assert_eq!(page.sections, vec!["posts".to_string(), "intro".to_string()]);
    }

    // URL = "/" + sections joined by "/" + "/" + slug.
    #[test]
    fn test_can_make_url_from_sections_and_slug() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
slug = "hello-world"
+++
Hello world"#;
        let res = Page::parse("posts/intro/start.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();
        assert_eq!(page.get_url(), "/posts/intro/hello-world");
    }

    // A page at the content root gets just "/" + slug.
    #[test]
    fn test_can_make_url_from_sections_and_slug_root() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
slug = "hello-world"
+++
Hello world"#;
        let res = Page::parse("start.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();
        assert_eq!(page.get_url(), "/hello-world");
    }

    // Missing opening `+++` fence: the file must be rejected.
    #[test]
    fn test_errors_on_invalid_front_matter_format() {
        let content = r#"
title = "Hello"
description = "hey there"
slug = "hello-world"
+++
Hello world"#;
        let res = Page::parse("start.md", content);
        assert!(res.is_err());
    }

    // No slug in the front matter: the filename is slugified instead.
    #[test]
    fn test_can_make_slug_from_non_slug_filename() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
+++
Hello world"#;
        let res = Page::parse("file with space.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();
        assert_eq!(page.get_slug(), "file-with-space");
    }

    // 2 words => reading time rounds down to 0 minutes (200 wpm).
    #[test]
    fn test_reading_analytics_short() {
        let content = r#"
+++
title = "Hello"
description = "hey there"
+++
Hello world"#;
        let res = Page::parse("file with space.md", content);
        assert!(res.is_ok());
        let page = res.unwrap();
        let (word_count, reading_time) = page.get_reading_analytics();
        assert_eq!(word_count, 2);
        assert_eq!(reading_time, 0);
    }

    // 2 + 2*1000 words => 2002 words, 2002 / 200 = 10 minutes.
    #[test]
    fn test_reading_analytics_long() {
        let mut content = r#"
+++
title = "Hello"
description = "hey there"
+++
Hello world"#.to_string();
        for _ in 0..1000 {
            content.push_str(" Hello world");
        }
        let res = Page::parse("hello.md", &content);
        assert!(res.is_ok());
        let page = res.unwrap();
        let (word_count, reading_time) = page.get_reading_analytics();
        assert_eq!(word_count, 2002);
        assert_eq!(reading_time, 10);
    }
}
|