helpers: Test coverage increase

Started to increase coverage in the helpers package; statement coverage is now at 74.9%.

In the process, a few minor changes have also been applied to content.go.

* content.go's comments have been reformatted into proper Go doc comments
* The unused function TruncateWords has been removed
* The RenderingContext "mmark" case now uses MmarkRender; the identical MmarkRenderWithTOC has been removed
* content_test.go has been added to cover content.go's functionality
Gergely Brautigam
2016-02-05 18:40:49 +01:00
committed by Bjørn Erik Pedersen
parent 318a984526
commit e95f3af933
2 changed files with 305 additions and 25 deletions
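Most of those 305 added lines are the new content_test.go, which is not shown in this view. As a rough illustration only, a table-driven test over one of content.go's helpers might look like the sketch below; the test name and cases are hypothetical, not copied from the commit.

```go
package helpers

import "testing"

// Hypothetical example of the style of table-driven test content_test.go adds;
// the cases are illustrative, not the commit's actual ones.
func TestBytesToHTMLExample(t *testing.T) {
	cases := []struct {
		in   []byte
		want string
	}{
		{[]byte("dobedobedo"), "dobedobedo"},
		{[]byte("<h1>hello</h1>"), "<h1>hello</h1>"},
	}
	for i, c := range cases {
		if got := string(BytesToHTML(c.in)); got != c.want {
			t.Errorf("case %d: got %q, want %q", i, got, c.want)
		}
	}
}
```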

helpers/content.go

@@ -35,10 +35,10 @@ import (
"sync"
)
// Length of the summary that Hugo extracts from a content.
// SummaryLength is the length of the summary that Hugo extracts from a content.
var SummaryLength = 70
// Custom divider <!--more--> let's user define where summarization ends.
// SummaryDivider denotes where content summarization should end. The default is "<!--more-->".
var SummaryDivider = []byte("<!--more-->")
// Blackfriday holds configuration values for Blackfriday rendering.
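The two variables documented here drive summary extraction: SummaryLength caps the automatic summary, and SummaryDivider lets authors mark the cut-off explicitly with <!--more-->. A minimal sketch of locating the divider with the standard library (illustration only, not the commit's code):

```go
// Illustrative only: how a summary boundary could be found with the
// standard library; Hugo's own extraction logic lives elsewhere in helpers.
package main

import (
	"bytes"
	"fmt"
)

var summaryDivider = []byte("<!--more-->") // mirrors helpers.SummaryDivider

func main() {
	content := []byte("Intro paragraph.\n\n<!--more-->\n\nThe rest of the article.")
	if i := bytes.Index(content, summaryDivider); i >= 0 {
		fmt.Printf("summary: %q\n", bytes.TrimSpace(content[:i]))
	}
}
```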
@@ -157,7 +157,7 @@ func BytesToHTML(b []byte) template.HTML {
 	return template.HTML(string(b))
 }
 
-// GetHtmlRenderer creates a new Renderer with the given configuration.
+// GetHTMLRenderer creates a new Renderer with the given configuration.
 func GetHTMLRenderer(defaultFlags int, ctx *RenderingContext) blackfriday.Renderer {
 	renderParameters := blackfriday.HtmlRendererParameters{
 		FootnoteAnchorPrefix: viper.GetString("FootnoteAnchorPrefix"),
@@ -237,7 +237,7 @@ func markdownRenderWithTOC(ctx *RenderingContext) []byte {
 		getMarkdownExtensions(ctx))
 }
 
-// mmark
+// GetMmarkHtmlRenderer returns markdown html renderer.
 func GetMmarkHtmlRenderer(defaultFlags int, ctx *RenderingContext) mmark.Renderer {
 	renderParameters := mmark.HtmlRendererParameters{
 		FootnoteAnchorPrefix: viper.GetString("FootnoteAnchorPrefix"),
@@ -259,6 +259,7 @@ func GetMmarkHtmlRenderer(defaultFlags int, ctx *RenderingContext) mmark.Rendere
 	}
 }
 
+// GetMmarkExtensions returns markdown extensions.
 func GetMmarkExtensions(ctx *RenderingContext) int {
 	flags := 0
 	flags |= mmark.EXTENSION_TABLES
@@ -283,17 +284,12 @@ func GetMmarkExtensions(ctx *RenderingContext) int {
 	return flags
 }
 
+// MmarkRender renders markdowns.
 func MmarkRender(ctx *RenderingContext) []byte {
 	return mmark.Parse(ctx.Content, GetMmarkHtmlRenderer(0, ctx),
 		GetMmarkExtensions(ctx)).Bytes()
 }
 
-func MmarkRenderWithTOC(ctx *RenderingContext) []byte {
-	return mmark.Parse(ctx.Content,
-		GetMmarkHtmlRenderer(0, ctx),
-		GetMmarkExtensions(ctx)).Bytes()
-}
-
 // ExtractTOC extracts Table of Contents from content.
 func ExtractTOC(content []byte) (newcontent []byte, toc []byte) {
 	origContent := make([]byte, len(content))
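MmarkRenderWithTOC had the same body as MmarkRender, so dropping it loses nothing. As a hedged sketch of calling the surviving function directly (import path as of this Hugo version; the zero-value context configuration is assumed to be acceptable, and the content is hypothetical example input):

```go
package main

import (
	"fmt"

	"github.com/spf13/hugo/helpers"
)

func main() {
	// Sketch only: render a small mmark document with package defaults.
	ctx := &helpers.RenderingContext{
		Content: []byte("# A Heading\n\nSome *mmark* content.\n"),
		PageFmt: "mmark",
	}
	fmt.Println(string(helpers.MmarkRender(ctx)))
}
```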
@@ -331,7 +327,7 @@ func ExtractTOC(content []byte) (newcontent []byte, toc []byte) {
 }
 
 // RenderingContext holds contextual information, like content and configuration,
-// for a given content renderin.g
+// for a given content rendering.
 type RenderingContext struct {
 	Content []byte
 	PageFmt string
@@ -361,7 +357,7 @@ func RenderBytesWithTOC(ctx *RenderingContext) []byte {
case "asciidoc":
return []byte(GetAsciidocContent(ctx.Content))
case "mmark":
return MmarkRenderWithTOC(ctx)
return MmarkRender(ctx)
case "rst":
return []byte(GetRstContent(ctx.Content))
}
@@ -403,17 +399,7 @@ func RemoveSummaryDivider(content []byte) []byte {
 	return bytes.Replace(content, SummaryDivider, []byte(""), -1)
 }
 
-// TruncateWords takes content and an int and shortens down the number
-// of words in the content down to the number of int.
-func TruncateWords(s string, max int) string {
-	words := strings.Fields(s)
-	if max > len(words) {
-		return strings.Join(words, " ")
-	}
-
-	return strings.Join(words[:max], " ")
-}
-
+// TruncateWordsByRune truncates words by runes.
 func TruncateWordsByRune(words []string, max int) (string, bool) {
 	count := 0
 	for index, word := range words {
@@ -430,9 +416,8 @@ func TruncateWordsByRune(words []string, max int) (string, bool) {
 				if count >= max {
 					truncatedWords := append(words[:index], word[:ri])
 					return strings.Join(truncatedWords, " "), true
-				} else {
-					count++
 				}
+				count++
 			}
 		}
 	}
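The refactor drops the redundant else branch without changing behaviour: count still advances one rune at a time until max is reached. A hedged usage sketch, assuming the word-count semantics the surrounding code implies (ASCII words count as one each):

```go
package main

import (
	"fmt"

	"github.com/spf13/hugo/helpers"
)

func main() {
	// Illustration only: truncate a slice of words to at most two "word units".
	words := []string{"one", "two", "three"}
	out, truncated := helpers.TruncateWordsByRune(words, 2)
	fmt.Println(out, truncated) // expected: "one two true" under the assumed semantics
}
```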