diff --git a/.direnv/flake-profile b/.direnv/flake-profile
index e6d65cacd772cb922c88eb3a376fc00af0b71642..fe685a51a8cf6c9e63adf03d3d33a98315026ee5 120000
--- a/.direnv/flake-profile
+++ b/.direnv/flake-profile
@@ -1 +1 @@
-flake-profile-24-link
\ No newline at end of file
+flake-profile-27-link
\ No newline at end of file
diff --git a/.direnv/flake-profile-25-link b/.direnv/flake-profile-25-link
new file mode 120000
index 0000000000000000000000000000000000000000..85c89287ca1be1f751ff6919d6e7f78b9947b7f5
--- /dev/null
+++ b/.direnv/flake-profile-25-link
@@ -0,0 +1 @@
+/nix/store/z732hca3vl0w2mil01ljp1gaya4dm67x-bob-env
\ No newline at end of file
diff --git a/.direnv/flake-profile-26-link b/.direnv/flake-profile-26-link
new file mode 120000
index 0000000000000000000000000000000000000000..adee8908dddb0a4a35f07cff7ee9620bdf24fdeb
--- /dev/null
+++ b/.direnv/flake-profile-26-link
@@ -0,0 +1 @@
+/nix/store/19hxmpsvwilgpw4nfg974zvgxry3gngr-bob-env
\ No newline at end of file
diff --git a/.direnv/flake-profile-27-link b/.direnv/flake-profile-27-link
new file mode 120000
index 0000000000000000000000000000000000000000..2c4ab8aa8448a536c73ca88ed87af16fc52c435c
--- /dev/null
+++ b/.direnv/flake-profile-27-link
@@ -0,0 +1 @@
+/nix/store/67qcqdvw00dwhx0s484z1a2c1s7rq910-bob-0.5.3-env
\ No newline at end of file
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/.travis.yml b/application/source/vendor/github.com/andybalholm/cascadia/.travis.yml
deleted file mode 100644
index 6f227517d646b25805b395abee4bd45580871241..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
-  - 1.3
-  - 1.4
-
-install:
-  - go get github.com/andybalholm/cascadia
-
-script:
- - go test -v
-
-notifications:
-  email: false
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/LICENSE b/application/source/vendor/github.com/andybalholm/cascadia/LICENSE
deleted file mode 100644
index ee5ad35acc7ae7070c37089197b6eb3a7d2dbf06..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2011 Andy Balholm. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/README.md b/application/source/vendor/github.com/andybalholm/cascadia/README.md
deleted file mode 100644
index 6433cb9ce1495a7631487806844f8b94653b3290..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/README.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# cascadia
-
-[![](https://travis-ci.org/andybalholm/cascadia.svg)](https://travis-ci.org/andybalholm/cascadia)
-
-The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
-
-To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package.
-
-[Refer to godoc here](https://godoc.org/github.com/andybalholm/cascadia).
-
-## Example
-
-The following is an example of how you can use Cascadia.
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"strings"
-
-	"github.com/andybalholm/cascadia"
-	"golang.org/x/net/html"
-)
-
-var pricingHtml string = `
-<div class="card mb-4 box-shadow">
-	<div class="card-header">
-		<h4 class="my-0 font-weight-normal">Free</h4>
-	</div>
-	<div class="card-body">
-		<h1 class="card-title pricing-card-title">$0/mo</h1>
-		<ul class="list-unstyled mt-3 mb-4">
-			<li>10 users included</li>
-			<li>2 GB of storage</li>
-			<li><a href="https://example.com">See more</a></li>
-		</ul>
-	</div>
-</div>
-
-<div class="card mb-4 box-shadow">
-	<div class="card-header">
-		<h4 class="my-0 font-weight-normal">Pro</h4>
-	</div>
-	<div class="card-body">
-		<h1 class="card-title pricing-card-title">$15/mo</h1>
-		<ul class="list-unstyled mt-3 mb-4">
-			<li>20 users included</li>
-			<li>10 GB of storage</li>
-			<li><a href="https://example.com">See more</a></li>
-		</ul>
-	</div>
-</div>
-
-<div class="card mb-4 box-shadow">
-	<div class="card-header">
-		<h4 class="my-0 font-weight-normal">Enterprise</h4>
-	</div>
-	<div class="card-body">
-		<h1 class="card-title pricing-card-title">$29/mo</h1>
-		<ul class="list-unstyled mt-3 mb-4">
-			<li>30 users included</li>
-			<li>15 GB of storage</li>
-			<li><a>See more</a></li>
-		</ul>
-	</div>
-</div>
-`
-
-func Query(n *html.Node, query string) *html.Node {
-	sel, err := cascadia.Parse(query)
-	if err != nil {
-		return &html.Node{}
-	}
-	return cascadia.Query(n, sel)
-}
-
-func QueryAll(n *html.Node, query string) []*html.Node {
-	sel, err := cascadia.Parse(query)
-	if err != nil {
-		return []*html.Node{}
-	}
-	return cascadia.QueryAll(n, sel)
-}
-
-func AttrOr(n *html.Node, attrName, or string) string {
-	for _, a := range n.Attr {
-		if a.Key == attrName {
-			return a.Val
-		}
-	}
-	return or
-}
-
-func main() {
-	doc, err := html.Parse(strings.NewReader(pricingHtml))
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("List of pricing plans:\n\n")
-	for i, p := range QueryAll(doc, "div.card.mb-4.box-shadow") {
-		planName := Query(p, "h4").FirstChild.Data
-		price := Query(p, ".pricing-card-title").FirstChild.Data
-		usersIncluded := Query(p, "li:first-child").FirstChild.Data
-		storage := Query(p, "li:nth-child(2)").FirstChild.Data
-		detailsUrl := AttrOr(Query(p, "li:last-child a"), "href", "(No link available)")
-		fmt.Printf(
-			"Plan #%d\nName: %s\nPrice: %s\nUsers: %s\nStorage: %s\nDetails: %s\n\n",
-			i+1,
-			planName,
-			price,
-			usersIncluded,
-			storage,
-			detailsUrl,
-		)
-	}
-}
-```
-The output is:
-```
-List of pricing plans:
-
-Plan #1
-Name: Free
-Price: $0/mo
-Users: 10 users included
-Storage: 2 GB of storage
-Details: https://example.com
-
-Plan #2
-Name: Pro
-Price: $15/mo
-Users: 20 users included
-Storage: 10 GB of storage
-Details: https://example.com
-
-Plan #3
-Name: Enterprise
-Price: $29/mo
-Users: 30 users included
-Storage: 15 GB of storage
-Details: (No link available)
-```
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/parser.go b/application/source/vendor/github.com/andybalholm/cascadia/parser.go
deleted file mode 100644
index 06eccd581a871ad4622e4e3d161c803db1b8af54..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/parser.go
+++ /dev/null
@@ -1,889 +0,0 @@
-// Package cascadia is an implementation of CSS selectors.
-package cascadia
-
-import (
-	"errors"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// a parser for CSS selectors
-type parser struct {
-	s string // the source text
-	i int    // the current position
-
-	// if `false`, parsing a pseudo-element
-	// returns an error.
-	acceptPseudoElements bool
-}
-
-// parseEscape parses a backslash escape.
-func (p *parser) parseEscape() (result string, err error) {
-	if len(p.s) < p.i+2 || p.s[p.i] != '\\' {
-		return "", errors.New("invalid escape sequence")
-	}
-
-	start := p.i + 1
-	c := p.s[start]
-	switch {
-	case c == '\r' || c == '\n' || c == '\f':
-		return "", errors.New("escaped line ending outside string")
-	case hexDigit(c):
-		// unicode escape (hex)
-		var i int
-		for i = start; i < start+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
-			// empty
-		}
-		v, _ := strconv.ParseUint(p.s[start:i], 16, 64)
-		if len(p.s) > i {
-			switch p.s[i] {
-			case '\r':
-				i++
-				if len(p.s) > i && p.s[i] == '\n' {
-					i++
-				}
-			case ' ', '\t', '\n', '\f':
-				i++
-			}
-		}
-		p.i = i
-		return string(rune(v)), nil
-	}
-
-	// Return the literal character after the backslash.
-	result = p.s[start : start+1]
-	p.i += 2
-	return result, nil
-}
-
-// toLowerASCII returns s with all ASCII capital letters lowercased.
-func toLowerASCII(s string) string {
-	var b []byte
-	for i := 0; i < len(s); i++ {
-		if c := s[i]; 'A' <= c && c <= 'Z' {
-			if b == nil {
-				b = make([]byte, len(s))
-				copy(b, s)
-			}
-			b[i] = s[i] + ('a' - 'A')
-		}
-	}
-
-	if b == nil {
-		return s
-	}
-
-	return string(b)
-}
-
-func hexDigit(c byte) bool {
-	return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
-}
-
-// nameStart returns whether c can be the first character of an identifier
-// (not counting an initial hyphen, or an escape sequence).
-func nameStart(c byte) bool {
-	return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127
-}
-
-// nameChar returns whether c can be a character within an identifier
-// (not counting an escape sequence).
-func nameChar(c byte) bool {
-	return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 ||
-		c == '-' || '0' <= c && c <= '9'
-}
-
-// parseIdentifier parses an identifier.
-func (p *parser) parseIdentifier() (result string, err error) {
-	const prefix = '-'
-	var numPrefix int
-
-	for len(p.s) > p.i && p.s[p.i] == prefix {
-		p.i++
-		numPrefix++
-	}
-
-	if len(p.s) <= p.i {
-		return "", errors.New("expected identifier, found EOF instead")
-	}
-
-	if c := p.s[p.i]; !(nameStart(c) || c == '\\') {
-		return "", fmt.Errorf("expected identifier, found %c instead", c)
-	}
-
-	result, err = p.parseName()
-	if numPrefix > 0 && err == nil {
-		result = strings.Repeat(string(prefix), numPrefix) + result
-	}
-	return
-}
-
-// parseName parses a name (which is like an identifier, but doesn't have
-// extra restrictions on the first character).
-func (p *parser) parseName() (result string, err error) {
-	i := p.i
-loop:
-	for i < len(p.s) {
-		c := p.s[i]
-		switch {
-		case nameChar(c):
-			start := i
-			for i < len(p.s) && nameChar(p.s[i]) {
-				i++
-			}
-			result += p.s[start:i]
-		case c == '\\':
-			p.i = i
-			val, err := p.parseEscape()
-			if err != nil {
-				return "", err
-			}
-			i = p.i
-			result += val
-		default:
-			break loop
-		}
-	}
-
-	if result == "" {
-		return "", errors.New("expected name, found EOF instead")
-	}
-
-	p.i = i
-	return result, nil
-}
-
-// parseString parses a single- or double-quoted string.
-func (p *parser) parseString() (result string, err error) {
-	i := p.i
-	if len(p.s) < i+2 {
-		return "", errors.New("expected string, found EOF instead")
-	}
-
-	quote := p.s[i]
-	i++
-
-loop:
-	for i < len(p.s) {
-		switch p.s[i] {
-		case '\\':
-			if len(p.s) > i+1 {
-				switch c := p.s[i+1]; c {
-				case '\r':
-					if len(p.s) > i+2 && p.s[i+2] == '\n' {
-						i += 3
-						continue loop
-					}
-					fallthrough
-				case '\n', '\f':
-					i += 2
-					continue loop
-				}
-			}
-			p.i = i
-			val, err := p.parseEscape()
-			if err != nil {
-				return "", err
-			}
-			i = p.i
-			result += val
-		case quote:
-			break loop
-		case '\r', '\n', '\f':
-			return "", errors.New("unexpected end of line in string")
-		default:
-			start := i
-			for i < len(p.s) {
-				if c := p.s[i]; c == quote || c == '\\' || c == '\r' || c == '\n' || c == '\f' {
-					break
-				}
-				i++
-			}
-			result += p.s[start:i]
-		}
-	}
-
-	if i >= len(p.s) {
-		return "", errors.New("EOF in string")
-	}
-
-	// Consume the final quote.
-	i++
-
-	p.i = i
-	return result, nil
-}
-
-// parseRegex parses a regular expression; the end is defined by encountering an
-// unmatched closing ')' or ']' which is not consumed
-func (p *parser) parseRegex() (rx *regexp.Regexp, err error) {
-	i := p.i
-	if len(p.s) < i+2 {
-		return nil, errors.New("expected regular expression, found EOF instead")
-	}
-
-	// number of open parens or brackets;
-	// when it becomes negative, finished parsing regex
-	open := 0
-
-loop:
-	for i < len(p.s) {
-		switch p.s[i] {
-		case '(', '[':
-			open++
-		case ')', ']':
-			open--
-			if open < 0 {
-				break loop
-			}
-		}
-		i++
-	}
-
-	if i >= len(p.s) {
-		return nil, errors.New("EOF in regular expression")
-	}
-	rx, err = regexp.Compile(p.s[p.i:i])
-	p.i = i
-	return rx, err
-}
-
-// skipWhitespace consumes whitespace characters and comments.
-// It returns true if there was actually anything to skip.
-func (p *parser) skipWhitespace() bool {
-	i := p.i
-	for i < len(p.s) {
-		switch p.s[i] {
-		case ' ', '\t', '\r', '\n', '\f':
-			i++
-			continue
-		case '/':
-			if strings.HasPrefix(p.s[i:], "/*") {
-				end := strings.Index(p.s[i+len("/*"):], "*/")
-				if end != -1 {
-					i += end + len("/**/")
-					continue
-				}
-			}
-		}
-		break
-	}
-
-	if i > p.i {
-		p.i = i
-		return true
-	}
-
-	return false
-}
-
-// consumeParenthesis consumes an opening parenthesis and any following
-// whitespace. It returns true if there was actually a parenthesis to skip.
-func (p *parser) consumeParenthesis() bool {
-	if p.i < len(p.s) && p.s[p.i] == '(' {
-		p.i++
-		p.skipWhitespace()
-		return true
-	}
-	return false
-}
-
-// consumeClosingParenthesis consumes a closing parenthesis and any preceding
-// whitespace. It returns true if there was actually a parenthesis to skip.
-func (p *parser) consumeClosingParenthesis() bool {
-	i := p.i
-	p.skipWhitespace()
-	if p.i < len(p.s) && p.s[p.i] == ')' {
-		p.i++
-		return true
-	}
-	p.i = i
-	return false
-}
-
-// parseTypeSelector parses a type selector (one that matches by tag name).
-func (p *parser) parseTypeSelector() (result tagSelector, err error) {
-	tag, err := p.parseIdentifier()
-	if err != nil {
-		return
-	}
-	return tagSelector{tag: toLowerASCII(tag)}, nil
-}
-
-// parseIDSelector parses a selector that matches by id attribute.
-func (p *parser) parseIDSelector() (idSelector, error) {
-	if p.i >= len(p.s) {
-		return idSelector{}, fmt.Errorf("expected id selector (#id), found EOF instead")
-	}
-	if p.s[p.i] != '#' {
-		return idSelector{}, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
-	}
-
-	p.i++
-	id, err := p.parseName()
-	if err != nil {
-		return idSelector{}, err
-	}
-
-	return idSelector{id: id}, nil
-}
-
-// parseClassSelector parses a selector that matches by class attribute.
-func (p *parser) parseClassSelector() (classSelector, error) {
-	if p.i >= len(p.s) {
-		return classSelector{}, fmt.Errorf("expected class selector (.class), found EOF instead")
-	}
-	if p.s[p.i] != '.' {
-		return classSelector{}, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
-	}
-
-	p.i++
-	class, err := p.parseIdentifier()
-	if err != nil {
-		return classSelector{}, err
-	}
-
-	return classSelector{class: class}, nil
-}
-
-// parseAttributeSelector parses a selector that matches by attribute value.
-func (p *parser) parseAttributeSelector() (attrSelector, error) {
-	if p.i >= len(p.s) {
-		return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
-	}
-	if p.s[p.i] != '[' {
-		return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
-	}
-
-	p.i++
-	p.skipWhitespace()
-	key, err := p.parseIdentifier()
-	if err != nil {
-		return attrSelector{}, err
-	}
-	key = toLowerASCII(key)
-
-	p.skipWhitespace()
-	if p.i >= len(p.s) {
-		return attrSelector{}, errors.New("unexpected EOF in attribute selector")
-	}
-
-	if p.s[p.i] == ']' {
-		p.i++
-		return attrSelector{key: key, operation: ""}, nil
-	}
-
-	if p.i+2 >= len(p.s) {
-		return attrSelector{}, errors.New("unexpected EOF in attribute selector")
-	}
-
-	op := p.s[p.i : p.i+2]
-	if op[0] == '=' {
-		op = "="
-	} else if op[1] != '=' {
-		return attrSelector{}, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
-	}
-	p.i += len(op)
-
-	p.skipWhitespace()
-	if p.i >= len(p.s) {
-		return attrSelector{}, errors.New("unexpected EOF in attribute selector")
-	}
-	var val string
-	var rx *regexp.Regexp
-	if op == "#=" {
-		rx, err = p.parseRegex()
-	} else {
-		switch p.s[p.i] {
-		case '\'', '"':
-			val, err = p.parseString()
-		default:
-			val, err = p.parseIdentifier()
-		}
-	}
-	if err != nil {
-		return attrSelector{}, err
-	}
-
-	p.skipWhitespace()
-	if p.i >= len(p.s) {
-		return attrSelector{}, errors.New("unexpected EOF in attribute selector")
-	}
-
-	// check if the attribute contains an ignore case flag
-	ignoreCase := false
-	if p.s[p.i] == 'i' || p.s[p.i] == 'I' {
-		ignoreCase = true
-		p.i++
-	}
-
-	p.skipWhitespace()
-	if p.i >= len(p.s) {
-		return attrSelector{}, errors.New("unexpected EOF in attribute selector")
-	}
-
-	if p.s[p.i] != ']' {
-		return attrSelector{}, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
-	}
-	p.i++
-
-	switch op {
-	case "=", "!=", "~=", "|=", "^=", "$=", "*=", "#=":
-		return attrSelector{key: key, val: val, operation: op, regexp: rx, insensitive: ignoreCase}, nil
-	default:
-		return attrSelector{}, fmt.Errorf("attribute operator %q is not supported", op)
-	}
-}
-
-var (
-	errExpectedParenthesis        = errors.New("expected '(' but didn't find it")
-	errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
-	errUnmatchedParenthesis       = errors.New("unmatched '('")
-)
-
-// parsePseudoclassSelector parses a pseudoclass selector like :not(p) or a pseudo-element
-// For backwards compatibility, both ':' and '::' prefix are allowed for pseudo-elements.
-// https://drafts.csswg.org/selectors-3/#pseudo-elements
-// Returning a nil `Sel` (and a nil `error`) means we found a pseudo-element.
-func (p *parser) parsePseudoclassSelector() (out Sel, pseudoElement string, err error) {
-	if p.i >= len(p.s) {
-		return nil, "", fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
-	}
-	if p.s[p.i] != ':' {
-		return nil, "", fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
-	}
-
-	p.i++
-	var mustBePseudoElement bool
-	if p.i >= len(p.s) {
-		return nil, "", fmt.Errorf("got empty pseudoclass (or pseudoelement)")
-	}
-	if p.s[p.i] == ':' { // we found a pseudo-element
-		mustBePseudoElement = true
-		p.i++
-	}
-
-	name, err := p.parseIdentifier()
-	if err != nil {
-		return
-	}
-	name = toLowerASCII(name)
-	if mustBePseudoElement && (name != "after" && name != "backdrop" && name != "before" &&
-		name != "cue" && name != "first-letter" && name != "first-line" && name != "grammar-error" &&
-		name != "marker" && name != "placeholder" && name != "selection" && name != "spelling-error") {
-		return out, "", fmt.Errorf("unknown pseudoelement :%s", name)
-	}
-
-	switch name {
-	case "not", "has", "haschild":
-		if !p.consumeParenthesis() {
-			return out, "", errExpectedParenthesis
-		}
-		sel, parseErr := p.parseSelectorGroup()
-		if parseErr != nil {
-			return out, "", parseErr
-		}
-		if !p.consumeClosingParenthesis() {
-			return out, "", errExpectedClosingParenthesis
-		}
-
-		out = relativePseudoClassSelector{name: name, match: sel}
-
-	case "contains", "containsown":
-		if !p.consumeParenthesis() {
-			return out, "", errExpectedParenthesis
-		}
-		if p.i == len(p.s) {
-			return out, "", errUnmatchedParenthesis
-		}
-		var val string
-		switch p.s[p.i] {
-		case '\'', '"':
-			val, err = p.parseString()
-		default:
-			val, err = p.parseIdentifier()
-		}
-		if err != nil {
-			return out, "", err
-		}
-		val = strings.ToLower(val)
-		p.skipWhitespace()
-		if p.i >= len(p.s) {
-			return out, "", errors.New("unexpected EOF in pseudo selector")
-		}
-		if !p.consumeClosingParenthesis() {
-			return out, "", errExpectedClosingParenthesis
-		}
-
-		out = containsPseudoClassSelector{own: name == "containsown", value: val}
-
-	case "matches", "matchesown":
-		if !p.consumeParenthesis() {
-			return out, "", errExpectedParenthesis
-		}
-		rx, err := p.parseRegex()
-		if err != nil {
-			return out, "", err
-		}
-		if p.i >= len(p.s) {
-			return out, "", errors.New("unexpected EOF in pseudo selector")
-		}
-		if !p.consumeClosingParenthesis() {
-			return out, "", errExpectedClosingParenthesis
-		}
-
-		out = regexpPseudoClassSelector{own: name == "matchesown", regexp: rx}
-
-	case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
-		if !p.consumeParenthesis() {
-			return out, "", errExpectedParenthesis
-		}
-		a, b, err := p.parseNth()
-		if err != nil {
-			return out, "", err
-		}
-		if !p.consumeClosingParenthesis() {
-			return out, "", errExpectedClosingParenthesis
-		}
-		last := name == "nth-last-child" || name == "nth-last-of-type"
-		ofType := name == "nth-of-type" || name == "nth-last-of-type"
-		out = nthPseudoClassSelector{a: a, b: b, last: last, ofType: ofType}
-
-	case "first-child":
-		out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: false}
-	case "last-child":
-		out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: true}
-	case "first-of-type":
-		out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: false}
-	case "last-of-type":
-		out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: true}
-	case "only-child":
-		out = onlyChildPseudoClassSelector{ofType: false}
-	case "only-of-type":
-		out = onlyChildPseudoClassSelector{ofType: true}
-	case "input":
-		out = inputPseudoClassSelector{}
-	case "empty":
-		out = emptyElementPseudoClassSelector{}
-	case "root":
-		out = rootPseudoClassSelector{}
-	case "link":
-		out = linkPseudoClassSelector{}
-	case "lang":
-		if !p.consumeParenthesis() {
-			return out, "", errExpectedParenthesis
-		}
-		if p.i == len(p.s) {
-			return out, "", errUnmatchedParenthesis
-		}
-		val, err := p.parseIdentifier()
-		if err != nil {
-			return out, "", err
-		}
-		val = strings.ToLower(val)
-		p.skipWhitespace()
-		if p.i >= len(p.s) {
-			return out, "", errors.New("unexpected EOF in pseudo selector")
-		}
-		if !p.consumeClosingParenthesis() {
-			return out, "", errExpectedClosingParenthesis
-		}
-		out = langPseudoClassSelector{lang: val}
-	case "enabled":
-		out = enabledPseudoClassSelector{}
-	case "disabled":
-		out = disabledPseudoClassSelector{}
-	case "checked":
-		out = checkedPseudoClassSelector{}
-	case "visited", "hover", "active", "focus", "target":
-		// Not applicable in a static context: never match.
-		out = neverMatchSelector{value: ":" + name}
-	case "after", "backdrop", "before", "cue", "first-letter", "first-line", "grammar-error", "marker", "placeholder", "selection", "spelling-error":
-		return nil, name, nil
-	default:
-		return out, "", fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
-	}
-	return
-}
-
-// parseInteger parses a  decimal integer.
-func (p *parser) parseInteger() (int, error) {
-	i := p.i
-	start := i
-	for i < len(p.s) && '0' <= p.s[i] && p.s[i] <= '9' {
-		i++
-	}
-	if i == start {
-		return 0, errors.New("expected integer, but didn't find it")
-	}
-	p.i = i
-
-	val, err := strconv.Atoi(p.s[start:i])
-	if err != nil {
-		return 0, err
-	}
-
-	return val, nil
-}
-
-// parseNth parses the argument for :nth-child (normally of the form an+b).
-func (p *parser) parseNth() (a, b int, err error) {
-	// initial state
-	if p.i >= len(p.s) {
-		goto eof
-	}
-	switch p.s[p.i] {
-	case '-':
-		p.i++
-		goto negativeA
-	case '+':
-		p.i++
-		goto positiveA
-	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		goto positiveA
-	case 'n', 'N':
-		a = 1
-		p.i++
-		goto readN
-	case 'o', 'O', 'e', 'E':
-		id, nameErr := p.parseName()
-		if nameErr != nil {
-			return 0, 0, nameErr
-		}
-		id = toLowerASCII(id)
-		if id == "odd" {
-			return 2, 1, nil
-		}
-		if id == "even" {
-			return 2, 0, nil
-		}
-		return 0, 0, fmt.Errorf("expected 'odd' or 'even', but found '%s' instead", id)
-	default:
-		goto invalid
-	}
-
-positiveA:
-	if p.i >= len(p.s) {
-		goto eof
-	}
-	switch p.s[p.i] {
-	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		a, err = p.parseInteger()
-		if err != nil {
-			return 0, 0, err
-		}
-		goto readA
-	case 'n', 'N':
-		a = 1
-		p.i++
-		goto readN
-	default:
-		goto invalid
-	}
-
-negativeA:
-	if p.i >= len(p.s) {
-		goto eof
-	}
-	switch p.s[p.i] {
-	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		a, err = p.parseInteger()
-		if err != nil {
-			return 0, 0, err
-		}
-		a = -a
-		goto readA
-	case 'n', 'N':
-		a = -1
-		p.i++
-		goto readN
-	default:
-		goto invalid
-	}
-
-readA:
-	if p.i >= len(p.s) {
-		goto eof
-	}
-	switch p.s[p.i] {
-	case 'n', 'N':
-		p.i++
-		goto readN
-	default:
-		// The number we read as a is actually b.
-		return 0, a, nil
-	}
-
-readN:
-	p.skipWhitespace()
-	if p.i >= len(p.s) {
-		goto eof
-	}
-	switch p.s[p.i] {
-	case '+':
-		p.i++
-		p.skipWhitespace()
-		b, err = p.parseInteger()
-		if err != nil {
-			return 0, 0, err
-		}
-		return a, b, nil
-	case '-':
-		p.i++
-		p.skipWhitespace()
-		b, err = p.parseInteger()
-		if err != nil {
-			return 0, 0, err
-		}
-		return a, -b, nil
-	default:
-		return a, 0, nil
-	}
-
-eof:
-	return 0, 0, errors.New("unexpected EOF while attempting to parse expression of form an+b")
-
-invalid:
-	return 0, 0, errors.New("unexpected character while attempting to parse expression of form an+b")
-}
-
-// parseSimpleSelectorSequence parses a selector sequence that applies to
-// a single element.
-func (p *parser) parseSimpleSelectorSequence() (Sel, error) {
-	var selectors []Sel
-
-	if p.i >= len(p.s) {
-		return nil, errors.New("expected selector, found EOF instead")
-	}
-
-	switch p.s[p.i] {
-	case '*':
-		// It's the universal selector. Just skip over it, since it doesn't affect the meaning.
-		p.i++
-		if p.i+2 < len(p.s) && p.s[p.i:p.i+2] == "|*" { // other version of universal selector
-			p.i += 2
-		}
-	case '#', '.', '[', ':':
-		// There's no type selector. Wait to process the other till the main loop.
-	default:
-		r, err := p.parseTypeSelector()
-		if err != nil {
-			return nil, err
-		}
-		selectors = append(selectors, r)
-	}
-
-	var pseudoElement string
-loop:
-	for p.i < len(p.s) {
-		var (
-			ns               Sel
-			newPseudoElement string
-			err              error
-		)
-		switch p.s[p.i] {
-		case '#':
-			ns, err = p.parseIDSelector()
-		case '.':
-			ns, err = p.parseClassSelector()
-		case '[':
-			ns, err = p.parseAttributeSelector()
-		case ':':
-			ns, newPseudoElement, err = p.parsePseudoclassSelector()
-		default:
-			break loop
-		}
-		if err != nil {
-			return nil, err
-		}
-		// From https://drafts.csswg.org/selectors-3/#pseudo-elements :
-		// "Only one pseudo-element may appear per selector, and if present
-		// it must appear after the sequence of simple selectors that
-		// represents the subjects of the selector.""
-		if ns == nil { // we found a pseudo-element
-			if pseudoElement != "" {
-				return nil, fmt.Errorf("only one pseudo-element is accepted per selector, got %s and %s", pseudoElement, newPseudoElement)
-			}
-			if !p.acceptPseudoElements {
-				return nil, fmt.Errorf("pseudo-element %s found, but pseudo-elements support is disabled", newPseudoElement)
-			}
-			pseudoElement = newPseudoElement
-		} else {
-			if pseudoElement != "" {
-				return nil, fmt.Errorf("pseudo-element %s must be at the end of selector", pseudoElement)
-			}
-			selectors = append(selectors, ns)
-		}
-
-	}
-	if len(selectors) == 1 && pseudoElement == "" { // no need wrap the selectors in compoundSelector
-		return selectors[0], nil
-	}
-	return compoundSelector{selectors: selectors, pseudoElement: pseudoElement}, nil
-}
-
-// parseSelector parses a selector that may include combinators.
-func (p *parser) parseSelector() (Sel, error) {
-	p.skipWhitespace()
-	result, err := p.parseSimpleSelectorSequence()
-	if err != nil {
-		return nil, err
-	}
-
-	for {
-		var (
-			combinator byte
-			c          Sel
-		)
-		if p.skipWhitespace() {
-			combinator = ' '
-		}
-		if p.i >= len(p.s) {
-			return result, nil
-		}
-
-		switch p.s[p.i] {
-		case '+', '>', '~':
-			combinator = p.s[p.i]
-			p.i++
-			p.skipWhitespace()
-		case ',', ')':
-			// These characters can't begin a selector, but they can legally occur after one.
-			return result, nil
-		}
-
-		if combinator == 0 {
-			return result, nil
-		}
-
-		c, err = p.parseSimpleSelectorSequence()
-		if err != nil {
-			return nil, err
-		}
-		result = combinedSelector{first: result, combinator: combinator, second: c}
-	}
-}
-
-// parseSelectorGroup parses a group of selectors, separated by commas.
-func (p *parser) parseSelectorGroup() (SelectorGroup, error) {
-	current, err := p.parseSelector()
-	if err != nil {
-		return nil, err
-	}
-	result := SelectorGroup{current}
-
-	for p.i < len(p.s) {
-		if p.s[p.i] != ',' {
-			break
-		}
-		p.i++
-		c, err := p.parseSelector()
-		if err != nil {
-			return nil, err
-		}
-		result = append(result, c)
-	}
-	return result, nil
-}
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/pseudo_classes.go b/application/source/vendor/github.com/andybalholm/cascadia/pseudo_classes.go
deleted file mode 100644
index 6234c3eb91c4e41467e75c072f3f793af459fcb2..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/pseudo_classes.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package cascadia
-
-import (
-	"bytes"
-	"fmt"
-	"regexp"
-	"strings"
-
-	"golang.org/x/net/html"
-	"golang.org/x/net/html/atom"
-)
-
-// This file implements the pseudo classes selectors,
-// which share the implementation of PseudoElement() and Specificity()
-
-type abstractPseudoClass struct{}
-
-func (s abstractPseudoClass) Specificity() Specificity {
-	return Specificity{0, 1, 0}
-}
-
-func (c abstractPseudoClass) PseudoElement() string {
-	return ""
-}
-
-type relativePseudoClassSelector struct {
-	name  string // one of "not", "has", "haschild"
-	match SelectorGroup
-}
-
-func (s relativePseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	switch s.name {
-	case "not":
-		// matches elements that do not match a.
-		return !s.match.Match(n)
-	case "has":
-		//  matches elements with any descendant that matches a.
-		return hasDescendantMatch(n, s.match)
-	case "haschild":
-		// matches elements with a child that matches a.
-		return hasChildMatch(n, s.match)
-	default:
-		panic(fmt.Sprintf("unsupported relative pseudo class selector : %s", s.name))
-	}
-}
-
-// hasChildMatch returns whether n has any child that matches a.
-func hasChildMatch(n *html.Node, a Matcher) bool {
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		if a.Match(c) {
-			return true
-		}
-	}
-	return false
-}
-
-// hasDescendantMatch performs a depth-first search of n's descendants,
-// testing whether any of them match a. It returns true as soon as a match is
-// found, or false if no match is found.
-func hasDescendantMatch(n *html.Node, a Matcher) bool {
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		if a.Match(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
-			return true
-		}
-	}
-	return false
-}
-
-// Specificity returns the specificity of the most specific selectors
-// in the pseudo-class arguments.
-// See https://www.w3.org/TR/selectors/#specificity-rules
-func (s relativePseudoClassSelector) Specificity() Specificity {
-	var max Specificity
-	for _, sel := range s.match {
-		newSpe := sel.Specificity()
-		if max.Less(newSpe) {
-			max = newSpe
-		}
-	}
-	return max
-}
-
-func (c relativePseudoClassSelector) PseudoElement() string {
-	return ""
-}
-
-type containsPseudoClassSelector struct {
-	abstractPseudoClass
-	value string
-	own   bool
-}
-
-func (s containsPseudoClassSelector) Match(n *html.Node) bool {
-	var text string
-	if s.own {
-		// matches nodes that directly contain the given text
-		text = strings.ToLower(nodeOwnText(n))
-	} else {
-		// matches nodes that contain the given text.
-		text = strings.ToLower(nodeText(n))
-	}
-	return strings.Contains(text, s.value)
-}
-
-type regexpPseudoClassSelector struct {
-	abstractPseudoClass
-	regexp *regexp.Regexp
-	own    bool
-}
-
-func (s regexpPseudoClassSelector) Match(n *html.Node) bool {
-	var text string
-	if s.own {
-		// matches nodes whose text directly matches the specified regular expression
-		text = nodeOwnText(n)
-	} else {
-		// matches nodes whose text matches the specified regular expression
-		text = nodeText(n)
-	}
-	return s.regexp.MatchString(text)
-}
-
-// writeNodeText writes the text contained in n and its descendants to b.
-func writeNodeText(n *html.Node, b *bytes.Buffer) {
-	switch n.Type {
-	case html.TextNode:
-		b.WriteString(n.Data)
-	case html.ElementNode:
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			writeNodeText(c, b)
-		}
-	}
-}
-
-// nodeText returns the text contained in n and its descendants.
-func nodeText(n *html.Node) string {
-	var b bytes.Buffer
-	writeNodeText(n, &b)
-	return b.String()
-}
-
-// nodeOwnText returns the contents of the text nodes that are direct
-// children of n.
-func nodeOwnText(n *html.Node) string {
-	var b bytes.Buffer
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		if c.Type == html.TextNode {
-			b.WriteString(c.Data)
-		}
-	}
-	return b.String()
-}
-
-type nthPseudoClassSelector struct {
-	abstractPseudoClass
-	a, b         int
-	last, ofType bool
-}
-
-func (s nthPseudoClassSelector) Match(n *html.Node) bool {
-	if s.a == 0 {
-		if s.last {
-			return simpleNthLastChildMatch(s.b, s.ofType, n)
-		} else {
-			return simpleNthChildMatch(s.b, s.ofType, n)
-		}
-	}
-	return nthChildMatch(s.a, s.b, s.last, s.ofType, n)
-}
-
-// nthChildMatch implements :nth-child(an+b).
-// If last is true, implements :nth-last-child instead.
-// If ofType is true, implements :nth-of-type instead.
-func nthChildMatch(a, b int, last, ofType bool, n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-
-	parent := n.Parent
-	if parent == nil {
-		return false
-	}
-
-	i := -1
-	count := 0
-	for c := parent.FirstChild; c != nil; c = c.NextSibling {
-		if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
-			continue
-		}
-		count++
-		if c == n {
-			i = count
-			if !last {
-				break
-			}
-		}
-	}
-
-	if i == -1 {
-		// This shouldn't happen, since n should always be one of its parent's children.
-		return false
-	}
-
-	if last {
-		i = count - i + 1
-	}
-
-	i -= b
-	if a == 0 {
-		return i == 0
-	}
-
-	return i%a == 0 && i/a >= 0
-}
-
-// simpleNthChildMatch implements :nth-child(b).
-// If ofType is true, implements :nth-of-type instead.
-func simpleNthChildMatch(b int, ofType bool, n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-
-	parent := n.Parent
-	if parent == nil {
-		return false
-	}
-
-	count := 0
-	for c := parent.FirstChild; c != nil; c = c.NextSibling {
-		if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
-			continue
-		}
-		count++
-		if c == n {
-			return count == b
-		}
-		if count >= b {
-			return false
-		}
-	}
-	return false
-}
-
-// simpleNthLastChildMatch implements :nth-last-child(b).
-// If ofType is true, implements :nth-last-of-type instead.
-func simpleNthLastChildMatch(b int, ofType bool, n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-
-	parent := n.Parent
-	if parent == nil {
-		return false
-	}
-
-	count := 0
-	for c := parent.LastChild; c != nil; c = c.PrevSibling {
-		if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
-			continue
-		}
-		count++
-		if c == n {
-			return count == b
-		}
-		if count >= b {
-			return false
-		}
-	}
-	return false
-}
-
-type onlyChildPseudoClassSelector struct {
-	abstractPseudoClass
-	ofType bool
-}
-
-// Match implements :only-child.
-// If `ofType` is true, it implements :only-of-type instead.
-func (s onlyChildPseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-
-	parent := n.Parent
-	if parent == nil {
-		return false
-	}
-
-	count := 0
-	for c := parent.FirstChild; c != nil; c = c.NextSibling {
-		if (c.Type != html.ElementNode) || (s.ofType && c.Data != n.Data) {
-			continue
-		}
-		count++
-		if count > 1 {
-			return false
-		}
-	}
-
-	return count == 1
-}
-
-type inputPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-// Matches input, select, textarea and button elements.
-func (s inputPseudoClassSelector) Match(n *html.Node) bool {
-	return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
-}
-
-type emptyElementPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-// Matches empty elements.
-func (s emptyElementPseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		switch c.Type {
-		case html.ElementNode:
-			return false
-		case html.TextNode:
-			if strings.TrimSpace(nodeText(c)) == "" {
-				continue
-			} else {
-				return false
-			}
-		}
-	}
-
-	return true
-}
-
-type rootPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-// Match implements :root
-func (s rootPseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	if n.Parent == nil {
-		return false
-	}
-	return n.Parent.Type == html.DocumentNode
-}
-
-func hasAttr(n *html.Node, attr string) bool {
-	return matchAttribute(n, attr, func(string) bool { return true })
-}
-
-type linkPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-// Match implements :link
-func (s linkPseudoClassSelector) Match(n *html.Node) bool {
-	return (n.DataAtom == atom.A || n.DataAtom == atom.Area || n.DataAtom == atom.Link) && hasAttr(n, "href")
-}
-
-type langPseudoClassSelector struct {
-	abstractPseudoClass
-	lang string
-}
-
-func (s langPseudoClassSelector) Match(n *html.Node) bool {
-	own := matchAttribute(n, "lang", func(val string) bool {
-		return val == s.lang || strings.HasPrefix(val, s.lang+"-")
-	})
-	if n.Parent == nil {
-		return own
-	}
-	return own || s.Match(n.Parent)
-}
-
-type enabledPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-func (s enabledPseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	switch n.DataAtom {
-	case atom.A, atom.Area, atom.Link:
-		return hasAttr(n, "href")
-	case atom.Optgroup, atom.Menuitem, atom.Fieldset:
-		return !hasAttr(n, "disabled")
-	case atom.Button, atom.Input, atom.Select, atom.Textarea, atom.Option:
-		return !hasAttr(n, "disabled") && !inDisabledFieldset(n)
-	}
-	return false
-}
-
-type disabledPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-func (s disabledPseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	switch n.DataAtom {
-	case atom.Optgroup, atom.Menuitem, atom.Fieldset:
-		return hasAttr(n, "disabled")
-	case atom.Button, atom.Input, atom.Select, atom.Textarea, atom.Option:
-		return hasAttr(n, "disabled") || inDisabledFieldset(n)
-	}
-	return false
-}
-
-func hasLegendInPreviousSiblings(n *html.Node) bool {
-	for s := n.PrevSibling; s != nil; s = s.PrevSibling {
-		if s.DataAtom == atom.Legend {
-			return true
-		}
-	}
-	return false
-}
-
-func inDisabledFieldset(n *html.Node) bool {
-	if n.Parent == nil {
-		return false
-	}
-	if n.Parent.DataAtom == atom.Fieldset && hasAttr(n.Parent, "disabled") &&
-		(n.DataAtom != atom.Legend || hasLegendInPreviousSiblings(n)) {
-		return true
-	}
-	return inDisabledFieldset(n.Parent)
-}
-
-type checkedPseudoClassSelector struct {
-	abstractPseudoClass
-}
-
-func (s checkedPseudoClassSelector) Match(n *html.Node) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	switch n.DataAtom {
-	case atom.Input, atom.Menuitem:
-		return hasAttr(n, "checked") && matchAttribute(n, "type", func(val string) bool {
-			t := toLowerASCII(val)
-			return t == "checkbox" || t == "radio"
-		})
-	case atom.Option:
-		return hasAttr(n, "selected")
-	}
-	return false
-}
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/selector.go b/application/source/vendor/github.com/andybalholm/cascadia/selector.go
deleted file mode 100644
index 87549be23958dfc5948fb4ea20ac998c02ea8d8a..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/selector.go
+++ /dev/null
@@ -1,586 +0,0 @@
-package cascadia
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-
-	"golang.org/x/net/html"
-)
-
-// Matcher is the interface for basic selector functionality.
-// Match returns whether a selector matches n.
-type Matcher interface {
-	Match(n *html.Node) bool
-}
-
-// Sel is the interface for all the functionality provided by selectors.
-type Sel interface {
-	Matcher
-	Specificity() Specificity
-
-	// Returns a CSS input compiling to this selector.
-	String() string
-
-	// Returns a pseudo-element, or an empty string.
-	PseudoElement() string
-}
-
-// Parse parses a selector. Use `ParseWithPseudoElement`
-// if you need support for pseudo-elements.
-func Parse(sel string) (Sel, error) {
-	p := &parser{s: sel}
-	compiled, err := p.parseSelector()
-	if err != nil {
-		return nil, err
-	}
-
-	if p.i < len(sel) {
-		return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
-	}
-
-	return compiled, nil
-}
-
-// ParseWithPseudoElement parses a single selector,
-// with support for pseudo-element.
-func ParseWithPseudoElement(sel string) (Sel, error) {
-	p := &parser{s: sel, acceptPseudoElements: true}
-	compiled, err := p.parseSelector()
-	if err != nil {
-		return nil, err
-	}
-
-	if p.i < len(sel) {
-		return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
-	}
-
-	return compiled, nil
-}
-
-// ParseGroup parses a selector, or a group of selectors separated by commas.
-// Use `ParseGroupWithPseudoElements`
-// if you need support for pseudo-elements.
-func ParseGroup(sel string) (SelectorGroup, error) {
-	p := &parser{s: sel}
-	compiled, err := p.parseSelectorGroup()
-	if err != nil {
-		return nil, err
-	}
-
-	if p.i < len(sel) {
-		return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
-	}
-
-	return compiled, nil
-}
-
-// ParseGroupWithPseudoElements parses a selector, or a group of selectors separated by commas.
-// It supports pseudo-elements.
-func ParseGroupWithPseudoElements(sel string) (SelectorGroup, error) {
-	p := &parser{s: sel, acceptPseudoElements: true}
-	compiled, err := p.parseSelectorGroup()
-	if err != nil {
-		return nil, err
-	}
-
-	if p.i < len(sel) {
-		return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
-	}
-
-	return compiled, nil
-}
-
-// A Selector is a function which tells whether a node matches or not.
-//
-// This type is maintained for compatibility; I recommend using the newer and
-// more idiomatic interfaces Sel and Matcher.
-type Selector func(*html.Node) bool
-
-// Compile parses a selector and returns, if successful, a Selector object
-// that can be used to match against html.Node objects.
-func Compile(sel string) (Selector, error) {
-	compiled, err := ParseGroup(sel)
-	if err != nil {
-		return nil, err
-	}
-
-	return Selector(compiled.Match), nil
-}
-
-// MustCompile is like Compile, but panics instead of returning an error.
-func MustCompile(sel string) Selector {
-	compiled, err := Compile(sel)
-	if err != nil {
-		panic(err)
-	}
-	return compiled
-}
-
-// MatchAll returns a slice of the nodes that match the selector,
-// from n and its children.
-func (s Selector) MatchAll(n *html.Node) []*html.Node {
-	return s.matchAllInto(n, nil)
-}
-
-func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node {
-	if s(n) {
-		storage = append(storage, n)
-	}
-
-	for child := n.FirstChild; child != nil; child = child.NextSibling {
-		storage = s.matchAllInto(child, storage)
-	}
-
-	return storage
-}
-
-func queryInto(n *html.Node, m Matcher, storage []*html.Node) []*html.Node {
-	for child := n.FirstChild; child != nil; child = child.NextSibling {
-		if m.Match(child) {
-			storage = append(storage, child)
-		}
-		storage = queryInto(child, m, storage)
-	}
-
-	return storage
-}
-
-// QueryAll returns a slice of all the nodes that match m, from the descendants
-// of n.
-func QueryAll(n *html.Node, m Matcher) []*html.Node {
-	return queryInto(n, m, nil)
-}
-
-// Match returns true if the node matches the selector.
-func (s Selector) Match(n *html.Node) bool {
-	return s(n)
-}
-
-// MatchFirst returns the first node that matches s, from n and its children.
-func (s Selector) MatchFirst(n *html.Node) *html.Node {
-	if s.Match(n) {
-		return n
-	}
-
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		m := s.MatchFirst(c)
-		if m != nil {
-			return m
-		}
-	}
-	return nil
-}
-
-// Query returns the first node that matches m, from the descendants of n.
-// If none matches, it returns nil.
-func Query(n *html.Node, m Matcher) *html.Node {
-	for c := n.FirstChild; c != nil; c = c.NextSibling {
-		if m.Match(c) {
-			return c
-		}
-		if matched := Query(c, m); matched != nil {
-			return matched
-		}
-	}
-
-	return nil
-}
-
-// Filter returns the nodes in nodes that match the selector.
-func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
-	for _, n := range nodes {
-		if s(n) {
-			result = append(result, n)
-		}
-	}
-	return result
-}
-
-// Filter returns the nodes that match m.
-func Filter(nodes []*html.Node, m Matcher) (result []*html.Node) {
-	for _, n := range nodes {
-		if m.Match(n) {
-			result = append(result, n)
-		}
-	}
-	return result
-}
-
-type tagSelector struct {
-	tag string
-}
-
-// Matches elements with a given tag name.
-func (t tagSelector) Match(n *html.Node) bool {
-	return n.Type == html.ElementNode && n.Data == t.tag
-}
-
-func (c tagSelector) Specificity() Specificity {
-	return Specificity{0, 0, 1}
-}
-
-func (c tagSelector) PseudoElement() string {
-	return ""
-}
-
-type classSelector struct {
-	class string
-}
-
-// Matches elements by class attribute.
-func (t classSelector) Match(n *html.Node) bool {
-	return matchAttribute(n, "class", func(s string) bool {
-		return matchInclude(t.class, s, false)
-	})
-}
-
-func (c classSelector) Specificity() Specificity {
-	return Specificity{0, 1, 0}
-}
-
-func (c classSelector) PseudoElement() string {
-	return ""
-}
-
-type idSelector struct {
-	id string
-}
-
-// Matches elements by id attribute.
-func (t idSelector) Match(n *html.Node) bool {
-	return matchAttribute(n, "id", func(s string) bool {
-		return s == t.id
-	})
-}
-
-func (c idSelector) Specificity() Specificity {
-	return Specificity{1, 0, 0}
-}
-
-func (c idSelector) PseudoElement() string {
-	return ""
-}
-
-type attrSelector struct {
-	key, val, operation string
-	regexp              *regexp.Regexp
-	insensitive         bool
-}
-
-// Matches elements by attribute value.
-func (t attrSelector) Match(n *html.Node) bool {
-	switch t.operation {
-	case "":
-		return matchAttribute(n, t.key, func(string) bool { return true })
-	case "=":
-		return matchAttribute(n, t.key, func(s string) bool { return matchInsensitiveValue(s, t.val, t.insensitive) })
-	case "!=":
-		return attributeNotEqualMatch(t.key, t.val, n, t.insensitive)
-	case "~=":
-		// matches elements where the attribute named key is a whitespace-separated list that includes val.
-		return matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s, t.insensitive) })
-	case "|=":
-		return attributeDashMatch(t.key, t.val, n, t.insensitive)
-	case "^=":
-		return attributePrefixMatch(t.key, t.val, n, t.insensitive)
-	case "$=":
-		return attributeSuffixMatch(t.key, t.val, n, t.insensitive)
-	case "*=":
-		return attributeSubstringMatch(t.key, t.val, n, t.insensitive)
-	case "#=":
-		return attributeRegexMatch(t.key, t.regexp, n)
-	default:
-		panic(fmt.Sprintf("unsuported operation : %s", t.operation))
-	}
-}
-
-// matches elements where we ignore (or not) the case of the attribute value
-// the user attribute is the value set by the user to match elements
-// the real attribute is the attribute value found in the code parsed
-func matchInsensitiveValue(userAttr string, realAttr string, ignoreCase bool) bool {
-	if ignoreCase {
-		return strings.EqualFold(userAttr, realAttr)
-	}
-	return userAttr == realAttr
-
-}
-
-// matches elements where the attribute named key satisifes the function f.
-func matchAttribute(n *html.Node, key string, f func(string) bool) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	for _, a := range n.Attr {
-		if a.Key == key && f(a.Val) {
-			return true
-		}
-	}
-	return false
-}
-
-// attributeNotEqualMatch matches elements where
-// the attribute named key does not have the value val.
-func attributeNotEqualMatch(key, val string, n *html.Node, ignoreCase bool) bool {
-	if n.Type != html.ElementNode {
-		return false
-	}
-	for _, a := range n.Attr {
-		if a.Key == key && matchInsensitiveValue(a.Val, val, ignoreCase) {
-			return false
-		}
-	}
-	return true
-}
-
-// returns true if s is a whitespace-separated list that includes val.
-func matchInclude(val string, s string, ignoreCase bool) bool {
-	for s != "" {
-		i := strings.IndexAny(s, " \t\r\n\f")
-		if i == -1 {
-			return matchInsensitiveValue(s, val, ignoreCase)
-		}
-		if matchInsensitiveValue(s[:i], val, ignoreCase) {
-			return true
-		}
-		s = s[i+1:]
-	}
-	return false
-}
-
-//  matches elements where the attribute named key equals val or starts with val plus a hyphen.
-func attributeDashMatch(key, val string, n *html.Node, ignoreCase bool) bool {
-	return matchAttribute(n, key,
-		func(s string) bool {
-			if matchInsensitiveValue(s, val, ignoreCase) {
-				return true
-			}
-			if len(s) <= len(val) {
-				return false
-			}
-			if matchInsensitiveValue(s[:len(val)], val, ignoreCase) && s[len(val)] == '-' {
-				return true
-			}
-			return false
-		})
-}
-
-// attributePrefixMatch returns a Selector that matches elements where
-// the attribute named key starts with val.
-func attributePrefixMatch(key, val string, n *html.Node, ignoreCase bool) bool {
-	return matchAttribute(n, key,
-		func(s string) bool {
-			if strings.TrimSpace(s) == "" {
-				return false
-			}
-			if ignoreCase {
-				return strings.HasPrefix(strings.ToLower(s), strings.ToLower(val))
-			}
-			return strings.HasPrefix(s, val)
-		})
-}
-
-// attributeSuffixMatch matches elements where
-// the attribute named key ends with val.
-func attributeSuffixMatch(key, val string, n *html.Node, ignoreCase bool) bool {
-	return matchAttribute(n, key,
-		func(s string) bool {
-			if strings.TrimSpace(s) == "" {
-				return false
-			}
-			if ignoreCase {
-				return strings.HasSuffix(strings.ToLower(s), strings.ToLower(val))
-			}
-			return strings.HasSuffix(s, val)
-		})
-}
-
-// attributeSubstringMatch matches nodes where
-// the attribute named key contains val.
-func attributeSubstringMatch(key, val string, n *html.Node, ignoreCase bool) bool {
-	return matchAttribute(n, key,
-		func(s string) bool {
-			if strings.TrimSpace(s) == "" {
-				return false
-			}
-			if ignoreCase {
-				return strings.Contains(strings.ToLower(s), strings.ToLower(val))
-			}
-			return strings.Contains(s, val)
-		})
-}
-
-// attributeRegexMatch  matches nodes where
-// the attribute named key matches the regular expression rx
-func attributeRegexMatch(key string, rx *regexp.Regexp, n *html.Node) bool {
-	return matchAttribute(n, key,
-		func(s string) bool {
-			return rx.MatchString(s)
-		})
-}
-
-func (c attrSelector) Specificity() Specificity {
-	return Specificity{0, 1, 0}
-}
-
-func (c attrSelector) PseudoElement() string {
-	return ""
-}
-
-// see pseudo_classes.go for pseudo classes selectors
-
-// on a static context, some selectors can't match anything
-type neverMatchSelector struct {
-	value string
-}
-
-func (s neverMatchSelector) Match(n *html.Node) bool {
-	return false
-}
-
-func (s neverMatchSelector) Specificity() Specificity {
-	return Specificity{0, 0, 0}
-}
-
-func (c neverMatchSelector) PseudoElement() string {
-	return ""
-}
-
-type compoundSelector struct {
-	selectors     []Sel
-	pseudoElement string
-}
-
-// Matches elements if each sub-selectors matches.
-func (t compoundSelector) Match(n *html.Node) bool {
-	if len(t.selectors) == 0 {
-		return n.Type == html.ElementNode
-	}
-
-	for _, sel := range t.selectors {
-		if !sel.Match(n) {
-			return false
-		}
-	}
-	return true
-}
-
-func (s compoundSelector) Specificity() Specificity {
-	var out Specificity
-	for _, sel := range s.selectors {
-		out = out.Add(sel.Specificity())
-	}
-	if s.pseudoElement != "" {
-		// https://drafts.csswg.org/selectors-3/#specificity
-		out = out.Add(Specificity{0, 0, 1})
-	}
-	return out
-}
-
-func (c compoundSelector) PseudoElement() string {
-	return c.pseudoElement
-}
-
-type combinedSelector struct {
-	first      Sel
-	combinator byte
-	second     Sel
-}
-
-func (t combinedSelector) Match(n *html.Node) bool {
-	if t.first == nil {
-		return false // maybe we should panic
-	}
-	switch t.combinator {
-	case 0:
-		return t.first.Match(n)
-	case ' ':
-		return descendantMatch(t.first, t.second, n)
-	case '>':
-		return childMatch(t.first, t.second, n)
-	case '+':
-		return siblingMatch(t.first, t.second, true, n)
-	case '~':
-		return siblingMatch(t.first, t.second, false, n)
-	default:
-		panic("unknown combinator")
-	}
-}
-
-// matches an element if it matches d and has an ancestor that matches a.
-func descendantMatch(a, d Matcher, n *html.Node) bool {
-	if !d.Match(n) {
-		return false
-	}
-
-	for p := n.Parent; p != nil; p = p.Parent {
-		if a.Match(p) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// matches an element if it matches d and its parent matches a.
-func childMatch(a, d Matcher, n *html.Node) bool {
-	return d.Match(n) && n.Parent != nil && a.Match(n.Parent)
-}
-
-// matches an element if it matches s2 and is preceded by an element that matches s1.
-// If adjacent is true, the sibling must be immediately before the element.
-func siblingMatch(s1, s2 Matcher, adjacent bool, n *html.Node) bool {
-	if !s2.Match(n) {
-		return false
-	}
-
-	if adjacent {
-		for n = n.PrevSibling; n != nil; n = n.PrevSibling {
-			if n.Type == html.TextNode || n.Type == html.CommentNode {
-				continue
-			}
-			return s1.Match(n)
-		}
-		return false
-	}
-
-	// Walk backwards looking for element that matches s1
-	for c := n.PrevSibling; c != nil; c = c.PrevSibling {
-		if s1.Match(c) {
-			return true
-		}
-	}
-
-	return false
-}
-
-func (s combinedSelector) Specificity() Specificity {
-	spec := s.first.Specificity()
-	if s.second != nil {
-		spec = spec.Add(s.second.Specificity())
-	}
-	return spec
-}
-
-// on combinedSelector, a pseudo-element only makes sens on the last
-// selector, although others increase specificity.
-func (c combinedSelector) PseudoElement() string {
-	if c.second == nil {
-		return ""
-	}
-	return c.second.PseudoElement()
-}
-
-// A SelectorGroup is a list of selectors, which matches if any of the
-// individual selectors matches.
-type SelectorGroup []Sel
-
-// Match returns true if the node matches one of the single selectors.
-func (s SelectorGroup) Match(n *html.Node) bool {
-	for _, sel := range s {
-		if sel.Match(n) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/serialize.go b/application/source/vendor/github.com/andybalholm/cascadia/serialize.go
deleted file mode 100644
index 61acf04e1c69470888485381ea95c1195d908f18..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/serialize.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package cascadia
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// implements the reverse operation Sel -> string
-
-var specialCharReplacer *strings.Replacer
-
-func init() {
-	var pairs []string
-	for _, s := range ",!\"#$%&'()*+ -./:;<=>?@[\\]^`{|}~" {
-		pairs = append(pairs, string(s), "\\"+string(s))
-	}
-	specialCharReplacer = strings.NewReplacer(pairs...)
-}
-
-// espace special CSS char
-func escape(s string) string { return specialCharReplacer.Replace(s) }
-
-func (c tagSelector) String() string {
-	return c.tag
-}
-
-func (c idSelector) String() string {
-	return "#" + escape(c.id)
-}
-
-func (c classSelector) String() string {
-	return "." + escape(c.class)
-}
-
-func (c attrSelector) String() string {
-	val := c.val
-	if c.operation == "#=" {
-		val = c.regexp.String()
-	} else if c.operation != "" {
-		val = fmt.Sprintf(`"%s"`, val)
-	}
-
-	ignoreCase := ""
-
-	if c.insensitive {
-		ignoreCase = " i"
-	}
-
-	return fmt.Sprintf(`[%s%s%s%s]`, c.key, c.operation, val, ignoreCase)
-}
-
-func (c relativePseudoClassSelector) String() string {
-	return fmt.Sprintf(":%s(%s)", c.name, c.match.String())
-}
-
-func (c containsPseudoClassSelector) String() string {
-	s := "contains"
-	if c.own {
-		s += "Own"
-	}
-	return fmt.Sprintf(`:%s("%s")`, s, c.value)
-}
-
-func (c regexpPseudoClassSelector) String() string {
-	s := "matches"
-	if c.own {
-		s += "Own"
-	}
-	return fmt.Sprintf(":%s(%s)", s, c.regexp.String())
-}
-
-func (c nthPseudoClassSelector) String() string {
-	if c.a == 0 && c.b == 1 { // special cases
-		s := ":first-"
-		if c.last {
-			s = ":last-"
-		}
-		if c.ofType {
-			s += "of-type"
-		} else {
-			s += "child"
-		}
-		return s
-	}
-	var name string
-	switch [2]bool{c.last, c.ofType} {
-	case [2]bool{true, true}:
-		name = "nth-last-of-type"
-	case [2]bool{true, false}:
-		name = "nth-last-child"
-	case [2]bool{false, true}:
-		name = "nth-of-type"
-	case [2]bool{false, false}:
-		name = "nth-child"
-	}
-	s := fmt.Sprintf("+%d", c.b)
-	if c.b < 0 { // avoid +-8 invalid syntax
-		s = strconv.Itoa(c.b)
-	}
-	return fmt.Sprintf(":%s(%dn%s)", name, c.a, s)
-}
-
-func (c onlyChildPseudoClassSelector) String() string {
-	if c.ofType {
-		return ":only-of-type"
-	}
-	return ":only-child"
-}
-
-func (c inputPseudoClassSelector) String() string {
-	return ":input"
-}
-
-func (c emptyElementPseudoClassSelector) String() string {
-	return ":empty"
-}
-
-func (c rootPseudoClassSelector) String() string {
-	return ":root"
-}
-
-func (c linkPseudoClassSelector) String() string {
-	return ":link"
-}
-
-func (c langPseudoClassSelector) String() string {
-	return fmt.Sprintf(":lang(%s)", c.lang)
-}
-
-func (c neverMatchSelector) String() string {
-	return c.value
-}
-
-func (c enabledPseudoClassSelector) String() string {
-	return ":enabled"
-}
-
-func (c disabledPseudoClassSelector) String() string {
-	return ":disabled"
-}
-
-func (c checkedPseudoClassSelector) String() string {
-	return ":checked"
-}
-
-func (c compoundSelector) String() string {
-	if len(c.selectors) == 0 && c.pseudoElement == "" {
-		return "*"
-	}
-	chunks := make([]string, len(c.selectors))
-	for i, sel := range c.selectors {
-		chunks[i] = sel.String()
-	}
-	s := strings.Join(chunks, "")
-	if c.pseudoElement != "" {
-		s += "::" + c.pseudoElement
-	}
-	return s
-}
-
-func (c combinedSelector) String() string {
-	start := c.first.String()
-	if c.second != nil {
-		start += fmt.Sprintf(" %s %s", string(c.combinator), c.second.String())
-	}
-	return start
-}
-
-func (c SelectorGroup) String() string {
-	ck := make([]string, len(c))
-	for i, s := range c {
-		ck[i] = s.String()
-	}
-	return strings.Join(ck, ", ")
-}
diff --git a/application/source/vendor/github.com/andybalholm/cascadia/specificity.go b/application/source/vendor/github.com/andybalholm/cascadia/specificity.go
deleted file mode 100644
index 8db864f9beb2ccfd23fb5b11bd2e0c860a880824..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/andybalholm/cascadia/specificity.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package cascadia
-
-// Specificity is the CSS specificity as defined in
-// https://www.w3.org/TR/selectors/#specificity-rules
-// with the convention Specificity = [A,B,C].
-type Specificity [3]int
-
-// returns `true` if s < other (strictly), false otherwise
-func (s Specificity) Less(other Specificity) bool {
-	for i := range s {
-		if s[i] < other[i] {
-			return true
-		}
-		if s[i] > other[i] {
-			return false
-		}
-	}
-	return false
-}
-
-func (s Specificity) Add(other Specificity) Specificity {
-	for i, sp := range other {
-		s[i] += sp
-	}
-	return s
-}
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/.codecov.yml b/application/source/vendor/github.com/volker-schukai/tokenizer/.codecov.yml
deleted file mode 100644
index bfdc9877d9acc165c0342befa33f2e8f769538cf..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/.codecov.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-coverage:
-  status:
-    project:
-      default:
-        informational: true
-    patch:
-      default:
-        informational: true
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/.gitignore b/application/source/vendor/github.com/volker-schukai/tokenizer/.gitignore
deleted file mode 100644
index 723ef36f4e4f32c4560383aa5987c575a30c6535..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.idea
\ No newline at end of file
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/LICENSE b/application/source/vendor/github.com/volker-schukai/tokenizer/LICENSE
deleted file mode 100644
index 209a7b92a054986875b2ba19a90b3c94c221c129..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2021 Ivan Shalganov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/helpers.go b/application/source/vendor/github.com/volker-schukai/tokenizer/helpers.go
deleted file mode 100644
index 9022d6e677de1393167dfad682442c7afdabd8a9..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/helpers.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package tokenizer
-
-import (
-	"reflect"
-	"runtime"
-	"unsafe"
-)
-
-// b2s converts byte slice to a string without memory allocation.
-// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ .
-func b2s(b []byte) string {
-	return *(*string)(unsafe.Pointer(&b))
-}
-
-// s2b converts string to a byte slice without memory allocation.
-//
-// Note it may break if the string and/or slice header changes
-// in future Go versions.
-func s2b(s string) (b []byte) {
-	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = sh.Data
-	bh.Cap = sh.Len
-	bh.Len = sh.Len
-
-	runtime.KeepAlive(&s)
-
-	return b
-}
-
-func isNumberByte(b byte) bool {
-	return '0' <= b && b <= '9'
-}
-
-func bytesStarts(prefix []byte, b []byte) bool {
-	if len(prefix) > len(b) {
-		return false
-	}
-	return b2s(prefix) == b2s(b[0:len(prefix)])
-}
-
-func bytesEnds(suffix []byte, b []byte) bool {
-	if len(suffix) > len(b) {
-		return false
-	}
-	return b2s(suffix) == b2s(b[len(b)-len(suffix):])
-}
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/parser.go b/application/source/vendor/github.com/volker-schukai/tokenizer/parser.go
deleted file mode 100644
index 5f8b7f2174acce0b480b8b167ea316143ecd7ad5..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/parser.go
+++ /dev/null
@@ -1,472 +0,0 @@
-package tokenizer
-
-import (
-	"io"
-	"unicode"
-	"unicode/utf8"
-)
-
-// DefaultChunkSize is the default chunk size for the reader.
-const DefaultChunkSize = 4096
-
-// parsing is the main parser
-type parsing struct {
-	t         *Tokenizer
-	curr      byte
-	pos       int
-	line      int
-	str       []byte
-	err       error
-	reader    io.Reader
-	token     *Token
-	head      *Token
-	ptr       *Token
-	tail      []byte
-	stopKeys  []*tokenRef
-	n         int // tokens id generator
-	chunkSize int // chunks size for infinite buffer
-	offset    int
-	resume    bool
-	parsed    int
-}
-
-// newParser creates new parser for string
-func newParser(t *Tokenizer, str []byte) *parsing {
-	tok := t.allocToken()
-	tok.line = 1
-	return &parsing{
-		t:     t,
-		str:   str,
-		line:  1,
-		token: tok,
-	}
-}
-
-func newInfParser(t *Tokenizer, reader io.Reader, bufferSize uint) *parsing {
-	if bufferSize == 0 {
-		bufferSize = DefaultChunkSize
-	}
-	buffer := make([]byte, bufferSize)
-	tok := t.allocToken()
-	tok.line = 1
-	return &parsing{
-		t:         t,
-		str:       buffer,
-		reader:    reader,
-		line:      1,
-		chunkSize: int(bufferSize),
-		token:     tok,
-	}
-}
-
-func (p *parsing) prev() {
-	if p.pos > 0 {
-		p.pos--
-		p.curr = p.str[p.pos]
-	}
-}
-
-func (p *parsing) ensureBytes(n int) bool {
-	if p.pos+n >= len(p.str) {
-		if p.reader != nil {
-			p.loadChunk()
-			if p.pos+n < len(p.str) {
-				return true
-			}
-		}
-		return false
-	}
-	return true
-}
-
-func (p *parsing) next() {
-	p.pos++
-	if p.pos >= len(p.str) {
-		if p.reader == nil || p.loadChunk() == 0 {
-			p.curr = 0
-			return
-		}
-	}
-	p.curr = p.str[p.pos]
-}
-
-func (p *parsing) nextByte() byte {
-	if p.ensureBytes(1) {
-		return p.str[p.pos+1]
-	}
-	return 0
-}
-
-func (p *parsing) slice(from, to int) []byte {
-	if to < len(p.str) {
-		return p.str[from:to]
-	}
-	return p.str[from:]
-}
-
-func (p *parsing) preload() {
-	n, err := p.reader.Read(p.str)
-	if n < p.chunkSize {
-		p.str = p.str[:n]
-		p.reader = nil
-	}
-	if err != nil {
-		p.reader = nil
-		if err != io.EOF {
-			p.err = err
-		}
-	}
-}
-
-func (p *parsing) loadChunk() int {
-	// chunk size = new chunk size + size of tail of prev chunk
-	chunk := make([]byte, len(p.str)+p.chunkSize)
-	copy(chunk, p.str)
-	n, err := p.reader.Read(chunk[len(p.str):])
-
-	if n < p.chunkSize {
-		p.str = chunk[:len(p.str)+n]
-		p.reader = nil
-	} else {
-		p.str = chunk
-	}
-
-	if err != nil {
-		p.reader = nil
-		if err != io.EOF {
-			p.err = err
-		}
-	}
-	p.resume = false
-	return n
-}
-
-// checkPoint resets internal values for the next chunk of data
-func (p *parsing) checkPoint() bool {
-	if p.pos > 0 {
-		p.parsed += p.pos
-		p.str = p.str[p.pos:]
-		p.offset += p.pos
-		p.pos = 0
-		if len(p.str) == 0 {
-			p.curr = 0
-		}
-	}
-	return p.resume
-}
-
-// parse bytes (p.str) to tokens and append them to the end of the stream of tokens.
-func (p *parsing) parse() {
-	if len(p.str) == 0 {
-		if p.reader == nil || p.loadChunk() == 0 { // if it's not infinite stream or this is the end of stream
-			return
-		}
-	}
-	p.curr = p.str[p.pos]
-	p.resume = true
-	for p.checkPoint() {
-		if p.stopKeys != nil {
-			for _, t := range p.stopKeys {
-				if p.ptr.key == t.Key {
-					return
-				}
-			}
-		}
-		p.parseWhitespace()
-		if p.curr == 0 {
-			break
-		}
-		if p.parseToken() {
-			continue
-		}
-		if p.curr == 0 {
-			break
-		}
-		if p.parseKeyword() {
-			continue
-		}
-		if p.curr == 0 {
-			break
-		}
-		if p.parseNumber() {
-			continue
-		}
-		if p.curr == 0 {
-			break
-		}
-		if p.parseQuote() {
-			continue
-		}
-		if p.curr == 0 {
-			break
-		}
-		if p.t.flags&fStopOnUnknown != 0 {
-			break
-		}
-		p.token.key = TokenUnknown
-		p.token.value = p.str[p.pos : p.pos+1]
-		p.token.offset = p.offset + p.pos
-		p.next()
-		p.emmitToken()
-		if p.curr == 0 {
-			break
-		}
-	}
-	if len(p.token.indent) > 0 {
-		p.tail = p.token.indent
-	}
-}
-
-func (p *parsing) parseWhitespace() bool {
-	var start = -1
-	for p.curr != 0 {
-		var matched = false
-		for _, ws := range p.t.wSpaces {
-			if p.curr == ws {
-				if start == -1 {
-					start = p.pos
-				}
-				matched = true
-				break
-			}
-		}
-		if !matched {
-			break
-		}
-		if p.curr == newLine {
-			p.line++
-		}
-		p.next()
-	}
-	if start != -1 {
-		p.token.line = p.line
-		p.token.indent = p.str[start:p.pos]
-		return true
-	}
-	return false
-}
-
-func (p *parsing) parseKeyword() bool {
-	var start = -1
-	for p.curr != 0 {
-		var r rune
-		var size int
-		p.ensureBytes(4)
-		r, size = utf8.DecodeRune(p.slice(p.pos, p.pos+4))
-		if unicode.IsLetter(r) ||
-			(p.t.flags&fAllowKeywordUnderscore != 0 && p.curr == '_') ||
-			(p.t.flags&fAllowNumberInKeyword != 0 && start != -1 && isNumberByte(p.curr)) {
-
-			if start == -1 {
-				start = p.pos
-			}
-			p.pos += size - 1 // rune may be more than 1 byte
-		} else {
-			break
-		}
-		p.next()
-	}
-	if start != -1 {
-		p.token.key = TokenKeyword
-		p.token.value = p.str[start:p.pos]
-		p.token.offset = p.offset + start
-		p.emmitToken()
-		return true
-	}
-	return false
-}
-
-const (
-	stageCoefficient = iota + 1
-	stageMantissa
-	stagePower
-)
-
-func (p *parsing) parseNumber() bool {
-	var start = -1
-	var needNumber = true
-
-	var stage uint8 = 0
-	for p.curr != 0 {
-		if isNumberByte(p.curr) {
-			needNumber = false
-			if start == -1 {
-				if stage == 0 {
-					stage = stageCoefficient
-					start = p.pos
-				}
-			}
-		} else if p.t.flags&fAllowNumberUnderscore != 0 && p.curr == '_' {
-			if stage != stageCoefficient {
-				break
-			}
-			// todo: check for double underscore
-		} else if !needNumber && p.curr == '.' {
-			if stage != stageCoefficient {
-				break
-			}
-			stage = stageMantissa
-			needNumber = true
-		} else if !needNumber && (p.curr == 'e' || p.curr == 'E') {
-			if stage != stageMantissa && stage != stageCoefficient {
-				break
-			}
-			ePowSign := false
-			switch p.nextByte() {
-			case '-', '+':
-				ePowSign = true
-				p.next()
-			}
-			needNumber = true
-			if isNumberByte(p.nextByte()) {
-				stage = stagePower
-			} else {
-				if ePowSign { // rollback sign position
-					p.prev()
-				}
-				break
-			}
-		} else {
-			break
-		}
-		p.next()
-	}
-	if stage == 0 {
-		return false
-	}
-	p.token.value = p.str[start:p.pos]
-	if stage == stageCoefficient {
-		p.token.key = TokenInteger
-		p.token.offset = p.offset + start
-	} else {
-		p.token.key = TokenFloat
-		p.token.offset = p.offset + start
-	}
-	p.emmitToken()
-	return true
-}
-
-// match compares the next bytes from the data with `r`
-func (p *parsing) match(r []byte, seek bool) bool {
-	if r[0] == p.curr {
-		if len(r) > 1 {
-			if p.ensureBytes(len(r) - 1) {
-				var i = 1
-				for ; i < len(r); i++ {
-					if r[i] != p.str[p.pos+i] {
-						return false
-					}
-				}
-				if seek {
-					p.pos += i - 1
-					p.next()
-				}
-				return true
-			}
-			return false
-		}
-		if seek {
-			p.next()
-		}
-		return true
-	}
-	return false
-}
-
-// parseQuote parses quoted string.
-func (p *parsing) parseQuote() bool {
-	var quote *StringSettings
-	var start = p.pos
-	for _, q := range p.t.quotes {
-		if p.match(q.StartToken, true) {
-			quote = q
-			break
-		}
-	}
-	if quote == nil {
-		return false
-	}
-	p.token.key = TokenString
-	p.token.offset = p.offset + start
-	p.token.string = quote
-	escapes := false
-	for p.curr != 0 {
-		if escapes {
-			escapes = false
-		} else if p.curr == quote.EscapeSymbol {
-			escapes = true
-		} else if p.match(quote.EndToken, true) {
-			break
-		} else if quote.Injects != nil {
-			loop := true
-			for _, inject := range quote.Injects {
-				for _, token := range p.t.tokens[inject.StartKey] {
-					if p.match(token.Token, true) {
-						p.token.key = TokenStringFragment
-						p.token.value = p.str[start : p.pos-len(token.Token)]
-						p.emmitToken()
-						p.token.key = token.Key
-						p.token.value = token.Token
-						p.token.offset = p.offset + p.pos - len(token.Token)
-						p.emmitToken()
-						stopKeys := p.stopKeys // may be recursive quotes
-						p.stopKeys = p.t.tokens[inject.EndKey]
-						p.parse()
-						p.stopKeys = stopKeys
-						p.token.key = TokenStringFragment
-						p.token.offset = p.offset + p.pos
-						p.token.string = quote
-						start = p.pos
-						loop = false
-						break
-					}
-				}
-				if !loop {
-					break
-				}
-			}
-		}
-		if p.curr == newLine {
-			p.line++
-		}
-		p.next()
-	}
-	p.token.value = p.str[start:p.pos]
-	p.emmitToken()
-	return true
-}
-
-// parseToken searches for any rune sequence from tokenRef.
-func (p *parsing) parseToken() bool {
-	if p.curr != 0 {
-		toks := p.t.index[p.curr]
-		if toks != nil {
-			start := p.pos
-			for _, t := range toks {
-				if p.match(t.Token, true) {
-					p.token.key = t.Key
-					p.token.offset = p.offset + start
-					p.token.value = t.Token
-					p.emmitToken()
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
-
-// emmitToken adds the new p.token to the stream.
-func (p *parsing) emmitToken() {
-	if p.ptr == nil {
-		p.ptr = p.token
-		p.head = p.ptr
-	} else {
-		p.ptr.addNext(p.token)
-		p.ptr = p.token
-	}
-	p.n++
-	p.token = p.t.allocToken()
-	p.token.id = p.n
-	p.token.line = p.line
-}
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/readme.md b/application/source/vendor/github.com/volker-schukai/tokenizer/readme.md
deleted file mode 100644
index e86b7ecc78cec84b8c2a490405b7bc4bded3cccf..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/readme.md
+++ /dev/null
@@ -1,385 +0,0 @@
-# Tokenizer 
-
-[![Build Status](https://github.com/bzick/tokenizer/actions/workflows/tokenizer.yml/badge.svg)](https://github.com/bzick/tokenizer/actions/workflows/tokenizer.yml)
-[![codecov](https://codecov.io/gh/bzick/tokenizer/branch/master/graph/badge.svg?token=MFY5NWATGC)](https://codecov.io/gh/bzick/tokenizer)
-[![Go Report Card](https://goreportcard.com/badge/github.com/bzick/tokenizer?rnd=2)](https://goreportcard.com/report/github.com/bzick/tokenizer)
-[![GoDoc](https://godoc.org/github.com/bzick/tokenizer?status.svg)](https://godoc.org/github.com/bzick/tokenizer)
-
-Tokenizer — parses any string, slice or infinite buffer into tokens.
-
-Main features:
-
-* High performance.
-* No regexp.
-* Provides [simple API](https://pkg.go.dev/github.com/bzick/tokenizer).
-* Supports [integer](#integer-number) and [float](#float-number) numbers.
-* Supports [quoted string or other "framed"](#framed-string) strings.
-* Supports [injection](#injection-in-framed-string) in quoted or "framed" strings.
-* Supports unicode.
-* [Customization of tokens](#user-defined-tokens).
-* Autodetect white space symbols.
-* Parse any data syntax (xml, [json](https://github.com/bzick/tokenizer/blob/master/example_test.go), yaml), any programming language.
-* Single pass through the data.
-* Parses [infinite incoming data](#parse-buffer) and doesn't panic.
-
-Use cases:
-- Parsing html, xml, [json](./example_test.go), yaml and other text formats.
-- Parsing huge or infinite texts. 
-- Parsing any programming languages.
-- Parsing templates.
-- Parsing formulas.
-
-For example, parsing SQL `WHERE` condition `user_id = 119 and modified > "2020-01-01 00:00:00" or amount >= 122.34`:
-
-```go
-// define custom tokens keys
-const ( 
-	TEquality = 1
-	TDot      = 2
-	TMath     = 3
-)
-
-// configure tokenizer
-parser := tokenizer.New()
-parser.DefineTokens(TEquality, []string{"<", "<=", "==", ">=", ">", "!="})
-parser.DefineTokens(TDot, []string{"."})
-parser.DefineTokens(TMath, []string{"+", "-", "/", "*", "%"})
-parser.DefineStringToken(`"`, `"`).SetEscapeSymbol(tokenizer.BackSlash)
-
-// create tokens stream
-stream := parser.ParseString(`user_id = 119 and modified > "2020-01-01 00:00:00" or amount >= 122.34`)
-defer stream.Close()
-
-// iterate over each token
-for stream.IsValid() {
-	if stream.CurrentToken().Is(tokenizer.TokenKeyword) {
-		field := stream.CurrentToken().ValueString()
-		// ... 
-	}
-	stream.GoNext()
-}
-```
-
-token stream:
-```
-string:  user_id  =  119  and  modified  >  "2020-01-01 00:00:00"  or  amount  >=  122.34
-tokens: |user_id| =| 119| and| modified| >| "2020-01-01 00:00:00"| or| amount| >=| 122.34|
-        |   0   | 1|  2 |  3 |    4    | 5|            6         | 7 |    8  | 9 |    10 |
-
-0:  {key: TokenKeyword, value: "user_id"}                token.Value()          == "user_id"
-1:  {key: TEquality, value: "="}                         token.Value()          == "="
-2:  {key: TokenInteger, value: "119"}                    token.ValueInt()       == 119
-3:  {key: TokenKeyword, value: "and"}                    token.Value()          == "and"
-4:  {key: TokenKeyword, value: "modified"}               token.Value()          == "modified"
-5:  {key: TEquality, value: ">"}                         token.Value()          == ">"
-6:  {key: TokenString, value: "\"2020-01-01 00:00:00\""} token.ValueUnescaped() == "2020-01-01 00:00:00"
-7:  {key: TokenKeyword, value: "or"}                     token.Value()          == "or"
-8:  {key: TokenKeyword, value: "amount"}                 token.Value()          == "amount"
-9:  {key: TEquality, value: ">="}                        token.Value()          == ">="
-10: {key: TokenFloat, value: "122.34"}                   token.ValueFloat()     == 122.34
-```
-
-More examples:
-- [JSON parser](./example_test.go)
-
-## Begin
-
-### Create and parse
-
-```go
-import (
-    "github.com/bzick/tokenizer"
-)
-
-parser := tokenizer.New()
-parser.AllowKeywordUnderscore() // ... and other configuration code
-
-```
-
-There are two ways to **parse a string or slice**:
-
-- `parser.ParseString(str)`
-- `parser.ParseBytes(slice)`
-
-The package also allows you to **parse an endless stream** of data into tokens.
-For parsing, you need to pass an `io.Reader` from which the data will be read (chunk by chunk):
-
-```go
-fp, err := os.Open("data.json") // huge JSON file
-// check err, configure tokenizer ...
-
-stream := parser.ParseStream(fp, 4096).SetHistorySize(10)
-defer stream.Close()
-for stream.IsValid() { 
-	// ...
-	stream.GoNext()
-}
-```
-
-## Embedded tokens
-
-- `tokenizer.TokenUnknown` — unspecified token key. 
-- `tokenizer.TokenKeyword` — keyword, any combination of letters, including unicode letters.
-- `tokenizer.TokenInteger` — integer value
-- `tokenizer.TokenFloat` — float/double value
-- `tokenizer.TokenString` — quoted string
-- `tokenizer.TokenStringFragment` — fragment of a framed (quoted) string
-
-### Unknown token — `tokenizer.TokenUnknown`
-
-A token is marked as `TokenUnknown` if the parser detects an unknown token:
-
-```go
-parser.ParseString(`one!`)
-```
-```
-{
-    {
-        Key: tokenizer.TokenKeyword
-        Value: "one"
-    },
-    {
-        Key: tokenizer.TokenUnknown
-        Value: "!"
-    }
-}
-```
-
-By default, `TokenUnknown` tokens are added to the stream.
-To exclude them from the stream, use the `tokenizer.StopOnUndefinedToken()` method:
-
-```
-{
-    {
-        Key: tokenizer.TokenKeyword
-        Value: "one"
-    }
-}
-```
-
-Please note that if the `tokenizer.StopOnUndefinedToken` setting is enabled, then the string may not be fully parsed.
-To find out that the string was not fully parsed, check the length of the parsed string `stream.GetParsedLength()`
-and the length of the original string.
-
-### Keywords
-
-Any word that is not a custom token is stored in a single token as `tokenizer.TokenKeyword`.
-
-The word can contain unicode characters, numbers (see `tokenizer.AllowNumbersInKeyword()`) and underscores (see `tokenizer.AllowKeywordUnderscore()`).
-
-```go
-parser.ParseString(`one two четыре`)
-```
-```
-tokens: {
-    {
-        Key: tokenizer.TokenKeyword
-        Value: "one"
-    },
-    {
-        Key: tokenizer.TokenKeyword
-        Value: "two"
-    },
-    {
-        Key: tokenizer.TokenKeyword
-        Value: "четыре"
-    }
-}
-```
-
-### Integer number
-
-Any integer is stored as one token with key `tokenizer.TokenInteger`.
-
-```go
-parser.ParseString(`223 999`)
-```
-```
-tokens: {
-    {
-        Key: tokenizer.TokenInteger
-        Value: "223"
-    },
-    {
-        Key: tokenizer.TokenInteger
-        Value: "999"
-    },
-}
-```
-
-To get an int64 from the token value use `token.ValueInt()`:
-
-```go
-stream := tokenizer.ParseString("123")
-fmt.Print("Token is %d", stream.CurrentToken().GetInt())  // Token is 123
-```
-
-### Float number
-
-Any float number is stored as one token with key `tokenizer.TokenFloat`. A float number may
-- have a point, for example `1.2`
-- have an exponent, for example `1e6`
-- have a lower `e` or upper `E` letter in the exponent, for example `1E6`, `1e6`
-- have a sign in the exponent, for example `1e-6`, `1e6`, `1e+6`
-
-```
-tokenizer.ParseString(`1.3e-8`):
-{
-    {
-        Key: tokenizer.TokenFloat
-        Value: "1.3e-8"
-    },
-}
-```
-
-To get a float64 from the token value use `token.ValueFloat()`:
-
-```go
-stream := tokenizer.ParseString("1.3e2")
-fmt.Print("Token is %d", stream.CurrentToken().GetFloat())  // Token is 130
-```
-
-### Framed string
-
-Strings that are framed with tokens are called framed strings. An obvious example is a quoted string like `"one two"`.
-Here the quotes are the edge tokens.
-
-You can create and customize a framed string through `tokenizer.DefineStringToken()`:
-
-```go
-const TokenDoubleQuotedString = 10
-tokenizer.DefineStringToken(TokenDoubleQuotedString, `"`, `"`).SetEscapeSymbol('\\')
-
-stream := tokenizer.ParseString(`"two \"three"`)
-```
-```
-{
-    {
-        Key: tokenizer.TokenString
-        Value: "\"two \\"three\""
-    },
-}
-```
-
-To get a framed string without edge tokens and special characters, use the `token.ValueUnescaped()` method:
-
-```go
-v := stream.CurrentToken().ValueUnescaped() // result: two "three
-```
-
-The method `token.StringKey()` returns the token string key defined in `DefineStringToken`:
-
-```go
-stream.CurrentToken().StringKey() == TokenDoubleQuotedString // true
-```
-
-### Injection in framed string
-
-Strings can contain expression substitutions that can be parsed into tokens. For example `"one {{two}} three"`.
-Fragments of strings before, between and after substitutions will be stored in tokens as `tokenizer.TokenStringFragment`. 
-
-```go
-const (
-    TokenOpenInjection = 1
-    TokenCloseInjection = 2
-    TokenQuotedString = 3
-)
-
-parser := tokenizer.New()
-parser.DefineTokens(TokenOpenInjection, []string{"{{"})
-parser.DefineTokens(TokenCloseInjection, []string{"}}"})
-parser.DefineStringToken(TokenQuotedString, `"`, `"`).AddInjection(TokenOpenInjection, TokenCloseInjection)
-
-parser.ParseString(`"one {{ two }} three"`)
-```
-Tokens:
-```
-{
-    {
-        Key: tokenizer.TokenStringFragment,
-        Value: "one"
-    },
-    {
-        Key: TokenOpenInjection,
-        Value: "{{"
-    },
-    {
-        Key: tokenizer.TokenKeyword,
-        Value: "two"
-    },
-    {
-        Key: TokenCloseInjection,
-        Value: "}}"
-    },
-    {
-        Key: tokenizer.TokenStringFragment,
-        Value: "three"
-    },
-}
-```
-
-Use cases:
-- parse templates
-- parse placeholders
-
-## User defined tokens
-
-The new token can be defined via the `DefineTokens` method:
-
-```go
-
-const (
-    TokenCurlyOpen    = 1
-    TokenCurlyClose   = 2
-    TokenSquareOpen   = 3
-    TokenSquareClose  = 4
-    TokenColon        = 5
-    TokenComma        = 6
-	TokenDoubleQuoted = 7
-)
-
-// json parser
-parser := tokenizer.New()
-parser.
-	DefineTokens(TokenCurlyOpen, []string{"{"}).
-	DefineTokens(TokenCurlyClose, []string{"}"}).
-	DefineTokens(TokenSquareOpen, []string{"["}).
-	DefineTokens(TokenSquareClose, []string{"]"}).
-	DefineTokens(TokenColon, []string{":"}).
-	DefineTokens(TokenComma, []string{","}).
-	DefineStringToken(TokenDoubleQuoted, `"`, `"`).SetSpecialSymbols(tokenizer.DefaultStringEscapes)
-
-stream := parser.ParseString(`{"key": [1]}`)
-```
-
-
-## Known issues
-
-* the zero byte `\0` is ignored in the source string.
-
-## Benchmark
-
-Parse string/bytes
-```
-pkg: tokenizer
-cpu: Intel(R) Core(TM) i7-7820HQ CPU @ 2.90GHz
-BenchmarkParseBytes
-    stream_test.go:251: Speed: 70 bytes string with 19.689µs: 3555284 byte/sec
-    stream_test.go:251: Speed: 7000 bytes string with 848.163µs: 8253130 byte/sec
-    stream_test.go:251: Speed: 700000 bytes string with 75.685945ms: 9248744 byte/sec
-    stream_test.go:251: Speed: 11093670 bytes string with 1.16611538s: 9513355 byte/sec
-BenchmarkParseBytes-8   	  158481	      7358 ns/op
-```
-
-Parse infinite stream
-```
-pkg: tokenizer
-cpu: Intel(R) Core(TM) i7-7820HQ CPU @ 2.90GHz
-BenchmarkParseInfStream
-    stream_test.go:226: Speed: 70 bytes at 33.826µs: 2069414 byte/sec
-    stream_test.go:226: Speed: 7000 bytes at 627.357µs: 11157921 byte/sec
-    stream_test.go:226: Speed: 700000 bytes at 27.675799ms: 25292856 byte/sec
-    stream_test.go:226: Speed: 30316440 bytes at 1.18061702s: 25678471 byte/sec
-BenchmarkParseInfStream-8   	  433092	      2726 ns/op
-PASS
-```
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/stream.go b/application/source/vendor/github.com/volker-schukai/tokenizer/stream.go
deleted file mode 100644
index aa1003efada35b2da91d961cb81aaeffde24f1c2..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/stream.go
+++ /dev/null
@@ -1,333 +0,0 @@
-package tokenizer
-
-import (
-	"strconv"
-	"strings"
-)
-
-// Stream is an iterator over parsed tokens.
-// If the data is read from an infinite buffer, the iterator reads data from the reader chunk by chunk.
-type Stream struct {
-	t *Tokenizer
-	// count of tokens in the stream
-	len int
-	// pointer to the node of double-linked list of tokens
-	current *Token
-	// pointer to the last valid token if current moved out of bounds (past the end of the list)
-	prev *Token
-	// pointer to the last valid token if current moved out of bounds (before the beginning of the list)
-	next *Token
-	// pointer to head of list
-	head *Token
-
-	// last whitespaces before end of source
-	wsTail []byte
-	// count of parsed bytes
-	parsed int
-
-	p           *parsing
-	historySize int
-}
-
-// NewStream creates new parsed stream of tokens.
-func NewStream(p *parsing) *Stream {
-	return &Stream{
-		t:       p.t,
-		head:    p.head,
-		current: p.head,
-		len:     p.n,
-		wsTail:  p.tail,
-		parsed:  p.parsed + p.pos,
-	}
-}
-
-// NewInfStream creates new stream with active parser.
-func NewInfStream(p *parsing) *Stream {
-	return &Stream{
-		t:       p.t,
-		p:       p,
-		len:     p.n,
-		head:    p.head,
-		current: p.head,
-	}
-}
-
-// SetHistorySize sets the number of tokens that should remain after the current token
-func (s *Stream) SetHistorySize(size int) *Stream {
-	s.historySize = size
-	return s
-}
-
-// Close releases all token objects to pool
-func (s *Stream) Close() {
-	for ptr := s.head; ptr != nil; {
-		p := ptr.next
-		s.t.freeToken(ptr)
-		ptr = p
-	}
-	s.next = nil
-	s.prev = nil
-	s.head = undefToken
-	s.current = undefToken
-	s.len = 0
-}
-
-func (s *Stream) String() string {
-	items := make([]string, 0, s.len)
-	ptr := s.head
-	for ptr != nil {
-		items = append(items, strconv.Itoa(ptr.id)+": "+ptr.String())
-		ptr = ptr.next
-	}
-
-	return strings.Join(items, "\n")
-}
-
-// GetParsedLength returns the number of bytes parsed so far.
-func (s *Stream) GetParsedLength() int {
-	if s.p == nil {
-		return s.parsed
-	} else {
-		return s.p.parsed + s.p.pos
-	}
-}
-
-// GoNext moves stream pointer to the next token.
-// If there is no token, it initiates the parsing of the next chunk of data.
-// If there is no data, the pointer will point to the TokenUndef token.
-func (s *Stream) GoNext() *Stream {
-	if s.current.next != nil {
-		s.current = s.current.next
-		if s.current.next == nil && s.p != nil { // lazy load and parse next data-chunk
-			n := s.p.n
-			s.p.parse()
-			s.len += s.p.n - n
-		}
-		if s.historySize != 0 && s.current.id-s.head.id > s.historySize {
-			t := s.head
-			s.head = s.head.unlink()
-			s.t.freeToken(t)
-			s.len--
-		}
-	} else if s.current == undefToken {
-		s.current = s.prev
-		s.prev = nil
-	} else {
-		s.prev = s.current
-		s.current = undefToken
-	}
-	return s
-}
-
-// GoPrev moves the stream pointer to the previous token.
-// The number of possible calls is limited if you specified SetHistorySize.
-// If the beginning of the stream or the end of the history is reached, the pointer will point to the TokenUndef token.
-func (s *Stream) GoPrev() *Stream {
-	if s.current.prev != nil {
-		s.current = s.current.prev
-	} else if s.current == undefToken {
-		s.current = s.next
-		s.prev = nil
-	} else {
-		s.next = s.current
-		s.current = undefToken
-	}
-	return s
-}
-
-// GoTo moves pointer of stream to specific token.
-// The search is done by token ID.
-func (s *Stream) GoTo(id int) *Stream {
-	if id > s.current.id {
-		for s.current != nil && id != s.current.id {
-			s.GoNext()
-		}
-	} else if id < s.current.id {
-		for s.current != nil && id != s.current.id {
-			s.GoPrev()
-		}
-	}
-	return s
-}
-
-// IsValid checks if stream is valid.
-// This means that the pointer has not reached the end of the stream.
-func (s *Stream) IsValid() bool {
-	return s.current != undefToken
-}
-
-// IsNextSequence checks whether the next tokens match exactly the specified sequence.
-func (s *Stream) IsNextSequence(keys ...TokenKey) bool {
-	var (
-		result = true
-		hSize  = 0
-		id     = s.CurrentToken().ID()
-	)
-	if s.historySize > 0 && s.historySize < len(keys) {
-		hSize = s.historySize
-		s.historySize = len(keys)
-	}
-
-	for _, key := range keys {
-		if !s.GoNext().CurrentToken().Is(key) {
-			result = false
-			break
-		}
-	}
-	s.GoTo(id)
-
-	if hSize != 0 {
-		s.SetHistorySize(hSize)
-	}
-	return result
-}
-
-// IsAnyNextSequence checks that at least one token from each group is contained in a sequence of tokens
-func (s *Stream) IsAnyNextSequence(keys ...[]TokenKey) bool {
-	var (
-		result = true
-		hSize  = 0
-		id     = s.CurrentToken().ID()
-	)
-	if s.historySize > 0 && s.historySize < len(keys) {
-		hSize = s.historySize
-		s.historySize = len(keys)
-	}
-
-	for _, key := range keys {
-		found := false
-		for _, k := range key {
-			if s.GoNext().CurrentToken().Is(k) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			result = false
-			break
-		}
-	}
-	s.GoTo(id)
-
-	if hSize != 0 {
-		s.SetHistorySize(hSize)
-	}
-	return result
-}
-
-// HeadToken returns a pointer to the head token.
-// The head token may change if a history size is set.
-func (s *Stream) HeadToken() *Token {
-	return s.head
-}
-
-// CurrentToken always returns the token.
-// If the pointer is not valid (see IsValid) CurrentToken returns the TokenUndef token.
-// Do not save result (Token) into variables — current token may be changed at any time.
-func (s *Stream) CurrentToken() *Token {
-	return s.current
-}
-
-// PrevToken returns previous token from the stream.
-// If the previous token doesn't exist, the method returns the TokenUndef token.
-// Do not save result (Token) into variables — previous token may be changed at any time.
-func (s *Stream) PrevToken() *Token {
-	if s.current.prev != nil {
-		return s.current.prev
-	}
-	return undefToken
-}
-
-// NextToken returns next token from the stream.
-// If the next token doesn't exist, the method returns the TokenUndef token.
-// Do not save result (Token) into variables — next token may be changed at any time.
-func (s *Stream) NextToken() *Token {
-	if s.current.next != nil {
-		return s.current.next
-	}
-	return undefToken
-}
-
-// GoNextIfNextIs moves stream pointer to the next token if the next token has specific token keys.
-// If the keys match, the pointer is updated and the method returns true. Otherwise, it returns false.
-func (s *Stream) GoNextIfNextIs(key TokenKey, otherKeys ...TokenKey) bool {
-	if s.NextToken().Is(key, otherKeys...) {
-		s.GoNext()
-		return true
-	}
-	return false
-}
-
-// GetSnippet returns slice of tokens.
-// The slice is generated from the current token position and includes tokens before and after the current token.
-func (s *Stream) GetSnippet(before, after int) []Token {
-	var segment []Token
-	if s.current == undefToken {
-		if s.prev != nil && before > s.prev.id-s.head.id {
-			before = s.prev.id - s.head.id
-		} else {
-			before = 0
-		}
-	} else if before > s.current.id-s.head.id {
-		before = s.current.id - s.head.id
-	}
-	if after > s.len-before-1 {
-		after = s.len - before - 1
-	}
-	segment = make([]Token, before+after+1)
-	var ptr *Token
-	if s.next != nil {
-		ptr = s.next
-	} else if s.prev != nil {
-		ptr = s.prev
-	} else {
-		ptr = s.current
-	}
-	for p := ptr; p != nil; p, before = ptr.prev, before-1 {
-		segment[before] = Token{
-			id:     ptr.id,
-			key:    ptr.key,
-			value:  ptr.value,
-			line:   ptr.line,
-			offset: ptr.offset,
-			indent: ptr.indent,
-			string: ptr.string,
-		}
-		if before <= 0 {
-			break
-		}
-	}
-	for p, i := ptr.next, 1; p != nil; p, i = p.next, i+1 {
-		segment[before+i] = Token{
-			id:     p.id,
-			key:    p.key,
-			value:  p.value,
-			line:   p.line,
-			offset: p.offset,
-			indent: p.indent,
-			string: p.string,
-		}
-		if i >= after {
-			break
-		}
-	}
-	return segment
-}
-
-// GetSnippetAsString returns tokens before and after current token as string.
-// `maxStringLength` specifies the max length of each token string. Zero — unlimited token string length.
-// If a string is longer than maxStringLength, the method removes some runes from the middle of the string.
-func (s *Stream) GetSnippetAsString(before, after, maxStringLength int) string {
-	segments := s.GetSnippet(before, after)
-	str := make([]string, len(segments))
-	for i, token := range segments {
-		v := token.ValueString()
-		if maxStringLength > 4 && len(v) > maxStringLength {
-			str[i] = v[:maxStringLength/2] + "..." + v[maxStringLength/2+1:]
-		} else {
-			str[i] = v
-		}
-	}
-
-	return strings.Join(str, "")
-}
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/token.go b/application/source/vendor/github.com/volker-schukai/tokenizer/token.go
deleted file mode 100644
index a07cdcada7ebde9fdcc1ef5dc89f1729e5819763..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/token.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package tokenizer
-
-import (
-	"fmt"
-	"strconv"
-)
-
-var undefToken = &Token{
-	id: -1,
-}
-
-// Token struct describe one token.
-type Token struct {
-	id     int
-	key    TokenKey
-	value  []byte
-	line   int
-	offset int
-	indent []byte
-	string *StringSettings
-
-	prev *Token
-	next *Token
-}
-
-// addNext adds a new token as the next node of the doubly linked list.
-func (t *Token) addNext(next *Token) {
-	next.prev = t
-	t.next = next
-}
-
-// unlink removes the token from the doubly linked list and fixes the links of the prev and next nodes.
-// The method returns the next token, or nil if no next token is found.
-func (t *Token) unlink() *Token {
-	next := t.next
-	t.next.prev = nil
-	t.next = nil
-	t.prev = nil
-
-	return next
-}
-
-// ID returns the id of the token. The id is the sequence number of the token in the stream.
-func (t *Token) ID() int {
-	return t.id
-}
-
-// String returns a multiline string with the token's information.
-func (t Token) String() string {
-	return fmt.Sprintf("{\n\tId: %d\n\tKey: %d\n\tValue: %s\n\tPosition: %d\n\tIndent: %d bytes\n\tLine: %d\n}",
-		t.id, t.key, t.value, t.offset, len(t.indent), t.line)
-}
-
-// IsValid checks if this token is valid — the key is not TokenUndef.
-func (t *Token) IsValid() bool {
-	return t.key != TokenUndef
-}
-
-// IsKeyword checks if this is keyword — the key is TokenKeyword.
-func (t Token) IsKeyword() bool {
-	return t.key == TokenKeyword
-}
-
-// IsNumber checks if this token is integer or float — the key is TokenInteger or TokenFloat.
-func (t Token) IsNumber() bool {
-	return t.key == TokenInteger || t.key == TokenFloat
-}
-
-// IsFloat checks if this token is float — the key is TokenFloat.
-func (t Token) IsFloat() bool {
-	return t.key == TokenFloat
-}
-
-// IsInteger checks if this token is integer — the key is TokenInteger.
-func (t Token) IsInteger() bool {
-	return t.key == TokenInteger
-}
-
-// ValueInt returns value as int64.
-// If the token is a float, the fractional part is discarded (truncated toward zero).
-// If the token is not TokenInteger or TokenFloat zero will be returned.
-// Method doesn't use cache. Each call starts a number parser.
-func (t Token) ValueInt() int64 {
-	if t.key == TokenInteger {
-		num, _ := strconv.ParseInt(b2s(t.value), 10, 64)
-		return num
-	} else if t.key == TokenFloat {
-		num, _ := strconv.ParseFloat(b2s(t.value), 64)
-		return int64(num)
-	}
-	return 0
-}
-
-// ValueFloat returns value as float64.
-// If the token is not TokenInteger or TokenFloat zero will be returned.
-// Method doesn't use cache. Each call starts a number parser.
-func (t *Token) ValueFloat() float64 {
-	if t.key == TokenFloat {
-		num, _ := strconv.ParseFloat(b2s(t.value), 64)
-		return num
-	} else if t.key == TokenInteger {
-		num, _ := strconv.ParseInt(b2s(t.value), 10, 64)
-		return float64(num)
-	}
-	return 0.0
-}
-
-// Indent returns spaces before the token.
-func (t *Token) Indent() []byte {
-	return t.indent
-}
-
-// Key returns the key of the token pointed to by the pointer.
-// If pointer is not valid (see IsValid) TokenUndef will be returned.
-func (t *Token) Key() TokenKey {
-	return t.key
-}
-
-// Value returns value of current token as slice of bytes from source.
-// If current token is invalid value returns nil.
-//
-// Do not change bytes in the slice. Copy slice before change.
-func (t *Token) Value() []byte {
-	return t.value
-}
-
-// ValueString returns value of the token as string.
-// If the token is TokenUndef method returns empty string.
-func (t *Token) ValueString() string {
-	if t.value == nil {
-		return ""
-	}
-	return b2s(t.value)
-}
-
-// Line returns line number in input string.
-// Line numbers start from 1.
-func (t *Token) Line() int {
-	return t.line
-}
-
-// Offset returns the byte position in input string (from start).
-func (t *Token) Offset() int {
-	return t.offset
-}
-
-// StringSettings returns StringSettings structure if token is framed string.
-func (t *Token) StringSettings() *StringSettings {
-	return t.string
-}
-
-// StringKey returns the key of the string.
-// If no key is defined for the string, TokenString will be returned.
-func (t *Token) StringKey() TokenKey {
-	if t.string != nil {
-		return t.string.Key
-	}
-	return TokenString
-}
-
-// IsString checks if current token is a quoted string.
-// Token key may be TokenString or TokenStringFragment.
-func (t Token) IsString() bool {
-	return t.key == TokenString || t.key == TokenStringFragment
-}
-
-// ValueUnescaped returns clear (unquoted) string
-//  - without edge-tokens (quotes)
-//  - with character escaping handling
-//
-// For example quoted string
-//		"one \"two\"\t three"
-// transforms to
-//		one "two"		three
-// Method doesn't use cache. Each call starts a string parser.
-func (t *Token) ValueUnescaped() []byte {
-	if t.string != nil {
-		from := 0
-		to := len(t.value)
-		if bytesStarts(t.string.StartToken, t.value) {
-			from = len(t.string.StartToken)
-		}
-		if bytesEnds(t.string.EndToken, t.value) {
-			to = len(t.value) - len(t.string.EndToken)
-		}
-		str := t.value[from:to]
-		result := make([]byte, 0, len(str))
-		escaping := false
-		start := 0
-		for i := 0; i < len(str); i++ {
-			if escaping {
-				if v, ok := t.string.SpecSymbols[str[i]]; ok {
-					result = append(result, t.value[start:i]...)
-					result = append(result, v)
-				}
-				start = i
-				escaping = false
-			} else if t.string.EscapeSymbol != 0 && str[i] == t.string.EscapeSymbol {
-				escaping = true
-			}
-		}
-		if start == 0 { // no escapes found
-			return str
-		}
-		return result
-	}
-	return t.value
-}
-
-// ValueUnescapedString like as ValueUnescaped but returns string.
-func (t *Token) ValueUnescapedString() string {
-	if s := t.ValueUnescaped(); s != nil {
-		return b2s(s)
-	}
-	return ""
-}
-
-// Is checks if the token has any of these keys.
-func (t *Token) Is(key TokenKey, keys ...TokenKey) bool {
-	if t.key == key {
-		return true
-	}
-	if len(keys) > 0 {
-		for _, k := range keys {
-			if t.key == k {
-				return true
-			}
-		}
-	}
-	return false
-}
diff --git a/application/source/vendor/github.com/volker-schukai/tokenizer/tokenizer.go b/application/source/vendor/github.com/volker-schukai/tokenizer/tokenizer.go
deleted file mode 100644
index 25c5bd160dae6e60d1834ee16c95bf2824e7a37b..0000000000000000000000000000000000000000
--- a/application/source/vendor/github.com/volker-schukai/tokenizer/tokenizer.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package tokenizer
-
-import (
-	"io"
-	"sort"
-	"sync"
-)
-
-const newLine = '\n'
-
-// TokenKey token type identifier
-type TokenKey int
-
-const (
-	// TokenUnknown means that this token is neither an embedded token nor user defined.
-	TokenUnknown TokenKey = -6
-	// TokenStringFragment means that this is only a fragment of a quoted string with injections
-	// For example, "one {{ two }} three", where "one " and " three" — TokenStringFragment
-	TokenStringFragment TokenKey = -5
-	// TokenString means that this token is a quoted string.
-	// For example, "one two"
-	TokenString TokenKey = -4
-	// TokenFloat means that this token is float number with point and/or exponent.
-	// For example, 1.2, 1e6, 1E-6
-	TokenFloat TokenKey = -3
-	// TokenInteger means that this token is integer number.
-	// For example, 3, 49983
-	TokenInteger TokenKey = -2
-	// TokenKeyword means that this token is a word.
-	// For example, one, two, три
-	TokenKeyword TokenKey = -1
-	// TokenUndef means that token doesn't exist.
-	// When the stream is out of range of the token list, any getter or checker will return the TokenUndef token.
-	TokenUndef TokenKey = 0
-)
-
-const (
-	fStopOnUnknown          uint16 = 0b1
-	fAllowKeywordUnderscore uint16 = 0b10
-	fAllowNumberUnderscore  uint16 = 0b100
-	fAllowNumberInKeyword   uint16 = 0b1000
-)
-
-// BackSlash just backslash byte
-const BackSlash = '\\'
-
-var defaultWhiteSpaces = []byte{' ', '\t', '\n', '\r'}
-
-// DefaultStringEscapes is the default map of escaped symbols. Those symbols are commonly used.
-var DefaultStringEscapes = map[byte]byte{
-	'n':  '\n',
-	'r':  '\r',
-	't':  '\t',
-	'\\': '\\',
-}
-
-// tokenRef describes one token.
-type tokenRef struct {
-	// Token type. Not unique.
-	Key TokenKey
-	// Token value as is. Should be unique.
-	Token []byte
-}
-
-// QuoteInjectSettings describes open injection token and close injection token.
-type QuoteInjectSettings struct {
-	// Token type which opens a quoted string.
-	StartKey TokenKey
-	// Token type which closes a quoted string.
-	EndKey TokenKey
-}
-
-// StringSettings describes framed (quoted) string tokens, such as quoted strings.
-type StringSettings struct {
-	Key          TokenKey
-	StartToken   []byte
-	EndToken     []byte
-	EscapeSymbol byte
-	SpecSymbols  map[byte]byte
-	Injects      []QuoteInjectSettings
-}
-
-// AddInjection configures an injection into the string.
-// An injection is a parsable fragment of a framed (quoted) string.
-// It is often used for parsing placeholders or template expressions in the framed string.
-func (q *StringSettings) AddInjection(startTokenKey, endTokenKey TokenKey) *StringSettings {
-	q.Injects = append(q.Injects, QuoteInjectSettings{StartKey: startTokenKey, EndKey: endTokenKey})
-	return q
-}
-
-// SetEscapeSymbol sets the escape symbol for framed (quoted) strings.
-// The escape symbol allows ignoring the close token of a framed string.
-// It also allows using special symbols in framed strings, like \n and \t.
-func (q *StringSettings) SetEscapeSymbol(symbol byte) *StringSettings {
-	q.EscapeSymbol = symbol
-	return q
-}
-
-// SetSpecialSymbols sets the mapping of all escapable symbols for the escape symbol, like \n, \t, \r.
-func (q *StringSettings) SetSpecialSymbols(special map[byte]byte) *StringSettings {
-	q.SpecSymbols = special
-	return q
-}
-
-// Tokenizer stores all tokens configuration and behaviors.
-type Tokenizer struct {
-	// bit flags
-	flags uint16
-	// all defined custom tokens {key: [token1, token2, ...], ...}
-	tokens  map[TokenKey][]*tokenRef
-	index   map[byte][]*tokenRef
-	quotes  []*StringSettings
-	wSpaces []byte
-	pool    sync.Pool
-}
-
-// New creates new tokenizer.
-func New() *Tokenizer {
-	t := Tokenizer{
-		flags:   0,
-		tokens:  map[TokenKey][]*tokenRef{},
-		index:   map[byte][]*tokenRef{},
-		quotes:  []*StringSettings{},
-		wSpaces: defaultWhiteSpaces,
-	}
-	t.pool.New = func() interface{} {
-		return new(Token)
-	}
-	return &t
-}
-
-// SetWhiteSpaces sets custom whitespace symbols between tokens.
-// By default: {' ', '\t', '\n', '\r'}
-func (t *Tokenizer) SetWhiteSpaces(ws []byte) *Tokenizer {
-	t.wSpaces = ws
-	return t
-}
-
-// StopOnUndefinedToken stops parsing if an unknown token is detected.
-func (t *Tokenizer) StopOnUndefinedToken() *Tokenizer {
-	t.flags |= fStopOnUnknown
-	return t
-}
-
-// AllowKeywordUnderscore allows underscore symbol in keywords, like `one_two` or `_three`
-func (t *Tokenizer) AllowKeywordUnderscore() *Tokenizer {
-	t.flags |= fAllowKeywordUnderscore
-	return t
-}
-
-// AllowNumbersInKeyword allows numbers in keywords, like `one1` or `r2d2`
-// The method allows numbers in keywords, but the keyword itself must not start with a number.
-// There should be no spaces between letters and numbers.
-func (t *Tokenizer) AllowNumbersInKeyword() *Tokenizer {
-	t.flags |= fAllowNumberInKeyword
-	return t
-}
-
-// DefineTokens adds custom tokens.
-// Here `key` is the unique identifier of `tokens`, and `tokens` is a slice of token strings.
-// If the key already exists, the tokens will be rewritten.
-func (t *Tokenizer) DefineTokens(key TokenKey, tokens []string) *Tokenizer {
-	var tks []*tokenRef
-	if key < 1 {
-		return t
-	}
-	for _, token := range tokens {
-		ref := tokenRef{
-			Key:   key,
-			Token: s2b(token),
-		}
-		head := ref.Token[0]
-		tks = append(tks, &ref)
-		if t.index[head] == nil {
-			t.index[head] = []*tokenRef{}
-		}
-		t.index[head] = append(t.index[head], &ref)
-		sort.Slice(t.index[head], func(i, j int) bool {
-			return len(t.index[head][i].Token) > len(t.index[head][j].Token)
-		})
-	}
-	t.tokens[key] = tks
-
-	return t
-}
-
-// DefineStringToken defines a token string.
-// For example, a piece of data surrounded by quotes: "string in quotes" or 'string in single quotes'.
-// The arguments startToken and endToken define the open and close "quotes".
-//  - t.DefineStringToken("`", "`") - parse string "one `two three`" will be parsed as
-// 			[{key: TokenKeyword, value: "one"}, {key: TokenString, value: "`two three`"}]
-//  - t.DefineStringToken("//", "\n") - parse string "parse // like comment\n" will be parsed as
-//			[{key: TokenKeyword, value: "parse"}, {key: TokenString, value: "// like comment"}]
-func (t *Tokenizer) DefineStringToken(key TokenKey, startToken, endToken string) *StringSettings {
-	q := &StringSettings{
-		Key:        key,
-		StartToken: s2b(startToken),
-		EndToken:   s2b(endToken),
-	}
-	if q.StartToken == nil {
-		return q
-	}
-	t.quotes = append(t.quotes, q)
-
-	return q
-}
-
-func (t *Tokenizer) allocToken() *Token {
-	return t.pool.Get().(*Token)
-}
-
-func (t *Tokenizer) freeToken(token *Token) {
-	token.next = nil
-	token.prev = nil
-	token.value = nil
-	token.indent = nil
-	token.offset = 0
-	token.line = 0
-	token.id = 0
-	token.key = 0
-	token.string = nil
-	t.pool.Put(token)
-}
-
-// ParseString parses the string into tokens.
-func (t *Tokenizer) ParseString(str string) *Stream {
-	return t.ParseBytes(s2b(str))
-}
-
-// ParseBytes parses the byte slice into tokens.
-func (t *Tokenizer) ParseBytes(str []byte) *Stream {
-	p := newParser(t, str)
-	p.parse()
-	return NewStream(p)
-}
-
-// ParseStream parses data from the reader into tokens.
-func (t *Tokenizer) ParseStream(r io.Reader, bufferSize uint) *Stream {
-	p := newInfParser(t, r, bufferSize)
-	p.preload()
-	p.parse()
-	return NewInfStream(p)
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitignore b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitignore
deleted file mode 100644
index c55f7d029ba6d56ff0080797e6bfd7a4f7b7867b..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitignore
+++ /dev/null
@@ -1,149 +0,0 @@
-# Created by https://www.toptal.com/developers/gitignore/api/intellij,go
-# Edit at https://www.toptal.com/developers/gitignore?templates=intellij,go
-
-### Go ###
-# If you prefer the allow list template instead of the deny list, see community template:
-# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
-#
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-testdata/
-
-# Dependency directories (remove the comment below to include it)
-# vendor/
-
-# Go workspace file
-go.work
-
-# Go Fuzz build
-testdata/
-
-### Go Patch ###
-/vendor/
-/Godeps/
-
-### Intellij ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# AWS User-specific
-.idea/**/aws.xml
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn.  Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# SonarLint plugin
-.idea/sonarlint/
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### Intellij Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-# https://plugins.jetbrains.com/plugin/7973-sonarlint
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator-enh.xml
-.idea/**/markdown-navigator/
-
-# Cache file creation bug
-# See https://youtrack.jetbrains.com/issue/JBR-2257
-.idea/$CACHE_FILE$
-
-# CodeStream plugin
-# https://plugins.jetbrains.com/plugin/12206-codestream
-.idea/codestream.xml
-
-# Azure Toolkit for IntelliJ plugin
-# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
-.idea/**/azureSettings.xml
-
-# End of https://www.toptal.com/developers/gitignore/api/intellij,go
\ No newline at end of file
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CHANGELOG.md b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CHANGELOG.md
deleted file mode 100644
index 60cb170d178737232a896d59c78e7a51d89b4bc9..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CHANGELOG.md
+++ /dev/null
@@ -1,195 +0,0 @@
-
-<a name="v1.16.0"></a>
-## [v1.16.0] - 2023-05-20
-
-<a name="v1.15.0"></a>
-## [v1.15.0] - 2023-03-09
-
-<a name="v1.14.0"></a>
-## [v1.14.0] - 2023-03-09
-
-<a name="v1.13.2"></a>
-## [v1.13.2] - 2022-12-23
-
-<a name="v1.13.1"></a>
-## [v1.13.1] - 2022-12-23
-
-<a name="v1.13.0"></a>
-## [v1.13.0] - 2022-10-23
-### Add Features
-- feat change proxy to map and expose map
-
-### Changes
-- chore add licenses
-
-
-<a name="v1.12.0"></a>
-## [v1.12.0] - 2022-10-19
-### Bug Fixes
-- fix remove tag for command [#4](https://gitlab.schukai.com/oss/libraries/go/application/xflags/issues/4)
-
-
-<a name="v1.11.0"></a>
-## [v1.11.0] - 2022-10-19
-### Add Features
-- feat improve the help output
-
-
-<a name="v1.10.2"></a>
-## [v1.10.2] - 2022-10-16
-### Bug Fixes
-- fix copy only explicite values [#3](https://gitlab.schukai.com/oss/libraries/go/application/xflags/issues/3)
-
-
-<a name="v1.10.1"></a>
-## [v1.10.1] - 2022-10-16
-### Bug Fixes
-- fix assign the correct value to the proxy
-
-### Changes
-- chore add license texts
-
-
-<a name="v1.10.0"></a>
-## [v1.10.0] - 2022-10-15
-### Code Refactoring
-- refactor functions moved to a separate repos
-
-
-<a name="v1.9.0"></a>
-## [v1.9.0] - 2022-10-15
-### Add Features
-- feat implements proxy interface [#2](https://gitlab.schukai.com/oss/libraries/go/application/xflags/issues/2)
-
-
-<a name="v1.8.3"></a>
-## [v1.8.3] - 2022-10-15
-### Bug Fixes
-- fix help request should not removed from errors
-
-
-<a name="v1.8.2"></a>
-## [v1.8.2] - 2022-10-15
-### Bug Fixes
-- fix pathfinder panic
-
-
-<a name="v1.8.1"></a>
-## [v1.8.1] - 2022-10-15
-
-<a name="v1.8.0"></a>
-## [v1.8.0] - 2022-10-15
-### Bug Fixes
-- fix invalid type exception
-
-
-<a name="v1.7.0"></a>
-## [v1.7.0] - 2022-10-14
-### Add Features
-- feat introduction of copy-interface
-- feat new method to get the output of the flags
-
-
-<a name="v1.6.0"></a>
-## [v1.6.0] - 2022-10-13
-### Code Refactoring
-- refactor the execute function, the execute function should not print messages
-
-
-<a name="v1.5.0"></a>
-## [v1.5.0] - 2022-10-13
-### Add Features
-- feat new opportunities for interaction with flags [#1](https://gitlab.schukai.com/oss/libraries/go/application/xflags/issues/1)
-
-### Changes
-- chore add license
-
-
-<a name="v1.4.0"></a>
-## [v1.4.0] - 2022-10-09
-### Add Features
-- feat new Execute() Method for the automatic execution of command
-- feat new function GetDefaults()
-
-
-<a name="v1.3.1"></a>
-## [v1.3.1] - 2022-10-08
-### Bug Fixes
-- fix remove one test renmant
-
-
-<a name="v1.3.0"></a>
-## [v1.3.0] - 2022-10-08
-### Code Refactoring
-- refactor change func name FlagOutput() to Output()
-
-
-<a name="v1.2.3"></a>
-## [v1.2.3] - 2022-10-07
-### Changes
-- chore change license things
-- chore change license things
-
-
-<a name="v1.2.2"></a>
-## [v1.2.2] - 2022-10-06
-### Changes
-- chore add licenses
-
-
-<a name="v1.2.1"></a>
-## [v1.2.1] - 2022-10-06
-### Changes
-- chore add licenses header
-
-
-<a name="v1.2.0"></a>
-## [v1.2.0] - 2022-10-05
-### Add Features
-- feat new function ParseOsArgs
-
-### Bug Fixes
-- fix Settings should be exported
-- fix parse only the arguments and not the programm
-
-
-<a name="v1.1.1"></a>
-## [v1.1.1] - 2022-10-05
-### Bug Fixes
-- fix repository_url was wrong
-
-
-<a name="v1.1.0"></a>
-## [v1.1.0] - 2022-10-05
-
-<a name="v1.0.0"></a>
-## v1.0.0 - 2022-10-04
-
-[v1.16.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.15.0...v1.16.0
-[v1.15.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.14.0...v1.15.0
-[v1.14.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.13.2...v1.14.0
-[v1.13.2]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.13.1...v1.13.2
-[v1.13.1]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.13.0...v1.13.1
-[v1.13.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.12.0...v1.13.0
-[v1.12.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.11.0...v1.12.0
-[v1.11.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.10.2...v1.11.0
-[v1.10.2]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.10.1...v1.10.2
-[v1.10.1]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.10.0...v1.10.1
-[v1.10.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.9.0...v1.10.0
-[v1.9.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.8.3...v1.9.0
-[v1.8.3]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.8.2...v1.8.3
-[v1.8.2]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.8.1...v1.8.2
-[v1.8.1]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.8.0...v1.8.1
-[v1.8.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.7.0...v1.8.0
-[v1.7.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.6.0...v1.7.0
-[v1.6.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.5.0...v1.6.0
-[v1.5.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.4.0...v1.5.0
-[v1.4.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.3.1...v1.4.0
-[v1.3.1]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.3.0...v1.3.1
-[v1.3.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.2.3...v1.3.0
-[v1.2.3]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.2.2...v1.2.3
-[v1.2.2]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.2.1...v1.2.2
-[v1.2.1]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.2.0...v1.2.1
-[v1.2.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.1.1...v1.2.0
-[v1.1.1]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.1.0...v1.1.1
-[v1.1.0]: https://gitlab.schukai.com/oss/libraries/go/application/xflags/compare/v1.0.0...v1.1.0
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/LICENSE b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/LICENSE
deleted file mode 100644
index 22686f9fe879acaa7d1db639310a955de05b0d2a..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/LICENSE
+++ /dev/null
@@ -1,662 +0,0 @@
-                    GNU AFFERO GENERAL PUBLIC LICENSE
-                       Version 3, 19 November 2007
-                              AGPL-3.0
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-our General Public Licenses are intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
-
-  A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate.  Many developers of free software are heartened and
-encouraged by the resulting cooperation.  However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
-
-  The GNU Affero General Public License is designed specifically to
-ensure that, in such cases, the modified source code becomes available
-to the community.  It requires the operator of a network server to
-provide the source code of the modified version running there to the
-users of that server.  Therefore, public use of a modified version, on
-a publicly accessible server, gives the public access to the source
-code of the modified version.
-
-  An older license, called the Affero General Public License and
-published by Affero, was designed to accomplish similar goals.  This is
-a different license, not a version of the Affero GPL, but Affero has
-released a new version of the Affero GPL which permits relicensing under
-this license.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU Affero General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Remote Network Interaction; Use with the GNU General Public License.
-
-  Notwithstanding any other provision of this License, if you modify the
-Program, your modified version must prominently offer all users
-interacting with it remotely through a computer network (if your version
-supports such interaction) an opportunity to receive the Corresponding
-Source of your version by providing access to the Corresponding Source
-from a network server at no charge, through some standard or customary
-means of facilitating copying of software.  This Corresponding Source
-shall include the Corresponding Source for any work covered by version 3
-of the GNU General Public License that is incorporated pursuant to the
-following paragraph.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the work with which it is combined will remain governed by version
-3 of the GNU General Public License.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU Affero General Public License from time to time.  Such new versions
-will be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU Affero General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU Affero General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU Affero General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU Affero General Public License as published
-    by the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU Affero General Public License for more details.
-
-    You should have received a copy of the GNU Affero General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source.  For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code.  There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for the
-specific requirements.
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU AGPL, see
-<https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/Makefile b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/Makefile
deleted file mode 100644
index c149982aa9f602b5dfb1aba0ced25241a1a8b472..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/Makefile
+++ /dev/null
@@ -1,157 +0,0 @@
-## Copyright 2022 schukai GmbH. All rights reserved.
-## Use of this source code is governed by a AGPL-3.0
-## license that can be found in the LICENSE file.
-
-PROJECT_ROOT:=$(dir $(realpath $(lastword $(MAKEFILE_LIST))))
-THIS_MAKEFILE:=$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))
-THIS_MAKEFILE_PATH:=$(PROJECT_ROOT)$(THIS_MAKEFILE) 
-
-# @see .PHONY https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html#Phony-Targets
-.DEFAULT_GOAL := help
-
-.PHONY: print
-## Print Path	
-print:
-	@echo "THIS_MAKEFILE:      $(THIS_MAKEFILE)"
-	@echo "THIS_MAKEFILE_PATH: $(THIS_MAKEFILE_PATH)"
-	@echo "PROJECT_ROOT:       $(PROJECT_ROOT)"
-
-# Add a comment to the public targets so that it appears
-# in this help Use two # characters for a help comment
-.PHONY: help
-help:
-	@printf "${COMMENT}Usage:${RESET}\n"
-	@printf " make [target] [arg=\"val\"...]\n\n"
-	@printf "${COMMENT}Available targets:${RESET}\n"
-	@awk '/^[a-zA-Z\-\\_0-9\.@]+:/ { \
-		helpMessage = match(lastLine, /^## (.*)/); \
-		if (helpMessage) { \
-			helpCommand = substr($$1, 0, index($$1, ":")); \
-			helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
-			printf " ${INFO}%-22s${RESET} %s\n", helpCommand, helpMessage; \
-		} \
-	} \
-	{ lastLine = $$0 }' $(MAKEFILE_LIST)
-	@printf "\n${COMMENT}Available arguments:${RESET}\n\n"
-	@awk '/^(([a-zA-Z\-\\_0-9\.@]+)\s[?:]?=)/ { \
-		helpMessage = match(lastLine, /^## (.*)/); \
-		if (helpMessage) { \
-			helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
-			printf " ${INFO}%-22s${RESET} %s (Default: %s)\n", $$1, helpMessage, $$3; \
-		} \
-	} \
-	{ lastLine = $$0 }' $(MAKEFILE_LIST)
-
-
-## run tests
-test:
-	echo "Running tests"
-	go test -cover -v ./...
-
-## run tests with fuzzing
-test-fuzz:
-	echo "Running fuzz tests"
-	go test -v -fuzztime=30s -fuzz=Fuzz ./...
-
-#### VERSION
-BIN_DIR ?= $(shell echo $$HOME)/.local/bin/
-VERSION_NAME 	     := version
-EXECUTABLES = $(EXECUTABLES:-) $(VERSION_NAME)
-VERSION_BIN_PATH := $(BIN_DIR)$(VERSION_NAME)
-
-VERSION_BIN := $(shell command -v $(VERSION_NAME) 2> /dev/null)
-
-ifndef VERSION_BIN
-    $(shell curl -o $(VERSION_BIN_PATH) http://download.schukai.com/tools/version/version-$(shell uname -s | tr [:upper:] [:lower:])-$(shell echo `uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/`))
-    $(shell chmod +x $(VERSION_BIN_PATH))
-endif
-
-GIT_CHGLOG_BIN := $(shell command -v git-chglog 2> /dev/null)
-
-ifeq ($(GIT_CHGLOG_BIN),)
-    $(shell go install github.com/git-chglog/git-chglog/cmd/git-chglog@latest)
-endif     
-     
-RELEASE_FILE ?= $(PROJECT_ROOT)release.json
-CHANGELOG_FILE ?= $(PROJECT_ROOT)CHANGELOG.md
- 
-ifeq ("$(wildcard $(RELEASE_FILE))","")
-  $(shell echo '{"version":"0.1.0"}' > $(RELEASE_FILE))
-endif
-
-PROJECT_VERSION ?= $(shell cat $(RELEASE_FILE) | jq -r .version)
-PROJECT_BUILD_DATE ?= $(shell $(VERSION_BIN) date)
-
-.PHONY: next-patch-version
-next-patch-version: check-clean-repo
-	echo "Creating next version"
-	$(VERSION_BIN) patch --path $(RELEASE_FILE) --selector "version"
-	git add $(RELEASE_FILE) && git commit -m "Bump version to $$(cat $(RELEASE_FILE) | jq -r .version)"
-
-.PHONY: next-minor-version
-next-minor-version: check-clean-repo
-	echo  "Creating next minor version"
-	$(VERSION_BIN) minor --path $(RELEASE_FILE) --selector "version"
-	git add $(RELEASE_FILE) && git commit -m "Bump version to $$( cat $(RELEASE_FILE) | jq -r .version)"
-
-.PHONY: next-major-version
-next-major-version: check-clean-repo
-	echo "Creating next major version"
-	$(VERSION_BIN) major --path $(RELEASE_FILE) --selector "version"
-	git add $(RELEASE_FILE) && git commit -m "Bump version to $$(cat $(RELEASE_FILE) | jq -r .version)"
-
-.PHONY: check-clean-repo
-check-clean-repo:
-	git diff-index --quiet HEAD || (echo "There are uncommitted changes after running make. Please commit or stash them before running make."; exit 1)
-	
-## tag repository with next patch version
-tag-patch-version: next-patch-version 
-	echo "Tagging patch version"
-	$(eval PROJECT_VERSION := $(shell cat $(RELEASE_FILE) | jq -r .version))
-	git-chglog --next-tag v$(PROJECT_VERSION) -o $(CHANGELOG_FILE)
-	git add $(CHANGELOG_FILE) && git commit -m "Update changelog"
-	git tag -a v$(PROJECT_VERSION) -m "Version $(PROJECT_VERSION)"
-
-## tag repository with next minor version
-tag-minor-version: next-minor-version 
-	echo "Tagging minor version"
-	$(eval PROJECT_VERSION := $(shell cat $(RELEASE_FILE) | jq -r .version))
-	git-chglog --next-tag v$(PROJECT_VERSION) -o $(CHANGELOG_FILE)
-	git add $(CHANGELOG_FILE) && git commit -m "Update changelog"
-	git tag -a v$(PROJECT_VERSION) -m "Version $(PROJECT_VERSION)"
-
-## tag repository with next major version
-tag-major-version: next-major-version 
-	echo "Tagging major version"
-	$(eval PROJECT_VERSION := $(shell cat $(RELEASE_FILE) | jq -r .version))
-	git-chglog --next-tag v$(PROJECT_VERSION) -o $(CHANGELOG_FILE)
-	git add $(CHANGELOG_FILE) && git commit -m "Update changelog"
-	git tag -a v$(PROJECT_VERSION) -m "Version $(PROJECT_VERSION)"
-
-GO_MOD_FILE := $(SOURCE_PATH)go.mod
-
-ifeq ($(shell test -e $(GO_MOD_FILE) && echo -n yes),yes)
-    GO_CURRENT_MODULE := $(shell cat $(GO_MOD_FILE) | head -n1 | cut -d" " -f2)
-	# go install github.com/google/go-licenses@latest
-	EXECUTABLES = $(EXECUTABLES:-) go-licenses;    
-endif
-
-.PHONY: fetch-licenses
-## Fetch licenses for all modules
-fetch-licenses:
-	go-licenses save $(GO_CURRENT_MODULE) --ignore gitlab.schukai.com --force --save_path $(PROJECT_ROOT)licenses/
-
-# https://spdx.github.io/spdx-spec/v2.3/SPDX-license-list/
-ADDLICENSE_BIN ?= addlicense
-ifeq ($(shell command -v $(ADDLICENSE_BIN) 2> /dev/null),)
-	$(shell go install github.com/google/addlicense@latest)
-	EXECUTABLES = $(EXECUTABLES:-) $(ADDLICENSE_BIN);
-endif
-
-.PHONY: add-licenses
-## Add license headers to all go files
-add-licenses:
-	addlicense -c "schukai GmbH" -s -l "AGPL-3.0" ./*.go
-
-
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/README.md b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/README.md
deleted file mode 100644
index 1bfefff79921659f79ca0eafe6964903bf07b93d..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/README.md
+++ /dev/null
@@ -1,235 +0,0 @@
-## X-Flags
-
-## What does this library do?
-
-This library provides a simple way to use flags in your application. It extends the standard library
-to be able to define and use a structure with flags.
-
-It supports:
-
-* [X]  Define flags in a structure
-* [X]  Define callbacks for flags
-* [X]  Define default values for flags
-* [X]  Define a map for values
-
-## Installation
-
-```shell
-go get gitlab.schukai.com/oss/libraries/go/application/xflags
-```
-
-**Note:** This library uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
-
-## Usage
-
-### Initialize
-
-A new flag set is created using the `xflags.New()` function. 
-The structure passed is used as the type for the flags.
-
-```go
-package main
-
-import (
-	"fmt"
-	"os"
-	"gitlab.schukai.com/oss/libraries/go/application/xflags"
-)
-
-```
-
-### Definition
-
-The flags are defined in the structure. The structure can be nested.
-The name of the field is used as the name of the flag. The type of the
-field is used as the type of the flag.
-
-```go
-type Definition struct {
-  Verbose bool `short:"v" long:"verbose" description:"Show verbose debug information"`
-  Serve   struct {
-    Host string `short:"h" long:"host" description:"Host to bind to" default:"localhost"`
-    Port int    `short:"p" long:"port" description:"Port to bind to" default:"8080"`
-  } `command:"serve" description:"Run the HTTP server" call:"DoServe"`
-}
-```
-
-The following tags are supported:
-
-| Tag           | Context  | Description                                |
-|---------------|----------|--------------------------------------------|
-| `short`       | Value    | Short name of the flag.                    |
-| `long`        | Value    | Long name of the flag.                     |
-| `description` | Value    | Description of the flag.                   |
-| `required`    | Value    | Flag is required.                          |
-| `map`         | Value    | Copy the value to the mapped structure.    |
-| `command`     | Command  | Flag is a command.                         |
-| `call`        | Command  | Function to call when the command is used. |
-| `ignore`      | -/-      | Property is ignored.                       |
-
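The `map` tag takes a path into the mapped structure (see [Mapped Values](#mapped-values) below). As a brief, hypothetical sketch only (this variant is not part of the original example above):

```go
type Definition struct {
  Verbose bool `short:"v" long:"verbose" description:"Show verbose debug information" map:"Verbose"`
  Serve   struct {
    Host string `short:"h" long:"host" description:"Host to bind to" default:"localhost" map:"Serve.Host"`
  } `command:"serve" description:"Run the HTTP server"`
}
```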
-
-### Callbacks
-
-The functions are called with a receiver. The receiver is the
-configuration. The function must have the following signature: 
-`func (d *Definition) <name> (s *setting[Definition])`
-
-Let's assume we have the above definition. The Property `Serve` contains
-the command `serve`. Furthermore, the command has the tag `call` with
-the value `DoServe`. The function `DoServe` is called when the command
-`serve` is used.
-
-Important: The function must be exported, that is, it
-must start with a capital letter.
-
-The function is called with the receiver `*Definition`
-
-An example for the function `DoServe`:
-
-```go
-func (d *Definition) DoServe(_ *setting[Definition]) {
-   fmt.Printf("Serving on %s:%d", d.Serve.Host, d.Serve.Port)
-}
-```
-
-In this example, the function is called with the receiver `*Definition`.
-The function is called with the setting `*setting[Definition]`. The
-setting is used to get the values of the flags. But in this example, we
-don't need the setting. So we use the underscore `_` to ignore the
-setting.
-
-### New Setting
-
-The function `New` creates a new setting for the given
-definition. The function returns a pointer to the setting.
-The first argument is a name for the setting. The second argument is the
-definition.
-
-A good choice for the name is the argument `os.Args[0]`.
-
-```go
-// define instance
-var instance *xflags.Settings[Definition]
-
-func Execute() {
-  instance = xflags.New(os.Args[0], Definition{})
-  if instance.HasErrors() {
-    // ...
-```
-
-### Parse
-
-The flags are parsed using the `Parse()` function. The function returns
-the command and the setting. The command is the name of the command
-which was used. The setting is the setting of the flags.
-
-```go
-setting.Parse(os.Args[1:])
-```
-
-For testing, you can use the following arguments:
-
-```go
-setting.Parse([]string{"--verbose", "serve", "--host", "localhost", "--port", "8080"})
-```
-
-### Get Values
-
-The values of the flags are available in the setting. The values are
-available in the structure. The structure is the same as the definition.
-
-```go
-fmt.Printf("Host: %s", setting.GetValues().Serve.Host)
-fmt.Printf("Port: %d", setting.GetValues().Serve.Port)
-```
-
-### Execute
-
-The function `Execute()` executes the command. See the section
-[Callbacks](#callbacks) for more information.
-
-```go
-setting.Execute()
-```
-
-### Mapped Values
-
-The mapped structure is used to copy the 
-values of the flags to another structure
-and to a map.
-
-The mapped structure must implement the `Copyable` interface.
-
-```go
-type MyObj struct {
-  Verbose bool
-  Serve   struct {
-    Host string
-    Port int
-  }
-}
-
-func (m *MyObj) Copy(_ map[string]any) {
-}
-
-func main() {
-  setting := New(os.Args[0], Definition{})
-  setting.SetMappedObject(&MyObj{})
-  setting.Parse(os.Args[1:])
-  setting.Execute()
-}
-```
-
-The path in the structure is defined by the tag `map`.
-
-The map of values can be retrieved via the `GetMap()` method.
-
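As a minimal sketch, continuing the example above, the collected map can then be read with `GetMap()`:

```go
for path, value := range setting.GetMap() {
  fmt.Printf("%s = %v\n", path, value)
}
```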
-
-
-### Arguments
-
-The free arguments can be fetched with the method `Args()`.
-
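For example (a minimal fragment, reusing the `setting` value from the examples above):

```go
for _, arg := range setting.Args() {
  fmt.Println("free argument:", arg)
}
```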
-### Check Status
-
-The execution result can be queried with the functions:
-
-- `HelpRequested() bool`
-- `WasExecuted() bool`
-- `Error() error`
-- `MissingCommand() bool`
-
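A hedged sketch of how these checks might be combined after parsing and executing (reusing the names from the examples above):

```go
setting.Parse(os.Args[1:])
setting.Execute()

switch {
case setting.HelpRequested():
  fmt.Print(setting.Output())
case setting.HasErrors():
  fmt.Println("error:", setting.Error())
case setting.MissingCommand():
  fmt.Println(setting.GetDefaults())
case setting.WasExecuted():
  fmt.Println("command executed")
}
```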
-## Contributing
-
-Merge requests are welcome. For major changes, please open an issue first to discuss what
-you would like to change. **Please make sure to update tests as appropriate.**
-
-Versioning is done with [SemVer](https://semver.org/).
-Changelog is generated with [git-chglog](https://github.com/git-chglog/git-chglog#git-chglog)
-
-Commit messages should follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
-Messages are started with a type, which is one of the following:
-
-- **feat**: A new feature
-- **fix**: A bug fix
-- **doc**: Documentation only changes
-- **refactor**: A code change that neither fixes a bug nor adds a feature
-- **perf**: A code change that improves performance
-- **test**: Adding missing or correcting existing tests
-- **chore**: Other changes that don't modify src or test files
-
-The footer would be used for a reference to an issue or a breaking change.
-
-A commit that has a footer `BREAKING CHANGE:`, or appends a ! after the type/scope,
-introduces a breaking API change (correlating with MAJOR in semantic versioning).
-A BREAKING CHANGE can be part of commits of any type.
-
-The following is an example of a commit message:
-
-```text
-feat: add 'extras' field
-```
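
A commit that introduces a breaking change could, as a purely hypothetical example, use the `!` marker and footer described above:

```text
feat!: change the default port of the serve command

BREAKING CHANGE: the serve command now binds to port 9090 by default
```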
-
-## License
-
-[AGPL-3.0](https://choosealicense.com/licenses/agpl-3.0/)
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/api.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/api.go
deleted file mode 100644
index efdde2664b3f5619a502f0f33c87ed90701a09a6..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/api.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-)
-
-type dummyCopyArg struct{}
-
-func (n dummyCopyArg) Copy(_ map[string]any) {}
-
-// Execute executes the command line arguments and calls the functions.
-func Execute[C any](cmd C, cpy ...Copyable) *Settings[C] {
-
-	if cpy == nil {
-		return execute(cmd, dummyCopyArg{}, os.Args[0], os.Args[1:])
-	}
-
-	if len(cpy) > 1 {
-		panic("too many arguments")
-	}
-
-	return execute(cmd, cpy[0], os.Args[0], os.Args[1:])
-}
-
-// PrintFlagOutput prints the flag output to the standard output.
-func (s *Settings[C]) PrintFlagOutput() {
-	fmt.Println(s.command.flagSet.Output())
-}
-
-// GetFlagOutput prints the flag output to the standard output.
-func (s *Settings[C]) GetFlagOutput() {
-	fmt.Println(s.command.flagSet.Output())
-}
-
-// execute is the internal implementation of Execute.
-func execute[C any, D Copyable](cmd C, proxy D, name string, args []string) *Settings[C] {
-	instance := New(name, cmd)
-	if instance.HasErrors() {
-		return instance
-	}
-
-	if (reflect.ValueOf(&proxy).Elem().Type() != reflect.TypeOf(dummyCopyArg{})) {
-		instance.SetMappedObject(proxy)
-		if instance.HasErrors() {
-			return instance
-		}
-	}
-
-	instance.Parse(args)
-	if instance.HelpRequested() {
-		return instance
-	}
-
-	if instance.HasErrors() {
-		return instance
-	}
-
-	instance.Execute()
-	if instance.HasErrors() {
-		return instance
-	}
-
-	return instance
-}
-
-// New creates a new instance of the settings.
-// name should be the name of the command and comes from the first argument of the command line.
-// os.Args[0] is a good choice.
-func New[C any](name string, definitions C) *Settings[C] {
-
-	s := &Settings[C]{
-		config: config{
-			errorHandling: flag.ContinueOnError,
-		},
-	}
-
-	if reflect.TypeOf(definitions).Kind() != reflect.Struct {
-		s.errors = append(s.errors, newUnsupportedReflectKindError(reflect.TypeOf(definitions)))
-		return s
-	}
-
-	s.mapping = make(map[string]any)
-
-	buf := bytes.NewBufferString("")
-	s.flagOutput = io.Writer(buf)
-	s.definitions = definitions
-	s.initCommands(name)
-
-	return s
-}
-
-// Output returns the writer where the flag package writes its output.
-func (s *Settings[C]) Output() string {
-	return s.flagOutput.(*bytes.Buffer).String()
-}
-
-// Args returns the arguments that were not parsed.
-func (s *Settings[C]) Args() []string {
-	return s.args
-}
-
-// GetDefaults returns the default values of the settings.
-func (s *Settings[C]) GetDefaults() string {
-	mem := s.flagOutput
-	s.flagOutput.(*bytes.Buffer).Reset()
-	s.command.flagSet.PrintDefaults()
-	r := s.flagOutput.(*bytes.Buffer).String()
-	s.flagOutput = mem
-	return r
-}
-
-func (s *Settings[C]) GetMap() map[string]any {
-	return s.mapping
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/command.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/command.go
deleted file mode 100644
index e58b438a06846701ce08c3cf5a6357ed1700f46e..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/command.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"flag"
-	"reflect"
-)
-
-type cmd[C any] struct {
-	name         string
-	flagSet      *flag.FlagSet
-	tagMapping   map[string]string
-	proxyMapping map[string]string
-	commands     []*cmd[C]
-	settings     *Settings[C]
-	valuePath    []string
-	functionName string
-}
-
-func (c *cmd[C]) parse(args []string) {
-
-	s := c.settings
-
-	for _, command := range c.commands {
-
-		if command.name == args[0] {
-
-			if command.flagSet == nil {
-				s.errors = append(s.errors, newMissingFlagSetError(command.name))
-				continue
-			}
-
-			err := command.flagSet.Parse(args[1:])
-			if err != nil {
-				s.errors = append(s.errors, err)
-				continue
-			}
-
-			s.assignValues(*command)
-
-			r := command.flagSet.Args()
-			if len(r) == 0 {
-				c.settings.args = []string{}
-				continue
-			}
-
-			c.settings.args = r
-			command.parse(r)
-
-		}
-	}
-
-}
-
-func buildCommandStruct[C any](s *Settings[C], name, fkt string, errorHandling flag.ErrorHandling, path []string) *cmd[C] {
-	cc := &cmd[C]{
-		name:         name,
-		flagSet:      flag.NewFlagSet(name, errorHandling),
-		commands:     []*cmd[C]{},
-		settings:     s,
-		tagMapping:   map[string]string{},
-		proxyMapping: map[string]string{},
-		valuePath:    path,
-		functionName: fkt,
-	}
-
-	cc.flagSet.SetOutput(s.flagOutput)
-
-	return cc
-}
-
-func (c *cmd[C]) initCommands(x reflect.Value, m map[string]string, path string) {
-
-	if x.Kind() != reflect.Struct {
-		c.settings.errors = append(c.settings.errors, newUnsupportedReflectKindError(x.Type()))
-		return
-	}
-
-	cc := buildCommandStruct[C](c.settings, m[tagCommand], m[tagCall], c.settings.config.errorHandling, append(c.valuePath, path))
-
-	cc.parseStruct(x.Interface())
-	c.commands = append(c.commands, cc)
-
-}
-
-func (c *cmd[C]) initFlags(x reflect.Value, m map[string]string) {
-
-	if x.Kind() == reflect.Struct {
-		c.settings.errors = append(c.settings.errors, newUnsupportedReflectKindError(x.Type()))
-		return
-	}
-
-	switch x.Kind() {
-	case reflect.Bool:
-		if m[tagShort] != "" {
-			c.flagSet.Bool(m[tagShort], x.Bool(), m[tagDescription])
-		}
-		if m[tagLong] != "" {
-			c.flagSet.Bool(m[tagLong], x.Bool(), m[tagDescription])
-		}
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		if m[tagShort] != "" {
-			c.flagSet.Int(m[tagShort], int(x.Int()), m[tagDescription])
-		}
-		if m[tagLong] != "" {
-			c.flagSet.Int(m[tagLong], int(x.Int()), m[tagDescription])
-		}
-
-	case reflect.Float32, reflect.Float64:
-		if m[tagShort] != "" {
-			c.flagSet.Float64(m[tagShort], x.Float(), m[tagDescription])
-		}
-		if m[tagLong] != "" {
-			c.flagSet.Float64(m[tagLong], x.Float(), m[tagDescription])
-		}
-	case reflect.String:
-		if m[tagShort] != "" {
-			c.flagSet.String(m[tagShort], x.String(), m[tagDescription])
-		}
-		if m[tagLong] != "" {
-			c.flagSet.String(m[tagLong], x.String(), m[tagDescription])
-		}
-	case reflect.Slice:
-
-		if x.Type() == reflect.TypeOf(StringFlags{}) {
-
-			if m[tagShort] != "" {
-				xx := x.Interface().(StringFlags)
-				c.flagSet.Var(&xx, m[tagShort], m[tagDescription])
-			}
-
-			if m[tagLong] != "" {
-				xx := x.Interface().(StringFlags)
-				c.flagSet.Var(&xx, m[tagLong], m[tagDescription])
-			}
-
-		}
-
-	default:
-		c.settings.errors = append(c.settings.errors, newUnsupportedFlagTypeError(x.Type()))
-	}
-
-}
-
-func (c *cmd[C]) parseStruct(dta any) {
-
-	t := reflect.TypeOf(dta)
-
-	if t.Kind() != reflect.Struct {
-		c.settings.errors = append(c.settings.errors, newUnsupportedReflectKindError(t))
-		return
-	}
-
-	v := reflect.ValueOf(dta)
-
-	for i := 0; i < v.NumField(); i++ {
-		x := v.Field(i)
-		m := getTagMap(v.Type().Field(i))
-
-		if m[tagShort] != "" || m[tagLong] != "" {
-			if m[tagCommand] != "" {
-				c.settings.errors = append(c.settings.errors, newAmbiguousTagError(v.Type().Field(i).Name, m))
-				continue
-			}
-
-			if m[tagMapping] != "" {
-				c.proxyMapping[v.Type().Field(i).Name] = m[tagMapping]
-			}
-
-			if m[tagShort] != "" {
-				c.tagMapping[m[tagShort]] = v.Type().Field(i).Name
-			}
-
-			if m[tagLong] != "" {
-				c.tagMapping[m[tagLong]] = v.Type().Field(i).Name
-			}
-			c.initFlags(x, m)
-		} else if m[tagCommand] != "" {
-
-			c.initCommands(x, m, v.Type().Field(i).Name)
-		} else if m[tagIgnore] != "" {
-			continue
-		} else {
-			c.settings.errors = append(c.settings.errors, newMissingTagError(v.Type().Field(i).Name))
-		}
-
-	}
-
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/doc.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/doc.go
deleted file mode 100644
index b252df1527633d78f26f378caa6b1e015ce2c26e..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// This package provides a simple way to create a CLI application
-// with subcommands. It is based on the flag package from the Go standard library.
-
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published
-// by the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-package xflags
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/error.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/error.go
deleted file mode 100644
index 7086f98a322b8ea092e8be4d79e817450db1f030..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/error.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"errors"
-	"reflect"
-)
-
-// ResetErrors resets the errors to an empty list.
-// After calling this function, HasErrors() will return false.
-func (s *Settings[C]) ResetErrors() *Settings[C] {
-	s.errors = []error{}
-	return s
-}
-
-// Check if the setting contains errors
-func (s *Settings[C]) HasErrors() bool {
-	return len(s.errors) > 0
-}
-
-// Get all errors
-func (s *Settings[C]) Errors() []error {
-	return s.errors
-}
-
-func (s *Settings[C]) AddError(err error) *Settings[C] {
-	s.errors = append(s.errors, err)
-	return s
-}
-
-var WatchListNotInitializedError = errors.New("watch list not initialized")
-var MissingCommandError = errors.New("missing command")
-var NotParsedError = errors.New("flag set not parsed")
-var ShadowMustBePointerError = errors.New("shadow must be a pointer to a struct")
-
-// At the reflect level, some types are not supported
-type UnsupportedReflectKindError error
-
-func newUnsupportedReflectKindError(t reflect.Type) UnsupportedReflectKindError {
-	return UnsupportedReflectKindError(errors.New("type " + t.String() + " is not supported"))
-}
-
-// AmbiguousTagError is used when a tag is ambiguous
-type AmbiguousTagError error
-
-func newAmbiguousTagError(name string, m map[string]string) AmbiguousTagError {
-
-	msg := "ambiguous tag for field " + name + ": "
-	for k, v := range m {
-		msg += k + " = " + v + ", "
-	}
-
-	return AmbiguousTagError(errors.New(msg))
-}
-
-// UnsupportedFlagTypeError is used when a flag type is not supported
-type UnsupportedFlagTypeError error
-
-func newUnsupportedFlagTypeError(t reflect.Type) UnsupportedFlagTypeError {
-	return UnsupportedFlagTypeError(errors.New("type " + t.String() + " is not supported"))
-}
-
-// EmptyTagError is used when a tag is empty
-type EmptyTagError error
-
-func newEmptyTagError(tag, name string) EmptyTagError {
-	return EmptyTagError(errors.New("tag " + tag + " is empty for field " + name))
-}
-
-// MissingTagError is used when a tag is missing
-type MissingTagError error
-
-func newMissingTagError(tag string) MissingTagError {
-	return MissingTagError(errors.New("tag " + tag + " is empty"))
-}
-
-type InvalidPathError error
-
-func newInvalidPathError(path string) InvalidPathError {
-	return InvalidPathError(errors.New("invalid path " + path))
-}
-
-type UnsupportedTypeAtTopOfPathError error
-
-func newUnsupportedTypeAtTopOfPathError(path string, t reflect.Type) UnsupportedTypeAtTopOfPathError {
-	return UnsupportedTypeAtTopOfPathError(errors.New("unsupported type " + t.String() + " at top of path " + path))
-}
-
-type UnsupportedTypePathError error
-
-func newUnsupportedTypePathError(path string, t reflect.Type) UnsupportedTypePathError {
-	return UnsupportedTypePathError(errors.New("unsupported type " + t.String() + " at path " + path))
-}
-
-type UnknownFlagError error
-
-func newUnknownFlagError(name string) UnknownFlagError {
-	return UnknownFlagError(errors.New("unknown flag " + name))
-}
-
-type CannotSetError error
-
-func newCannotSetError(name string) CannotSetError {
-	return CannotSetError(errors.New("cannot set " + name))
-}
-
-type MissingFlagSetError error
-
-func newMissingFlagSetError(name string) MissingFlagSetError {
-	return MissingFlagSetError(errors.New("missing flag set for command " + name))
-}
-
-type StdoutError error
-
-func newStdoutError(message string) StdoutError {
-	return StdoutError(errors.New(message))
-}
-
-type MissingFunctionError error
-
-func newMissingFunctionError(missing string) MissingFunctionError {
-	return MissingFunctionError(errors.New("missing function " + missing))
-}
-
-type InvalidTypeForPathError error
-
-func newInvalidTypeForPathError(path string, pt string, nt string) InvalidTypeForPathError {
-	return InvalidTypeForPathError(errors.New("invalid type for path " + path + ": expected " + pt + ", got " + nt))
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/execute.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/execute.go
deleted file mode 100644
index 68a06c612477211264501f11003dc5901308f6fd..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/execute.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"flag"
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-func (s *Settings[C]) Execute() *Settings[C] {
-	if len(s.errors) > 0 {
-		return s
-	}
-
-	if s.command == nil {
-		s.errors = append(s.errors, MissingCommandError)
-		return s
-	}
-
-	if !s.command.flagSet.Parsed() {
-		s.errors = append(s.errors, NotParsedError)
-	} else {
-		s.wasExecuted = callCmdFunctions(s, s.command.commands)
-	}
-
-	return s
-}
-
-func callCmdFunctions[C any](settings *Settings[C], commands []*cmd[C]) bool {
-
-	wasExecuted := false
-	shouldExecute := false
-
-	var availableCommands []string
-	currentCommand := ""
-
-	for _, command := range commands {
-		if command.flagSet.Parsed() {
-
-			shouldExecute = true
-			currentCommand = command.name
-
-			if len(command.commands) > 0 {
-				r := callCmdFunctions(settings, command.commands)
-				if r {
-					wasExecuted = true
-				}
-			}
-
-			if !wasExecuted {
-				f := reflect.ValueOf(&command.settings.definitions).MethodByName(command.functionName)
-				if f.IsValid() {
-					m := command.settings
-					in := []reflect.Value{reflect.ValueOf(m)}
-					f.Call(in)
-					wasExecuted = true
-				}
-			}
-
-			break
-		} else {
-			availableCommands = append(availableCommands, command.name)
-		}
-
-	}
-
-	if shouldExecute {
-		if !wasExecuted {
-			settings.errors = append(settings.errors, newMissingFunctionError(currentCommand))
-			return false
-		}
-
-		return true
-	}
-
-	if len(availableCommands) > 0 {
-		if settings.hint == "" {
-			settings.hint = fmt.Sprintf("Did you mean: %v?", strings.Join(availableCommands, ", "))
-		}
-		settings.errors = append(settings.errors, MissingCommandError)
-	}
-
-	return false
-
-}
-
-// HelpRequested indicates if the help flag was set.
-func (s *Settings[C]) HelpRequested() bool {
-
-	for _, err := range s.errors {
-		if err == flag.ErrHelp {
-			return true
-		}
-	}
-
-	return false
-}
-
-// MissingCommand returns true if no command was found.
-func (s *Settings[C]) MissingCommand() bool {
-
-	for _, err := range s.errors {
-		if err == MissingCommandError {
-			return true
-		}
-	}
-
-	return false
-}
-
-// WasExecuted returns true if the call function was executed
-func (s *Settings[C]) WasExecuted() bool {
-	return s.wasExecuted
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help-util.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help-util.go
deleted file mode 100644
index c3512fd773ca3236257b513c8a6296bc6eb45283..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help-util.go
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
-* Copyright (c) 2009 The Go Authors. All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following disclaimer
-* in the documentation and/or other materials provided with the
-* distribution.
-* * Neither the name of Google Inc. nor the names of its
-* contributors may be used to endorse or promote products derived from
-* this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-**/
-
-package xflags
-
-// this file contains adapted standard functions from flag.go
-
-import (
-	"flag"
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-func getFlagTable(f *flag.FlagSet) []string {
-	var isZeroValueErrs []error
-
-	result := []string{}
-
-	f.VisitAll(func(f *flag.Flag) {
-		var b strings.Builder
-		fmt.Fprintf(&b, "  -%s", f.Name) // Two spaces before -; see next two comments.
-
-		name, usage := flag.UnquoteUsage(f)
-		if len(name) > 0 {
-			b.WriteString(" ")
-			b.WriteString(name)
-		}
-		// Boolean flags of one ASCII letter are so common we
-		// treat them specially, putting their usage on the same line.
-		if b.Len() <= 4 { // space, space, '-', 'x'.
-			b.WriteString("\t")
-		} else {
-			// Four spaces before the tab triggers good alignment
-			// for both 4- and 8-space tab stops.
-			b.WriteString("\n    \t")
-		}
-		b.WriteString(strings.ReplaceAll(usage, "\n", "\n    \t"))
-
-		// Print the default value only if it differs to the zero value
-		// for this flag type.
-		if isZero, err := isZeroValue(f, f.DefValue); err != nil {
-			isZeroValueErrs = append(isZeroValueErrs, err)
-		} else if !isZero {
-			if _, ok := f.Value.(*stringValue); ok {
-				// put quotes on the value
-				fmt.Fprintf(&b, " (default %q)", f.DefValue)
-			} else {
-				fmt.Fprintf(&b, " (default %v)", f.DefValue)
-			}
-		}
-
-		result = append(result, b.String())
-
-	})
-	// If calling String on any zero flag.Values triggered a panic, print
-	// the messages after the full set of defaults so that the programmer
-	// knows to fix the panic.
-	if errs := isZeroValueErrs; len(errs) > 0 {
-		fmt.Fprintln(f.Output())
-		for _, err := range errs {
-			result = append(result, err.Error())
-		}
-	}
-
-	return result
-}
-
-// isZeroValue determines whether the string represents the zero
-// value for a flag.
-func isZeroValue(f *flag.Flag, value string) (ok bool, err error) {
-	// Build a zero value of the flag's Value type, and see if the
-	// result of calling its String method equals the value passed in.
-	// This works unless the Value type is itself an interface type.
-	typ := reflect.TypeOf(f.Value)
-	var z reflect.Value
-	if typ.Kind() == reflect.Pointer {
-		z = reflect.New(typ.Elem())
-	} else {
-		z = reflect.Zero(typ)
-	}
-	// Catch panics calling the String method, which shouldn't prevent the
-	// usage message from being printed, but that we should report to the
-	// user so that they know to fix their code.
-	defer func() {
-		if e := recover(); e != nil {
-			if typ.Kind() == reflect.Pointer {
-				typ = typ.Elem()
-			}
-			err = fmt.Errorf("panic calling String method on zero %v for flag %s: %v", typ, f.Name, e)
-		}
-	}()
-	return value == z.Interface().(flag.Value).String(), nil
-}
-
-// -- string Value
-type stringValue string
-
-func newStringValue(val string, p *string) *stringValue {
-	*p = val
-	return (*stringValue)(p)
-}
-
-func (s *stringValue) Set(val string) error {
-	*s = stringValue(val)
-	return nil
-}
-
-func (s *stringValue) Get() any { return string(*s) }
-
-func (s *stringValue) String() string { return string(*s) }
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help.go
deleted file mode 100644
index 060c8a76ca974f6032383021b3911c8de407f0c3..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"strings"
-)
-
-func (c *cmd[C]) getCommandLevel() (*cmd[C], []string) {
-
-	result := c
-
-	path := []string{}
-	path = append(path, c.name)
-
-	for _, c := range c.commands {
-
-		if c.flagSet.Parsed() {
-			var p []string
-			result, p = c.getCommandLevel()
-			path = append(path, p...)
-			break
-		}
-	}
-
-	return result, path
-}
-
-func (s *Settings[C]) Help() string {
-	return s.createHelp(s.command, []string{})
-}
-
-func (s *Settings[C]) createHelp(cmd *cmd[C], path []string) string {
-
-	h := strings.Join(path, " ")
-	if h != "" {
-		h = " " + h + " "
-	}
-	var help string
-
-	help = "Usage:" + h
-
-	g := getFlagTable(cmd.settings.command.flagSet)
-	if len(g) > 0 {
-		help += "[global options] "
-	}
-
-	if len(cmd.commands) > 0 {
-		help += "[command] "
-	}
-
-	help += "[arguments]"
-	help += "\n"
-
-	if len(g) > 0 {
-		help += "\nGlobal Options:\n"
-		help += strings.Join(g, "\n") + "\n"
-	}
-
-	if len(cmd.commands) > 0 {
-		for _, c := range cmd.commands {
-			help += "\nCommand: " + c.name + "\n"
-			//help += fmt.Sprintf("  %s\t%s", c.name, c.tagMapping[tagDescription])
-			s := getFlagTable(c.flagSet)
-
-			if len(s) > 0 {
-				help += "\nOptions:\n"
-				help += strings.Join(s, "\n") + "\n"
-			}
-
-		}
-	}
-	return help
-}
-
-// ContextHelp returns the help text for the current command level.
-func (s *Settings[C]) ContextHelp() string {
-	cmd, path := s.command.getCommandLevel()
-	return s.createHelp(cmd, path)
-
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/hint.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/hint.go
deleted file mode 100644
index 8586d2033980a9330987a340141bcc45837e19ce..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/hint.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2023 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-func (s *Settings[C]) SetHint(hint string) *Settings[C] {
-	s.hint = hint
-	return s
-}
-
-func (s *Settings[C]) HasHint() bool {
-	return s.hint != ""
-}
-
-func (s *Settings[C]) GetHint() string {
-	return s.hint
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/mapping.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/mapping.go
deleted file mode 100644
index f4f04381876bac98e0df65bbd0a6eaf03b34ffb3..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/mapping.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"flag"
-	"gitlab.schukai.com/oss/libraries/go/utilities/pathfinder"
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// SetMappedObject sets the shadow struct for the flag configuration.
-func (s *Settings[C]) SetMappedObject(proxy Copyable) *Settings[C] {
-
-	if reflect.TypeOf(proxy).Kind() != reflect.Ptr {
-		s.errors = append(s.errors, ShadowMustBePointerError)
-		return s
-	}
-
-	if reflect.TypeOf(proxy).Elem().Kind() != reflect.Struct {
-		s.errors = append(s.errors, ShadowMustBePointerError)
-		return s
-	}
-
-	s.proxy = proxy
-	return s
-}
-
-// Copyable is the interface for the proxy struct.
-type Copyable interface {
-	Copy(map[string]any)
-}
-
-func (s *Settings[C]) assignValues(c cmd[C]) {
-	flgs := c.flagSet
-	flgs.Visit(func(f *flag.Flag) {
-
-		name := f.Name
-		stringValue := f.Value.String()
-
-		k, ok := c.tagMapping[name]
-		if !ok {
-			s.errors = append(s.errors, newUnknownFlagError(name))
-			return
-		}
-
-		pa := append(c.valuePath, k)
-		p := strings.Join(pa, ".")
-
-		q, err := pathfinder.GetValue(&s.definitions, p)
-		if err != nil {
-			s.errors = append(s.errors, err)
-			return
-		}
-
-		if q == nil {
-			s.errors = append(s.errors, newUnknownFlagError(name))
-			return
-		}
-
-		typeOf := reflect.TypeOf(q)
-
-		switch typeOf.Kind() {
-		case reflect.String:
-			err = pathfinder.SetValue(&s.definitions, p, stringValue)
-		case reflect.Int:
-			intVar, convErr := strconv.Atoi(stringValue)
-			if convErr != nil {
-				s.errors = append(s.errors, convErr)
-				return
-			}
-			// assign to the outer err so the check after the switch
-			// also reports failures from SetValue
-			err = pathfinder.SetValue(&s.definitions, p, intVar)
-		case reflect.Bool:
-			boolVar, convErr := strconv.ParseBool(stringValue)
-			if convErr != nil {
-				s.errors = append(s.errors, convErr)
-				return
-			}
-			err = pathfinder.SetValue(&s.definitions, p, boolVar)
-		case reflect.Slice:
-
-			switch typeOf.Elem().Kind() {
-			case reflect.String:
-				qs := q.(StringFlags)
-				stringSliceValue := f.Value.(*StringFlags)
-				for _, v := range *stringSliceValue {
-					err = qs.Set(v)
-					if err != nil {
-						s.errors = append(s.errors, err)
-						return
-					}
-				}
-				err = pathfinder.SetValue(&s.definitions, p, qs)
-			default:
-				err = pathfinder.SetValue(&s.definitions, p, stringValue)
-			}
-
-		}
-
-		if err != nil {
-			s.errors = append(s.errors, err)
-		}
-
-		if c.proxyMapping[k] != "" {
-			p = c.proxyMapping[k]
-			s.mapping[p] = stringValue
-		}
-
-	})
-
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/parse.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/parse.go
deleted file mode 100644
index 75e9ed018c9f05057419cb0d5203d920dc9cb940..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/parse.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"os"
-)
-
-// ParseOsArgs parses the os.Args.
-func (s *Settings[C]) ParseOsArgs() *Settings[C] {
-	return s.Parse(os.Args[1:])
-}
-
-// Parse parses the command line arguments and assigns the values to the settings.
-func (s *Settings[C]) Parse(args []string) *Settings[C] {
-	if len(s.errors) > 0 {
-		return s
-	}
-
-	if s.command == nil {
-		s.errors = append(s.errors, MissingCommandError)
-		return s
-	}
-
-	err := s.command.flagSet.Parse(args)
-	if err != nil {
-		s.errors = append(s.errors, err)
-		return s
-	}
-
-	s.assignValues(*s.command)
-
-	r := s.command.flagSet.Args()
-	if len(r) == 0 {
-		return s
-	}
-
-	s.command.parse(r)
-
-	if s.mapping != nil && len(s.mapping) > 0 && s.proxy != nil {
-		s.proxy.Copy(s.mapping)
-	}
-
-	return s
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/release.json b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/release.json
deleted file mode 100644
index 8621db17b0def94363211bf07ced301b57ea4f1c..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/release.json
+++ /dev/null
@@ -1 +0,0 @@
-{"version":"1.16.0"}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/setting.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/setting.go
deleted file mode 100644
index 78e1acd9699969f52785296e4032a78536fccacb..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/setting.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"flag"
-	"io"
-)
-
-func (s *Settings[C]) initCommands(name string) {
-	s.command = buildCommandStruct[C](s, name, "", s.config.errorHandling, []string{})
-	s.command.parseStruct(s.definitions)
-}
-
-type config struct {
-	errorHandling flag.ErrorHandling
-}
-
-// Settings[C] is the main struct for the xflags package.
-type Settings[C any] struct {
-	definitions C
-
-	command *cmd[C]
-
-	errors     []error
-	flagOutput io.Writer
-
-	args []string
-
-	config config
-
-	mapping     map[string]any
-	proxy       Copyable
-	wasExecuted bool
-
-	hint string
-}
-
-func (s *Settings[C]) GetValues() C {
-	return s.definitions
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/tags.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/tags.go
deleted file mode 100644
index f019823b57e9b7374e63e1cfeb7c3ba5792bec3d..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/tags.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"reflect"
-	"strconv"
-)
-
-const (
-	tagIgnore      = "ignore"
-	tagCall        = "call"
-	tagCommand     = "command"
-	tagShort       = "short"
-	tagLong        = "long"
-	tagDescription = "description"
-	tagMapping     = "map"
-)
-
-func getTagMap(field reflect.StructField) (value map[string]string) {
-
-	tagValues := map[string]string{}
-	tag := field.Tag
-
-	// code from reflect.StructTag.Lookup
-	for tag != "" {
-		// Skip leading space.
-		i := 0
-		for i < len(tag) && tag[i] == ' ' {
-			i++
-		}
-		tag = tag[i:]
-		if tag == "" {
-			break
-		}
-
-		i = 0
-		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
-			i++
-		}
-		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
-			break
-		}
-		name := string(tag[:i])
-		tag = tag[i+1:]
-
-		// Scan quoted string to find value.
-		i = 1
-		for i < len(tag) && tag[i] != '"' {
-			if tag[i] == '\\' {
-				i++
-			}
-			i++
-		}
-		if i >= len(tag) {
-			break
-		}
-		qvalue := string(tag[:i+1])
-		tag = tag[i+1:]
-
-		value, err := strconv.Unquote(qvalue)
-		if err != nil {
-			break
-		}
-
-		tagValues[name] = value
-
-	}
-
-	return tagValues
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/type.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/type.go
deleted file mode 100644
index b5602b6dfefb4afdb0bec5ef62307f3362e75987..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/type.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2023 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package xflags
-
-import (
-	"fmt"
-	"strings"
-)
-
-type StringFlags []string
-
-func (i *StringFlags) String() string {
-	return strings.Join(*i, ",")
-}
-
-func (i *StringFlags) Set(value string) error {
-	*i = append(*i, value)
-	return nil
-}
-
-type IntFlags []int
-
-func (i *IntFlags) String() string {
-	r := make([]string, len(*i))
-	for k, v := range *i {
-		r[k] = fmt.Sprintf("%d", v)
-	}
-
-	return strings.Join(r, ",")
-}
-
-func (i *IntFlags) Set(value int) error {
-	*i = append(*i, value)
-	return nil
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/LICENSE b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/LICENSE
deleted file mode 100644
index 89c200ee3121a668aa2f1ed591d36da44a542000..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-LICENSE
-
-* All content residing under the "documentation/" directory of this repository is licensed under "Creative Commons: CC BY-SA 4.0 license".
-* All content that resides under the "application/" directory of this repository, if that directory exists, is licensed under the license defined in "application/LICENSE".
-* All third-party components that are integrated into our software are licensed under the original licence provided by the owner of the respective component.
-* Content outside the above directories or restrictions is available under the "AGPL 3.0" licence as defined here: https://www.gnu.org/licenses/agpl-3.0.en.html
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/engine.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/engine.go
deleted file mode 100644
index f6e984e7ded13e1b8b6a80643db1785660ca7535..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/engine.go
+++ /dev/null
@@ -1,652 +0,0 @@
-// Copyright 2023 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package engine
-
-import (
-	"fmt"
-	"github.com/andybalholm/cascadia"
-	"gitlab.schukai.com/oss/libraries/go/utilities/data.git"
-	"golang.org/x/net/html"
-	"io"
-	"reflect"
-	"strings"
-)
-
-const attributePrefix = "data-"
-
-type Engine struct {
-	attributePrefix string
-	errors          []error
-	logNode         *html.Node
-	transformer     *data.Transformer
-}
-
-func (e *Engine) SetLogNode(node *html.Node) *Engine {
-	e.logNode = node
-	return e
-}
-
-func (e *Engine) SetAttributePrefix(prefix string) *Engine {
-	e.attributePrefix = prefix
-	return e
-}
-
-func (e *Engine) GetAttributePrefix() string {
-	return e.attributePrefix
-}
-
-func (e *Engine) GetLogNode() *html.Node {
-	return e.logNode
-}
-
-func New(dataset map[any]any) *Engine {
-
-	return &Engine{
-		attributePrefix: attributePrefix,
-		transformer:     data.NewTransformer(&dataset),
-	}
-}
-
-func (e *Engine) HasErrors() bool {
-	return len(e.errors) > 0
-}
-
-func (e *Engine) GetErrors() []error {
-	return e.errors
-}
-
-func (e *Engine) ProcessNode(node *html.Node) *Engine {
-	if node == nil {
-		return e
-	}
-
-	if node.Type == html.ElementNode || node.Type == html.DocumentNode {
-		e.ProcessElement(node)
-	}
-	return e
-}
-
-func functionExists(obj interface{}, funcName string) bool {
-	m := reflect.ValueOf(obj).MethodByName(funcName)
-	if m.IsValid() {
-		return true
-	}
-
-	return false
-}
-
-//func (e *Engine) callFunction(funcName string, node *html.Node, value string) *Engine {
-//	reflect.ValueOf(e).MethodByName(funcName).Call([]reflect.Value{reflect.ValueOf(node), reflect.ValueOf(value)})
-//	return e
-//}
-
-func (e *Engine) ProcessElement(node *html.Node) *Engine {
-	if node.Type != html.ElementNode && node.Type != html.DocumentNode {
-		e.errors = append(e.errors, &UnsupportedTypeError{Message: "Unsupported element type", NodeType: node.Type})
-		return e
-	}
-
-	runChildren := true
-
-	for _, attr := range node.Attr {
-		if attr.Namespace != "" ||
-			len(attr.Key) < len(e.attributePrefix) ||
-			attr.Key[:len(e.attributePrefix)] != e.attributePrefix {
-			continue
-		}
-
-		fktName := attr.Key[len(e.attributePrefix):]
-		fktName = strings.ToLower(fktName)
-
-		switch fktName {
-		case "repeat":
-			runChildren = false
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processRepeat(node, attr)
-
-		case "debug":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processDebug(node, attr)
-		case "attributes":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processAttributes(node, attr)
-		case "condition":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processCondition(node, attr)
-		case "replace":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processReplace(node, attr)
-		case "replace-self":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processReplaceSelf(node, attr)
-		case "remove":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processRemove(node, attr)
-		case "removetag":
-			node.Attr = removeAttribute(node.Attr, attr.Key)
-			e.processRemoveTag(node, attr)
-		default:
-			continue
-		}
-
-	}
-
-	if runChildren {
-		e.walkNodes(node)
-	}
-
-	return e
-
-}
-
-func (e *Engine) processRepeat(node *html.Node, attr html.Attribute) *Engine {
-
-	parent := node.Parent
-
-	if parent == nil {
-		e.errors = append(e.errors, &UnsupportedTypeError{Message: "Node has no parent", NodeType: node.Type})
-		return e
-	}
-
-	v := attr.Val
-	if v == "" {
-		return e
-	}
-
-	p := strings.Index(v, " ")
-	if p == -1 {
-		return e
-	}
-
-	key := v[:p]
-	instruction := v[p+1:]
-
-	iterator, err := e.transformer.Transform(instruction)
-	if err != nil {
-		e.errors = append(e.errors, err)
-		return e
-	}
-
-	var nextSibling *html.Node
-
-	for child := parent.FirstChild; child != nil; child = child.NextSibling {
-
-		if child == node {
-			nextSibling = node.NextSibling
-			parent.RemoveChild(node)
-			break
-		}
-
-	}
-
-	data := e.transformer.Dataset()
-	if data == nil {
-		data = &map[any]any{}
-	}
-
-	var errors []error
-
-	switch reflect.TypeOf(iterator).Kind() {
-	case reflect.Map:
-
-		listValue := reflect.ValueOf(iterator)
-		for _, key := range listValue.MapKeys() {
-			item := listValue.MapIndex(key).Interface()
-			errors = runNode(node, data, key.String(), item, nextSibling, parent)
-		}
-	case reflect.Slice:
-
-		listValue := reflect.ValueOf(iterator)
-		for i := 0; i < listValue.Len(); i++ {
-			item := listValue.Index(i).Interface()
-			errors = runNode(node, data, key, item, nextSibling, parent)
-
-		}
-
-	default:
-		e.errors = append(e.errors, &UnsupportedTypeError{Message: "Unsupported iterator type", NodeType: node.Type})
-	}
-
-	if errors != nil {
-		e.errors = append(e.errors, errors...)
-	}
-
-	return e
-}
-
-func runNode(node *html.Node, data *map[any]any, key string, item interface{}, nextSibling *html.Node, parent *html.Node) []error {
-
-	_, ok := (*data)[key]
-	if ok {
-		return []error{&UnsupportedTypeError{Message: "Key already exists", NodeType: node.Type}}
-	}
-
-	(*data)[key] = item
-	defer delete(*data, key)
-
-	x := New(*data)
-
-	template := CloneNode(node)
-	template.Parent = nil
-	template.PrevSibling = nil
-	template.NextSibling = nil
-
-	x.ProcessNodes(template)
-
-	if nextSibling != nil {
-		parent.InsertBefore(template, nextSibling)
-		nextSibling = template
-	} else {
-		parent.AppendChild(template)
-	}
-
-	return x.errors
-
-}
-
-func CloneNode(n *html.Node) *html.Node {
-	if n == nil {
-		return nil
-	}
-
-	val := &html.Node{}
-
-	val.Type = n.Type
-	val.Data = n.Data
-	val.DataAtom = n.DataAtom
-	val.Namespace = n.Namespace
-	val.Attr = make([]html.Attribute, len(n.Attr))
-	copy(val.Attr, n.Attr)
-
-	for child := n.FirstChild; child != nil; child = child.NextSibling {
-		val.AppendChild(CloneNode(child))
-	}
-
-	return val
-}
-
-func (e *Engine) processReplaceSelf(node *html.Node, attr html.Attribute) *Engine {
-
-	replace := attr.Val
-	if replace == "" {
-		return e
-	}
-
-	r, err := e.transformer.Transform(replace)
-	if err != nil {
-		e.errors = append(e.errors, err)
-		return e
-	}
-
-	if node.Parent == nil {
-		e.errors = append(e.errors, &UnsupportedTypeError{Message: "Node has no parent", NodeType: node.Type})
-		return e
-	}
-
-	parent := node.Parent
-	parent.InsertBefore(&html.Node{
-		Type: html.TextNode,
-		Data: r.(string),
-	}, node)
-
-	parent.RemoveChild(node)
-
-	return e
-
-}
-
-func (e *Engine) processReplace(node *html.Node, attr html.Attribute) *Engine {
-
-	replace := attr.Val
-	if replace == "" {
-		return e
-	}
-
-	r, err := e.transformer.Transform(replace)
-	if err != nil {
-		e.errors = append(e.errors, err)
-		return e
-	}
-
-	for child := node.FirstChild; child != nil; child = child.NextSibling {
-		node.RemoveChild(child)
-	}
-
-	nn := &html.Node{
-		Type: html.TextNode,
-	}
-
-	switch r.(type) {
-	case string:
-		nn.Data = r.(string)
-	default:
-		nn.Data = fmt.Sprintf("%v", r)
-	}
-
-	node.AppendChild(nn)
-
-	return e
-
-}
-
-func (e *Engine) processRemove(node *html.Node, attr html.Attribute) *Engine {
-
-	if node.Parent == nil {
-		e.errors = append(e.errors, &UnsupportedTypeError{Message: "Node has no parent", NodeType: node.Type})
-		return e
-	}
-
-	var condition any
-	var err error
-
-	v := attr.Val
-	if v != "" {
-		condition, err = e.transformer.Transform(v)
-		if err != nil {
-			e.errors = append(e.errors, err)
-			return e
-		}
-
-		switch condition.(type) {
-		case bool:
-			if !condition.(bool) {
-				return e
-			}
-		}
-	}
-
-	removeNode(node, e, v)
-
-	return e
-
-}
-
-func (e *Engine) processRemoveTag(node *html.Node, attr html.Attribute) *Engine {
-
-	parent := node.Parent
-
-	var condition any
-	var err error
-
-	v := attr.Val
-	if v != "" {
-		condition, err = e.transformer.Transform(v)
-		if err != nil {
-			e.errors = append(e.errors, err)
-			return e
-		}
-
-		switch condition.(type) {
-		case bool:
-			if !condition.(bool) {
-				return e
-			}
-		}
-	}
-
-	if parent == nil {
-		prefix := e.attributePrefix
-		node.Attr = append(node.Attr, html.Attribute{
-			Key: prefix + "condition-hide",
-			Val: v,
-		})
-
-		return e
-	}
-
-	for node.FirstChild != nil {
-		child := node.FirstChild
-		node.RemoveChild(child)
-		parent.InsertBefore(child, node)
-	}
-
-	parent.RemoveChild(node)
-
-	return e
-}
-
-func (e *Engine) processDebug(node *html.Node, attr html.Attribute) *Engine {
-	e.logNode = node
-	return e
-}
-
-func (e *Engine) processCondition(node *html.Node, attr html.Attribute) *Engine {
-
-	condition := attr.Val
-	if condition == "" {
-		return e
-	}
-
-	r, err := e.transformer.Transform(condition)
-	if err != nil {
-		e.errors = append(e.errors, err)
-		return e
-	}
-
-	shouldRemove := true
-
-	switch r.(type) {
-	case bool:
-		if r == true {
-			shouldRemove = false
-		}
-	}
-
-	if shouldRemove {
-		removeNode(node, e, condition)
-	}
-
-	return e
-}
-
-func removeNode(node *html.Node, e *Engine, condition string) {
-	if node.Parent != nil {
-		node.Parent.RemoveChild(node)
-	} else {
-		prefix := e.attributePrefix
-		node.Attr = append(node.Attr, html.Attribute{
-			Key: prefix + "condition-hide",
-			Val: condition,
-		})
-	}
-}
-
-func isHtmlFragment(html string) bool {
-
-	if strings.HasPrefix(html, "<!DOCTYPE html>") {
-		return false
-	}
-
-	if strings.HasPrefix(html, "<html") {
-		return false
-	}
-
-	return true
-}
-
-func (e *Engine) ProcessHtml(w io.Writer, r io.Reader) *Engine {
-
-	var err error
-
-	content, err := io.ReadAll(r)
-	if err != nil {
-		e.errors = append(e.errors, err)
-		return e
-	}
-
-	stringContent := string(content)
-	isFragment := isHtmlFragment(stringContent)
-
-	var doc *html.Node
-
-	selector := "body"
-
-	if isFragment {
-		c := strings.TrimSpace(stringContent)
-		if strings.HasPrefix(c, "<tr") {
-			selector = "table > tbody"
-			c = "<table>" + c
-		}
-
-		if strings.HasSuffix(c, "</tr>") {
-			c = c + "</table>"
-		}
-
-		if strings.HasPrefix(c, "<td") {
-			selector = "table > tbody > tr"
-			c = "<table><tr>" + c
-		}
-
-		if strings.HasSuffix(c, "</td>") {
-			c = c + "</tr></table>"
-		}
-
-		stringContent = c
-	}
-
-	doc, err = html.Parse(strings.NewReader(stringContent))
-	if err != nil {
-		e.errors = append(e.errors, err)
-		return e
-	}
-
-	e.ProcessNodes(doc)
-
-	if isFragment {
-
-		sel, err := cascadia.Compile(selector)
-		if err != nil {
-			e.errors = append(e.errors, err)
-			return e
-		}
-
-		body := sel.MatchFirst(doc)
-		stringBuilder := &strings.Builder{}
-		err = html.Render(stringBuilder, body)
-		if err != nil {
-			e.errors = append(e.errors, err)
-		}
-
-		result := stringBuilder.String()
-		if selector == "body" {
-			result = strings.TrimPrefix(result, "<body>")
-			result = strings.TrimSuffix(result, "</body>")
-		}
-
-		if selector == "table > tbody" {
-			result = strings.TrimPrefix(result, "<tbody>")
-			result = strings.TrimSuffix(result, "</tbody>")
-		}
-
-		if selector == "table > tbody > tr" {
-			result = strings.TrimPrefix(result, "<tr>")
-			result = strings.TrimSuffix(result, "</tr>")
-		}
-
-		w.Write([]byte(result))
-
-		return e
-
-	}
-
-	err = html.Render(w, doc)
-	if err != nil {
-		e.errors = append(e.errors, err)
-	}
-	return e
-
-}
-
-func (e *Engine) walkNodes(n *html.Node) *Engine {
-
-	if n == nil {
-		return e
-	}
-
-	for c := n.FirstChild; c != nil; {
-		//ns := c.NextSibling
-		e.ProcessNode(c)
-		c = c.NextSibling
-	}
-
-	return e
-}
-
-func (e *Engine) ProcessNodes(node *html.Node) *Engine {
-
-	if node == nil {
-		return e
-	}
-
-	e.ProcessNode(node)
-	e.walkNodes(node)
-	return e
-}
-
-func (e *Engine) processAttributes(node *html.Node, attr html.Attribute) *Engine {
-
-	value := attr.Val
-
-	value = strings.TrimSpace(value)
-	values := strings.Split(value, ",")
-	if len(values) == 0 {
-		return e
-	}
-
-	for _, v := range values {
-		v = strings.TrimSpace(v)
-		if v == "" {
-			continue
-		}
-
-		pos := strings.Index(v, " ")
-		if pos == -1 {
-			e.errors = append(e.errors, &InvalidAttributeError{
-				Message:   "Invalid attribute",
-				Attribute: v,
-				Node:      node,
-			})
-			continue
-		}
-
-		key := v[:pos]
-		instruction := v[pos+1:]
-
-		val, err := e.transformer.Transform(instruction)
-		if err != nil {
-			e.errors = append(e.errors, err)
-			continue
-		}
-
-		var cVal string
-		if val != nil {
-			cVal = val.(string)
-		}
-
-		node.Attr = removeAttribute(node.Attr, key)
-
-		node.Attr = append(node.Attr, html.Attribute{
-			Key: key,
-			Val: cVal,
-		})
-
-	}
-
-	return e
-}
-
-func removeAttribute(attrs []html.Attribute, key string) []html.Attribute {
-
-	var result []html.Attribute
-
-	for _, attr := range attrs {
-		if attr.Key == key {
-			continue
-		}
-
-		result = append(result, attr)
-	}
-
-	return result
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/error.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/error.go
deleted file mode 100644
index 0e65bab34f5494d3e023bbc3913da787f7aa28d8..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/error.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2023 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package engine
-
-import (
-	"fmt"
-	"golang.org/x/net/html"
-)
-
-type UnsupportedTypeError struct {
-	Message  string
-	NodeType html.NodeType
-}
-
-func (e *UnsupportedTypeError) Error() string {
-	return e.Message
-}
-
-type UnsupportedFunctionError struct {
-	Message      string
-	FunctionName string
-}
-
-func (e *UnsupportedFunctionError) Error() string {
-	return fmt.Sprintf("%s: %s", e.Message, e.FunctionName)
-}
-
-type InvalidAttributeError struct {
-	Message   string
-	Attribute string
-	Node      *html.Node
-}
-
-func (e *InvalidAttributeError) Error() string {
-	return fmt.Sprintf("%s: %s", e.Message, e.Attribute)
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/.gitignore b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/.gitignore
deleted file mode 100644
index dd6e53ac21498dfebb933a225a7ea1e3d5c0acac..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/.gitignore
+++ /dev/null
@@ -1,513 +0,0 @@
-
-# Makefile comes from update
-Makefile.example
-
-# Vendor
-/development/vendor/
-/deployment/vendor/
-
-# Created by https://www.toptal.com/developers/gitignore/api/intellij+iml,phpunit,git,vim,visualstudiocode,phpstorm,go,intellij+all,netbeans,dbeaver,node,yarn
-# Edit at https://www.toptal.com/developers/gitignore?templates=intellij+iml,phpunit,git,vim,visualstudiocode,phpstorm,go,intellij+all,netbeans,dbeaver,node,yarn
-
-### DBeaver ###
-# ide config file
-.dbeaver-data-sources*.xml
-
-### Git ###
-# Created by git for backups. To disable backups in Git:
-# git config --global mergetool.keepBackup false
-*.orig
-
-# Created by git when using merge tools for conflicts
-*.BACKUP.*
-*.BASE.*
-*.LOCAL.*
-*.REMOTE.*
-*_BACKUP_*.txt
-*_BASE_*.txt
-*_LOCAL_*.txt
-*_REMOTE_*.txt
-
-### Go ###
-# If you prefer the allow list template instead of the deny list, see community template:
-# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
-#
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Dependency directories (remove the comment below to include it)
-# vendor/
-
-# Go workspace file
-go.work
-
-### Go Patch ###
-/vendor/
-/Godeps/
-
-### Intellij+all ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# AWS User-specific
-.idea/**/aws.xml
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn.  Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# SonarLint plugin
-.idea/sonarlint/
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### Intellij+all Patch ###
-# Ignore everything but code style settings and run configurations
-# that are supposed to be shared within teams.
-
-.idea/*
-
-!.idea/codeStyles
-!.idea/runConfigurations
-
-### Intellij+iml ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-
-# AWS User-specific
-
-# Generated files
-
-# Sensitive or high-churn files
-
-# Gradle
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn.  Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-
-# Mongo Explorer plugin
-
-# File-based project format
-
-# IntelliJ
-
-# mpeltonen/sbt-idea plugin
-
-# JIRA plugin
-
-# Cursive Clojure plugin
-
-# SonarLint plugin
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-
-# Editor-based Rest Client
-
-# Android studio 3.1+ serialized cache file
-
-### Intellij+iml Patch ###
-# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
-
-*.iml
-modules.xml
-.idea/misc.xml
-*.ipr
-
-### NetBeans ###
-**/nbproject/private/
-**/nbproject/Makefile-*.mk
-**/nbproject/Package-*.bash
-build/
-nbbuild/
-dist/
-nbdist/
-.nb-gradle/
-
-### Node ###
-# Logs
-logs
-*.log
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-lerna-debug.log*
-.pnpm-debug.log*
-
-# Diagnostic reports (https://nodejs.org/api/report.html)
-report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
-
-# Runtime data
-pids
-*.pid
-*.seed
-*.pid.lock
-
-# Directory for instrumented libs generated by jscoverage/JSCover
-lib-cov
-
-# Coverage directory used by tools like istanbul
-coverage
-*.lcov
-
-# nyc test coverage
-.nyc_output
-
-# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
-.grunt
-
-# Bower dependency directory (https://bower.io/)
-bower_components
-
-# node-waf configuration
-.lock-wscript
-
-# Compiled binary addons (https://nodejs.org/api/addons.html)
-build/Release
-
-# Dependency directories
-node_modules/
-jspm_packages/
-
-# Snowpack dependency directory (https://snowpack.dev/)
-web_modules/
-
-# TypeScript cache
-*.tsbuildinfo
-
-# Optional npm cache directory
-.npm
-
-# Optional eslint cache
-.eslintcache
-
-# Optional stylelint cache
-.stylelintcache
-
-# Microbundle cache
-.rpt2_cache/
-.rts2_cache_cjs/
-.rts2_cache_es/
-.rts2_cache_umd/
-
-# Optional REPL history
-.node_repl_history
-
-# Output of 'npm pack'
-*.tgz
-
-# Yarn Integrity file
-.yarn-integrity
-
-# dotenv environment variable files
-.env
-.env.development.local
-.env.test.local
-.env.production.local
-.env.local
-
-# parcel-bundler cache (https://parceljs.org/)
-.cache
-.parcel-cache
-
-# Next.js build output
-.next
-out
-
-# Nuxt.js build / generate output
-.nuxt
-dist
-
-# Gatsby files
-.cache/
-# Comment in the public line in if your project uses Gatsby and not Next.js
-# https://nextjs.org/blog/next-9-1#public-directory-support
-# public
-
-# vuepress build output
-.vuepress/dist
-
-# vuepress v2.x temp and cache directory
-.temp
-
-# Docusaurus cache and generated files
-.docusaurus
-
-# Serverless directories
-.serverless/
-
-# FuseBox cache
-.fusebox/
-
-# DynamoDB Local files
-.dynamodb/
-
-# TernJS port file
-.tern-port
-
-# Stores VSCode versions used for testing VSCode extensions
-.vscode-test
-
-# yarn v2
-.yarn/cache
-.yarn/unplugged
-.yarn/build-state.yml
-.yarn/install-state.gz
-.pnp.*
-
-### Node Patch ###
-# Serverless Webpack directories
-.webpack/
-
-# Optional stylelint cache
-
-# SvelteKit build / generate output
-.svelte-kit
-
-### PhpStorm ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-
-# AWS User-specific
-
-# Generated files
-
-# Sensitive or high-churn files
-
-# Gradle
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn.  Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-
-# Mongo Explorer plugin
-
-# File-based project format
-
-# IntelliJ
-
-# mpeltonen/sbt-idea plugin
-
-# JIRA plugin
-
-# Cursive Clojure plugin
-
-# SonarLint plugin
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-
-# Editor-based Rest Client
-
-# Android studio 3.1+ serialized cache file
-
-### PhpStorm Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-# https://plugins.jetbrains.com/plugin/7973-sonarlint
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator-enh.xml
-.idea/**/markdown-navigator/
-
-# Cache file creation bug
-# See https://youtrack.jetbrains.com/issue/JBR-2257
-.idea/$CACHE_FILE$
-
-# CodeStream plugin
-# https://plugins.jetbrains.com/plugin/12206-codestream
-.idea/codestream.xml
-
-# Azure Toolkit for IntelliJ plugin
-# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
-.idea/**/azureSettings.xml
-
-### PHPUnit ###
-# Covers PHPUnit
-# Reference: https://phpunit.de/
-
-# Generated files
-.phpunit.result.cache
-.phpunit.cache
-
-# PHPUnit
-/app/phpunit.xml
-/phpunit.xml
-
-# Build data
-/build/
-
-### Vim ###
-# Swap
-[._]*.s[a-v][a-z]
-!*.svg  # comment out if you don't need vector files
-[._]*.sw[a-p]
-[._]s[a-rt-v][a-z]
-[._]ss[a-gi-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-Sessionx.vim
-
-# Temporary
-.netrwhist
-*~
-# Auto-generated tag files
-tags
-# Persistent undo
-[._]*.un~
-
-### VisualStudioCode ###
-.vscode/*
-!.vscode/settings.json
-!.vscode/tasks.json
-!.vscode/launch.json
-!.vscode/extensions.json
-!.vscode/*.code-snippets
-
-# Local History for Visual Studio Code
-.history/
-
-# Built Visual Studio Code Extensions
-*.vsix
-
-### VisualStudioCode Patch ###
-# Ignore all local history of files
-.history
-.ionide
-
-# Support for Project snippet scope
-.vscode/*.code-snippets
-
-# Ignore code-workspaces
-*.code-workspace
-
-### yarn ###
-# https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored
-
-.yarn/*
-!.yarn/releases
-!.yarn/patches
-!.yarn/plugins
-!.yarn/sdks
-!.yarn/versions
-
-# if you are NOT using Zero-installs, then:
-# comment the following lines
-!.yarn/cache
-
-# and uncomment the following lines
-# .pnp.*
-
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/CHANGELOG.md b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/CHANGELOG.md
deleted file mode 100644
index d4099540d7a5bbcf07ed509fcaf1ee0eb6b9e34e..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/CHANGELOG.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-<a name="v0.2.0"></a>
-## v0.2.0 - 2022-12-18
-### Add Features
-- implementation of basic features
-- first implementation
-
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/LICENSE b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/LICENSE
deleted file mode 100644
index 89c200ee3121a668aa2f1ed591d36da44a542000..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-LICENSE
-
-* All content residing under the "documentation/" directory of this repository is licensed under "Creative Commons: CC BY-SA 4.0 license".
-* All content that resides under the "application/" directory of this repository, if that directory exists, is licensed under the license defined in "application/LICENSE".
-* All third-party components that are integrated into our software are licensed under the original license provided by the owner of the respective component.
-* Content outside the above directories or restrictions is available under the "AGPL 3.0" license as defined here: https://www.gnu.org/licenses/agpl-3.0.en.html
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/Makefile b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/Makefile
deleted file mode 100644
index c1997338fb9cc711a87cf2f9aaa99142f5507b3c..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/Makefile
+++ /dev/null
@@ -1,102 +0,0 @@
-#############################################################################################
-#############################################################################################
-##
-## PROJECT-DEFINITIONS
-##
-#############################################################################################
-#############################################################################################
-
-COMPONENT_NAME        := data
-
-#############################################################################################
-#############################################################################################
-##
-## MORE GENERAL BLOCK WITH STANDARD DEFINITIONS
-##
-#############################################################################################
-#############################################################################################
-
-# get Makefile directory name: http://stackoverflow.com/a/5982798/376773
-THIS_MAKEFILE_PATH:=$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))
-PROJECT_ROOT:=$(shell cd $(dir $(THIS_MAKEFILE_PATH));pwd)/
-THIS_MAKEFILE:=$(PROJECT_ROOT)$(THIS_MAKEFILE_PATH)
-
-# include project.mk only if it exists
--include $(PROJECT_ROOT)project.mk
-
-
-# Define the location of Makefiles
-MAKEFILE_IMPORT_PATH?=$(PROJECT_ROOT)makefiles/
-
-# include project.mk only if it exists
--include $(MAKEFILE_IMPORT_PATH)project.mk
-
-#############################################################################################
-#############################################################################################
-##
-## INCLUSION OF VARIOUS STANDARD RULES
-##
-#############################################################################################
-#############################################################################################
-
-#include $(MAKEFILE_IMPORT_PATH)directories-standard.mk
-include $(MAKEFILE_IMPORT_PATH)directories-go-lib.mk
-#include $(MAKEFILE_IMPORT_PATH)directories-go-utilities.mk
-#include $(MAKEFILE_IMPORT_PATH)directories-platform-part.mk
-#include $(MAKEFILE_IMPORT_PATH)jsdoc.mk
-include $(MAKEFILE_IMPORT_PATH)output.mk
-include $(MAKEFILE_IMPORT_PATH)placeholder.mk
-#include $(MAKEFILE_IMPORT_PATH)conan.mk
-#include $(MAKEFILE_IMPORT_PATH)s3.mk
-#include $(MAKEFILE_IMPORT_PATH)readme-standard.mk
-#include $(MAKEFILE_IMPORT_PATH)readme-webcomponents.mk
-#include $(MAKEFILE_IMPORT_PATH)readme-go-utilities.mk
-include $(MAKEFILE_IMPORT_PATH)readme-go-lib.mk
-#include $(MAKEFILE_IMPORT_PATH)readme-platform-part.mk
-include $(MAKEFILE_IMPORT_PATH)licenses.mk
-include $(MAKEFILE_IMPORT_PATH)license-agpl3.mk
-#include $(MAKEFILE_IMPORT_PATH)license-unlicensed.mk
-#include $(MAKEFILE_IMPORT_PATH)license-all-rights-reserved.mk
-#include $(MAKEFILE_IMPORT_PATH)jsdoc-json.mk
-include $(MAKEFILE_IMPORT_PATH)go.mk
-include $(MAKEFILE_IMPORT_PATH)changelog.mk
-#include $(MAKEFILE_IMPORT_PATH)docman.mk
-#include $(MAKEFILE_IMPORT_PATH)reqman.mk
-include $(MAKEFILE_IMPORT_PATH)git.mk
-include $(MAKEFILE_IMPORT_PATH)gitignore.mk
-include $(MAKEFILE_IMPORT_PATH)color.mk
-include $(MAKEFILE_IMPORT_PATH)version.mk
-#include $(MAKEFILE_IMPORT_PATH)docker.mk
-#include $(MAKEFILE_IMPORT_PATH)node.mk
-include $(MAKEFILE_IMPORT_PATH)terminal.mk
-include $(MAKEFILE_IMPORT_PATH)target-go-fetch-licenses.mk
-include $(MAKEFILE_IMPORT_PATH)target-add-licenses.mk
-include $(MAKEFILE_IMPORT_PATH)target-deploy-tool.mk
-#include $(MAKEFILE_IMPORT_PATH)target-jsdoc-build.mk
-#include $(MAKEFILE_IMPORT_PATH)target-docman.mk
-#include $(MAKEFILE_IMPORT_PATH)target-caddy.mk
-#include $(MAKEFILE_IMPORT_PATH)target-conan.mk
-#include $(MAKEFILE_IMPORT_PATH)target-phpunit.mk
-#include $(MAKEFILE_IMPORT_PATH)target-jekyll.mk
-include $(MAKEFILE_IMPORT_PATH)target-update-makefiles.mk
-include $(MAKEFILE_IMPORT_PATH)target-help.mk
-include $(MAKEFILE_IMPORT_PATH)target-go-build.mk
-#include $(MAKEFILE_IMPORT_PATH)target-node-build.mk
-#include $(MAKEFILE_IMPORT_PATH)target-node-test.mk
-#include $(MAKEFILE_IMPORT_PATH)target-npm-publish.mk
-#include $(MAKEFILE_IMPORT_PATH)target-npm.mk
-include $(MAKEFILE_IMPORT_PATH)target-git.mk
-#include $(MAKEFILE_IMPORT_PATH)target-init-standard.mk
-#include $(MAKEFILE_IMPORT_PATH)target-init-webcomponent.mk
-#include $(MAKEFILE_IMPORT_PATH)target-init-go-utilities.mk
-#include $(MAKEFILE_IMPORT_PATH)target-init-go-lib.mk
-#include $(MAKEFILE_IMPORT_PATH)target-init-platform-part.mk
-include $(MAKEFILE_IMPORT_PATH)target-version.mk
-include $(MAKEFILE_IMPORT_PATH)target-variable.mk	
-include $(MAKEFILE_IMPORT_PATH)terminal-check.mk
-
-
-#############################################################################################
-
-# include target-project.mk only if it exists
--include $(MAKEFILE_IMPORT_PATH)target-project.mk
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/README.md b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/README.md
deleted file mode 100644
index 9c7b70d22cd91e277da002507e489cb5035c6dff..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Data
-
-Data is ...
-
-## Documentation
-
-To check out docs and examples, visit ....
-
-## Installation
-
-```shell
-go get gitlab.schukai.com/oss/libraries/go/utilities/data.git 
-```
-
-**Note:** This library uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
-
-## Usage
-
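-As a rough sketch of how the library might be used (this example is not part of the original README; it assumes the `NewTransformer` and `Transform` functions from `transformer.go`, the pipe syntax from `parser.go`, and the import path from the `go get` line above):
-
-```go
-package main
-
-import (
-	"fmt"
-
-	data "gitlab.schukai.com/oss/libraries/go/utilities/data.git"
-)
-
-func main() {
-	// dataset holds the values the pipe expressions operate on (hypothetical sample data)
-	dataset := map[any]any{
-		"name": "gopher",
-	}
-
-	t := data.NewTransformer(&dataset)
-
-	// "index:name | toupper" looks up the "name" key and upper-cases the result
-	result, err := t.Transform("index:name | toupper")
-	if err != nil {
-		panic(err)
-	}
-
-	fmt.Println(result) // GOPHER
-}
-```
-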
-## Contributing
-
-Merge requests are welcome. For major changes, please open an issue first to discuss what
-you would like to change. **Please make sure to update tests as appropriate.**
-
-Versioning is done with [SemVer](https://semver.org/).
-The changelog is generated with [git-chglog](https://github.com/git-chglog/git-chglog#git-chglog).
-
-Commit messages should follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
-Messages are started with a type, which is one of the following:
-
-- **feat**: A new feature
-- **fix**: A bug fix
-- **doc**: Documentation only changes
-- **refactor**: A code change that neither fixes a bug nor adds a feature
-- **perf**: A code change that improves performance
-- **test**: Adding missing or correcting existing tests
-- **chore**: Other changes that don't modify src or test files
-
-The footer is used to reference an issue or to mark a breaking change.
-
-A commit that has a `BREAKING CHANGE:` footer, or appends a `!` after the type/scope,
-introduces a breaking API change (correlating with MAJOR in semantic versioning).
-A BREAKING CHANGE can be part of commits of any type.
-
-The following is an example of a commit message:
-
-```text
-feat: add 'extras' field
-```
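-
-A breaking change could be flagged like this (illustrative example only, not taken from the project history):
-
-```text
-feat!: drop support for the old pipe delimiter
-
-BREAKING CHANGE: pipe commands are now separated by "|" only
-```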
-
-## Questions
-
-For questions and commercial support, please contact [schukai GmbH](https://www.schukai.com/).
-The issue list of this repo is exclusively for bug reports and feature requests.
-
-## Issues
-
-Please make sure to read the Issue Reporting Checklist before opening an
-issue. Issues not conforming to the guidelines may be closed immediately.
-
-## License
-
-© schukai GmbH, released under the AGPL 3.0 License.
-
-[AGPL](https://www.gnu.org/licenses/agpl-3.0.de.html)
-
-You can also purchase a commercial license.
-
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/mem.prof b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/mem.prof
deleted file mode 100644
index 38e295555833f2d752cbaccc33aaa5ca269c9365..0000000000000000000000000000000000000000
Binary files a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/mem.prof and /dev/null differ
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/parser.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/parser.go
deleted file mode 100644
index d34cd64bcffc6b202e4f6d9d8852373cd5c4f64c..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/parser.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package data
-
-import "github.com/volker-schukai/tokenizer"
-
-const (
-	PipeSymbol = iota + 1 // |
-	PipeCmdDelimiter
-	PipeQuote
-	PipeReflectionIsNil
-
-	PipeCmdStatic
-	PipeCmdIndex
-	PipeCmdPath
-	PipeCmdToUpper
-	PipeCmdToLower
-	PipeCmdEmpty
-	PipeCmdEquals
-	PipeCmdTrim
-	PipeCmdRawUrlEncode
-	PipeCmdNot
-	PipeCmdToInteger
-	PipeCmdToString
-	PipeCmdToFloat
-	PipeCmdToBoolean
-	PipeCmdToNumber
-	PipeCmdToJSON
-	PipeCmdFromJSON
-	PipeCmdUCFirst
-	PipeCmdUCWords
-	PipeCmdLength
-	PipeCmdBase64Encode
-	PipeCmdBase64Decode
-	PipeCmdHTMLSpecialChars
-	PipeCmdHTMLEntityEncode
-	PipeCmdHTMLEntityDecode
-	PipeCmdPlaintext
-	PipeCmdMD5
-	PipeCmdSHA1
-	PipeCmdSHA256
-	PipeCmdSplit
-	PipeCmdReplace
-	PipeCmdJoin
-	PipeCmdNop
-	PipeCmdStringPad
-	PipeCmdStringRepeat
-	PipeCmdReverse
-	PipeCmdSubstring
-	PipeCmdWordwrap
-	PipeCmdPrefix
-	PipeCmdSuffix
-	PipeCmdUrlEncode
-	PipeCmdUrlDecode
-	PipeCmdChar
-	PipeCmdFloor
-	PipeCmdCeil
-	PipeCmdRound
-	PipeCmdAdd
-	PipeCmdSubtract
-	PipeCmdMultiply
-	PipeCmdDivide
-	PipeCmdModulo
-)
-
-func initTokenizer() *tokenizer.Tokenizer {
-	parser := tokenizer.New()
-	parser.AllowKeywordUnderscore().AllowNumbersInKeyword()
-	parser.DefineTokens(PipeSymbol, []string{"|"})
-	parser.DefineTokens(PipeCmdDelimiter, []string{":"})
-
-	parser.DefineTokens(PipeCmdStatic, []string{"static"})
-	parser.DefineTokens(PipeCmdIndex, []string{"index", "dataset"})
-	parser.DefineTokens(PipeCmdPath, []string{"path"})
-
-	parser.DefineTokens(PipeReflectionIsNil, []string{"isnull", "isnil"})
-	parser.DefineTokens(PipeCmdNop, []string{"nop"})
-
-	parser.DefineTokens(PipeCmdEquals, []string{"eq", "equals", "equal", "?"}) // tested
-	parser.DefineTokens(PipeCmdEmpty, []string{"empty"})                       // tested
-	parser.DefineTokens(PipeCmdNot, []string{"not", "!"})                      // tested
-
-	parser.DefineTokens(PipeCmdToInteger, []string{"tointeger", "int", "toint"}) // tested
-	parser.DefineTokens(PipeCmdToString, []string{"tostring", "str"})
-	parser.DefineTokens(PipeCmdToFloat, []string{"tofloat", "float"})
-	parser.DefineTokens(PipeCmdToBoolean, []string{"toboolean", "tobool", "bool"})
-	parser.DefineTokens(PipeCmdToNumber, []string{"tonumber", "number"})
-
-	parser.DefineTokens(PipeCmdToJSON, []string{"tojson", "json", "encodejson", "jsonencode"})
-	parser.DefineTokens(PipeCmdFromJSON, []string{"fromjson", "decodejson", "jsondecode"})
-
-	parser.DefineTokens(PipeCmdToUpper, []string{"toupper", "upper", "strtoupper"}) // tested
-	parser.DefineTokens(PipeCmdToLower, []string{"tolower", "lower", "strtolower"}) // tested
-
-	parser.DefineTokens(PipeCmdChar, []string{"char", "chr"})
-	parser.DefineTokens(PipeCmdFloor, []string{"floor", "rounddown"})
-	parser.DefineTokens(PipeCmdCeil, []string{"ceil", "roundup"})
-	parser.DefineTokens(PipeCmdRound, []string{"round"})
-	parser.DefineTokens(PipeCmdAdd, []string{"add", "+"})
-	parser.DefineTokens(PipeCmdSubtract, []string{"subtract", "sub", "-"})
-	parser.DefineTokens(PipeCmdMultiply, []string{"multiply", "mul", "*"})
-	parser.DefineTokens(PipeCmdDivide, []string{"divide", "div", "/"})
-	parser.DefineTokens(PipeCmdModulo, []string{"modulo", "mod", "%"})
-
-	parser.DefineTokens(PipeCmdUrlEncode, []string{"urlencoded", "urlencode", "url_encode"})
-	parser.DefineTokens(PipeCmdUrlDecode, []string{"urldecoded", "urldecode", "url_decode"})
-	parser.DefineTokens(PipeCmdRawUrlEncode, []string{"rawurlencode", "rawurlencoded"})
-	parser.DefineTokens(PipeCmdUCFirst, []string{"ucfirst"})
-	parser.DefineTokens(PipeCmdUCWords, []string{"ucwords"})
-	parser.DefineTokens(PipeCmdLength, []string{"length", "len"})
-	parser.DefineTokens(PipeCmdBase64Encode, []string{"base64encode", "base64_encode", "base64"})
-	parser.DefineTokens(PipeCmdBase64Decode, []string{"base64decode", "base64_decode"})
-	parser.DefineTokens(PipeCmdHTMLSpecialChars, []string{"htmlspecialchars", "html_special_chars"})
-	parser.DefineTokens(PipeCmdHTMLEntityEncode, []string{"htmlentities", "html_entity_encode"})
-	parser.DefineTokens(PipeCmdHTMLEntityDecode, []string{"html_entity_decode"})
-	parser.DefineTokens(PipeCmdPlaintext, []string{"plaintext", "text", "plain", "strip_tags"})
-	parser.DefineTokens(PipeCmdTrim, []string{"trim"})
-	parser.DefineTokens(PipeCmdMD5, []string{"md5"})
-	parser.DefineTokens(PipeCmdSHA1, []string{"sha1"})
-	parser.DefineTokens(PipeCmdSHA256, []string{"sha256"})
-	parser.DefineTokens(PipeCmdStringPad, []string{"strpad", "stringpad"})
-	parser.DefineTokens(PipeCmdStringRepeat, []string{"strrepeat", "stringrepeat"})
-	parser.DefineTokens(PipeCmdReverse, []string{"reverse", "strrev"})
-	parser.DefineTokens(PipeCmdSubstring, []string{"substr", "substring"})
-	parser.DefineTokens(PipeCmdWordwrap, []string{"wordwrap"})
-	parser.DefineTokens(PipeCmdPrefix, []string{"prefix"})
-	parser.DefineTokens(PipeCmdSuffix, []string{"suffix"})
-	parser.DefineTokens(PipeCmdSplit, []string{"split"})
-	parser.DefineTokens(PipeCmdReplace, []string{"replace"})
-
-	parser.DefineTokens(PipeCmdJoin, []string{"join"})
-
-	parser.DefineStringToken(PipeQuote, `"`, `"`).SetEscapeSymbol(tokenizer.BackSlash)
-	return parser
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/project.mk b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/project.mk
deleted file mode 100644
index 4bbcd7e61d606c7e57e6fa846a075bcacc2bc935..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/project.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-
-## Project directory in which the Makefiles should be located
-MAKEFILE_IMPORT_PATH=$(PROJECT_ROOT)makefiles/
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/release.json b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/release.json
deleted file mode 100644
index 5fb95979d2ab2001560e5602fe206728e0ccdf87..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/release.json
+++ /dev/null
@@ -1 +0,0 @@
-{"version":"0.2.0"}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/transformer.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/transformer.go
deleted file mode 100644
index 1d430cab8a5f71699b99d53d71f1bd7e05cdd8c2..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/transformer.go
+++ /dev/null
@@ -1,907 +0,0 @@
-package data
-
-import (
-	"crypto/md5"
-	"crypto/sha1"
-	"crypto/sha256"
-	"encoding/base64"
-	"encoding/hex"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/volker-schukai/tokenizer"
-	"gitlab.schukai.com/oss/libraries/go/utilities/pathfinder"
-	"html"
-	"math"
-	"net/url"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-type Transformer struct {
-	dataset *map[any]any
-	errors  []error
-	parser  *tokenizer.Tokenizer
-}
-
-//
-//type Number interface {
-//	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
-//}
-
-func NewTransformer(dataset *map[any]any) *Transformer {
-	parser := initTokenizer()
-
-	return &Transformer{
-		dataset: dataset,
-		parser:  parser,
-	}
-}
-
-// DefineStringToken defines a token string.
-// For example, a piece of data surrounded by quotes: "string in quotes" or 'string in single quotes'.
-// The arguments startToken and endToken define the opening and closing "quotes".
-//  - t.DefineStringToken("`", "`") - the string "one `two three`" will be parsed as
-// 			[{key: TokenKeyword, value: "one"}, {key: TokenString, value: "`two three`"}]
-//  - t.DefineStringToken("//", "\n") - the string "parse // like comment\n" will be parsed as
-//			[{key: TokenKeyword, value: "parse"}, {key: TokenString, value: "// like comment"}]
-//func (t *Tokenizer) DefineStringToken(key TokenKey, startToken, endToken string) *StringSettings {
-
-type TokenList []*tokenizer.Token
-
-func (t *Transformer) Transform(pipe string) (interface{}, error) {
-	return t.tokenize(pipe)
-}
-
-func (t *Transformer) Dataset() *map[any]any {
-	return t.dataset
-}
-
-func (t *Transformer) tokenize(pipe string) (any, error) {
-
-	// create tokens stream
-	stream := t.parser.ParseString(pipe)
-	defer stream.Close()
-
-	tokenMap := make([]TokenList, 0)
-	currentList := make(TokenList, 0)
-
-	// iterate over each token
-	for stream.IsValid() {
-		token := stream.CurrentToken()
-		if token == nil {
-			break
-		}
-		stream.GoNext()
-
-		if token.Is(PipeSymbol) {
-			tokenMap = append(tokenMap, currentList)
-			currentList = make(TokenList, 0)
-			continue
-		}
-
-		if token.Is(PipeCmdDelimiter) {
-			continue
-		}
-
-		currentList = append(currentList, token)
-	}
-
-	if len(currentList) > 0 {
-		tokenMap = append(tokenMap, currentList)
-	}
-
-	var currentValue any
-	//datasetAvailable := true
-
-	currentValue = t.dataset
-	//if reflect.ValueOf(currentValue).IsNil() {
-	//	datasetAvailable = false
-	//}
-
-	var err error
-	var ok bool
-
-	for _, tokens := range tokenMap {
-		if len(tokens) == 0 {
-			continue
-		}
-
-		if tokens[0].Is(PipeReflectionIsNil) {
-			currentValue = currentValue == nil
-			continue
-		}
-
-		if tokens[0].Is(PipeCmdStatic) {
-			if currentValue, err = handleStaticCommand(tokens); err != nil {
-				return nil, err
-			}
-			continue
-		}
-
-		if tokens[0].Is(PipeCmdPath) {
-
-			if len(tokens) > 1 {
-
-				parts := []string{}
-				for _, token := range tokens[1:] {
-					parts = append(parts, token.ValueUnescapedString())
-				}
-
-				path := strings.Join(parts, "")
-				currentValue, err = pathfinder.GetValue[any](currentValue, path)
-				if err != nil {
-					return nil, err
-				}
-			} else {
-				return nil, errors.New("invalid path command")
-			}
-
-			continue
-
-		} else if tokens[0].Is(PipeCmdIndex) {
-			var index string
-			if index, err = handleIndexCommand(tokens); err != nil {
-				return nil, err
-			}
-
-			switch currentValue.(type) {
-
-			case *map[any]any:
-
-				if reflect.ValueOf(currentValue).IsNil() {
-					return nil, errors.New("index command on nil map")
-				}
-
-				currentValue, ok = (*currentValue.(*map[any]any))[index]
-				if !ok {
-					return nil, errors.New("index " + index + " not found")
-				}
-
-			case map[any]any:
-				currentValue, ok = currentValue.(map[any]any)[index]
-				if !ok {
-					t.errors = append(t.errors, errors.New("index not found: "+index))
-				}
-			case *map[string]any:
-
-				if reflect.ValueOf(currentValue).IsNil() {
-					return nil, errors.New("index command on nil map")
-				}
-
-				currentValue, ok = (*currentValue.(*map[string]any))[index]
-				if !ok {
-					t.errors = append(t.errors, errors.New("index not found: "+index))
-				}
-
-			case map[string]any:
-				currentValue, ok = currentValue.(map[string]any)[index]
-				if !ok {
-					t.errors = append(t.errors, errors.New("index not found: "+index))
-				}
-
-			case *[]string:
-				indexInt, err := strconv.Atoi(index)
-				if err != nil {
-					return nil, errors.New("index must be an integer")
-				}
-				currentValue = (*currentValue.(*[]string))[indexInt]
-
-			case []string:
-				indexInt, err := strconv.Atoi(index)
-				if err != nil {
-					return nil, err
-				}
-				currentValue = currentValue.([]string)[indexInt]
-			case *[]any:
-				indexInt, err := strconv.Atoi(index)
-				if err != nil {
-					return nil, err
-				}
-				currentValue = (*currentValue.(*[]any))[indexInt]
-
-			case []any:
-				indexInt, err := strconv.Atoi(index)
-				if err != nil {
-					return nil, err
-				}
-				currentValue = currentValue.([]any)[indexInt]
-				if currentValue == nil {
-					t.errors = append(t.errors, errors.New("index not found: "+index))
-				}
-
-			default:
-				// fall back to pathfinder for any other container type
-				v, err := pathfinder.GetValue[any](currentValue, index)
-				if err != nil {
-					return nil, err
-				}
-				currentValue = v
-			}
-
-			continue
-		}
-
-		switch currentValue.(type) {
-		case string:
-			if currentValue, err = handleStrings(tokens, currentValue.(string)); err != nil {
-				return nil, err
-			}
-			continue
-
-		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
-			number := reflect.ValueOf(currentValue)
-			if currentValue, err = handleInteger(tokens, number.Int()); err != nil {
-				return nil, err
-			}
-			continue
-
-		case float32, float64:
-			number := reflect.ValueOf(currentValue)
-			if currentValue, err = handleFloat(tokens, number.Float()); err != nil {
-				return nil, err
-			}
-
-			continue
-
-		case bool:
-			if currentValue, err = handleBoolean(tokens, currentValue.(bool)); err != nil {
-				return nil, err
-			}
-
-			continue
-
-		case *map[any]any:
-			v, ok := currentValue.(*map[any]any)
-			if !ok {
-				return nil, errors.New("invalid map")
-			}
-			if currentValue, err = handleMap(tokens, v); err != nil {
-				return nil, err
-			}
-
-		case map[any]any, map[string]any:
-			v, ok := currentValue.(map[any]any)
-			if !ok {
-				m := make(map[any]any)
-				for k, vv := range currentValue.(map[string]any) {
-					m[k] = vv
-				}
-				v = m
-			}
-
-			if currentValue, err = handleMap(tokens, &v); err != nil {
-				return nil, err
-			}
-			continue
-
-		case []any:
-			if currentValue, err = handleArray(tokens, currentValue.([]any)); err != nil {
-				return nil, err
-			}
-			continue
-
-		case nil:
-			if currentValue, err = handleNil(tokens); err != nil {
-				return nil, err
-			}
-
-			continue
-
-		default:
-			return nil, errors.New("the type " + reflect.TypeOf(currentValue).String() + " is not supported")
-		}
-
-		return nil, errors.New("unknown command " + tokens[0].ValueUnescapedString())
-
-	}
-
-	return currentValue, nil
-}
-
-func handleIndexCommand(tokens TokenList) (string, error) {
-
-	if len(tokens) == 2 {
-		return tokens[1].ValueUnescapedString(), nil
-	}
-
-	return "", errors.New("invalid index command")
-
-}
-
-func handleArray(tokens TokenList, array []any) (any, error) {
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type array")
-
-}
-func handleFloat(tokens TokenList, currentValue float64) (any, error) {
-
-	if tokens[0].Is(PipeCmdRound) {
-		factor := 1.0
-
-		if len(tokens) < 2 {
-			factor = 1.0
-		}
-
-		if len(tokens) == 2 {
-			s, err := strconv.ParseFloat(tokens[1].ValueUnescapedString(), 64)
-			if err != nil {
-				return nil, err
-			}
-
-			factor = math.Pow10(int(s))
-		}
-
-		return math.Round(currentValue*factor) / factor, nil
-
-	} else if tokens[0].Is(PipeCmdEquals) {
-		if len(tokens) < 2 {
-			return nil, errors.New("missing value for equals command")
-		}
-
-		if currentValue == tokens[1].ValueFloat() {
-			if len(tokens) > 2 {
-				return tokens[2].ValueUnescapedString(), nil
-			}
-			return true, nil
-		}
-
-		if len(tokens) > 3 {
-			return tokens[3].ValueUnescapedString(), nil
-		}
-		return false, nil
-
-	} else if tokens[0].Is(PipeCmdToInteger) {
-		return int(currentValue), nil
-	} else if tokens[0].Is(PipeCmdToString) {
-		return strconv.FormatFloat(currentValue, 'f', -1, 64), nil
-	} else if tokens[0].Is(PipeCmdToFloat) {
-		return currentValue, nil
-	} else if tokens[0].Is(PipeCmdToBoolean) {
-		return currentValue != 0, nil
-	} else if tokens[0].Is(PipeCmdToNumber) {
-		return currentValue, nil
-
-	} else if tokens[0].Is(PipeCmdFloor) {
-
-		return math.Floor(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdCeil) {
-
-		return math.Ceil(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdToJSON) {
-		jsonBytes, err := json.Marshal(currentValue)
-		if err != nil {
-			return nil, err
-		}
-		return string(jsonBytes), nil
-	} else if tokens[0].Is(PipeCmdAdd) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for add command")
-		}
-
-		return currentValue + tokens[1].ValueFloat(), nil
-	} else if tokens[0].Is(PipeCmdSubtract) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for subtract command")
-		}
-
-		return currentValue - tokens[1].ValueFloat(), nil
-	} else if tokens[0].Is(PipeCmdMultiply) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for multiply command")
-		}
-
-		return currentValue * tokens[1].ValueFloat(), nil
-	} else if tokens[0].Is(PipeCmdDivide) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for divide command")
-		}
-
-		if tokens[1].ValueInt() == 0 {
-			return nil, errors.New("divide by zero")
-		}
-
-		return currentValue / tokens[1].ValueFloat(), nil
-
-	}
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type float (" + strconv.FormatFloat(currentValue, 'f', -1, 64) + ")")
-
-}
-func handleMap(tokens TokenList, currentValue *map[any]any) (any, error) {
-
-	if tokens[0].Is(PipeCmdToJSON) {
-
-		// convert to string map
-		stringMap := make(map[string]any)
-		for k, v := range *currentValue {
-			key, ok := k.(string)
-			if !ok {
-				return nil, errors.New("invalid key type for json conversion")
-			}
-			stringMap[key] = v
-		}
-
-		jsonBytes, err := json.Marshal(stringMap)
-		if err != nil {
-			return nil, err
-		}
-		return string(jsonBytes), nil
-	}
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type map")
-
-}
-func handleBoolean(tokens TokenList, currentValue bool) (any, error) {
-
-	if tokens[0].Is(PipeCmdEquals) {
-		if len(tokens) < 2 {
-			return nil, errors.New("missing value for equals command")
-		}
-
-		if currentValue == (tokens[1].ValueUnescapedString() == "true") {
-			if len(tokens) > 2 {
-				return tokens[2].ValueUnescapedString(), nil
-			}
-			return true, nil
-		}
-
-		if len(tokens) > 3 {
-			return tokens[3].ValueUnescapedString(), nil
-		}
-		return false, nil
-
-	} else if tokens[0].Is(PipeCmdNot) {
-		return !currentValue, nil
-
-	} else if tokens[0].Is(PipeCmdToInteger) {
-		if currentValue {
-			return 1, nil
-		}
-		return 0, nil
-	} else if tokens[0].Is(PipeCmdToString) {
-		if currentValue {
-			return "true", nil
-		}
-		return "false", nil
-	} else if tokens[0].Is(PipeCmdToFloat) {
-		if currentValue {
-			return 1.0, nil
-		}
-		return 0.0, nil
-	} else if tokens[0].Is(PipeCmdToBoolean) {
-		return currentValue, nil
-	} else if tokens[0].Is(PipeCmdToNumber) {
-		if currentValue {
-			return 1, nil
-		}
-		return 0, nil
-
-	} else if tokens[0].Is(PipeCmdToJSON) {
-		jsonBytes, err := json.Marshal(currentValue)
-		if err != nil {
-			return nil, err
-		}
-		return string(jsonBytes), nil
-
-	}
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type boolean (" + strconv.FormatBool(currentValue) + ")")
-
-}
-func handleNil(tokens TokenList) (any, error) {
-
-	if tokens[0].Is(PipeCmdEquals) {
-
-		// nil is always false, because it is not a value
-		if len(tokens) < 2 {
-			return nil, errors.New("missing value for equals command")
-		}
-
-		if len(tokens) > 3 {
-			return tokens[3].ValueUnescapedString(), nil
-		}
-		return false, nil
-
-	} else if tokens[0].Is(PipeCmdToJSON) {
-		return "null", nil
-	}
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type nil")
-
-}
-
-func handleInteger(tokens TokenList, currentValue int64) (any, error) {
-
-	if tokens[0].Is(PipeCmdChar) {
-		return fmt.Sprintf("%c", currentValue), nil
-	} else if tokens[0].Is(PipeCmdEquals) {
-		if len(tokens) < 2 {
-			return nil, errors.New("missing value for equals command")
-		}
-
-		if currentValue == tokens[1].ValueInt() {
-			if len(tokens) > 2 {
-				return tokens[2].ValueUnescapedString(), nil
-			}
-			return true, nil
-		}
-
-		if len(tokens) > 3 {
-			return tokens[3].ValueUnescapedString(), nil
-		}
-		return false, nil
-
-	} else if tokens[0].Is(PipeCmdToInteger) {
-		return currentValue, nil
-	} else if tokens[0].Is(PipeCmdToString) {
-		return strconv.FormatInt(currentValue, 10), nil
-	} else if tokens[0].Is(PipeCmdToFloat) {
-		return float64(currentValue), nil
-	} else if tokens[0].Is(PipeCmdToBoolean) {
-		return currentValue != 0, nil
-	} else if tokens[0].Is(PipeCmdToNumber) {
-		return float64(currentValue), nil
-	} else if tokens[0].Is(PipeCmdToJSON) {
-		jsonBytes, err := json.Marshal(currentValue)
-		if err != nil {
-			return nil, err
-		}
-		return string(jsonBytes), nil
-	} else if tokens[0].Is(PipeCmdAdd) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for add command")
-		}
-
-		return currentValue + tokens[1].ValueInt(), nil
-	} else if tokens[0].Is(PipeCmdSubtract) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for subtract command")
-		}
-
-		return currentValue - tokens[1].ValueInt(), nil
-	} else if tokens[0].Is(PipeCmdMultiply) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for multiply command")
-		}
-
-		return currentValue * tokens[1].ValueInt(), nil
-	} else if tokens[0].Is(PipeCmdDivide) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for divide command")
-		}
-
-		if tokens[1].ValueInt() == 0 {
-			return nil, errors.New("divide by zero")
-		}
-
-		return currentValue / tokens[1].ValueInt(), nil
-
-	} else if tokens[0].Is(PipeCmdModulo) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for modulo command")
-		}
-
-		return currentValue % tokens[1].ValueInt(), nil
-	}
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type integer (" + strconv.FormatInt(currentValue, 10) + ")")
-}
-func handleStrings(tokens TokenList, currentValue string) (any, error) {
-
-	if tokens[0].Is(PipeCmdToLower) {
-		return strings.ToLower(currentValue), nil
-	} else if tokens[0].Is(PipeCmdToUpper) {
-		return strings.ToUpper(currentValue), nil
-	} else if tokens[0].Is(PipeCmdEmpty) {
-		if currentValue == "" {
-			if len(tokens) >= 2 {
-				return tokens[1].ValueUnescapedString(), nil
-			}
-			return true, nil
-		}
-
-		if len(tokens) == 3 {
-			return tokens[2].ValueUnescapedString(), nil
-		}
-		return false, nil
-
-	} else if tokens[0].Is(PipeCmdPlaintext) {
-
-		re := regexp.MustCompile(`<(.|\n)*?>`)
-		return re.ReplaceAllString(currentValue, ""), nil
-
-	} else if tokens[0].Is(PipeCmdEquals) {
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for equals command")
-		}
-
-		if currentValue == tokens[1].ValueUnescapedString() {
-			if len(tokens) > 2 {
-				return tokens[2].ValueUnescapedString(), nil
-			}
-			return true, nil
-		}
-
-		if len(tokens) > 3 {
-			return tokens[3].ValueUnescapedString(), nil
-		}
-
-		return false, nil
-
-	} else if tokens[0].Is(PipeCmdUCFirst) {
-
-		// guard against an empty string to avoid an out-of-range slice panic
-		if currentValue == "" {
-			return currentValue, nil
-		}
-		return strings.ToUpper(currentValue[0:1]) + currentValue[1:], nil
-
-	} else if tokens[0].Is(PipeCmdUCWords) {
-
-		for i, v := range currentValue {
-			if i == 0 || currentValue[i-1] == ' ' {
-				currentValue = currentValue[:i] + strings.ToUpper(string(v)) + currentValue[i+1:]
-			}
-		}
-		return currentValue, nil
-
-	} else if tokens[0].Is(PipeCmdLength) {
-
-		return len(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdBase64Encode) {
-
-		return base64.StdEncoding.EncodeToString([]byte(currentValue)), nil
-
-	} else if tokens[0].Is(PipeCmdBase64Decode) {
-
-		bytes, err := base64.StdEncoding.DecodeString(currentValue)
-		if err != nil {
-			return nil, err
-		}
-		return string(bytes), nil
-
-	} else if tokens[0].Is(PipeCmdTrim) {
-
-		return strings.TrimSpace(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdHTMLSpecialChars) {
-
-		return html.EscapeString(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdHTMLEntityEncode) {
-
-		return html.UnescapeString(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdHTMLEntityDecode) {
-
-		return html.UnescapeString(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdRawUrlEncode) {
-
-		return strings.Replace(url.QueryEscape(currentValue), "+", "%20", -1), nil
-
-	} else if tokens[0].Is(PipeCmdUrlEncode) {
-
-		return url.QueryEscape(currentValue), nil
-
-	} else if tokens[0].Is(PipeCmdUrlDecode) {
-
-		return url.QueryUnescape(currentValue)
-
-	} else if tokens[0].Is(PipeCmdMD5) {
-
-		d := md5.Sum([]byte(currentValue))
-		return hex.EncodeToString(d[:]), nil
-
-	} else if tokens[0].Is(PipeCmdSHA1) {
-
-		d := sha1.Sum([]byte(currentValue))
-		return hex.EncodeToString(d[:]), nil
-
-	} else if tokens[0].Is(PipeCmdSHA256) {
-
-		d := sha256.Sum256([]byte(currentValue))
-		return hex.EncodeToString(d[:]), nil
-
-	} else if tokens[0].Is(PipeCmdSplit) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for split command")
-		}
-
-		return strings.Split(currentValue, tokens[1].ValueUnescapedString()), nil
-
-	} else if tokens[0].Is(PipeCmdReplace) {
-
-		if len(tokens) < 3 {
-			return nil, errors.New("missing argument for replace command")
-		}
-
-		return strings.Replace(currentValue, tokens[1].ValueUnescapedString(), tokens[2].ValueUnescapedString(), -1), nil
-
-	} else if tokens[0].Is(PipeCmdNop) {
-
-		return currentValue, nil
-
-	} else if tokens[0].Is(PipeCmdStringPad) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for pad command")
-		}
-
-		padLength := int(tokens[1].ValueInt())
-
-		if padLength < 0 {
-			return nil, errors.New("pad length must be greater than 0")
-		}
-
-		if padLength < len(currentValue) {
-			return currentValue, nil
-		}
-
-		padLength -= len(currentValue)
-
-		padString := " "
-		padType := "right"
-
-		if len(tokens) > 2 {
-			padString = tokens[2].ValueUnescapedString()
-		}
-
-		if len(tokens) > 3 {
-			padType = tokens[3].ValueUnescapedString()
-		}
-
-		if padType == "left" {
-			return strings.Repeat(padString, padLength) + currentValue, nil
-		} else if padType == "both" {
-			leftPad := padLength / 2
-			rightPad := padLength - leftPad
-			return strings.Repeat(padString, leftPad) + currentValue + strings.Repeat(padString, rightPad), nil
-		}
-
-		return currentValue + strings.Repeat(padString, padLength), nil
-
-	} else if tokens[0].Is(PipeCmdStringRepeat) {
-
-		return strings.Repeat(currentValue, int(tokens[1].ValueInt())), nil
-
-	} else if tokens[0].Is(PipeCmdReverse) {
-
-		rns := []rune(currentValue)
-		for i, j := 0, len(rns)-1; i < j; i, j = i+1, j-1 {
-			rns[i], rns[j] = rns[j], rns[i]
-		}
-
-		// return the reversed string.
-		return string(rns), nil
-
-	} else if tokens[0].Is(PipeCmdSubstring) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for substring command")
-		}
-
-		start := int(tokens[1].ValueInt())
-		if start > len(currentValue) {
-			return "", nil
-		}
-
-		if len(tokens) == 2 {
-			return currentValue[start:], nil
-		}
-
-		offset := int(tokens[2].ValueInt())
-		offset += start
-
-		if offset < 0 {
-			offset = len(currentValue) + offset
-		}
-
-		if len(currentValue) < offset {
-			offset = len(currentValue)
-		}
-
-		if offset < start {
-			return "", nil
-		}
-
-		return currentValue[start:offset], nil
-
-	} else if tokens[0].Is(PipeCmdWordwrap) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing arguments for wordwrap command")
-		}
-
-		width := int(tokens[1].ValueInt())
-		breakString := "\n"
-
-		if len(tokens) > 2 {
-			breakString = tokens[2].ValueUnescapedString()
-		}
-
-		return wordWrap(currentValue, uint(width), breakString), nil
-
-	} else if tokens[0].Is(PipeCmdPrefix) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for prefix command")
-		}
-
-		return tokens[1].ValueUnescapedString() + currentValue, nil
-
-	} else if tokens[0].Is(PipeCmdSuffix) {
-
-		if len(tokens) < 2 {
-			return nil, errors.New("missing argument for suffix command")
-		}
-
-		return currentValue + tokens[1].ValueUnescapedString(), nil
-
-	} else if tokens[0].Is(PipeCmdToInteger) {
-		return strconv.ParseInt(currentValue, 10, 64)
-	} else if tokens[0].Is(PipeCmdToString) {
-		return currentValue, nil
-	} else if tokens[0].Is(PipeCmdToFloat) {
-		return strconv.ParseFloat(currentValue, 64)
-	} else if tokens[0].Is(PipeCmdToBoolean) {
-		return strconv.ParseBool(currentValue)
-	} else if tokens[0].Is(PipeCmdToNumber) {
-		return strconv.ParseFloat(currentValue, 64)
-	} else if tokens[0].Is(PipeCmdToJSON) {
-		jsonValue, err := json.Marshal(currentValue)
-		if err != nil {
-			return nil, err
-		}
-		return string(jsonValue), nil
-	} else if tokens[0].Is(PipeCmdFromJSON) {
-		jsonValue := interface{}(nil)
-		err := json.Unmarshal([]byte(currentValue), &jsonValue)
-		if err != nil {
-			return nil, err
-		}
-		return jsonValue, nil
-	}
-
-	return nil, errors.New(tokens[0].ValueString() + " is not a valid command for value of type string (" + currentValue + ")")
-
-}
-
-func handleStaticCommand(tokens TokenList) (any, error) {
-
-	if len(tokens) == 2 {
-		if tokens[1].Is(tokenizer.TokenFloat) {
-			return tokens[1].ValueFloat(), nil
-		} else if tokens[1].Is(tokenizer.TokenInteger) {
-			return tokens[1].ValueInt(), nil
-		} else if tokens[1].Is(tokenizer.TokenString) {
-			return tokens[1].ValueUnescapedString(), nil
-		}
-	}
-
-	value := ""
-
-	for _, token := range tokens[1:] {
-		value += token.ValueUnescapedString()
-	}
-
-	return value, nil
-
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/util.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/util.go
deleted file mode 100644
index cbcf9508a408636168780c8fa0aa7f0ad73bc846..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/data.git/util.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package data
-
-import (
-	"bytes"
-	"unicode"
-)
-
-func inArray(needle any, hystack any) bool {
-	switch key := needle.(type) {
-	case string:
-		for _, item := range hystack.([]string) {
-			if key == item {
-				return true
-			}
-		}
-	case int:
-		for _, item := range hystack.([]int) {
-			if key == item {
-				return true
-			}
-		}
-	case int64:
-		for _, item := range hystack.([]int64) {
-			if key == item {
-				return true
-			}
-		}
-	default:
-		return false
-	}
-
-	return false
-}
-
-func wordWrap(str string, width uint, br string) string {
-
-	init := make([]byte, 0, len(str))
-	buf := bytes.NewBuffer(init)
-	var current uint
-	var wordbuf, spacebuf bytes.Buffer
-	for _, char := range str {
-		if char == '\n' {
-			if wordbuf.Len() == 0 {
-				if current+uint(spacebuf.Len()) > width {
-					current = 0
-				} else {
-					current += uint(spacebuf.Len())
-					spacebuf.WriteTo(buf)
-				}
-				spacebuf.Reset()
-			} else {
-				current += uint(spacebuf.Len() + wordbuf.Len())
-				spacebuf.WriteTo(buf)
-				spacebuf.Reset()
-				wordbuf.WriteTo(buf)
-				wordbuf.Reset()
-			}
-			buf.WriteRune(char)
-			current = 0
-		} else if unicode.IsSpace(char) {
-			if spacebuf.Len() == 0 || wordbuf.Len() > 0 {
-				current += uint(spacebuf.Len() + wordbuf.Len())
-				spacebuf.WriteTo(buf)
-				spacebuf.Reset()
-				wordbuf.WriteTo(buf)
-				wordbuf.Reset()
-			}
-			spacebuf.WriteRune(char)
-		} else {
-			wordbuf.WriteRune(char)
-			if current+uint(spacebuf.Len()+wordbuf.Len()) > width && uint(wordbuf.Len()) < width {
-				buf.WriteString(br)
-				current = 0
-				spacebuf.Reset()
-			}
-		}
-	}
-
-	if wordbuf.Len() == 0 {
-		if current+uint(spacebuf.Len()) <= width {
-			spacebuf.WriteTo(buf)
-		}
-	} else {
-		spacebuf.WriteTo(buf)
-		wordbuf.WriteTo(buf)
-	}
-	return buf.String()
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore
deleted file mode 100644
index c55f7d029ba6d56ff0080797e6bfd7a4f7b7867b..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore
+++ /dev/null
@@ -1,149 +0,0 @@
-# Created by https://www.toptal.com/developers/gitignore/api/intellij,go
-# Edit at https://www.toptal.com/developers/gitignore?templates=intellij,go
-
-### Go ###
-# If you prefer the allow list template instead of the deny list, see community template:
-# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
-#
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-testdata/
-
-# Dependency directories (remove the comment below to include it)
-# vendor/
-
-# Go workspace file
-go.work
-
-# Go Fuzz build
-testdata/
-
-### Go Patch ###
-/vendor/
-/Godeps/
-
-### Intellij ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# AWS User-specific
-.idea/**/aws.xml
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn.  Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# SonarLint plugin
-.idea/sonarlint/
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### Intellij Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-# https://plugins.jetbrains.com/plugin/7973-sonarlint
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator-enh.xml
-.idea/**/markdown-navigator/
-
-# Cache file creation bug
-# See https://youtrack.jetbrains.com/issue/JBR-2257
-.idea/$CACHE_FILE$
-
-# CodeStream plugin
-# https://plugins.jetbrains.com/plugin/12206-codestream
-.idea/codestream.xml
-
-# Azure Toolkit for IntelliJ plugin
-# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
-.idea/**/azureSettings.xml
-
-# End of https://www.toptal.com/developers/gitignore/api/intellij,go
\ No newline at end of file
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CHANGELOG.md b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CHANGELOG.md
deleted file mode 100644
index 859750c6e1aef2a330d820f26d18d1c09533718f..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CHANGELOG.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-<a name="v0.5.2"></a>
-## [v0.5.2] - 2022-12-23
-
-<a name="v0.5.1"></a>
-## [v0.5.1] - 2022-12-23
-
-<a name="v0.5.0"></a>
-## [v0.5.0] - 2022-12-18
-
-<a name="v0.4.0"></a>
-## [v0.4.0] - 2022-12-17
-
-<a name="v0.3.1"></a>
-## [v0.3.1] - 2022-10-16
-### Bug Fixes
-- fix secure access to structure with a constraint
-
-
-<a name="v0.3.0"></a>
-## [v0.3.0] - 2022-10-15
-### Code Refactoring
-- refactor change function signature
-
-
-<a name="v0.2.0"></a>
-## v0.2.0 - 2022-10-15
-### Add Features
-- feat takeover from other project
-
-
-[v0.5.2]: https://gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/compare/v0.5.1...v0.5.2
-[v0.5.1]: https://gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/compare/v0.5.0...v0.5.1
-[v0.5.0]: https://gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/compare/v0.4.0...v0.5.0
-[v0.4.0]: https://gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/compare/v0.3.1...v0.4.0
-[v0.3.1]: https://gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/compare/v0.3.0...v0.3.1
-[v0.3.0]: https://gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/compare/v0.2.0...v0.3.0
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE
deleted file mode 100644
index 22686f9fe879acaa7d1db639310a955de05b0d2a..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE
+++ /dev/null
@@ -1,662 +0,0 @@
-                    GNU AFFERO GENERAL PUBLIC LICENSE
-                       Version 3, 19 November 2007
-                              AGPL-3.0
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-our General Public Licenses are intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
-
-  A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate.  Many developers of free software are heartened and
-encouraged by the resulting cooperation.  However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
-
-  The GNU Affero General Public License is designed specifically to
-ensure that, in such cases, the modified source code becomes available
-to the community.  It requires the operator of a network server to
-provide the source code of the modified version running there to the
-users of that server.  Therefore, public use of a modified version, on
-a publicly accessible server, gives the public access to the source
-code of the modified version.
-
-  An older license, called the Affero General Public License and
-published by Affero, was designed to accomplish similar goals.  This is
-a different license, not a version of the Affero GPL, but Affero has
-released a new version of the Affero GPL which permits relicensing under
-this license.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU Affero General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Remote Network Interaction; Use with the GNU General Public License.
-
-  Notwithstanding any other provision of this License, if you modify the
-Program, your modified version must prominently offer all users
-interacting with it remotely through a computer network (if your version
-supports such interaction) an opportunity to receive the Corresponding
-Source of your version by providing access to the Corresponding Source
-from a network server at no charge, through some standard or customary
-means of facilitating copying of software.  This Corresponding Source
-shall include the Corresponding Source for any work covered by version 3
-of the GNU General Public License that is incorporated pursuant to the
-following paragraph.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the work with which it is combined will remain governed by version
-3 of the GNU General Public License.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU Affero General Public License from time to time.  Such new versions
-will be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU Affero General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU Affero General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU Affero General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU Affero General Public License as published
-    by the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU Affero General Public License for more details.
-
-    You should have received a copy of the GNU Affero General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source.  For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code.  There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for the
-specific requirements.
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU AGPL, see
-<https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/Makefile b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/Makefile
deleted file mode 100644
index c149982aa9f602b5dfb1aba0ced25241a1a8b472..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/Makefile
+++ /dev/null
@@ -1,157 +0,0 @@
-## Copyright 2022 schukai GmbH. All rights reserved.
-## Use of this source code is governed by an AGPL-3.0
-## license that can be found in the LICENSE file.
-
-PROJECT_ROOT:=$(dir $(realpath $(lastword $(MAKEFILE_LIST))))
-THIS_MAKEFILE:=$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))
-THIS_MAKEFILE_PATH:=$(PROJECT_ROOT)$(THIS_MAKEFILE) 
-
-# @see .PHONY https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html#Phony-Targets
-.DEFAULT_GOAL := help
-
-.PHONY: print
-## Print Path	
-print:
-	@echo "THIS_MAKEFILE:      $(THIS_MAKEFILE)"
-	@echo "THIS_MAKEFILE_PATH: $(THIS_MAKEFILE_PATH)"
-	@echo "PROJECT_ROOT:       $(PROJECT_ROOT)"
-
-# Add a comment to the public targets so that it appears
-# in this help. Use two # characters for a help comment.
-.PHONY: help
-help:
-	@printf "${COMMENT}Usage:${RESET}\n"
-	@printf " make [target] [arg=\"val\"...]\n\n"
-	@printf "${COMMENT}Available targets:${RESET}\n"
-	@awk '/^[a-zA-Z\-\\_0-9\.@]+:/ { \
-		helpMessage = match(lastLine, /^## (.*)/); \
-		if (helpMessage) { \
-			helpCommand = substr($$1, 0, index($$1, ":")); \
-			helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
-			printf " ${INFO}%-22s${RESET} %s\n", helpCommand, helpMessage; \
-		} \
-	} \
-	{ lastLine = $$0 }' $(MAKEFILE_LIST)
-	@printf "\n${COMMENT}Available arguments:${RESET}\n\n"
-	@awk '/^(([a-zA-Z\-\\_0-9\.@]+)\s[?:]?=)/ { \
-		helpMessage = match(lastLine, /^## (.*)/); \
-		if (helpMessage) { \
-			helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
-			printf " ${INFO}%-22s${RESET} %s (Default: %s)\n", $$1, helpMessage, $$3; \
-		} \
-	} \
-	{ lastLine = $$0 }' $(MAKEFILE_LIST)
-
-
-## run tests
-test:
-	echo "Running tests"
-	go test -cover -v ./...
-
-## run tests with fuzzing
-test-fuzz:
-	echo "Running fuzz tests"
-	go test -v -fuzztime=30s -fuzz=Fuzz ./...
-
-#### VERSION
-BIN_DIR ?= $(shell echo $$HOME)/.local/bin/
-VERSION_NAME 	     := version
-EXECUTABLES = $(EXECUTABLES:-) $(VERSION_NAME)
-VERSION_BIN_PATH := $(BIN_DIR)$(VERSION_NAME)
-
-VERSION_BIN := $(shell command -v $(VERSION_NAME) 2> /dev/null)
-
-ifndef VERSION_BIN
-    $(shell curl -o $(VERSION_BIN_PATH) http://download.schukai.com/tools/version/version-$(shell uname -s | tr [:upper:] [:lower:])-$(shell echo `uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/`))
-    $(shell chmod +x $(VERSION_BIN_PATH))
-endif
-
-GIT_CHGLOG_BIN := $(shell command -v git-chglog 2> /dev/null)
-
-ifeq ($(GIT_CHGLOG_BIN),)
-    $(shell go install github.com/git-chglog/git-chglog/cmd/git-chglog@latest)
-endif     
-     
-RELEASE_FILE ?= $(PROJECT_ROOT)release.json
-CHANGELOG_FILE ?= $(PROJECT_ROOT)CHANGELOG.md
- 
-ifeq ("$(wildcard $(RELEASE_FILE))","")
-  $(shell echo '{"version":"0.1.0"}' > $(RELEASE_FILE))
-endif
-
-PROJECT_VERSION ?= $(shell cat $(RELEASE_FILE) | jq -r .version)
-PROJECT_BUILD_DATE ?= $(shell $(VERSION_BIN) date)
-
-.PHONY: next-patch-version
-next-patch-version: check-clean-repo
-	echo "Creating next version"
-	$(VERSION_BIN) patch --path $(RELEASE_FILE) --selector "version"
-	git add $(RELEASE_FILE) && git commit -m "Bump version to $$(cat $(RELEASE_FILE) | jq -r .version)"
-
-.PHONY: next-minor-version
-next-minor-version: check-clean-repo
-	echo  "Creating next minor version"
-	$(VERSION_BIN) minor --path $(RELEASE_FILE) --selector "version"
-	git add $(RELEASE_FILE) && git commit -m "Bump version to $$( cat $(RELEASE_FILE) | jq -r .version)"
-
-.PHONY: next-major-version
-next-major-version: check-clean-repo
-	echo "Creating next major version"
-	$(VERSION_BIN) major --path $(RELEASE_FILE) --selector "version"
-	git add $(RELEASE_FILE) && git commit -m "Bump version to $$(cat $(RELEASE_FILE) | jq -r .version)"
-
-.PHONY: check-clean-repo
-check-clean-repo:
-	git diff-index --quiet HEAD || (echo "There are uncommitted changes after running make. Please commit or stash them before running make."; exit 1)
-	
-## tag repository with next patch version
-tag-patch-version: next-patch-version 
-	echo "Tagging patch version"
-	$(eval PROJECT_VERSION := $(shell cat $(RELEASE_FILE) | jq -r .version))
-	git-chglog --next-tag v$(PROJECT_VERSION) -o $(CHANGELOG_FILE)
-	git add $(CHANGELOG_FILE) && git commit -m "Update changelog"
-	git tag -a v$(PROJECT_VERSION) -m "Version $(PROJECT_VERSION)"
-
-## tag repository with next minor version
-tag-minor-version: next-minor-version 
-	echo "Tagging minor version"
-	$(eval PROJECT_VERSION := $(shell cat $(RELEASE_FILE) | jq -r .version))
-	git-chglog --next-tag v$(PROJECT_VERSION) -o $(CHANGELOG_FILE)
-	git add $(CHANGELOG_FILE) && git commit -m "Update changelog"
-	git tag -a v$(PROJECT_VERSION) -m "Version $(PROJECT_VERSION)"
-
-## tag repository with next major version
-tag-major-version: next-major-version 
-	echo "Tagging major version"
-	$(eval PROJECT_VERSION := $(shell cat $(RELEASE_FILE) | jq -r .version))
-	git-chglog --next-tag v$(PROJECT_VERSION) -o $(CHANGELOG_FILE)
-	git add $(CHANGELOG_FILE) && git commit -m "Update changelog"
-	git tag -a v$(PROJECT_VERSION) -m "Version $(PROJECT_VERSION)"
-
-GO_MOD_FILE := $(SOURCE_PATH)go.mod
-
-ifeq ($(shell test -e $(GO_MOD_FILE) && echo -n yes),yes)
-    GO_CURRENT_MODULE := $(shell cat $(GO_MOD_FILE) | head -n1 | cut -d" " -f2)
-	# go install github.com/google/go-licenses@latest
-	EXECUTABLES = $(EXECUTABLES:-) go-licenses;    
-endif
-
-.PHONY: fetch-licenses
-## Fetch licenses for all modules
-fetch-licenses:
-	go-licenses save $(GO_CURRENT_MODULE) --ignore gitlab.schukai.com --force --save_path $(PROJECT_ROOT)licenses/
-
-# https://spdx.github.io/spdx-spec/v2.3/SPDX-license-list/
-ADDLICENSE_BIN ?= addlicense
-ifeq ($(shell command -v $(ADDLICENSE_BIN) 2> /dev/null),)
-	$(shell go install github.com/google/addlicense@latest)
-	EXECUTABLES = $(EXECUTABLES:-) $(ADDLICENSE_BIN);
-endif
-
-.PHONY: add-licenses
-## Add license headers to all go files
-add-licenses:
-	addlicense -c "schukai GmbH" -s -l "AGPL-3.0" ./*.go
-
-
-
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md
deleted file mode 100644
index 4d1522c15b54f8c311a7cedeb5c6bfad357b9053..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-## Pathfinder
-
-## What does this library do?
-
-This library provides a simple way to get and set values in a nested structure.
-
-It supports:
-
-* [X]  Set values in a nested structure
-* [X]  Get values from a nested structure
-
-## Installation
-
-```shell
-go get gitlab.schukai.com/oss/libraries/go/utilities/pathfinder
-```
-
-**Note:** This library uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
-
-## Usage
-
-### Set values
-
-```go
-s := &StructA{}
-err := SetValue[*StructA](s, "my.key", "value")
-```
-
-### Get values
-
-```go
-s := &StructA{}
-value, err := GetValue[*StructA](s, "my.key")
-```
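-
-### Full example
-
-Putting both together, here is a minimal, self-contained sketch; the `Config`
-and `Server` types are illustrative and not part of the library:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"gitlab.schukai.com/oss/libraries/go/utilities/pathfinder"
-)
-
-// Server and Config are example nested structures; any struct works.
-type Server struct {
-	Host string
-	Port int
-}
-
-type Config struct {
-	Server Server
-}
-
-func main() {
-	cfg := &Config{}
-
-	// Set a nested field via its dot-separated path of exported field names.
-	if err := pathfinder.SetValue(cfg, "Server.Host", "localhost"); err != nil {
-		panic(err)
-	}
-
-	// Read the value back; GetValue returns (any, error).
-	host, err := pathfinder.GetValue(cfg, "Server.Host")
-	if err != nil {
-		panic(err)
-	}
-
-	fmt.Println(host) // localhost
-}
-```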
-
-## Contributing
-
-Merge requests are welcome. For major changes, please open an issue first to discuss what
-you would like to change. **Please make sure to update tests as appropriate.**
-
-Versioning is done with [SemVer](https://semver.org/).
-Changelog is generated with [git-chglog](https://github.com/git-chglog/git-chglog#git-chglog).
-
-Commit messages should follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
-Messages start with a type, which is one of the following:
-
-- **feat**: A new feature
-- **fix**: A bug fix
-- **doc**: Documentation only changes
-- **refactor**: A code change that neither fixes a bug nor adds a feature
-- **perf**: A code change that improves performance
-- **test**: Adding missing or correcting existing tests
-- **chore**: Other changes that don't modify src or test files
-
-The footer is used to reference an issue or a breaking change.
-
-A commit that has a footer `BREAKING CHANGE:`, or appends a ! after the type/scope,
-introduces a breaking API change (correlating with MAJOR in semantic versioning).
-A BREAKING CHANGE can be part of commits of any type.
-
-The following is an example of a commit message:
-
-```text
-feat: add 'extras' field
-```
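-
-A breaking change can be marked either with a `!` after the type/scope or with a
-`BREAKING CHANGE:` footer; the following illustrative message (its content is
-made up) shows both:
-
-```text
-feat(api)!: require dot-separated paths
-
-BREAKING CHANGE: paths are now split on "." instead of "/"
-```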
-
-## License
-
-[AGPL-3.0](https://choosealicense.com/licenses/agpl-3.0/)
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go
deleted file mode 100644
index 614b13e8adaf28a9c606b9d0c847cbd80e7a4ca3..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package pathfinder
-
-import (
-	"errors"
-	"reflect"
-)
-
-type InvalidPathError error
-
-func newInvalidPathError(path string) InvalidPathError {
-	return InvalidPathError(errors.New("invalid path " + path))
-}
-
-type UnsupportedTypeAtTopOfPathError error
-
-func newUnsupportedTypeAtTopOfPathError(path string, t reflect.Type) UnsupportedTypeAtTopOfPathError {
-	return UnsupportedTypeAtTopOfPathError(errors.New("unsupported type " + t.String() + " at top of path " + path))
-}
-
-type UnsupportedTypePathError error
-
-func newUnsupportedTypePathError(path string, t reflect.Type) UnsupportedTypePathError {
-	return UnsupportedTypePathError(errors.New("unsupported type " + t.String() + " at path " + path))
-}
-
-type CannotSetError error
-
-func newCannotSetError(name string) CannotSetError {
-	return CannotSetError(errors.New("cannot set " + name))
-}
-
-type InvalidTypeForPathError error
-
-func newInvalidTypeForPathError(path string, pt string, nt string) InvalidTypeForPathError {
-	return InvalidTypeForPathError(errors.New("invalid type for path " + path + ": expected " + pt + ", got " + nt))
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go
deleted file mode 100644
index 79789c408c1562f857fc69d1000d702fa5e0246c..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package pathfinder
-
-import (
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// GetValue returns the value of a field in a struct, given a path to the field.
-func GetValue[D any](obj D, keyWithDots string) (any, error) {
-	keySlice := strings.Split(keyWithDots, ".")
-	v := reflect.ValueOf(obj)
-
-	for _, key := range keySlice {
-
-		switch v.Kind() {
-		case reflect.Ptr, reflect.Slice, reflect.Array, reflect.Interface:
-			v = v.Elem()
-		}
-
-		switch v.Kind() {
-		case reflect.Map:
-			v = v.MapIndex(reflect.ValueOf(key))
-			if !v.IsValid() {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-
-		case reflect.Slice, reflect.Array:
-			index, err := strconv.Atoi(key)
-			if err != nil {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-			v = v.Index(index)
-		case reflect.Struct:
-			v = v.FieldByName(key)
-			if !v.IsValid() {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-		default:
-			return nil, newInvalidPathError(keyWithDots)
-		}
-
-	}
-
-	if v.Kind() == reflect.Invalid {
-		return nil, newInvalidPathError(keyWithDots)
-	}
-
-	for v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}
-	
-	return v.Interface(), nil
-
-}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json
deleted file mode 100644
index ccd00c212fd33519ee25a4425025db1859dd1925..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json
+++ /dev/null
@@ -1 +0,0 @@
-{"version":"0.5.2"}
diff --git a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go b/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go
deleted file mode 100644
index f5ca2f43314c861ad15eeb3abcf15d50c3463f31..0000000000000000000000000000000000000000
--- a/application/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package pathfinder
-
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// SetValue sets the value of a field in a struct, given a path to the field.
-func SetValue[D any](obj D, keyWithDots string, newValue any) error {
-
-	keySlice := strings.Split(keyWithDots, ".")
-	v := reflect.ValueOf(obj)
-
-	for _, key := range keySlice[0 : len(keySlice)-1] {
-		for v.Kind() != reflect.Ptr {
-			if v.Kind() == reflect.Invalid {
-				return newInvalidPathError(keyWithDots)
-			}
-			v = v.Addr()
-		}
-
-		if v.Kind() != reflect.Ptr {
-			return newUnsupportedTypePathError(keyWithDots, v.Type())
-		}
-
-		elem := v.Elem()
-		if elem.Kind() != reflect.Struct {
-			return newUnsupportedTypePathError(keyWithDots, v.Type())
-		}
-
-		v = elem.FieldByName(key)
-
-	}
-
-	if v.Kind() == reflect.Invalid {
-		return newInvalidPathError(keyWithDots)
-	}
-
-	for v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}
-
-	// unsupported type at the top of the path
-	if v.Kind() != reflect.Struct {
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, v.Type())
-	}
-
-	v = v.FieldByName(keySlice[len(keySlice)-1])
-	if !v.IsValid() {
-		return newInvalidPathError(keyWithDots)
-	}
-
-	if !v.CanSet() {
-		return newCannotSetError(keyWithDots)
-	}
-
-	switch v.Kind() {
-	case reflect.Ptr:
-		if newValue == nil {
-			v.Set(reflect.Zero(v.Type()))
-		} else {
-			v.Set(reflect.ValueOf(&newValue))
-		}
-		return nil
-	}
-
-	newValueType := reflect.TypeOf(newValue)
-	if newValueType == nil {
-		return newUnsupportedTypePathError(keyWithDots, v.Type())
-	}
-
-	newValueKind := reflect.TypeOf(newValue).Kind()
-
-	switch v.Kind() {
-	case reflect.String:
-		if newValueKind == reflect.String {
-			v.SetString(newValue.(string))
-		} else {
-			v.SetString(fmt.Sprintf("%v", newValue))
-		}
-
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-
-		if newValueKind == reflect.Int {
-			v.SetInt(int64(newValue.(int)))
-		} else {
-			s, err := strconv.ParseInt(fmt.Sprintf("%v", newValue), 10, 64)
-			if err != nil {
-				return err
-			}
-			v.SetInt(s)
-		}
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-
-		if newValueKind == reflect.Int {
-			v.SetUint(uint64(newValue.(int)))
-		} else {
-			s, err := strconv.ParseInt(fmt.Sprintf("%v", newValue), 10, 64)
-			if err != nil {
-				return err
-			}
-			v.SetUint(uint64(s))
-		}
-
-	case reflect.Bool:
-
-		if newValueKind == reflect.Bool {
-			v.SetBool(newValue.(bool))
-		} else {
-			b, err := strconv.ParseBool(fmt.Sprintf("%v", newValue))
-			if err != nil {
-				return err
-			}
-
-			v.SetBool(b)
-		}
-
-	case reflect.Float64, reflect.Float32:
-
-		if newValueKind == reflect.Float64 {
-			v.SetFloat(newValue.(float64))
-		} else {
-			s, err := strconv.ParseFloat(fmt.Sprintf("%v", newValue), 64)
-			if err != nil {
-				return err
-			}
-
-			v.SetFloat(s)
-		}
-
-	case reflect.Slice, reflect.Array:
-
-		if newValueKind == reflect.Ptr {
-			newValue = reflect.ValueOf(newValue).Elem().Interface()
-			v.Set(reflect.ValueOf(newValue))
-		} else if newValueKind == reflect.Slice {
-			v.Set(reflect.ValueOf(newValue))
-		} else {
-			return newUnsupportedTypePathError(keyWithDots, v.Type())
-		}
-
-	default:
-		return newInvalidTypeForPathError(keyWithDots, v.Type().String(), newValueKind.String())
-	}
-
-	return nil
-
-}
diff --git a/application/source/vendor/golang.org/x/crypto/LICENSE b/application/source/vendor/golang.org/x/crypto/LICENSE
deleted file mode 100644
index 6a66aea5eafe0ca6a688840c47219556c552488e..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/application/source/vendor/golang.org/x/crypto/PATENTS b/application/source/vendor/golang.org/x/crypto/PATENTS
deleted file mode 100644
index 733099041f84fa1e58611ab2e11af51c1f26d1d2..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go.  This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation.  If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/application/source/vendor/golang.org/x/crypto/bcrypt/base64.go b/application/source/vendor/golang.org/x/crypto/bcrypt/base64.go
deleted file mode 100644
index fc311609081849d379013a25310456fbb1a6e66a..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/bcrypt/base64.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bcrypt
-
-import "encoding/base64"
-
-const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
-
-var bcEncoding = base64.NewEncoding(alphabet)
-
-func base64Encode(src []byte) []byte {
-	n := bcEncoding.EncodedLen(len(src))
-	dst := make([]byte, n)
-	bcEncoding.Encode(dst, src)
-	for dst[n-1] == '=' {
-		n--
-	}
-	return dst[:n]
-}
-
-func base64Decode(src []byte) ([]byte, error) {
-	numOfEquals := 4 - (len(src) % 4)
-	for i := 0; i < numOfEquals; i++ {
-		src = append(src, '=')
-	}
-
-	dst := make([]byte, bcEncoding.DecodedLen(len(src)))
-	n, err := bcEncoding.Decode(dst, src)
-	if err != nil {
-		return nil, err
-	}
-	return dst[:n], nil
-}
diff --git a/application/source/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/application/source/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
deleted file mode 100644
index 5577c0f939a23ea0e3eede819c6ce8386a551bec..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
-// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
-package bcrypt // import "golang.org/x/crypto/bcrypt"
-
-// The code is a port of Provos and Mazières's C implementation.
-import (
-	"crypto/rand"
-	"crypto/subtle"
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/blowfish"
-)
-
-const (
-	MinCost     int = 4  // the minimum allowable cost as passed in to GenerateFromPassword
-	MaxCost     int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
-	DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
-)
-
-// The error returned from CompareHashAndPassword when a password and hash do
-// not match.
-var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
-
-// The error returned from CompareHashAndPassword when a hash is too short to
-// be a bcrypt hash.
-var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
-
-// The error returned from CompareHashAndPassword when a hash was created with
-// a bcrypt algorithm newer than this implementation.
-type HashVersionTooNewError byte
-
-func (hv HashVersionTooNewError) Error() string {
-	return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
-}
-
-// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
-type InvalidHashPrefixError byte
-
-func (ih InvalidHashPrefixError) Error() string {
-	return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
-}
-
-type InvalidCostError int
-
-func (ic InvalidCostError) Error() string {
-	return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost)
-}
-
-const (
-	majorVersion       = '2'
-	minorVersion       = 'a'
-	maxSaltSize        = 16
-	maxCryptedHashSize = 23
-	encodedSaltSize    = 22
-	encodedHashSize    = 31
-	minHashSize        = 59
-)
-
-// magicCipherData is an IV for the 64 Blowfish encryption calls in
-// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
-var magicCipherData = []byte{
-	0x4f, 0x72, 0x70, 0x68,
-	0x65, 0x61, 0x6e, 0x42,
-	0x65, 0x68, 0x6f, 0x6c,
-	0x64, 0x65, 0x72, 0x53,
-	0x63, 0x72, 0x79, 0x44,
-	0x6f, 0x75, 0x62, 0x74,
-}
-
-type hashed struct {
-	hash  []byte
-	salt  []byte
-	cost  int // allowed range is MinCost to MaxCost
-	major byte
-	minor byte
-}
-
-// ErrPasswordTooLong is returned when the password passed to
-// GenerateFromPassword is too long (i.e. > 72 bytes).
-var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes")
-
-// GenerateFromPassword returns the bcrypt hash of the password at the given
-// cost. If the cost given is less than MinCost, the cost will be set to
-// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
-// to compare the returned hashed password with its cleartext version.
-// GenerateFromPassword does not accept passwords longer than 72 bytes, which
-// is the longest password bcrypt will operate on.
-func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
-	if len(password) > 72 {
-		return nil, ErrPasswordTooLong
-	}
-	p, err := newFromPassword(password, cost)
-	if err != nil {
-		return nil, err
-	}
-	return p.Hash(), nil
-}
-
-// CompareHashAndPassword compares a bcrypt hashed password with its possible
-// plaintext equivalent. Returns nil on success, or an error on failure.
-func CompareHashAndPassword(hashedPassword, password []byte) error {
-	p, err := newFromHash(hashedPassword)
-	if err != nil {
-		return err
-	}
-
-	otherHash, err := bcrypt(password, p.cost, p.salt)
-	if err != nil {
-		return err
-	}
-
-	otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
-	if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
-		return nil
-	}
-
-	return ErrMismatchedHashAndPassword
-}
-
-// Cost returns the hashing cost used to create the given hashed
-// password. When, in the future, the hashing cost of a password system needs
-// to be increased in order to adjust for greater computational power, this
-// function allows one to establish which passwords need to be updated.
-func Cost(hashedPassword []byte) (int, error) {
-	p, err := newFromHash(hashedPassword)
-	if err != nil {
-		return 0, err
-	}
-	return p.cost, nil
-}
-
-func newFromPassword(password []byte, cost int) (*hashed, error) {
-	if cost < MinCost {
-		cost = DefaultCost
-	}
-	p := new(hashed)
-	p.major = majorVersion
-	p.minor = minorVersion
-
-	err := checkCost(cost)
-	if err != nil {
-		return nil, err
-	}
-	p.cost = cost
-
-	unencodedSalt := make([]byte, maxSaltSize)
-	_, err = io.ReadFull(rand.Reader, unencodedSalt)
-	if err != nil {
-		return nil, err
-	}
-
-	p.salt = base64Encode(unencodedSalt)
-	hash, err := bcrypt(password, p.cost, p.salt)
-	if err != nil {
-		return nil, err
-	}
-	p.hash = hash
-	return p, err
-}
-
-func newFromHash(hashedSecret []byte) (*hashed, error) {
-	if len(hashedSecret) < minHashSize {
-		return nil, ErrHashTooShort
-	}
-	p := new(hashed)
-	n, err := p.decodeVersion(hashedSecret)
-	if err != nil {
-		return nil, err
-	}
-	hashedSecret = hashedSecret[n:]
-	n, err = p.decodeCost(hashedSecret)
-	if err != nil {
-		return nil, err
-	}
-	hashedSecret = hashedSecret[n:]
-
-	// The "+2" is here because we'll have to append at most 2 '=' to the salt
-	// when base64 decoding it in expensiveBlowfishSetup().
-	p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
-	copy(p.salt, hashedSecret[:encodedSaltSize])
-
-	hashedSecret = hashedSecret[encodedSaltSize:]
-	p.hash = make([]byte, len(hashedSecret))
-	copy(p.hash, hashedSecret)
-
-	return p, nil
-}
-
-func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
-	cipherData := make([]byte, len(magicCipherData))
-	copy(cipherData, magicCipherData)
-
-	c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
-	if err != nil {
-		return nil, err
-	}
-
-	for i := 0; i < 24; i += 8 {
-		for j := 0; j < 64; j++ {
-			c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
-		}
-	}
-
-	// Bug compatibility with C bcrypt implementations. We only encode 23 of
-	// the 24 bytes encrypted.
-	hsh := base64Encode(cipherData[:maxCryptedHashSize])
-	return hsh, nil
-}
-
-func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
-	csalt, err := base64Decode(salt)
-	if err != nil {
-		return nil, err
-	}
-
-	// Bug compatibility with C bcrypt implementations. They use the trailing
-	// NULL in the key string during expansion.
-	// We copy the key to prevent changing the underlying array.
-	ckey := append(key[:len(key):len(key)], 0)
-
-	c, err := blowfish.NewSaltedCipher(ckey, csalt)
-	if err != nil {
-		return nil, err
-	}
-
-	var i, rounds uint64
-	rounds = 1 << cost
-	for i = 0; i < rounds; i++ {
-		blowfish.ExpandKey(ckey, c)
-		blowfish.ExpandKey(csalt, c)
-	}
-
-	return c, nil
-}
-
-func (p *hashed) Hash() []byte {
-	arr := make([]byte, 60)
-	arr[0] = '$'
-	arr[1] = p.major
-	n := 2
-	if p.minor != 0 {
-		arr[2] = p.minor
-		n = 3
-	}
-	arr[n] = '$'
-	n++
-	copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
-	n += 2
-	arr[n] = '$'
-	n++
-	copy(arr[n:], p.salt)
-	n += encodedSaltSize
-	copy(arr[n:], p.hash)
-	n += encodedHashSize
-	return arr[:n]
-}
-
-func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
-	if sbytes[0] != '$' {
-		return -1, InvalidHashPrefixError(sbytes[0])
-	}
-	if sbytes[1] > majorVersion {
-		return -1, HashVersionTooNewError(sbytes[1])
-	}
-	p.major = sbytes[1]
-	n := 3
-	if sbytes[2] != '$' {
-		p.minor = sbytes[2]
-		n++
-	}
-	return n, nil
-}
-
-// sbytes should begin where decodeVersion left off.
-func (p *hashed) decodeCost(sbytes []byte) (int, error) {
-	cost, err := strconv.Atoi(string(sbytes[0:2]))
-	if err != nil {
-		return -1, err
-	}
-	err = checkCost(cost)
-	if err != nil {
-		return -1, err
-	}
-	p.cost = cost
-	return 3, nil
-}
-
-func (p *hashed) String() string {
-	return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
-}
-
-func checkCost(cost int) error {
-	if cost < MinCost || cost > MaxCost {
-		return InvalidCostError(cost)
-	}
-	return nil
-}
diff --git a/application/source/vendor/golang.org/x/crypto/blowfish/block.go b/application/source/vendor/golang.org/x/crypto/blowfish/block.go
deleted file mode 100644
index 9d80f19521b461af2a77b10e8eaf64936d8e223d..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/blowfish/block.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blowfish
-
-// getNextWord returns the next big-endian uint32 value from the byte slice
-// at the given position in a circular manner, updating the position.
-func getNextWord(b []byte, pos *int) uint32 {
-	var w uint32
-	j := *pos
-	for i := 0; i < 4; i++ {
-		w = w<<8 | uint32(b[j])
-		j++
-		if j >= len(b) {
-			j = 0
-		}
-	}
-	*pos = j
-	return w
-}
-
-// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
-// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
-// pi and substitution tables for calls to Encrypt. This is used, primarily,
-// by the bcrypt package to reuse the Blowfish key schedule during its
-// set up. It's unlikely that you need to use this directly.
-func ExpandKey(key []byte, c *Cipher) {
-	j := 0
-	for i := 0; i < 18; i++ {
-		// Using inlined getNextWord for performance.
-		var d uint32
-		for k := 0; k < 4; k++ {
-			d = d<<8 | uint32(key[j])
-			j++
-			if j >= len(key) {
-				j = 0
-			}
-		}
-		c.p[i] ^= d
-	}
-
-	var l, r uint32
-	for i := 0; i < 18; i += 2 {
-		l, r = encryptBlock(l, r, c)
-		c.p[i], c.p[i+1] = l, r
-	}
-
-	for i := 0; i < 256; i += 2 {
-		l, r = encryptBlock(l, r, c)
-		c.s0[i], c.s0[i+1] = l, r
-	}
-	for i := 0; i < 256; i += 2 {
-		l, r = encryptBlock(l, r, c)
-		c.s1[i], c.s1[i+1] = l, r
-	}
-	for i := 0; i < 256; i += 2 {
-		l, r = encryptBlock(l, r, c)
-		c.s2[i], c.s2[i+1] = l, r
-	}
-	for i := 0; i < 256; i += 2 {
-		l, r = encryptBlock(l, r, c)
-		c.s3[i], c.s3[i+1] = l, r
-	}
-}
-
-// expandKeyWithSalt is similar to ExpandKey, but folds the salt into the key
-// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
-// salt passed in, reusing ExpandKey turns out to be a source of inefficiency,
-// so specializing it here is worthwhile.
-func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
-	j := 0
-	for i := 0; i < 18; i++ {
-		c.p[i] ^= getNextWord(key, &j)
-	}
-
-	j = 0
-	var l, r uint32
-	for i := 0; i < 18; i += 2 {
-		l ^= getNextWord(salt, &j)
-		r ^= getNextWord(salt, &j)
-		l, r = encryptBlock(l, r, c)
-		c.p[i], c.p[i+1] = l, r
-	}
-
-	for i := 0; i < 256; i += 2 {
-		l ^= getNextWord(salt, &j)
-		r ^= getNextWord(salt, &j)
-		l, r = encryptBlock(l, r, c)
-		c.s0[i], c.s0[i+1] = l, r
-	}
-
-	for i := 0; i < 256; i += 2 {
-		l ^= getNextWord(salt, &j)
-		r ^= getNextWord(salt, &j)
-		l, r = encryptBlock(l, r, c)
-		c.s1[i], c.s1[i+1] = l, r
-	}
-
-	for i := 0; i < 256; i += 2 {
-		l ^= getNextWord(salt, &j)
-		r ^= getNextWord(salt, &j)
-		l, r = encryptBlock(l, r, c)
-		c.s2[i], c.s2[i+1] = l, r
-	}
-
-	for i := 0; i < 256; i += 2 {
-		l ^= getNextWord(salt, &j)
-		r ^= getNextWord(salt, &j)
-		l, r = encryptBlock(l, r, c)
-		c.s3[i], c.s3[i+1] = l, r
-	}
-}
-
-func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
-	xl, xr := l, r
-	xl ^= c.p[0]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
-	xr ^= c.p[17]
-	return xr, xl
-}
-
-func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
-	xl, xr := l, r
-	xl ^= c.p[17]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
-	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
-	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
-	xr ^= c.p[0]
-	return xr, xl
-}
diff --git a/application/source/vendor/golang.org/x/crypto/blowfish/cipher.go b/application/source/vendor/golang.org/x/crypto/blowfish/cipher.go
deleted file mode 100644
index 213bf204afea5bb048b8e2dd61f68cfeb9ee2afb..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/blowfish/cipher.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
-//
-// Blowfish is a legacy cipher and its short block size makes it vulnerable to
-// birthday bound attacks (see https://sweet32.info). It should only be used
-// where compatibility with legacy systems, not security, is the goal.
-//
-// Deprecated: any new system should use AES (from crypto/aes, if necessary in
-// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
-// golang.org/x/crypto/chacha20poly1305).
-package blowfish // import "golang.org/x/crypto/blowfish"
-
-// The code is a port of Bruce Schneier's C implementation.
-// See https://www.schneier.com/blowfish.html.
-
-import "strconv"
-
-// The Blowfish block size in bytes.
-const BlockSize = 8
-
-// A Cipher is an instance of Blowfish encryption using a particular key.
-type Cipher struct {
-	p              [18]uint32
-	s0, s1, s2, s3 [256]uint32
-}
-
-type KeySizeError int
-
-func (k KeySizeError) Error() string {
-	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
-}
-
-// NewCipher creates and returns a Cipher.
-// The key argument should be the Blowfish key, from 1 to 56 bytes.
-func NewCipher(key []byte) (*Cipher, error) {
-	var result Cipher
-	if k := len(key); k < 1 || k > 56 {
-		return nil, KeySizeError(k)
-	}
-	initCipher(&result)
-	ExpandKey(key, &result)
-	return &result, nil
-}
-
-// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
-// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
-// sufficient and desirable. For bcrypt compatibility, the key can be over 56
-// bytes.
-func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
-	if len(salt) == 0 {
-		return NewCipher(key)
-	}
-	var result Cipher
-	if k := len(key); k < 1 {
-		return nil, KeySizeError(k)
-	}
-	initCipher(&result)
-	expandKeyWithSalt(key, salt, &result)
-	return &result, nil
-}
-
-// BlockSize returns the Blowfish block size, 8 bytes.
-// It is necessary to satisfy the Block interface in the
-// package "crypto/cipher".
-func (c *Cipher) BlockSize() int { return BlockSize }
-
-// Encrypt encrypts the 8-byte buffer src using the key k
-// and stores the result in dst.
-// Note that for amounts of data larger than a block,
-// it is not safe to just call Encrypt on successive blocks;
-// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
-func (c *Cipher) Encrypt(dst, src []byte) {
-	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
-	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-	l, r = encryptBlock(l, r, c)
-	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
-	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
-}
-
-// Decrypt decrypts the 8-byte buffer src using the key k
-// and stores the result in dst.
-func (c *Cipher) Decrypt(dst, src []byte) {
-	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
-	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-	l, r = decryptBlock(l, r, c)
-	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
-	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
-}
-
-func initCipher(c *Cipher) {
-	copy(c.p[0:], p[0:])
-	copy(c.s0[0:], s0[0:])
-	copy(c.s1[0:], s1[0:])
-	copy(c.s2[0:], s2[0:])
-	copy(c.s3[0:], s3[0:])
-}
diff --git a/application/source/vendor/golang.org/x/crypto/blowfish/const.go b/application/source/vendor/golang.org/x/crypto/blowfish/const.go
deleted file mode 100644
index d04077595abc44b8db297e4945c4fd546e3eb073..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/crypto/blowfish/const.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The startup permutation array and substitution boxes.
-// They are the hexadecimal digits of PI; see:
-// https://www.schneier.com/code/constants.txt.
-
-package blowfish
-
-var s0 = [256]uint32{
-	0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
-	0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
-	0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
-	0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
-	0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
-	0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
-	0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
-	0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
-	0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
-	0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
-	0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
-	0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
-	0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
-	0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
-	0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
-	0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
-	0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
-	0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
-	0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
-	0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
-	0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
-	0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
-	0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
-	0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
-	0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
-	0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
-	0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
-	0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
-	0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
-	0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
-	0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
-	0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
-	0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
-	0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
-	0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
-	0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
-	0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
-	0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
-	0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
-	0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
-	0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
-	0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
-	0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
-}
-
-var s1 = [256]uint32{
-	0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
-	0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
-	0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
-	0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
-	0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
-	0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
-	0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
-	0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
-	0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
-	0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
-	0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
-	0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
-	0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
-	0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
-	0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
-	0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
-	0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
-	0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
-	0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
-	0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
-	0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
-	0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
-	0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
-	0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
-	0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
-	0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
-	0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
-	0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
-	0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
-	0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
-	0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
-	0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
-	0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
-	0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
-	0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
-	0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
-	0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
-	0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
-	0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
-	0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
-	0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
-	0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
-	0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
-}
-
-var s2 = [256]uint32{
-	0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
-	0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
-	0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
-	0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
-	0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
-	0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
-	0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
-	0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
-	0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
-	0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
-	0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
-	0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
-	0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
-	0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
-	0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
-	0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
-	0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
-	0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
-	0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
-	0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
-	0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
-	0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
-	0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
-	0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
-	0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
-	0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
-	0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
-	0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
-	0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
-	0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
-	0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
-	0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
-	0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
-	0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
-	0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
-	0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
-	0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
-	0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
-	0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
-	0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
-	0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
-	0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
-	0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
-}
-
-var s3 = [256]uint32{
-	0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
-	0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
-	0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
-	0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
-	0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
-	0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
-	0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
-	0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
-	0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
-	0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
-	0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
-	0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
-	0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
-	0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
-	0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
-	0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
-	0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
-	0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
-	0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
-	0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
-	0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
-	0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
-	0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
-	0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
-	0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
-	0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
-	0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
-	0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
-	0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
-	0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
-	0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
-	0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
-	0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
-	0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
-	0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
-	0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
-	0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
-	0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
-	0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
-	0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
-	0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
-	0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
-	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
-}
-
-var p = [18]uint32{
-	0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
-	0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
-	0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
-}
diff --git a/application/source/vendor/golang.org/x/net/LICENSE b/application/source/vendor/golang.org/x/net/LICENSE
deleted file mode 100644
index 6a66aea5eafe0ca6a688840c47219556c552488e..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/application/source/vendor/golang.org/x/net/PATENTS b/application/source/vendor/golang.org/x/net/PATENTS
deleted file mode 100644
index 733099041f84fa1e58611ab2e11af51c1f26d1d2..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go.  This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation.  If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/application/source/vendor/golang.org/x/net/html/atom/atom.go b/application/source/vendor/golang.org/x/net/html/atom/atom.go
deleted file mode 100644
index cd0a8ac15451b5d585d965bc178d0b80effb682d..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/atom/atom.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package atom provides integer codes (also known as atoms) for a fixed set of
-// frequently occurring HTML strings: tag names and attribute keys such as "p"
-// and "id".
-//
-// Sharing an atom's name between all elements with the same tag can result in
-// fewer string allocations when tokenizing and parsing HTML. Integer
-// comparisons are also generally faster than string comparisons.
-//
-// The value of an atom's particular code is not guaranteed to stay the same
-// between versions of this package. Neither is any ordering guaranteed:
-// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
-// be dense. The only guarantees are that e.g. looking up "div" will yield
-// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
-package atom // import "golang.org/x/net/html/atom"
-
-// Atom is an integer code for a string. The zero value maps to "".
-type Atom uint32
-
-// String returns the atom's name.
-func (a Atom) String() string {
-	start := uint32(a >> 8)
-	n := uint32(a & 0xff)
-	if start+n > uint32(len(atomText)) {
-		return ""
-	}
-	return atomText[start : start+n]
-}
-
-func (a Atom) string() string {
-	return atomText[a>>8 : a>>8+a&0xff]
-}
-
-// fnv computes the FNV hash with an arbitrary starting value h.
-func fnv(h uint32, s []byte) uint32 {
-	for i := range s {
-		h ^= uint32(s[i])
-		h *= 16777619
-	}
-	return h
-}
-
-func match(s string, t []byte) bool {
-	for i, c := range t {
-		if s[i] != c {
-			return false
-		}
-	}
-	return true
-}
-
-// Lookup returns the atom whose name is s. It returns zero if there is no
-// such atom. The lookup is case sensitive.
-func Lookup(s []byte) Atom {
-	if len(s) == 0 || len(s) > maxAtomLen {
-		return 0
-	}
-	h := fnv(hash0, s)
-	if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
-		return a
-	}
-	if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
-		return a
-	}
-	return 0
-}
-
-// String returns a string whose contents are equal to s. In that sense, it is
-// equivalent to string(s) but may be more efficient.
-func String(s []byte) string {
-	if a := Lookup(s); a != 0 {
-		return a.String()
-	}
-	return string(s)
-}
diff --git a/application/source/vendor/golang.org/x/net/html/atom/table.go b/application/source/vendor/golang.org/x/net/html/atom/table.go
deleted file mode 100644
index 2a938864cb9d3e88b3259c4856412517a39dce5f..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/atom/table.go
+++ /dev/null
@@ -1,783 +0,0 @@
-// Code generated by go generate gen.go; DO NOT EDIT.
-
-//go:generate go run gen.go
-
-package atom
-
-const (
-	A                         Atom = 0x1
-	Abbr                      Atom = 0x4
-	Accept                    Atom = 0x1a06
-	AcceptCharset             Atom = 0x1a0e
-	Accesskey                 Atom = 0x2c09
-	Acronym                   Atom = 0xaa07
-	Action                    Atom = 0x27206
-	Address                   Atom = 0x6f307
-	Align                     Atom = 0xb105
-	Allowfullscreen           Atom = 0x2080f
-	Allowpaymentrequest       Atom = 0xc113
-	Allowusermedia            Atom = 0xdd0e
-	Alt                       Atom = 0xf303
-	Annotation                Atom = 0x1c90a
-	AnnotationXml             Atom = 0x1c90e
-	Applet                    Atom = 0x31906
-	Area                      Atom = 0x35604
-	Article                   Atom = 0x3fc07
-	As                        Atom = 0x3c02
-	Aside                     Atom = 0x10705
-	Async                     Atom = 0xff05
-	Audio                     Atom = 0x11505
-	Autocomplete              Atom = 0x2780c
-	Autofocus                 Atom = 0x12109
-	Autoplay                  Atom = 0x13c08
-	B                         Atom = 0x101
-	Base                      Atom = 0x3b04
-	Basefont                  Atom = 0x3b08
-	Bdi                       Atom = 0xba03
-	Bdo                       Atom = 0x14b03
-	Bgsound                   Atom = 0x15e07
-	Big                       Atom = 0x17003
-	Blink                     Atom = 0x17305
-	Blockquote                Atom = 0x1870a
-	Body                      Atom = 0x2804
-	Br                        Atom = 0x202
-	Button                    Atom = 0x19106
-	Canvas                    Atom = 0x10306
-	Caption                   Atom = 0x23107
-	Center                    Atom = 0x22006
-	Challenge                 Atom = 0x29b09
-	Charset                   Atom = 0x2107
-	Checked                   Atom = 0x47907
-	Cite                      Atom = 0x19c04
-	Class                     Atom = 0x56405
-	Code                      Atom = 0x5c504
-	Col                       Atom = 0x1ab03
-	Colgroup                  Atom = 0x1ab08
-	Color                     Atom = 0x1bf05
-	Cols                      Atom = 0x1c404
-	Colspan                   Atom = 0x1c407
-	Command                   Atom = 0x1d707
-	Content                   Atom = 0x58b07
-	Contenteditable           Atom = 0x58b0f
-	Contextmenu               Atom = 0x3800b
-	Controls                  Atom = 0x1de08
-	Coords                    Atom = 0x1ea06
-	Crossorigin               Atom = 0x1fb0b
-	Data                      Atom = 0x4a504
-	Datalist                  Atom = 0x4a508
-	Datetime                  Atom = 0x2b808
-	Dd                        Atom = 0x2d702
-	Default                   Atom = 0x10a07
-	Defer                     Atom = 0x5c705
-	Del                       Atom = 0x45203
-	Desc                      Atom = 0x56104
-	Details                   Atom = 0x7207
-	Dfn                       Atom = 0x8703
-	Dialog                    Atom = 0xbb06
-	Dir                       Atom = 0x9303
-	Dirname                   Atom = 0x9307
-	Disabled                  Atom = 0x16408
-	Div                       Atom = 0x16b03
-	Dl                        Atom = 0x5e602
-	Download                  Atom = 0x46308
-	Draggable                 Atom = 0x17a09
-	Dropzone                  Atom = 0x40508
-	Dt                        Atom = 0x64b02
-	Em                        Atom = 0x6e02
-	Embed                     Atom = 0x6e05
-	Enctype                   Atom = 0x28d07
-	Face                      Atom = 0x21e04
-	Fieldset                  Atom = 0x22608
-	Figcaption                Atom = 0x22e0a
-	Figure                    Atom = 0x24806
-	Font                      Atom = 0x3f04
-	Footer                    Atom = 0xf606
-	For                       Atom = 0x25403
-	ForeignObject             Atom = 0x2540d
-	Foreignobject             Atom = 0x2610d
-	Form                      Atom = 0x26e04
-	Formaction                Atom = 0x26e0a
-	Formenctype               Atom = 0x2890b
-	Formmethod                Atom = 0x2a40a
-	Formnovalidate            Atom = 0x2ae0e
-	Formtarget                Atom = 0x2c00a
-	Frame                     Atom = 0x8b05
-	Frameset                  Atom = 0x8b08
-	H1                        Atom = 0x15c02
-	H2                        Atom = 0x2de02
-	H3                        Atom = 0x30d02
-	H4                        Atom = 0x34502
-	H5                        Atom = 0x34f02
-	H6                        Atom = 0x64d02
-	Head                      Atom = 0x33104
-	Header                    Atom = 0x33106
-	Headers                   Atom = 0x33107
-	Height                    Atom = 0x5206
-	Hgroup                    Atom = 0x2ca06
-	Hidden                    Atom = 0x2d506
-	High                      Atom = 0x2db04
-	Hr                        Atom = 0x15702
-	Href                      Atom = 0x2e004
-	Hreflang                  Atom = 0x2e008
-	Html                      Atom = 0x5604
-	HttpEquiv                 Atom = 0x2e80a
-	I                         Atom = 0x601
-	Icon                      Atom = 0x58a04
-	Id                        Atom = 0x10902
-	Iframe                    Atom = 0x2fc06
-	Image                     Atom = 0x30205
-	Img                       Atom = 0x30703
-	Input                     Atom = 0x44b05
-	Inputmode                 Atom = 0x44b09
-	Ins                       Atom = 0x20403
-	Integrity                 Atom = 0x23f09
-	Is                        Atom = 0x16502
-	Isindex                   Atom = 0x30f07
-	Ismap                     Atom = 0x31605
-	Itemid                    Atom = 0x38b06
-	Itemprop                  Atom = 0x19d08
-	Itemref                   Atom = 0x3cd07
-	Itemscope                 Atom = 0x67109
-	Itemtype                  Atom = 0x31f08
-	Kbd                       Atom = 0xb903
-	Keygen                    Atom = 0x3206
-	Keytype                   Atom = 0xd607
-	Kind                      Atom = 0x17704
-	Label                     Atom = 0x5905
-	Lang                      Atom = 0x2e404
-	Legend                    Atom = 0x18106
-	Li                        Atom = 0xb202
-	Link                      Atom = 0x17404
-	List                      Atom = 0x4a904
-	Listing                   Atom = 0x4a907
-	Loop                      Atom = 0x5d04
-	Low                       Atom = 0xc303
-	Main                      Atom = 0x1004
-	Malignmark                Atom = 0xb00a
-	Manifest                  Atom = 0x6d708
-	Map                       Atom = 0x31803
-	Mark                      Atom = 0xb604
-	Marquee                   Atom = 0x32707
-	Math                      Atom = 0x32e04
-	Max                       Atom = 0x33d03
-	Maxlength                 Atom = 0x33d09
-	Media                     Atom = 0xe605
-	Mediagroup                Atom = 0xe60a
-	Menu                      Atom = 0x38704
-	Menuitem                  Atom = 0x38708
-	Meta                      Atom = 0x4b804
-	Meter                     Atom = 0x9805
-	Method                    Atom = 0x2a806
-	Mglyph                    Atom = 0x30806
-	Mi                        Atom = 0x34702
-	Min                       Atom = 0x34703
-	Minlength                 Atom = 0x34709
-	Mn                        Atom = 0x2b102
-	Mo                        Atom = 0xa402
-	Ms                        Atom = 0x67402
-	Mtext                     Atom = 0x35105
-	Multiple                  Atom = 0x35f08
-	Muted                     Atom = 0x36705
-	Name                      Atom = 0x9604
-	Nav                       Atom = 0x1303
-	Nobr                      Atom = 0x3704
-	Noembed                   Atom = 0x6c07
-	Noframes                  Atom = 0x8908
-	Nomodule                  Atom = 0xa208
-	Nonce                     Atom = 0x1a605
-	Noscript                  Atom = 0x21608
-	Novalidate                Atom = 0x2b20a
-	Object                    Atom = 0x26806
-	Ol                        Atom = 0x13702
-	Onabort                   Atom = 0x19507
-	Onafterprint              Atom = 0x2360c
-	Onautocomplete            Atom = 0x2760e
-	Onautocompleteerror       Atom = 0x27613
-	Onauxclick                Atom = 0x61f0a
-	Onbeforeprint             Atom = 0x69e0d
-	Onbeforeunload            Atom = 0x6e70e
-	Onblur                    Atom = 0x56d06
-	Oncancel                  Atom = 0x11908
-	Oncanplay                 Atom = 0x14d09
-	Oncanplaythrough          Atom = 0x14d10
-	Onchange                  Atom = 0x41b08
-	Onclick                   Atom = 0x2f507
-	Onclose                   Atom = 0x36c07
-	Oncontextmenu             Atom = 0x37e0d
-	Oncopy                    Atom = 0x39106
-	Oncuechange               Atom = 0x3970b
-	Oncut                     Atom = 0x3a205
-	Ondblclick                Atom = 0x3a70a
-	Ondrag                    Atom = 0x3b106
-	Ondragend                 Atom = 0x3b109
-	Ondragenter               Atom = 0x3ba0b
-	Ondragexit                Atom = 0x3c50a
-	Ondragleave               Atom = 0x3df0b
-	Ondragover                Atom = 0x3ea0a
-	Ondragstart               Atom = 0x3f40b
-	Ondrop                    Atom = 0x40306
-	Ondurationchange          Atom = 0x41310
-	Onemptied                 Atom = 0x40a09
-	Onended                   Atom = 0x42307
-	Onerror                   Atom = 0x42a07
-	Onfocus                   Atom = 0x43107
-	Onhashchange              Atom = 0x43d0c
-	Oninput                   Atom = 0x44907
-	Oninvalid                 Atom = 0x45509
-	Onkeydown                 Atom = 0x45e09
-	Onkeypress                Atom = 0x46b0a
-	Onkeyup                   Atom = 0x48007
-	Onlanguagechange          Atom = 0x48d10
-	Onload                    Atom = 0x49d06
-	Onloadeddata              Atom = 0x49d0c
-	Onloadedmetadata          Atom = 0x4b010
-	Onloadend                 Atom = 0x4c609
-	Onloadstart               Atom = 0x4cf0b
-	Onmessage                 Atom = 0x4da09
-	Onmessageerror            Atom = 0x4da0e
-	Onmousedown               Atom = 0x4e80b
-	Onmouseenter              Atom = 0x4f30c
-	Onmouseleave              Atom = 0x4ff0c
-	Onmousemove               Atom = 0x50b0b
-	Onmouseout                Atom = 0x5160a
-	Onmouseover               Atom = 0x5230b
-	Onmouseup                 Atom = 0x52e09
-	Onmousewheel              Atom = 0x53c0c
-	Onoffline                 Atom = 0x54809
-	Ononline                  Atom = 0x55108
-	Onpagehide                Atom = 0x5590a
-	Onpageshow                Atom = 0x5730a
-	Onpaste                   Atom = 0x57f07
-	Onpause                   Atom = 0x59a07
-	Onplay                    Atom = 0x5a406
-	Onplaying                 Atom = 0x5a409
-	Onpopstate                Atom = 0x5ad0a
-	Onprogress                Atom = 0x5b70a
-	Onratechange              Atom = 0x5cc0c
-	Onrejectionhandled        Atom = 0x5d812
-	Onreset                   Atom = 0x5ea07
-	Onresize                  Atom = 0x5f108
-	Onscroll                  Atom = 0x60008
-	Onsecuritypolicyviolation Atom = 0x60819
-	Onseeked                  Atom = 0x62908
-	Onseeking                 Atom = 0x63109
-	Onselect                  Atom = 0x63a08
-	Onshow                    Atom = 0x64406
-	Onsort                    Atom = 0x64f06
-	Onstalled                 Atom = 0x65909
-	Onstorage                 Atom = 0x66209
-	Onsubmit                  Atom = 0x66b08
-	Onsuspend                 Atom = 0x67b09
-	Ontimeupdate              Atom = 0x400c
-	Ontoggle                  Atom = 0x68408
-	Onunhandledrejection      Atom = 0x68c14
-	Onunload                  Atom = 0x6ab08
-	Onvolumechange            Atom = 0x6b30e
-	Onwaiting                 Atom = 0x6c109
-	Onwheel                   Atom = 0x6ca07
-	Open                      Atom = 0x1a304
-	Optgroup                  Atom = 0x5f08
-	Optimum                   Atom = 0x6d107
-	Option                    Atom = 0x6e306
-	Output                    Atom = 0x51d06
-	P                         Atom = 0xc01
-	Param                     Atom = 0xc05
-	Pattern                   Atom = 0x6607
-	Picture                   Atom = 0x7b07
-	Ping                      Atom = 0xef04
-	Placeholder               Atom = 0x1310b
-	Plaintext                 Atom = 0x1b209
-	Playsinline               Atom = 0x1400b
-	Poster                    Atom = 0x2cf06
-	Pre                       Atom = 0x47003
-	Preload                   Atom = 0x48607
-	Progress                  Atom = 0x5b908
-	Prompt                    Atom = 0x53606
-	Public                    Atom = 0x58606
-	Q                         Atom = 0xcf01
-	Radiogroup                Atom = 0x30a
-	Rb                        Atom = 0x3a02
-	Readonly                  Atom = 0x35708
-	Referrerpolicy            Atom = 0x3d10e
-	Rel                       Atom = 0x48703
-	Required                  Atom = 0x24c08
-	Reversed                  Atom = 0x8008
-	Rows                      Atom = 0x9c04
-	Rowspan                   Atom = 0x9c07
-	Rp                        Atom = 0x23c02
-	Rt                        Atom = 0x19a02
-	Rtc                       Atom = 0x19a03
-	Ruby                      Atom = 0xfb04
-	S                         Atom = 0x2501
-	Samp                      Atom = 0x7804
-	Sandbox                   Atom = 0x12907
-	Scope                     Atom = 0x67505
-	Scoped                    Atom = 0x67506
-	Script                    Atom = 0x21806
-	Seamless                  Atom = 0x37108
-	Section                   Atom = 0x56807
-	Select                    Atom = 0x63c06
-	Selected                  Atom = 0x63c08
-	Shape                     Atom = 0x1e505
-	Size                      Atom = 0x5f504
-	Sizes                     Atom = 0x5f505
-	Slot                      Atom = 0x1ef04
-	Small                     Atom = 0x20605
-	Sortable                  Atom = 0x65108
-	Sorted                    Atom = 0x33706
-	Source                    Atom = 0x37806
-	Spacer                    Atom = 0x43706
-	Span                      Atom = 0x9f04
-	Spellcheck                Atom = 0x4740a
-	Src                       Atom = 0x5c003
-	Srcdoc                    Atom = 0x5c006
-	Srclang                   Atom = 0x5f907
-	Srcset                    Atom = 0x6f906
-	Start                     Atom = 0x3fa05
-	Step                      Atom = 0x58304
-	Strike                    Atom = 0xd206
-	Strong                    Atom = 0x6dd06
-	Style                     Atom = 0x6ff05
-	Sub                       Atom = 0x66d03
-	Summary                   Atom = 0x70407
-	Sup                       Atom = 0x70b03
-	Svg                       Atom = 0x70e03
-	System                    Atom = 0x71106
-	Tabindex                  Atom = 0x4be08
-	Table                     Atom = 0x59505
-	Target                    Atom = 0x2c406
-	Tbody                     Atom = 0x2705
-	Td                        Atom = 0x9202
-	Template                  Atom = 0x71408
-	Textarea                  Atom = 0x35208
-	Tfoot                     Atom = 0xf505
-	Th                        Atom = 0x15602
-	Thead                     Atom = 0x33005
-	Time                      Atom = 0x4204
-	Title                     Atom = 0x11005
-	Tr                        Atom = 0xcc02
-	Track                     Atom = 0x1ba05
-	Translate                 Atom = 0x1f209
-	Tt                        Atom = 0x6802
-	Type                      Atom = 0xd904
-	Typemustmatch             Atom = 0x2900d
-	U                         Atom = 0xb01
-	Ul                        Atom = 0xa702
-	Updateviacache            Atom = 0x460e
-	Usemap                    Atom = 0x59e06
-	Value                     Atom = 0x1505
-	Var                       Atom = 0x16d03
-	Video                     Atom = 0x2f105
-	Wbr                       Atom = 0x57c03
-	Width                     Atom = 0x64905
-	Workertype                Atom = 0x71c0a
-	Wrap                      Atom = 0x72604
-	Xmp                       Atom = 0x12f03
-)
-
-const hash0 = 0x81cdf10e
-
-const maxAtomLen = 25
-
-var table = [1 << 9]Atom{
-	0x1:   0xe60a,  // mediagroup
-	0x2:   0x2e404, // lang
-	0x4:   0x2c09,  // accesskey
-	0x5:   0x8b08,  // frameset
-	0x7:   0x63a08, // onselect
-	0x8:   0x71106, // system
-	0xa:   0x64905, // width
-	0xc:   0x2890b, // formenctype
-	0xd:   0x13702, // ol
-	0xe:   0x3970b, // oncuechange
-	0x10:  0x14b03, // bdo
-	0x11:  0x11505, // audio
-	0x12:  0x17a09, // draggable
-	0x14:  0x2f105, // video
-	0x15:  0x2b102, // mn
-	0x16:  0x38704, // menu
-	0x17:  0x2cf06, // poster
-	0x19:  0xf606,  // footer
-	0x1a:  0x2a806, // method
-	0x1b:  0x2b808, // datetime
-	0x1c:  0x19507, // onabort
-	0x1d:  0x460e,  // updateviacache
-	0x1e:  0xff05,  // async
-	0x1f:  0x49d06, // onload
-	0x21:  0x11908, // oncancel
-	0x22:  0x62908, // onseeked
-	0x23:  0x30205, // image
-	0x24:  0x5d812, // onrejectionhandled
-	0x26:  0x17404, // link
-	0x27:  0x51d06, // output
-	0x28:  0x33104, // head
-	0x29:  0x4ff0c, // onmouseleave
-	0x2a:  0x57f07, // onpaste
-	0x2b:  0x5a409, // onplaying
-	0x2c:  0x1c407, // colspan
-	0x2f:  0x1bf05, // color
-	0x30:  0x5f504, // size
-	0x31:  0x2e80a, // http-equiv
-	0x33:  0x601,   // i
-	0x34:  0x5590a, // onpagehide
-	0x35:  0x68c14, // onunhandledrejection
-	0x37:  0x42a07, // onerror
-	0x3a:  0x3b08,  // basefont
-	0x3f:  0x1303,  // nav
-	0x40:  0x17704, // kind
-	0x41:  0x35708, // readonly
-	0x42:  0x30806, // mglyph
-	0x44:  0xb202,  // li
-	0x46:  0x2d506, // hidden
-	0x47:  0x70e03, // svg
-	0x48:  0x58304, // step
-	0x49:  0x23f09, // integrity
-	0x4a:  0x58606, // public
-	0x4c:  0x1ab03, // col
-	0x4d:  0x1870a, // blockquote
-	0x4e:  0x34f02, // h5
-	0x50:  0x5b908, // progress
-	0x51:  0x5f505, // sizes
-	0x52:  0x34502, // h4
-	0x56:  0x33005, // thead
-	0x57:  0xd607,  // keytype
-	0x58:  0x5b70a, // onprogress
-	0x59:  0x44b09, // inputmode
-	0x5a:  0x3b109, // ondragend
-	0x5d:  0x3a205, // oncut
-	0x5e:  0x43706, // spacer
-	0x5f:  0x1ab08, // colgroup
-	0x62:  0x16502, // is
-	0x65:  0x3c02,  // as
-	0x66:  0x54809, // onoffline
-	0x67:  0x33706, // sorted
-	0x69:  0x48d10, // onlanguagechange
-	0x6c:  0x43d0c, // onhashchange
-	0x6d:  0x9604,  // name
-	0x6e:  0xf505,  // tfoot
-	0x6f:  0x56104, // desc
-	0x70:  0x33d03, // max
-	0x72:  0x1ea06, // coords
-	0x73:  0x30d02, // h3
-	0x74:  0x6e70e, // onbeforeunload
-	0x75:  0x9c04,  // rows
-	0x76:  0x63c06, // select
-	0x77:  0x9805,  // meter
-	0x78:  0x38b06, // itemid
-	0x79:  0x53c0c, // onmousewheel
-	0x7a:  0x5c006, // srcdoc
-	0x7d:  0x1ba05, // track
-	0x7f:  0x31f08, // itemtype
-	0x82:  0xa402,  // mo
-	0x83:  0x41b08, // onchange
-	0x84:  0x33107, // headers
-	0x85:  0x5cc0c, // onratechange
-	0x86:  0x60819, // onsecuritypolicyviolation
-	0x88:  0x4a508, // datalist
-	0x89:  0x4e80b, // onmousedown
-	0x8a:  0x1ef04, // slot
-	0x8b:  0x4b010, // onloadedmetadata
-	0x8c:  0x1a06,  // accept
-	0x8d:  0x26806, // object
-	0x91:  0x6b30e, // onvolumechange
-	0x92:  0x2107,  // charset
-	0x93:  0x27613, // onautocompleteerror
-	0x94:  0xc113,  // allowpaymentrequest
-	0x95:  0x2804,  // body
-	0x96:  0x10a07, // default
-	0x97:  0x63c08, // selected
-	0x98:  0x21e04, // face
-	0x99:  0x1e505, // shape
-	0x9b:  0x68408, // ontoggle
-	0x9e:  0x64b02, // dt
-	0x9f:  0xb604,  // mark
-	0xa1:  0xb01,   // u
-	0xa4:  0x6ab08, // onunload
-	0xa5:  0x5d04,  // loop
-	0xa6:  0x16408, // disabled
-	0xaa:  0x42307, // onended
-	0xab:  0xb00a,  // malignmark
-	0xad:  0x67b09, // onsuspend
-	0xae:  0x35105, // mtext
-	0xaf:  0x64f06, // onsort
-	0xb0:  0x19d08, // itemprop
-	0xb3:  0x67109, // itemscope
-	0xb4:  0x17305, // blink
-	0xb6:  0x3b106, // ondrag
-	0xb7:  0xa702,  // ul
-	0xb8:  0x26e04, // form
-	0xb9:  0x12907, // sandbox
-	0xba:  0x8b05,  // frame
-	0xbb:  0x1505,  // value
-	0xbc:  0x66209, // onstorage
-	0xbf:  0xaa07,  // acronym
-	0xc0:  0x19a02, // rt
-	0xc2:  0x202,   // br
-	0xc3:  0x22608, // fieldset
-	0xc4:  0x2900d, // typemustmatch
-	0xc5:  0xa208,  // nomodule
-	0xc6:  0x6c07,  // noembed
-	0xc7:  0x69e0d, // onbeforeprint
-	0xc8:  0x19106, // button
-	0xc9:  0x2f507, // onclick
-	0xca:  0x70407, // summary
-	0xcd:  0xfb04,  // ruby
-	0xce:  0x56405, // class
-	0xcf:  0x3f40b, // ondragstart
-	0xd0:  0x23107, // caption
-	0xd4:  0xdd0e,  // allowusermedia
-	0xd5:  0x4cf0b, // onloadstart
-	0xd9:  0x16b03, // div
-	0xda:  0x4a904, // list
-	0xdb:  0x32e04, // math
-	0xdc:  0x44b05, // input
-	0xdf:  0x3ea0a, // ondragover
-	0xe0:  0x2de02, // h2
-	0xe2:  0x1b209, // plaintext
-	0xe4:  0x4f30c, // onmouseenter
-	0xe7:  0x47907, // checked
-	0xe8:  0x47003, // pre
-	0xea:  0x35f08, // multiple
-	0xeb:  0xba03,  // bdi
-	0xec:  0x33d09, // maxlength
-	0xed:  0xcf01,  // q
-	0xee:  0x61f0a, // onauxclick
-	0xf0:  0x57c03, // wbr
-	0xf2:  0x3b04,  // base
-	0xf3:  0x6e306, // option
-	0xf5:  0x41310, // ondurationchange
-	0xf7:  0x8908,  // noframes
-	0xf9:  0x40508, // dropzone
-	0xfb:  0x67505, // scope
-	0xfc:  0x8008,  // reversed
-	0xfd:  0x3ba0b, // ondragenter
-	0xfe:  0x3fa05, // start
-	0xff:  0x12f03, // xmp
-	0x100: 0x5f907, // srclang
-	0x101: 0x30703, // img
-	0x104: 0x101,   // b
-	0x105: 0x25403, // for
-	0x106: 0x10705, // aside
-	0x107: 0x44907, // oninput
-	0x108: 0x35604, // area
-	0x109: 0x2a40a, // formmethod
-	0x10a: 0x72604, // wrap
-	0x10c: 0x23c02, // rp
-	0x10d: 0x46b0a, // onkeypress
-	0x10e: 0x6802,  // tt
-	0x110: 0x34702, // mi
-	0x111: 0x36705, // muted
-	0x112: 0xf303,  // alt
-	0x113: 0x5c504, // code
-	0x114: 0x6e02,  // em
-	0x115: 0x3c50a, // ondragexit
-	0x117: 0x9f04,  // span
-	0x119: 0x6d708, // manifest
-	0x11a: 0x38708, // menuitem
-	0x11b: 0x58b07, // content
-	0x11d: 0x6c109, // onwaiting
-	0x11f: 0x4c609, // onloadend
-	0x121: 0x37e0d, // oncontextmenu
-	0x123: 0x56d06, // onblur
-	0x124: 0x3fc07, // article
-	0x125: 0x9303,  // dir
-	0x126: 0xef04,  // ping
-	0x127: 0x24c08, // required
-	0x128: 0x45509, // oninvalid
-	0x129: 0xb105,  // align
-	0x12b: 0x58a04, // icon
-	0x12c: 0x64d02, // h6
-	0x12d: 0x1c404, // cols
-	0x12e: 0x22e0a, // figcaption
-	0x12f: 0x45e09, // onkeydown
-	0x130: 0x66b08, // onsubmit
-	0x131: 0x14d09, // oncanplay
-	0x132: 0x70b03, // sup
-	0x133: 0xc01,   // p
-	0x135: 0x40a09, // onemptied
-	0x136: 0x39106, // oncopy
-	0x137: 0x19c04, // cite
-	0x138: 0x3a70a, // ondblclick
-	0x13a: 0x50b0b, // onmousemove
-	0x13c: 0x66d03, // sub
-	0x13d: 0x48703, // rel
-	0x13e: 0x5f08,  // optgroup
-	0x142: 0x9c07,  // rowspan
-	0x143: 0x37806, // source
-	0x144: 0x21608, // noscript
-	0x145: 0x1a304, // open
-	0x146: 0x20403, // ins
-	0x147: 0x2540d, // foreignObject
-	0x148: 0x5ad0a, // onpopstate
-	0x14a: 0x28d07, // enctype
-	0x14b: 0x2760e, // onautocomplete
-	0x14c: 0x35208, // textarea
-	0x14e: 0x2780c, // autocomplete
-	0x14f: 0x15702, // hr
-	0x150: 0x1de08, // controls
-	0x151: 0x10902, // id
-	0x153: 0x2360c, // onafterprint
-	0x155: 0x2610d, // foreignobject
-	0x156: 0x32707, // marquee
-	0x157: 0x59a07, // onpause
-	0x158: 0x5e602, // dl
-	0x159: 0x5206,  // height
-	0x15a: 0x34703, // min
-	0x15b: 0x9307,  // dirname
-	0x15c: 0x1f209, // translate
-	0x15d: 0x5604,  // html
-	0x15e: 0x34709, // minlength
-	0x15f: 0x48607, // preload
-	0x160: 0x71408, // template
-	0x161: 0x3df0b, // ondragleave
-	0x162: 0x3a02,  // rb
-	0x164: 0x5c003, // src
-	0x165: 0x6dd06, // strong
-	0x167: 0x7804,  // samp
-	0x168: 0x6f307, // address
-	0x169: 0x55108, // ononline
-	0x16b: 0x1310b, // placeholder
-	0x16c: 0x2c406, // target
-	0x16d: 0x20605, // small
-	0x16e: 0x6ca07, // onwheel
-	0x16f: 0x1c90a, // annotation
-	0x170: 0x4740a, // spellcheck
-	0x171: 0x7207,  // details
-	0x172: 0x10306, // canvas
-	0x173: 0x12109, // autofocus
-	0x174: 0xc05,   // param
-	0x176: 0x46308, // download
-	0x177: 0x45203, // del
-	0x178: 0x36c07, // onclose
-	0x179: 0xb903,  // kbd
-	0x17a: 0x31906, // applet
-	0x17b: 0x2e004, // href
-	0x17c: 0x5f108, // onresize
-	0x17e: 0x49d0c, // onloadeddata
-	0x180: 0xcc02,  // tr
-	0x181: 0x2c00a, // formtarget
-	0x182: 0x11005, // title
-	0x183: 0x6ff05, // style
-	0x184: 0xd206,  // strike
-	0x185: 0x59e06, // usemap
-	0x186: 0x2fc06, // iframe
-	0x187: 0x1004,  // main
-	0x189: 0x7b07,  // picture
-	0x18c: 0x31605, // ismap
-	0x18e: 0x4a504, // data
-	0x18f: 0x5905,  // label
-	0x191: 0x3d10e, // referrerpolicy
-	0x192: 0x15602, // th
-	0x194: 0x53606, // prompt
-	0x195: 0x56807, // section
-	0x197: 0x6d107, // optimum
-	0x198: 0x2db04, // high
-	0x199: 0x15c02, // h1
-	0x19a: 0x65909, // onstalled
-	0x19b: 0x16d03, // var
-	0x19c: 0x4204,  // time
-	0x19e: 0x67402, // ms
-	0x19f: 0x33106, // header
-	0x1a0: 0x4da09, // onmessage
-	0x1a1: 0x1a605, // nonce
-	0x1a2: 0x26e0a, // formaction
-	0x1a3: 0x22006, // center
-	0x1a4: 0x3704,  // nobr
-	0x1a5: 0x59505, // table
-	0x1a6: 0x4a907, // listing
-	0x1a7: 0x18106, // legend
-	0x1a9: 0x29b09, // challenge
-	0x1aa: 0x24806, // figure
-	0x1ab: 0xe605,  // media
-	0x1ae: 0xd904,  // type
-	0x1af: 0x3f04,  // font
-	0x1b0: 0x4da0e, // onmessageerror
-	0x1b1: 0x37108, // seamless
-	0x1b2: 0x8703,  // dfn
-	0x1b3: 0x5c705, // defer
-	0x1b4: 0xc303,  // low
-	0x1b5: 0x19a03, // rtc
-	0x1b6: 0x5230b, // onmouseover
-	0x1b7: 0x2b20a, // novalidate
-	0x1b8: 0x71c0a, // workertype
-	0x1ba: 0x3cd07, // itemref
-	0x1bd: 0x1,     // a
-	0x1be: 0x31803, // map
-	0x1bf: 0x400c,  // ontimeupdate
-	0x1c0: 0x15e07, // bgsound
-	0x1c1: 0x3206,  // keygen
-	0x1c2: 0x2705,  // tbody
-	0x1c5: 0x64406, // onshow
-	0x1c7: 0x2501,  // s
-	0x1c8: 0x6607,  // pattern
-	0x1cc: 0x14d10, // oncanplaythrough
-	0x1ce: 0x2d702, // dd
-	0x1cf: 0x6f906, // srcset
-	0x1d0: 0x17003, // big
-	0x1d2: 0x65108, // sortable
-	0x1d3: 0x48007, // onkeyup
-	0x1d5: 0x5a406, // onplay
-	0x1d7: 0x4b804, // meta
-	0x1d8: 0x40306, // ondrop
-	0x1da: 0x60008, // onscroll
-	0x1db: 0x1fb0b, // crossorigin
-	0x1dc: 0x5730a, // onpageshow
-	0x1dd: 0x4,     // abbr
-	0x1de: 0x9202,  // td
-	0x1df: 0x58b0f, // contenteditable
-	0x1e0: 0x27206, // action
-	0x1e1: 0x1400b, // playsinline
-	0x1e2: 0x43107, // onfocus
-	0x1e3: 0x2e008, // hreflang
-	0x1e5: 0x5160a, // onmouseout
-	0x1e6: 0x5ea07, // onreset
-	0x1e7: 0x13c08, // autoplay
-	0x1e8: 0x63109, // onseeking
-	0x1ea: 0x67506, // scoped
-	0x1ec: 0x30a,   // radiogroup
-	0x1ee: 0x3800b, // contextmenu
-	0x1ef: 0x52e09, // onmouseup
-	0x1f1: 0x2ca06, // hgroup
-	0x1f2: 0x2080f, // allowfullscreen
-	0x1f3: 0x4be08, // tabindex
-	0x1f6: 0x30f07, // isindex
-	0x1f7: 0x1a0e,  // accept-charset
-	0x1f8: 0x2ae0e, // formnovalidate
-	0x1fb: 0x1c90e, // annotation-xml
-	0x1fc: 0x6e05,  // embed
-	0x1fd: 0x21806, // script
-	0x1fe: 0xbb06,  // dialog
-	0x1ff: 0x1d707, // command
-}
-
-const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
-	"asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
-	"sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
-	"gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
-	"ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
-	"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
-	"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
-	"penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
-	"ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
-	"ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
-	"ignObjectforeignobjectformactionautocompleteerrorformenctype" +
-	"mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
-	"osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
-	"ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
-	"inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
-	"extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
-	"enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
-	"articleondropzonemptiedondurationchangeonendedonerroronfocus" +
-	"paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
-	"spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
-	"onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
-	"usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
-	"seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
-	"classectionbluronpageshowbronpastepublicontenteditableonpaus" +
-	"emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
-	"jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
-	"violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
-	"tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
-	"handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
-	"wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
-	"arysupsvgsystemplateworkertypewrap"
diff --git a/application/source/vendor/golang.org/x/net/html/const.go b/application/source/vendor/golang.org/x/net/html/const.go
deleted file mode 100644
index ff7acf2d5b4b2d720b92b5dfc51b05894a36af31..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/const.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-// Section 12.2.4.2 of the HTML5 specification says "The following elements
-// have varying levels of special parsing rules".
-// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
-var isSpecialElementMap = map[string]bool{
-	"address":    true,
-	"applet":     true,
-	"area":       true,
-	"article":    true,
-	"aside":      true,
-	"base":       true,
-	"basefont":   true,
-	"bgsound":    true,
-	"blockquote": true,
-	"body":       true,
-	"br":         true,
-	"button":     true,
-	"caption":    true,
-	"center":     true,
-	"col":        true,
-	"colgroup":   true,
-	"dd":         true,
-	"details":    true,
-	"dir":        true,
-	"div":        true,
-	"dl":         true,
-	"dt":         true,
-	"embed":      true,
-	"fieldset":   true,
-	"figcaption": true,
-	"figure":     true,
-	"footer":     true,
-	"form":       true,
-	"frame":      true,
-	"frameset":   true,
-	"h1":         true,
-	"h2":         true,
-	"h3":         true,
-	"h4":         true,
-	"h5":         true,
-	"h6":         true,
-	"head":       true,
-	"header":     true,
-	"hgroup":     true,
-	"hr":         true,
-	"html":       true,
-	"iframe":     true,
-	"img":        true,
-	"input":      true,
-	"keygen":     true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
-	"li":         true,
-	"link":       true,
-	"listing":    true,
-	"main":       true,
-	"marquee":    true,
-	"menu":       true,
-	"meta":       true,
-	"nav":        true,
-	"noembed":    true,
-	"noframes":   true,
-	"noscript":   true,
-	"object":     true,
-	"ol":         true,
-	"p":          true,
-	"param":      true,
-	"plaintext":  true,
-	"pre":        true,
-	"script":     true,
-	"section":    true,
-	"select":     true,
-	"source":     true,
-	"style":      true,
-	"summary":    true,
-	"table":      true,
-	"tbody":      true,
-	"td":         true,
-	"template":   true,
-	"textarea":   true,
-	"tfoot":      true,
-	"th":         true,
-	"thead":      true,
-	"title":      true,
-	"tr":         true,
-	"track":      true,
-	"ul":         true,
-	"wbr":        true,
-	"xmp":        true,
-}
-
-func isSpecialElement(element *Node) bool {
-	switch element.Namespace {
-	case "", "html":
-		return isSpecialElementMap[element.Data]
-	case "math":
-		switch element.Data {
-		case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
-			return true
-		}
-	case "svg":
-		switch element.Data {
-		case "foreignObject", "desc", "title":
-			return true
-		}
-	}
-	return false
-}
diff --git a/application/source/vendor/golang.org/x/net/html/doc.go b/application/source/vendor/golang.org/x/net/html/doc.go
deleted file mode 100644
index 2466ae3d9a5d87af422d0e672a46eb8e5f389e3c..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/doc.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package html implements an HTML5-compliant tokenizer and parser.
-
-Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
-caller's responsibility to ensure that r provides UTF-8 encoded HTML.
-
-	z := html.NewTokenizer(r)
-
-Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
-which parses the next token and returns its type, or an error:
-
-	for {
-		tt := z.Next()
-		if tt == html.ErrorToken {
-			// ...
-			return ...
-		}
-		// Process the current token.
-	}
-
-There are two APIs for retrieving the current token. The high-level API is to
-call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
-allow optionally calling Raw after Next but before Token, Text, TagName, or
-TagAttr. In EBNF notation, the valid call sequence per token is:
-
-	Next {Raw} [ Token | Text | TagName {TagAttr} ]
-
-Token returns an independent data structure that completely describes a token.
-Entities (such as "&lt;") are unescaped, tag names and attribute keys are
-lower-cased, and attributes are collected into a []Attribute. For example:
-
-	for {
-		if z.Next() == html.ErrorToken {
-			// Returning io.EOF indicates success.
-			return z.Err()
-		}
-		emitToken(z.Token())
-	}
-
-The low-level API performs fewer allocations and copies, but the contents of
-the []byte values returned by Text, TagName and TagAttr may change on the next
-call to Next. For example, to extract an HTML page's anchor text:
-
-	depth := 0
-	for {
-		tt := z.Next()
-		switch tt {
-		case html.ErrorToken:
-			return z.Err()
-		case html.TextToken:
-			if depth > 0 {
-				// emitBytes should copy the []byte it receives,
-				// if it doesn't process it immediately.
-				emitBytes(z.Text())
-			}
-		case html.StartTagToken, html.EndTagToken:
-			tn, _ := z.TagName()
-			if len(tn) == 1 && tn[0] == 'a' {
-				if tt == html.StartTagToken {
-					depth++
-				} else {
-					depth--
-				}
-			}
-		}
-	}
-
-Parsing is done by calling Parse with an io.Reader, which returns the root of
-the parse tree (the document element) as a *Node. It is the caller's
-responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
-example, to process each anchor node in depth-first order:
-
-	doc, err := html.Parse(r)
-	if err != nil {
-		// ...
-	}
-	var f func(*html.Node)
-	f = func(n *html.Node) {
-		if n.Type == html.ElementNode && n.Data == "a" {
-			// Do something with n...
-		}
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			f(c)
-		}
-	}
-	f(doc)
-
-The relevant specifications include:
-https://html.spec.whatwg.org/multipage/syntax.html and
-https://html.spec.whatwg.org/multipage/syntax.html#tokenization
-
-# Security Considerations
-
-Care should be taken when parsing and interpreting HTML, whether full documents
-or fragments, within the framework of the HTML specification, especially with
-regard to untrusted inputs.
-
-This package provides both a tokenizer and a parser, which implement the
-tokenization, and tokenization and tree construction stages of the WHATWG HTML
-parsing specification respectively. While the tokenizer parses and normalizes
-individual HTML tokens, only the parser constructs the DOM tree from the
-tokenized HTML, as described in the tree construction stage of the
-specification, dynamically modifying or extending the document's DOM tree.
-
-If your use case requires semantically well-formed HTML documents, as defined by
-the WHATWG specification, the parser should be used rather than the tokenizer.
-
-In security contexts, if trust decisions are being made using the tokenized or
-parsed content, the input must be re-serialized (for instance by using Render or
-Token.String) in order for those trust decisions to hold, as the process of
-tokenization or parsing may alter the content.
-*/
-package html // import "golang.org/x/net/html"
-
-// The tokenization algorithm implemented by this package is not a line-by-line
-// transliteration of the relatively verbose state-machine in the WHATWG
-// specification. A more direct approach is used instead, where the program
-// counter implies the state, such as whether it is tokenizing a tag or a text
-// node. Specification compliance is verified by checking expected and actual
-// outputs over a test suite rather than aiming for algorithmic fidelity.
-
-// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
-// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/application/source/vendor/golang.org/x/net/html/doctype.go b/application/source/vendor/golang.org/x/net/html/doctype.go
deleted file mode 100644
index c484e5a94fbf0a38b9c1789356f9f152ccaec4d2..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/doctype.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"strings"
-)
-
-// parseDoctype parses the data from a DoctypeToken into a name,
-// public identifier, and system identifier. It returns a Node whose Type
-// is DoctypeNode, whose Data is the name, and which has attributes
-// named "system" and "public" for the two identifiers if they were present.
-// quirks is whether the document should be parsed in "quirks mode".
-func parseDoctype(s string) (n *Node, quirks bool) {
-	n = &Node{Type: DoctypeNode}
-
-	// Find the name.
-	space := strings.IndexAny(s, whitespace)
-	if space == -1 {
-		space = len(s)
-	}
-	n.Data = s[:space]
-	// The comparison to "html" is case-sensitive.
-	if n.Data != "html" {
-		quirks = true
-	}
-	n.Data = strings.ToLower(n.Data)
-	s = strings.TrimLeft(s[space:], whitespace)
-
-	if len(s) < 6 {
-		// It can't start with "PUBLIC" or "SYSTEM".
-		// Ignore the rest of the string.
-		return n, quirks || s != ""
-	}
-
-	key := strings.ToLower(s[:6])
-	s = s[6:]
-	for key == "public" || key == "system" {
-		s = strings.TrimLeft(s, whitespace)
-		if s == "" {
-			break
-		}
-		quote := s[0]
-		if quote != '"' && quote != '\'' {
-			break
-		}
-		s = s[1:]
-		q := strings.IndexRune(s, rune(quote))
-		var id string
-		if q == -1 {
-			id = s
-			s = ""
-		} else {
-			id = s[:q]
-			s = s[q+1:]
-		}
-		n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
-		if key == "public" {
-			key = "system"
-		} else {
-			key = ""
-		}
-	}
-
-	if key != "" || s != "" {
-		quirks = true
-	} else if len(n.Attr) > 0 {
-		if n.Attr[0].Key == "public" {
-			public := strings.ToLower(n.Attr[0].Val)
-			switch public {
-			case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
-				quirks = true
-			default:
-				for _, q := range quirkyIDs {
-					if strings.HasPrefix(public, q) {
-						quirks = true
-						break
-					}
-				}
-			}
-			// The following two public IDs only cause quirks mode if there is no system ID.
-			if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
-				strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
-				quirks = true
-			}
-		}
-		if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
-			strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
-			quirks = true
-		}
-	}
-
-	return n, quirks
-}
-
-// quirkyIDs is a list of public doctype identifiers that cause a document
-// to be interpreted in quirks mode. The identifiers should be in lower case.
-var quirkyIDs = []string{
-	"+//silmaril//dtd html pro v0r11 19970101//",
-	"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
-	"-//as//dtd html 3.0 aswedit + extensions//",
-	"-//ietf//dtd html 2.0 level 1//",
-	"-//ietf//dtd html 2.0 level 2//",
-	"-//ietf//dtd html 2.0 strict level 1//",
-	"-//ietf//dtd html 2.0 strict level 2//",
-	"-//ietf//dtd html 2.0 strict//",
-	"-//ietf//dtd html 2.0//",
-	"-//ietf//dtd html 2.1e//",
-	"-//ietf//dtd html 3.0//",
-	"-//ietf//dtd html 3.2 final//",
-	"-//ietf//dtd html 3.2//",
-	"-//ietf//dtd html 3//",
-	"-//ietf//dtd html level 0//",
-	"-//ietf//dtd html level 1//",
-	"-//ietf//dtd html level 2//",
-	"-//ietf//dtd html level 3//",
-	"-//ietf//dtd html strict level 0//",
-	"-//ietf//dtd html strict level 1//",
-	"-//ietf//dtd html strict level 2//",
-	"-//ietf//dtd html strict level 3//",
-	"-//ietf//dtd html strict//",
-	"-//ietf//dtd html//",
-	"-//metrius//dtd metrius presentational//",
-	"-//microsoft//dtd internet explorer 2.0 html strict//",
-	"-//microsoft//dtd internet explorer 2.0 html//",
-	"-//microsoft//dtd internet explorer 2.0 tables//",
-	"-//microsoft//dtd internet explorer 3.0 html strict//",
-	"-//microsoft//dtd internet explorer 3.0 html//",
-	"-//microsoft//dtd internet explorer 3.0 tables//",
-	"-//netscape comm. corp.//dtd html//",
-	"-//netscape comm. corp.//dtd strict html//",
-	"-//o'reilly and associates//dtd html 2.0//",
-	"-//o'reilly and associates//dtd html extended 1.0//",
-	"-//o'reilly and associates//dtd html extended relaxed 1.0//",
-	"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
-	"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
-	"-//spyglass//dtd html 2.0 extended//",
-	"-//sq//dtd html 2.0 hotmetal + extensions//",
-	"-//sun microsystems corp.//dtd hotjava html//",
-	"-//sun microsystems corp.//dtd hotjava strict html//",
-	"-//w3c//dtd html 3 1995-03-24//",
-	"-//w3c//dtd html 3.2 draft//",
-	"-//w3c//dtd html 3.2 final//",
-	"-//w3c//dtd html 3.2//",
-	"-//w3c//dtd html 3.2s draft//",
-	"-//w3c//dtd html 4.0 frameset//",
-	"-//w3c//dtd html 4.0 transitional//",
-	"-//w3c//dtd html experimental 19960712//",
-	"-//w3c//dtd html experimental 970421//",
-	"-//w3c//dtd w3 html//",
-	"-//w3o//dtd w3 html 3.0//",
-	"-//webtechs//dtd mozilla html 2.0//",
-	"-//webtechs//dtd mozilla html//",
-}
diff --git a/application/source/vendor/golang.org/x/net/html/entity.go b/application/source/vendor/golang.org/x/net/html/entity.go
deleted file mode 100644
index b628880a014d865f22c5b2c15a2aaf63aa340ed7..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/entity.go
+++ /dev/null
@@ -1,2253 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-// All entities that do not end with ';' are 6 or fewer bytes long.
-const longestEntityWithoutSemicolon = 6
-
-// entity is a map from HTML entity names to their values. The semicolon matters:
-// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
-// lists both "amp" and "amp;" as two separate entries.
-//
-// Note that the HTML5 list is larger than the HTML4 list at
-// http://www.w3.org/TR/html4/sgml/entities.html
-var entity = map[string]rune{
-	"AElig;":                           '\U000000C6',
-	"AMP;":                             '\U00000026',
-	"Aacute;":                          '\U000000C1',
-	"Abreve;":                          '\U00000102',
-	"Acirc;":                           '\U000000C2',
-	"Acy;":                             '\U00000410',
-	"Afr;":                             '\U0001D504',
-	"Agrave;":                          '\U000000C0',
-	"Alpha;":                           '\U00000391',
-	"Amacr;":                           '\U00000100',
-	"And;":                             '\U00002A53',
-	"Aogon;":                           '\U00000104',
-	"Aopf;":                            '\U0001D538',
-	"ApplyFunction;":                   '\U00002061',
-	"Aring;":                           '\U000000C5',
-	"Ascr;":                            '\U0001D49C',
-	"Assign;":                          '\U00002254',
-	"Atilde;":                          '\U000000C3',
-	"Auml;":                            '\U000000C4',
-	"Backslash;":                       '\U00002216',
-	"Barv;":                            '\U00002AE7',
-	"Barwed;":                          '\U00002306',
-	"Bcy;":                             '\U00000411',
-	"Because;":                         '\U00002235',
-	"Bernoullis;":                      '\U0000212C',
-	"Beta;":                            '\U00000392',
-	"Bfr;":                             '\U0001D505',
-	"Bopf;":                            '\U0001D539',
-	"Breve;":                           '\U000002D8',
-	"Bscr;":                            '\U0000212C',
-	"Bumpeq;":                          '\U0000224E',
-	"CHcy;":                            '\U00000427',
-	"COPY;":                            '\U000000A9',
-	"Cacute;":                          '\U00000106',
-	"Cap;":                             '\U000022D2',
-	"CapitalDifferentialD;":            '\U00002145',
-	"Cayleys;":                         '\U0000212D',
-	"Ccaron;":                          '\U0000010C',
-	"Ccedil;":                          '\U000000C7',
-	"Ccirc;":                           '\U00000108',
-	"Cconint;":                         '\U00002230',
-	"Cdot;":                            '\U0000010A',
-	"Cedilla;":                         '\U000000B8',
-	"CenterDot;":                       '\U000000B7',
-	"Cfr;":                             '\U0000212D',
-	"Chi;":                             '\U000003A7',
-	"CircleDot;":                       '\U00002299',
-	"CircleMinus;":                     '\U00002296',
-	"CirclePlus;":                      '\U00002295',
-	"CircleTimes;":                     '\U00002297',
-	"ClockwiseContourIntegral;":        '\U00002232',
-	"CloseCurlyDoubleQuote;":           '\U0000201D',
-	"CloseCurlyQuote;":                 '\U00002019',
-	"Colon;":                           '\U00002237',
-	"Colone;":                          '\U00002A74',
-	"Congruent;":                       '\U00002261',
-	"Conint;":                          '\U0000222F',
-	"ContourIntegral;":                 '\U0000222E',
-	"Copf;":                            '\U00002102',
-	"Coproduct;":                       '\U00002210',
-	"CounterClockwiseContourIntegral;": '\U00002233',
-	"Cross;":                           '\U00002A2F',
-	"Cscr;":                            '\U0001D49E',
-	"Cup;":                             '\U000022D3',
-	"CupCap;":                          '\U0000224D',
-	"DD;":                              '\U00002145',
-	"DDotrahd;":                        '\U00002911',
-	"DJcy;":                            '\U00000402',
-	"DScy;":                            '\U00000405',
-	"DZcy;":                            '\U0000040F',
-	"Dagger;":                          '\U00002021',
-	"Darr;":                            '\U000021A1',
-	"Dashv;":                           '\U00002AE4',
-	"Dcaron;":                          '\U0000010E',
-	"Dcy;":                             '\U00000414',
-	"Del;":                             '\U00002207',
-	"Delta;":                           '\U00000394',
-	"Dfr;":                             '\U0001D507',
-	"DiacriticalAcute;":                '\U000000B4',
-	"DiacriticalDot;":                  '\U000002D9',
-	"DiacriticalDoubleAcute;":          '\U000002DD',
-	"DiacriticalGrave;":                '\U00000060',
-	"DiacriticalTilde;":                '\U000002DC',
-	"Diamond;":                         '\U000022C4',
-	"DifferentialD;":                   '\U00002146',
-	"Dopf;":                            '\U0001D53B',
-	"Dot;":                             '\U000000A8',
-	"DotDot;":                          '\U000020DC',
-	"DotEqual;":                        '\U00002250',
-	"DoubleContourIntegral;":           '\U0000222F',
-	"DoubleDot;":                       '\U000000A8',
-	"DoubleDownArrow;":                 '\U000021D3',
-	"DoubleLeftArrow;":                 '\U000021D0',
-	"DoubleLeftRightArrow;":            '\U000021D4',
-	"DoubleLeftTee;":                   '\U00002AE4',
-	"DoubleLongLeftArrow;":             '\U000027F8',
-	"DoubleLongLeftRightArrow;":        '\U000027FA',
-	"DoubleLongRightArrow;":            '\U000027F9',
-	"DoubleRightArrow;":                '\U000021D2',
-	"DoubleRightTee;":                  '\U000022A8',
-	"DoubleUpArrow;":                   '\U000021D1',
-	"DoubleUpDownArrow;":               '\U000021D5',
-	"DoubleVerticalBar;":               '\U00002225',
-	"DownArrow;":                       '\U00002193',
-	"DownArrowBar;":                    '\U00002913',
-	"DownArrowUpArrow;":                '\U000021F5',
-	"DownBreve;":                       '\U00000311',
-	"DownLeftRightVector;":             '\U00002950',
-	"DownLeftTeeVector;":               '\U0000295E',
-	"DownLeftVector;":                  '\U000021BD',
-	"DownLeftVectorBar;":               '\U00002956',
-	"DownRightTeeVector;":              '\U0000295F',
-	"DownRightVector;":                 '\U000021C1',
-	"DownRightVectorBar;":              '\U00002957',
-	"DownTee;":                         '\U000022A4',
-	"DownTeeArrow;":                    '\U000021A7',
-	"Downarrow;":                       '\U000021D3',
-	"Dscr;":                            '\U0001D49F',
-	"Dstrok;":                          '\U00000110',
-	"ENG;":                             '\U0000014A',
-	"ETH;":                             '\U000000D0',
-	"Eacute;":                          '\U000000C9',
-	"Ecaron;":                          '\U0000011A',
-	"Ecirc;":                           '\U000000CA',
-	"Ecy;":                             '\U0000042D',
-	"Edot;":                            '\U00000116',
-	"Efr;":                             '\U0001D508',
-	"Egrave;":                          '\U000000C8',
-	"Element;":                         '\U00002208',
-	"Emacr;":                           '\U00000112',
-	"EmptySmallSquare;":                '\U000025FB',
-	"EmptyVerySmallSquare;":            '\U000025AB',
-	"Eogon;":                           '\U00000118',
-	"Eopf;":                            '\U0001D53C',
-	"Epsilon;":                         '\U00000395',
-	"Equal;":                           '\U00002A75',
-	"EqualTilde;":                      '\U00002242',
-	"Equilibrium;":                     '\U000021CC',
-	"Escr;":                            '\U00002130',
-	"Esim;":                            '\U00002A73',
-	"Eta;":                             '\U00000397',
-	"Euml;":                            '\U000000CB',
-	"Exists;":                          '\U00002203',
-	"ExponentialE;":                    '\U00002147',
-	"Fcy;":                             '\U00000424',
-	"Ffr;":                             '\U0001D509',
-	"FilledSmallSquare;":               '\U000025FC',
-	"FilledVerySmallSquare;":           '\U000025AA',
-	"Fopf;":                            '\U0001D53D',
-	"ForAll;":                          '\U00002200',
-	"Fouriertrf;":                      '\U00002131',
-	"Fscr;":                            '\U00002131',
-	"GJcy;":                            '\U00000403',
-	"GT;":                              '\U0000003E',
-	"Gamma;":                           '\U00000393',
-	"Gammad;":                          '\U000003DC',
-	"Gbreve;":                          '\U0000011E',
-	"Gcedil;":                          '\U00000122',
-	"Gcirc;":                           '\U0000011C',
-	"Gcy;":                             '\U00000413',
-	"Gdot;":                            '\U00000120',
-	"Gfr;":                             '\U0001D50A',
-	"Gg;":                              '\U000022D9',
-	"Gopf;":                            '\U0001D53E',
-	"GreaterEqual;":                    '\U00002265',
-	"GreaterEqualLess;":                '\U000022DB',
-	"GreaterFullEqual;":                '\U00002267',
-	"GreaterGreater;":                  '\U00002AA2',
-	"GreaterLess;":                     '\U00002277',
-	"GreaterSlantEqual;":               '\U00002A7E',
-	"GreaterTilde;":                    '\U00002273',
-	"Gscr;":                            '\U0001D4A2',
-	"Gt;":                              '\U0000226B',
-	"HARDcy;":                          '\U0000042A',
-	"Hacek;":                           '\U000002C7',
-	"Hat;":                             '\U0000005E',
-	"Hcirc;":                           '\U00000124',
-	"Hfr;":                             '\U0000210C',
-	"HilbertSpace;":                    '\U0000210B',
-	"Hopf;":                            '\U0000210D',
-	"HorizontalLine;":                  '\U00002500',
-	"Hscr;":                            '\U0000210B',
-	"Hstrok;":                          '\U00000126',
-	"HumpDownHump;":                    '\U0000224E',
-	"HumpEqual;":                       '\U0000224F',
-	"IEcy;":                            '\U00000415',
-	"IJlig;":                           '\U00000132',
-	"IOcy;":                            '\U00000401',
-	"Iacute;":                          '\U000000CD',
-	"Icirc;":                           '\U000000CE',
-	"Icy;":                             '\U00000418',
-	"Idot;":                            '\U00000130',
-	"Ifr;":                             '\U00002111',
-	"Igrave;":                          '\U000000CC',
-	"Im;":                              '\U00002111',
-	"Imacr;":                           '\U0000012A',
-	"ImaginaryI;":                      '\U00002148',
-	"Implies;":                         '\U000021D2',
-	"Int;":                             '\U0000222C',
-	"Integral;":                        '\U0000222B',
-	"Intersection;":                    '\U000022C2',
-	"InvisibleComma;":                  '\U00002063',
-	"InvisibleTimes;":                  '\U00002062',
-	"Iogon;":                           '\U0000012E',
-	"Iopf;":                            '\U0001D540',
-	"Iota;":                            '\U00000399',
-	"Iscr;":                            '\U00002110',
-	"Itilde;":                          '\U00000128',
-	"Iukcy;":                           '\U00000406',
-	"Iuml;":                            '\U000000CF',
-	"Jcirc;":                           '\U00000134',
-	"Jcy;":                             '\U00000419',
-	"Jfr;":                             '\U0001D50D',
-	"Jopf;":                            '\U0001D541',
-	"Jscr;":                            '\U0001D4A5',
-	"Jsercy;":                          '\U00000408',
-	"Jukcy;":                           '\U00000404',
-	"KHcy;":                            '\U00000425',
-	"KJcy;":                            '\U0000040C',
-	"Kappa;":                           '\U0000039A',
-	"Kcedil;":                          '\U00000136',
-	"Kcy;":                             '\U0000041A',
-	"Kfr;":                             '\U0001D50E',
-	"Kopf;":                            '\U0001D542',
-	"Kscr;":                            '\U0001D4A6',
-	"LJcy;":                            '\U00000409',
-	"LT;":                              '\U0000003C',
-	"Lacute;":                          '\U00000139',
-	"Lambda;":                          '\U0000039B',
-	"Lang;":                            '\U000027EA',
-	"Laplacetrf;":                      '\U00002112',
-	"Larr;":                            '\U0000219E',
-	"Lcaron;":                          '\U0000013D',
-	"Lcedil;":                          '\U0000013B',
-	"Lcy;":                             '\U0000041B',
-	"LeftAngleBracket;":                '\U000027E8',
-	"LeftArrow;":                       '\U00002190',
-	"LeftArrowBar;":                    '\U000021E4',
-	"LeftArrowRightArrow;":             '\U000021C6',
-	"LeftCeiling;":                     '\U00002308',
-	"LeftDoubleBracket;":               '\U000027E6',
-	"LeftDownTeeVector;":               '\U00002961',
-	"LeftDownVector;":                  '\U000021C3',
-	"LeftDownVectorBar;":               '\U00002959',
-	"LeftFloor;":                       '\U0000230A',
-	"LeftRightArrow;":                  '\U00002194',
-	"LeftRightVector;":                 '\U0000294E',
-	"LeftTee;":                         '\U000022A3',
-	"LeftTeeArrow;":                    '\U000021A4',
-	"LeftTeeVector;":                   '\U0000295A',
-	"LeftTriangle;":                    '\U000022B2',
-	"LeftTriangleBar;":                 '\U000029CF',
-	"LeftTriangleEqual;":               '\U000022B4',
-	"LeftUpDownVector;":                '\U00002951',
-	"LeftUpTeeVector;":                 '\U00002960',
-	"LeftUpVector;":                    '\U000021BF',
-	"LeftUpVectorBar;":                 '\U00002958',
-	"LeftVector;":                      '\U000021BC',
-	"LeftVectorBar;":                   '\U00002952',
-	"Leftarrow;":                       '\U000021D0',
-	"Leftrightarrow;":                  '\U000021D4',
-	"LessEqualGreater;":                '\U000022DA',
-	"LessFullEqual;":                   '\U00002266',
-	"LessGreater;":                     '\U00002276',
-	"LessLess;":                        '\U00002AA1',
-	"LessSlantEqual;":                  '\U00002A7D',
-	"LessTilde;":                       '\U00002272',
-	"Lfr;":                             '\U0001D50F',
-	"Ll;":                              '\U000022D8',
-	"Lleftarrow;":                      '\U000021DA',
-	"Lmidot;":                          '\U0000013F',
-	"LongLeftArrow;":                   '\U000027F5',
-	"LongLeftRightArrow;":              '\U000027F7',
-	"LongRightArrow;":                  '\U000027F6',
-	"Longleftarrow;":                   '\U000027F8',
-	"Longleftrightarrow;":              '\U000027FA',
-	"Longrightarrow;":                  '\U000027F9',
-	"Lopf;":                            '\U0001D543',
-	"LowerLeftArrow;":                  '\U00002199',
-	"LowerRightArrow;":                 '\U00002198',
-	"Lscr;":                            '\U00002112',
-	"Lsh;":                             '\U000021B0',
-	"Lstrok;":                          '\U00000141',
-	"Lt;":                              '\U0000226A',
-	"Map;":                             '\U00002905',
-	"Mcy;":                             '\U0000041C',
-	"MediumSpace;":                     '\U0000205F',
-	"Mellintrf;":                       '\U00002133',
-	"Mfr;":                             '\U0001D510',
-	"MinusPlus;":                       '\U00002213',
-	"Mopf;":                            '\U0001D544',
-	"Mscr;":                            '\U00002133',
-	"Mu;":                              '\U0000039C',
-	"NJcy;":                            '\U0000040A',
-	"Nacute;":                          '\U00000143',
-	"Ncaron;":                          '\U00000147',
-	"Ncedil;":                          '\U00000145',
-	"Ncy;":                             '\U0000041D',
-	"NegativeMediumSpace;":             '\U0000200B',
-	"NegativeThickSpace;":              '\U0000200B',
-	"NegativeThinSpace;":               '\U0000200B',
-	"NegativeVeryThinSpace;":           '\U0000200B',
-	"NestedGreaterGreater;":            '\U0000226B',
-	"NestedLessLess;":                  '\U0000226A',
-	"NewLine;":                         '\U0000000A',
-	"Nfr;":                             '\U0001D511',
-	"NoBreak;":                         '\U00002060',
-	"NonBreakingSpace;":                '\U000000A0',
-	"Nopf;":                            '\U00002115',
-	"Not;":                             '\U00002AEC',
-	"NotCongruent;":                    '\U00002262',
-	"NotCupCap;":                       '\U0000226D',
-	"NotDoubleVerticalBar;":            '\U00002226',
-	"NotElement;":                      '\U00002209',
-	"NotEqual;":                        '\U00002260',
-	"NotExists;":                       '\U00002204',
-	"NotGreater;":                      '\U0000226F',
-	"NotGreaterEqual;":                 '\U00002271',
-	"NotGreaterLess;":                  '\U00002279',
-	"NotGreaterTilde;":                 '\U00002275',
-	"NotLeftTriangle;":                 '\U000022EA',
-	"NotLeftTriangleEqual;":            '\U000022EC',
-	"NotLess;":                         '\U0000226E',
-	"NotLessEqual;":                    '\U00002270',
-	"NotLessGreater;":                  '\U00002278',
-	"NotLessTilde;":                    '\U00002274',
-	"NotPrecedes;":                     '\U00002280',
-	"NotPrecedesSlantEqual;":           '\U000022E0',
-	"NotReverseElement;":               '\U0000220C',
-	"NotRightTriangle;":                '\U000022EB',
-	"NotRightTriangleEqual;":           '\U000022ED',
-	"NotSquareSubsetEqual;":            '\U000022E2',
-	"NotSquareSupersetEqual;":          '\U000022E3',
-	"NotSubsetEqual;":                  '\U00002288',
-	"NotSucceeds;":                     '\U00002281',
-	"NotSucceedsSlantEqual;":           '\U000022E1',
-	"NotSupersetEqual;":                '\U00002289',
-	"NotTilde;":                        '\U00002241',
-	"NotTildeEqual;":                   '\U00002244',
-	"NotTildeFullEqual;":               '\U00002247',
-	"NotTildeTilde;":                   '\U00002249',
-	"NotVerticalBar;":                  '\U00002224',
-	"Nscr;":                            '\U0001D4A9',
-	"Ntilde;":                          '\U000000D1',
-	"Nu;":                              '\U0000039D',
-	"OElig;":                           '\U00000152',
-	"Oacute;":                          '\U000000D3',
-	"Ocirc;":                           '\U000000D4',
-	"Ocy;":                             '\U0000041E',
-	"Odblac;":                          '\U00000150',
-	"Ofr;":                             '\U0001D512',
-	"Ograve;":                          '\U000000D2',
-	"Omacr;":                           '\U0000014C',
-	"Omega;":                           '\U000003A9',
-	"Omicron;":                         '\U0000039F',
-	"Oopf;":                            '\U0001D546',
-	"OpenCurlyDoubleQuote;":            '\U0000201C',
-	"OpenCurlyQuote;":                  '\U00002018',
-	"Or;":                              '\U00002A54',
-	"Oscr;":                            '\U0001D4AA',
-	"Oslash;":                          '\U000000D8',
-	"Otilde;":                          '\U000000D5',
-	"Otimes;":                          '\U00002A37',
-	"Ouml;":                            '\U000000D6',
-	"OverBar;":                         '\U0000203E',
-	"OverBrace;":                       '\U000023DE',
-	"OverBracket;":                     '\U000023B4',
-	"OverParenthesis;":                 '\U000023DC',
-	"PartialD;":                        '\U00002202',
-	"Pcy;":                             '\U0000041F',
-	"Pfr;":                             '\U0001D513',
-	"Phi;":                             '\U000003A6',
-	"Pi;":                              '\U000003A0',
-	"PlusMinus;":                       '\U000000B1',
-	"Poincareplane;":                   '\U0000210C',
-	"Popf;":                            '\U00002119',
-	"Pr;":                              '\U00002ABB',
-	"Precedes;":                        '\U0000227A',
-	"PrecedesEqual;":                   '\U00002AAF',
-	"PrecedesSlantEqual;":              '\U0000227C',
-	"PrecedesTilde;":                   '\U0000227E',
-	"Prime;":                           '\U00002033',
-	"Product;":                         '\U0000220F',
-	"Proportion;":                      '\U00002237',
-	"Proportional;":                    '\U0000221D',
-	"Pscr;":                            '\U0001D4AB',
-	"Psi;":                             '\U000003A8',
-	"QUOT;":                            '\U00000022',
-	"Qfr;":                             '\U0001D514',
-	"Qopf;":                            '\U0000211A',
-	"Qscr;":                            '\U0001D4AC',
-	"RBarr;":                           '\U00002910',
-	"REG;":                             '\U000000AE',
-	"Racute;":                          '\U00000154',
-	"Rang;":                            '\U000027EB',
-	"Rarr;":                            '\U000021A0',
-	"Rarrtl;":                          '\U00002916',
-	"Rcaron;":                          '\U00000158',
-	"Rcedil;":                          '\U00000156',
-	"Rcy;":                             '\U00000420',
-	"Re;":                              '\U0000211C',
-	"ReverseElement;":                  '\U0000220B',
-	"ReverseEquilibrium;":              '\U000021CB',
-	"ReverseUpEquilibrium;":            '\U0000296F',
-	"Rfr;":                             '\U0000211C',
-	"Rho;":                             '\U000003A1',
-	"RightAngleBracket;":               '\U000027E9',
-	"RightArrow;":                      '\U00002192',
-	"RightArrowBar;":                   '\U000021E5',
-	"RightArrowLeftArrow;":             '\U000021C4',
-	"RightCeiling;":                    '\U00002309',
-	"RightDoubleBracket;":              '\U000027E7',
-	"RightDownTeeVector;":              '\U0000295D',
-	"RightDownVector;":                 '\U000021C2',
-	"RightDownVectorBar;":              '\U00002955',
-	"RightFloor;":                      '\U0000230B',
-	"RightTee;":                        '\U000022A2',
-	"RightTeeArrow;":                   '\U000021A6',
-	"RightTeeVector;":                  '\U0000295B',
-	"RightTriangle;":                   '\U000022B3',
-	"RightTriangleBar;":                '\U000029D0',
-	"RightTriangleEqual;":              '\U000022B5',
-	"RightUpDownVector;":               '\U0000294F',
-	"RightUpTeeVector;":                '\U0000295C',
-	"RightUpVector;":                   '\U000021BE',
-	"RightUpVectorBar;":                '\U00002954',
-	"RightVector;":                     '\U000021C0',
-	"RightVectorBar;":                  '\U00002953',
-	"Rightarrow;":                      '\U000021D2',
-	"Ropf;":                            '\U0000211D',
-	"RoundImplies;":                    '\U00002970',
-	"Rrightarrow;":                     '\U000021DB',
-	"Rscr;":                            '\U0000211B',
-	"Rsh;":                             '\U000021B1',
-	"RuleDelayed;":                     '\U000029F4',
-	"SHCHcy;":                          '\U00000429',
-	"SHcy;":                            '\U00000428',
-	"SOFTcy;":                          '\U0000042C',
-	"Sacute;":                          '\U0000015A',
-	"Sc;":                              '\U00002ABC',
-	"Scaron;":                          '\U00000160',
-	"Scedil;":                          '\U0000015E',
-	"Scirc;":                           '\U0000015C',
-	"Scy;":                             '\U00000421',
-	"Sfr;":                             '\U0001D516',
-	"ShortDownArrow;":                  '\U00002193',
-	"ShortLeftArrow;":                  '\U00002190',
-	"ShortRightArrow;":                 '\U00002192',
-	"ShortUpArrow;":                    '\U00002191',
-	"Sigma;":                           '\U000003A3',
-	"SmallCircle;":                     '\U00002218',
-	"Sopf;":                            '\U0001D54A',
-	"Sqrt;":                            '\U0000221A',
-	"Square;":                          '\U000025A1',
-	"SquareIntersection;":              '\U00002293',
-	"SquareSubset;":                    '\U0000228F',
-	"SquareSubsetEqual;":               '\U00002291',
-	"SquareSuperset;":                  '\U00002290',
-	"SquareSupersetEqual;":             '\U00002292',
-	"SquareUnion;":                     '\U00002294',
-	"Sscr;":                            '\U0001D4AE',
-	"Star;":                            '\U000022C6',
-	"Sub;":                             '\U000022D0',
-	"Subset;":                          '\U000022D0',
-	"SubsetEqual;":                     '\U00002286',
-	"Succeeds;":                        '\U0000227B',
-	"SucceedsEqual;":                   '\U00002AB0',
-	"SucceedsSlantEqual;":              '\U0000227D',
-	"SucceedsTilde;":                   '\U0000227F',
-	"SuchThat;":                        '\U0000220B',
-	"Sum;":                             '\U00002211',
-	"Sup;":                             '\U000022D1',
-	"Superset;":                        '\U00002283',
-	"SupersetEqual;":                   '\U00002287',
-	"Supset;":                          '\U000022D1',
-	"THORN;":                           '\U000000DE',
-	"TRADE;":                           '\U00002122',
-	"TSHcy;":                           '\U0000040B',
-	"TScy;":                            '\U00000426',
-	"Tab;":                             '\U00000009',
-	"Tau;":                             '\U000003A4',
-	"Tcaron;":                          '\U00000164',
-	"Tcedil;":                          '\U00000162',
-	"Tcy;":                             '\U00000422',
-	"Tfr;":                             '\U0001D517',
-	"Therefore;":                       '\U00002234',
-	"Theta;":                           '\U00000398',
-	"ThinSpace;":                       '\U00002009',
-	"Tilde;":                           '\U0000223C',
-	"TildeEqual;":                      '\U00002243',
-	"TildeFullEqual;":                  '\U00002245',
-	"TildeTilde;":                      '\U00002248',
-	"Topf;":                            '\U0001D54B',
-	"TripleDot;":                       '\U000020DB',
-	"Tscr;":                            '\U0001D4AF',
-	"Tstrok;":                          '\U00000166',
-	"Uacute;":                          '\U000000DA',
-	"Uarr;":                            '\U0000219F',
-	"Uarrocir;":                        '\U00002949',
-	"Ubrcy;":                           '\U0000040E',
-	"Ubreve;":                          '\U0000016C',
-	"Ucirc;":                           '\U000000DB',
-	"Ucy;":                             '\U00000423',
-	"Udblac;":                          '\U00000170',
-	"Ufr;":                             '\U0001D518',
-	"Ugrave;":                          '\U000000D9',
-	"Umacr;":                           '\U0000016A',
-	"UnderBar;":                        '\U0000005F',
-	"UnderBrace;":                      '\U000023DF',
-	"UnderBracket;":                    '\U000023B5',
-	"UnderParenthesis;":                '\U000023DD',
-	"Union;":                           '\U000022C3',
-	"UnionPlus;":                       '\U0000228E',
-	"Uogon;":                           '\U00000172',
-	"Uopf;":                            '\U0001D54C',
-	"UpArrow;":                         '\U00002191',
-	"UpArrowBar;":                      '\U00002912',
-	"UpArrowDownArrow;":                '\U000021C5',
-	"UpDownArrow;":                     '\U00002195',
-	"UpEquilibrium;":                   '\U0000296E',
-	"UpTee;":                           '\U000022A5',
-	"UpTeeArrow;":                      '\U000021A5',
-	"Uparrow;":                         '\U000021D1',
-	"Updownarrow;":                     '\U000021D5',
-	"UpperLeftArrow;":                  '\U00002196',
-	"UpperRightArrow;":                 '\U00002197',
-	"Upsi;":                            '\U000003D2',
-	"Upsilon;":                         '\U000003A5',
-	"Uring;":                           '\U0000016E',
-	"Uscr;":                            '\U0001D4B0',
-	"Utilde;":                          '\U00000168',
-	"Uuml;":                            '\U000000DC',
-	"VDash;":                           '\U000022AB',
-	"Vbar;":                            '\U00002AEB',
-	"Vcy;":                             '\U00000412',
-	"Vdash;":                           '\U000022A9',
-	"Vdashl;":                          '\U00002AE6',
-	"Vee;":                             '\U000022C1',
-	"Verbar;":                          '\U00002016',
-	"Vert;":                            '\U00002016',
-	"VerticalBar;":                     '\U00002223',
-	"VerticalLine;":                    '\U0000007C',
-	"VerticalSeparator;":               '\U00002758',
-	"VerticalTilde;":                   '\U00002240',
-	"VeryThinSpace;":                   '\U0000200A',
-	"Vfr;":                             '\U0001D519',
-	"Vopf;":                            '\U0001D54D',
-	"Vscr;":                            '\U0001D4B1',
-	"Vvdash;":                          '\U000022AA',
-	"Wcirc;":                           '\U00000174',
-	"Wedge;":                           '\U000022C0',
-	"Wfr;":                             '\U0001D51A',
-	"Wopf;":                            '\U0001D54E',
-	"Wscr;":                            '\U0001D4B2',
-	"Xfr;":                             '\U0001D51B',
-	"Xi;":                              '\U0000039E',
-	"Xopf;":                            '\U0001D54F',
-	"Xscr;":                            '\U0001D4B3',
-	"YAcy;":                            '\U0000042F',
-	"YIcy;":                            '\U00000407',
-	"YUcy;":                            '\U0000042E',
-	"Yacute;":                          '\U000000DD',
-	"Ycirc;":                           '\U00000176',
-	"Ycy;":                             '\U0000042B',
-	"Yfr;":                             '\U0001D51C',
-	"Yopf;":                            '\U0001D550',
-	"Yscr;":                            '\U0001D4B4',
-	"Yuml;":                            '\U00000178',
-	"ZHcy;":                            '\U00000416',
-	"Zacute;":                          '\U00000179',
-	"Zcaron;":                          '\U0000017D',
-	"Zcy;":                             '\U00000417',
-	"Zdot;":                            '\U0000017B',
-	"ZeroWidthSpace;":                  '\U0000200B',
-	"Zeta;":                            '\U00000396',
-	"Zfr;":                             '\U00002128',
-	"Zopf;":                            '\U00002124',
-	"Zscr;":                            '\U0001D4B5',
-	"aacute;":                          '\U000000E1',
-	"abreve;":                          '\U00000103',
-	"ac;":                              '\U0000223E',
-	"acd;":                             '\U0000223F',
-	"acirc;":                           '\U000000E2',
-	"acute;":                           '\U000000B4',
-	"acy;":                             '\U00000430',
-	"aelig;":                           '\U000000E6',
-	"af;":                              '\U00002061',
-	"afr;":                             '\U0001D51E',
-	"agrave;":                          '\U000000E0',
-	"alefsym;":                         '\U00002135',
-	"aleph;":                           '\U00002135',
-	"alpha;":                           '\U000003B1',
-	"amacr;":                           '\U00000101',
-	"amalg;":                           '\U00002A3F',
-	"amp;":                             '\U00000026',
-	"and;":                             '\U00002227',
-	"andand;":                          '\U00002A55',
-	"andd;":                            '\U00002A5C',
-	"andslope;":                        '\U00002A58',
-	"andv;":                            '\U00002A5A',
-	"ang;":                             '\U00002220',
-	"ange;":                            '\U000029A4',
-	"angle;":                           '\U00002220',
-	"angmsd;":                          '\U00002221',
-	"angmsdaa;":                        '\U000029A8',
-	"angmsdab;":                        '\U000029A9',
-	"angmsdac;":                        '\U000029AA',
-	"angmsdad;":                        '\U000029AB',
-	"angmsdae;":                        '\U000029AC',
-	"angmsdaf;":                        '\U000029AD',
-	"angmsdag;":                        '\U000029AE',
-	"angmsdah;":                        '\U000029AF',
-	"angrt;":                           '\U0000221F',
-	"angrtvb;":                         '\U000022BE',
-	"angrtvbd;":                        '\U0000299D',
-	"angsph;":                          '\U00002222',
-	"angst;":                           '\U000000C5',
-	"angzarr;":                         '\U0000237C',
-	"aogon;":                           '\U00000105',
-	"aopf;":                            '\U0001D552',
-	"ap;":                              '\U00002248',
-	"apE;":                             '\U00002A70',
-	"apacir;":                          '\U00002A6F',
-	"ape;":                             '\U0000224A',
-	"apid;":                            '\U0000224B',
-	"apos;":                            '\U00000027',
-	"approx;":                          '\U00002248',
-	"approxeq;":                        '\U0000224A',
-	"aring;":                           '\U000000E5',
-	"ascr;":                            '\U0001D4B6',
-	"ast;":                             '\U0000002A',
-	"asymp;":                           '\U00002248',
-	"asympeq;":                         '\U0000224D',
-	"atilde;":                          '\U000000E3',
-	"auml;":                            '\U000000E4',
-	"awconint;":                        '\U00002233',
-	"awint;":                           '\U00002A11',
-	"bNot;":                            '\U00002AED',
-	"backcong;":                        '\U0000224C',
-	"backepsilon;":                     '\U000003F6',
-	"backprime;":                       '\U00002035',
-	"backsim;":                         '\U0000223D',
-	"backsimeq;":                       '\U000022CD',
-	"barvee;":                          '\U000022BD',
-	"barwed;":                          '\U00002305',
-	"barwedge;":                        '\U00002305',
-	"bbrk;":                            '\U000023B5',
-	"bbrktbrk;":                        '\U000023B6',
-	"bcong;":                           '\U0000224C',
-	"bcy;":                             '\U00000431',
-	"bdquo;":                           '\U0000201E',
-	"becaus;":                          '\U00002235',
-	"because;":                         '\U00002235',
-	"bemptyv;":                         '\U000029B0',
-	"bepsi;":                           '\U000003F6',
-	"bernou;":                          '\U0000212C',
-	"beta;":                            '\U000003B2',
-	"beth;":                            '\U00002136',
-	"between;":                         '\U0000226C',
-	"bfr;":                             '\U0001D51F',
-	"bigcap;":                          '\U000022C2',
-	"bigcirc;":                         '\U000025EF',
-	"bigcup;":                          '\U000022C3',
-	"bigodot;":                         '\U00002A00',
-	"bigoplus;":                        '\U00002A01',
-	"bigotimes;":                       '\U00002A02',
-	"bigsqcup;":                        '\U00002A06',
-	"bigstar;":                         '\U00002605',
-	"bigtriangledown;":                 '\U000025BD',
-	"bigtriangleup;":                   '\U000025B3',
-	"biguplus;":                        '\U00002A04',
-	"bigvee;":                          '\U000022C1',
-	"bigwedge;":                        '\U000022C0',
-	"bkarow;":                          '\U0000290D',
-	"blacklozenge;":                    '\U000029EB',
-	"blacksquare;":                     '\U000025AA',
-	"blacktriangle;":                   '\U000025B4',
-	"blacktriangledown;":               '\U000025BE',
-	"blacktriangleleft;":               '\U000025C2',
-	"blacktriangleright;":              '\U000025B8',
-	"blank;":                           '\U00002423',
-	"blk12;":                           '\U00002592',
-	"blk14;":                           '\U00002591',
-	"blk34;":                           '\U00002593',
-	"block;":                           '\U00002588',
-	"bnot;":                            '\U00002310',
-	"bopf;":                            '\U0001D553',
-	"bot;":                             '\U000022A5',
-	"bottom;":                          '\U000022A5',
-	"bowtie;":                          '\U000022C8',
-	"boxDL;":                           '\U00002557',
-	"boxDR;":                           '\U00002554',
-	"boxDl;":                           '\U00002556',
-	"boxDr;":                           '\U00002553',
-	"boxH;":                            '\U00002550',
-	"boxHD;":                           '\U00002566',
-	"boxHU;":                           '\U00002569',
-	"boxHd;":                           '\U00002564',
-	"boxHu;":                           '\U00002567',
-	"boxUL;":                           '\U0000255D',
-	"boxUR;":                           '\U0000255A',
-	"boxUl;":                           '\U0000255C',
-	"boxUr;":                           '\U00002559',
-	"boxV;":                            '\U00002551',
-	"boxVH;":                           '\U0000256C',
-	"boxVL;":                           '\U00002563',
-	"boxVR;":                           '\U00002560',
-	"boxVh;":                           '\U0000256B',
-	"boxVl;":                           '\U00002562',
-	"boxVr;":                           '\U0000255F',
-	"boxbox;":                          '\U000029C9',
-	"boxdL;":                           '\U00002555',
-	"boxdR;":                           '\U00002552',
-	"boxdl;":                           '\U00002510',
-	"boxdr;":                           '\U0000250C',
-	"boxh;":                            '\U00002500',
-	"boxhD;":                           '\U00002565',
-	"boxhU;":                           '\U00002568',
-	"boxhd;":                           '\U0000252C',
-	"boxhu;":                           '\U00002534',
-	"boxminus;":                        '\U0000229F',
-	"boxplus;":                         '\U0000229E',
-	"boxtimes;":                        '\U000022A0',
-	"boxuL;":                           '\U0000255B',
-	"boxuR;":                           '\U00002558',
-	"boxul;":                           '\U00002518',
-	"boxur;":                           '\U00002514',
-	"boxv;":                            '\U00002502',
-	"boxvH;":                           '\U0000256A',
-	"boxvL;":                           '\U00002561',
-	"boxvR;":                           '\U0000255E',
-	"boxvh;":                           '\U0000253C',
-	"boxvl;":                           '\U00002524',
-	"boxvr;":                           '\U0000251C',
-	"bprime;":                          '\U00002035',
-	"breve;":                           '\U000002D8',
-	"brvbar;":                          '\U000000A6',
-	"bscr;":                            '\U0001D4B7',
-	"bsemi;":                           '\U0000204F',
-	"bsim;":                            '\U0000223D',
-	"bsime;":                           '\U000022CD',
-	"bsol;":                            '\U0000005C',
-	"bsolb;":                           '\U000029C5',
-	"bsolhsub;":                        '\U000027C8',
-	"bull;":                            '\U00002022',
-	"bullet;":                          '\U00002022',
-	"bump;":                            '\U0000224E',
-	"bumpE;":                           '\U00002AAE',
-	"bumpe;":                           '\U0000224F',
-	"bumpeq;":                          '\U0000224F',
-	"cacute;":                          '\U00000107',
-	"cap;":                             '\U00002229',
-	"capand;":                          '\U00002A44',
-	"capbrcup;":                        '\U00002A49',
-	"capcap;":                          '\U00002A4B',
-	"capcup;":                          '\U00002A47',
-	"capdot;":                          '\U00002A40',
-	"caret;":                           '\U00002041',
-	"caron;":                           '\U000002C7',
-	"ccaps;":                           '\U00002A4D',
-	"ccaron;":                          '\U0000010D',
-	"ccedil;":                          '\U000000E7',
-	"ccirc;":                           '\U00000109',
-	"ccups;":                           '\U00002A4C',
-	"ccupssm;":                         '\U00002A50',
-	"cdot;":                            '\U0000010B',
-	"cedil;":                           '\U000000B8',
-	"cemptyv;":                         '\U000029B2',
-	"cent;":                            '\U000000A2',
-	"centerdot;":                       '\U000000B7',
-	"cfr;":                             '\U0001D520',
-	"chcy;":                            '\U00000447',
-	"check;":                           '\U00002713',
-	"checkmark;":                       '\U00002713',
-	"chi;":                             '\U000003C7',
-	"cir;":                             '\U000025CB',
-	"cirE;":                            '\U000029C3',
-	"circ;":                            '\U000002C6',
-	"circeq;":                          '\U00002257',
-	"circlearrowleft;":                 '\U000021BA',
-	"circlearrowright;":                '\U000021BB',
-	"circledR;":                        '\U000000AE',
-	"circledS;":                        '\U000024C8',
-	"circledast;":                      '\U0000229B',
-	"circledcirc;":                     '\U0000229A',
-	"circleddash;":                     '\U0000229D',
-	"cire;":                            '\U00002257',
-	"cirfnint;":                        '\U00002A10',
-	"cirmid;":                          '\U00002AEF',
-	"cirscir;":                         '\U000029C2',
-	"clubs;":                           '\U00002663',
-	"clubsuit;":                        '\U00002663',
-	"colon;":                           '\U0000003A',
-	"colone;":                          '\U00002254',
-	"coloneq;":                         '\U00002254',
-	"comma;":                           '\U0000002C',
-	"commat;":                          '\U00000040',
-	"comp;":                            '\U00002201',
-	"compfn;":                          '\U00002218',
-	"complement;":                      '\U00002201',
-	"complexes;":                       '\U00002102',
-	"cong;":                            '\U00002245',
-	"congdot;":                         '\U00002A6D',
-	"conint;":                          '\U0000222E',
-	"copf;":                            '\U0001D554',
-	"coprod;":                          '\U00002210',
-	"copy;":                            '\U000000A9',
-	"copysr;":                          '\U00002117',
-	"crarr;":                           '\U000021B5',
-	"cross;":                           '\U00002717',
-	"cscr;":                            '\U0001D4B8',
-	"csub;":                            '\U00002ACF',
-	"csube;":                           '\U00002AD1',
-	"csup;":                            '\U00002AD0',
-	"csupe;":                           '\U00002AD2',
-	"ctdot;":                           '\U000022EF',
-	"cudarrl;":                         '\U00002938',
-	"cudarrr;":                         '\U00002935',
-	"cuepr;":                           '\U000022DE',
-	"cuesc;":                           '\U000022DF',
-	"cularr;":                          '\U000021B6',
-	"cularrp;":                         '\U0000293D',
-	"cup;":                             '\U0000222A',
-	"cupbrcap;":                        '\U00002A48',
-	"cupcap;":                          '\U00002A46',
-	"cupcup;":                          '\U00002A4A',
-	"cupdot;":                          '\U0000228D',
-	"cupor;":                           '\U00002A45',
-	"curarr;":                          '\U000021B7',
-	"curarrm;":                         '\U0000293C',
-	"curlyeqprec;":                     '\U000022DE',
-	"curlyeqsucc;":                     '\U000022DF',
-	"curlyvee;":                        '\U000022CE',
-	"curlywedge;":                      '\U000022CF',
-	"curren;":                          '\U000000A4',
-	"curvearrowleft;":                  '\U000021B6',
-	"curvearrowright;":                 '\U000021B7',
-	"cuvee;":                           '\U000022CE',
-	"cuwed;":                           '\U000022CF',
-	"cwconint;":                        '\U00002232',
-	"cwint;":                           '\U00002231',
-	"cylcty;":                          '\U0000232D',
-	"dArr;":                            '\U000021D3',
-	"dHar;":                            '\U00002965',
-	"dagger;":                          '\U00002020',
-	"daleth;":                          '\U00002138',
-	"darr;":                            '\U00002193',
-	"dash;":                            '\U00002010',
-	"dashv;":                           '\U000022A3',
-	"dbkarow;":                         '\U0000290F',
-	"dblac;":                           '\U000002DD',
-	"dcaron;":                          '\U0000010F',
-	"dcy;":                             '\U00000434',
-	"dd;":                              '\U00002146',
-	"ddagger;":                         '\U00002021',
-	"ddarr;":                           '\U000021CA',
-	"ddotseq;":                         '\U00002A77',
-	"deg;":                             '\U000000B0',
-	"delta;":                           '\U000003B4',
-	"demptyv;":                         '\U000029B1',
-	"dfisht;":                          '\U0000297F',
-	"dfr;":                             '\U0001D521',
-	"dharl;":                           '\U000021C3',
-	"dharr;":                           '\U000021C2',
-	"diam;":                            '\U000022C4',
-	"diamond;":                         '\U000022C4',
-	"diamondsuit;":                     '\U00002666',
-	"diams;":                           '\U00002666',
-	"die;":                             '\U000000A8',
-	"digamma;":                         '\U000003DD',
-	"disin;":                           '\U000022F2',
-	"div;":                             '\U000000F7',
-	"divide;":                          '\U000000F7',
-	"divideontimes;":                   '\U000022C7',
-	"divonx;":                          '\U000022C7',
-	"djcy;":                            '\U00000452',
-	"dlcorn;":                          '\U0000231E',
-	"dlcrop;":                          '\U0000230D',
-	"dollar;":                          '\U00000024',
-	"dopf;":                            '\U0001D555',
-	"dot;":                             '\U000002D9',
-	"doteq;":                           '\U00002250',
-	"doteqdot;":                        '\U00002251',
-	"dotminus;":                        '\U00002238',
-	"dotplus;":                         '\U00002214',
-	"dotsquare;":                       '\U000022A1',
-	"doublebarwedge;":                  '\U00002306',
-	"downarrow;":                       '\U00002193',
-	"downdownarrows;":                  '\U000021CA',
-	"downharpoonleft;":                 '\U000021C3',
-	"downharpoonright;":                '\U000021C2',
-	"drbkarow;":                        '\U00002910',
-	"drcorn;":                          '\U0000231F',
-	"drcrop;":                          '\U0000230C',
-	"dscr;":                            '\U0001D4B9',
-	"dscy;":                            '\U00000455',
-	"dsol;":                            '\U000029F6',
-	"dstrok;":                          '\U00000111',
-	"dtdot;":                           '\U000022F1',
-	"dtri;":                            '\U000025BF',
-	"dtrif;":                           '\U000025BE',
-	"duarr;":                           '\U000021F5',
-	"duhar;":                           '\U0000296F',
-	"dwangle;":                         '\U000029A6',
-	"dzcy;":                            '\U0000045F',
-	"dzigrarr;":                        '\U000027FF',
-	"eDDot;":                           '\U00002A77',
-	"eDot;":                            '\U00002251',
-	"eacute;":                          '\U000000E9',
-	"easter;":                          '\U00002A6E',
-	"ecaron;":                          '\U0000011B',
-	"ecir;":                            '\U00002256',
-	"ecirc;":                           '\U000000EA',
-	"ecolon;":                          '\U00002255',
-	"ecy;":                             '\U0000044D',
-	"edot;":                            '\U00000117',
-	"ee;":                              '\U00002147',
-	"efDot;":                           '\U00002252',
-	"efr;":                             '\U0001D522',
-	"eg;":                              '\U00002A9A',
-	"egrave;":                          '\U000000E8',
-	"egs;":                             '\U00002A96',
-	"egsdot;":                          '\U00002A98',
-	"el;":                              '\U00002A99',
-	"elinters;":                        '\U000023E7',
-	"ell;":                             '\U00002113',
-	"els;":                             '\U00002A95',
-	"elsdot;":                          '\U00002A97',
-	"emacr;":                           '\U00000113',
-	"empty;":                           '\U00002205',
-	"emptyset;":                        '\U00002205',
-	"emptyv;":                          '\U00002205',
-	"emsp;":                            '\U00002003',
-	"emsp13;":                          '\U00002004',
-	"emsp14;":                          '\U00002005',
-	"eng;":                             '\U0000014B',
-	"ensp;":                            '\U00002002',
-	"eogon;":                           '\U00000119',
-	"eopf;":                            '\U0001D556',
-	"epar;":                            '\U000022D5',
-	"eparsl;":                          '\U000029E3',
-	"eplus;":                           '\U00002A71',
-	"epsi;":                            '\U000003B5',
-	"epsilon;":                         '\U000003B5',
-	"epsiv;":                           '\U000003F5',
-	"eqcirc;":                          '\U00002256',
-	"eqcolon;":                         '\U00002255',
-	"eqsim;":                           '\U00002242',
-	"eqslantgtr;":                      '\U00002A96',
-	"eqslantless;":                     '\U00002A95',
-	"equals;":                          '\U0000003D',
-	"equest;":                          '\U0000225F',
-	"equiv;":                           '\U00002261',
-	"equivDD;":                         '\U00002A78',
-	"eqvparsl;":                        '\U000029E5',
-	"erDot;":                           '\U00002253',
-	"erarr;":                           '\U00002971',
-	"escr;":                            '\U0000212F',
-	"esdot;":                           '\U00002250',
-	"esim;":                            '\U00002242',
-	"eta;":                             '\U000003B7',
-	"eth;":                             '\U000000F0',
-	"euml;":                            '\U000000EB',
-	"euro;":                            '\U000020AC',
-	"excl;":                            '\U00000021',
-	"exist;":                           '\U00002203',
-	"expectation;":                     '\U00002130',
-	"exponentiale;":                    '\U00002147',
-	"fallingdotseq;":                   '\U00002252',
-	"fcy;":                             '\U00000444',
-	"female;":                          '\U00002640',
-	"ffilig;":                          '\U0000FB03',
-	"fflig;":                           '\U0000FB00',
-	"ffllig;":                          '\U0000FB04',
-	"ffr;":                             '\U0001D523',
-	"filig;":                           '\U0000FB01',
-	"flat;":                            '\U0000266D',
-	"fllig;":                           '\U0000FB02',
-	"fltns;":                           '\U000025B1',
-	"fnof;":                            '\U00000192',
-	"fopf;":                            '\U0001D557',
-	"forall;":                          '\U00002200',
-	"fork;":                            '\U000022D4',
-	"forkv;":                           '\U00002AD9',
-	"fpartint;":                        '\U00002A0D',
-	"frac12;":                          '\U000000BD',
-	"frac13;":                          '\U00002153',
-	"frac14;":                          '\U000000BC',
-	"frac15;":                          '\U00002155',
-	"frac16;":                          '\U00002159',
-	"frac18;":                          '\U0000215B',
-	"frac23;":                          '\U00002154',
-	"frac25;":                          '\U00002156',
-	"frac34;":                          '\U000000BE',
-	"frac35;":                          '\U00002157',
-	"frac38;":                          '\U0000215C',
-	"frac45;":                          '\U00002158',
-	"frac56;":                          '\U0000215A',
-	"frac58;":                          '\U0000215D',
-	"frac78;":                          '\U0000215E',
-	"frasl;":                           '\U00002044',
-	"frown;":                           '\U00002322',
-	"fscr;":                            '\U0001D4BB',
-	"gE;":                              '\U00002267',
-	"gEl;":                             '\U00002A8C',
-	"gacute;":                          '\U000001F5',
-	"gamma;":                           '\U000003B3',
-	"gammad;":                          '\U000003DD',
-	"gap;":                             '\U00002A86',
-	"gbreve;":                          '\U0000011F',
-	"gcirc;":                           '\U0000011D',
-	"gcy;":                             '\U00000433',
-	"gdot;":                            '\U00000121',
-	"ge;":                              '\U00002265',
-	"gel;":                             '\U000022DB',
-	"geq;":                             '\U00002265',
-	"geqq;":                            '\U00002267',
-	"geqslant;":                        '\U00002A7E',
-	"ges;":                             '\U00002A7E',
-	"gescc;":                           '\U00002AA9',
-	"gesdot;":                          '\U00002A80',
-	"gesdoto;":                         '\U00002A82',
-	"gesdotol;":                        '\U00002A84',
-	"gesles;":                          '\U00002A94',
-	"gfr;":                             '\U0001D524',
-	"gg;":                              '\U0000226B',
-	"ggg;":                             '\U000022D9',
-	"gimel;":                           '\U00002137',
-	"gjcy;":                            '\U00000453',
-	"gl;":                              '\U00002277',
-	"glE;":                             '\U00002A92',
-	"gla;":                             '\U00002AA5',
-	"glj;":                             '\U00002AA4',
-	"gnE;":                             '\U00002269',
-	"gnap;":                            '\U00002A8A',
-	"gnapprox;":                        '\U00002A8A',
-	"gne;":                             '\U00002A88',
-	"gneq;":                            '\U00002A88',
-	"gneqq;":                           '\U00002269',
-	"gnsim;":                           '\U000022E7',
-	"gopf;":                            '\U0001D558',
-	"grave;":                           '\U00000060',
-	"gscr;":                            '\U0000210A',
-	"gsim;":                            '\U00002273',
-	"gsime;":                           '\U00002A8E',
-	"gsiml;":                           '\U00002A90',
-	"gt;":                              '\U0000003E',
-	"gtcc;":                            '\U00002AA7',
-	"gtcir;":                           '\U00002A7A',
-	"gtdot;":                           '\U000022D7',
-	"gtlPar;":                          '\U00002995',
-	"gtquest;":                         '\U00002A7C',
-	"gtrapprox;":                       '\U00002A86',
-	"gtrarr;":                          '\U00002978',
-	"gtrdot;":                          '\U000022D7',
-	"gtreqless;":                       '\U000022DB',
-	"gtreqqless;":                      '\U00002A8C',
-	"gtrless;":                         '\U00002277',
-	"gtrsim;":                          '\U00002273',
-	"hArr;":                            '\U000021D4',
-	"hairsp;":                          '\U0000200A',
-	"half;":                            '\U000000BD',
-	"hamilt;":                          '\U0000210B',
-	"hardcy;":                          '\U0000044A',
-	"harr;":                            '\U00002194',
-	"harrcir;":                         '\U00002948',
-	"harrw;":                           '\U000021AD',
-	"hbar;":                            '\U0000210F',
-	"hcirc;":                           '\U00000125',
-	"hearts;":                          '\U00002665',
-	"heartsuit;":                       '\U00002665',
-	"hellip;":                          '\U00002026',
-	"hercon;":                          '\U000022B9',
-	"hfr;":                             '\U0001D525',
-	"hksearow;":                        '\U00002925',
-	"hkswarow;":                        '\U00002926',
-	"hoarr;":                           '\U000021FF',
-	"homtht;":                          '\U0000223B',
-	"hookleftarrow;":                   '\U000021A9',
-	"hookrightarrow;":                  '\U000021AA',
-	"hopf;":                            '\U0001D559',
-	"horbar;":                          '\U00002015',
-	"hscr;":                            '\U0001D4BD',
-	"hslash;":                          '\U0000210F',
-	"hstrok;":                          '\U00000127',
-	"hybull;":                          '\U00002043',
-	"hyphen;":                          '\U00002010',
-	"iacute;":                          '\U000000ED',
-	"ic;":                              '\U00002063',
-	"icirc;":                           '\U000000EE',
-	"icy;":                             '\U00000438',
-	"iecy;":                            '\U00000435',
-	"iexcl;":                           '\U000000A1',
-	"iff;":                             '\U000021D4',
-	"ifr;":                             '\U0001D526',
-	"igrave;":                          '\U000000EC',
-	"ii;":                              '\U00002148',
-	"iiiint;":                          '\U00002A0C',
-	"iiint;":                           '\U0000222D',
-	"iinfin;":                          '\U000029DC',
-	"iiota;":                           '\U00002129',
-	"ijlig;":                           '\U00000133',
-	"imacr;":                           '\U0000012B',
-	"image;":                           '\U00002111',
-	"imagline;":                        '\U00002110',
-	"imagpart;":                        '\U00002111',
-	"imath;":                           '\U00000131',
-	"imof;":                            '\U000022B7',
-	"imped;":                           '\U000001B5',
-	"in;":                              '\U00002208',
-	"incare;":                          '\U00002105',
-	"infin;":                           '\U0000221E',
-	"infintie;":                        '\U000029DD',
-	"inodot;":                          '\U00000131',
-	"int;":                             '\U0000222B',
-	"intcal;":                          '\U000022BA',
-	"integers;":                        '\U00002124',
-	"intercal;":                        '\U000022BA',
-	"intlarhk;":                        '\U00002A17',
-	"intprod;":                         '\U00002A3C',
-	"iocy;":                            '\U00000451',
-	"iogon;":                           '\U0000012F',
-	"iopf;":                            '\U0001D55A',
-	"iota;":                            '\U000003B9',
-	"iprod;":                           '\U00002A3C',
-	"iquest;":                          '\U000000BF',
-	"iscr;":                            '\U0001D4BE',
-	"isin;":                            '\U00002208',
-	"isinE;":                           '\U000022F9',
-	"isindot;":                         '\U000022F5',
-	"isins;":                           '\U000022F4',
-	"isinsv;":                          '\U000022F3',
-	"isinv;":                           '\U00002208',
-	"it;":                              '\U00002062',
-	"itilde;":                          '\U00000129',
-	"iukcy;":                           '\U00000456',
-	"iuml;":                            '\U000000EF',
-	"jcirc;":                           '\U00000135',
-	"jcy;":                             '\U00000439',
-	"jfr;":                             '\U0001D527',
-	"jmath;":                           '\U00000237',
-	"jopf;":                            '\U0001D55B',
-	"jscr;":                            '\U0001D4BF',
-	"jsercy;":                          '\U00000458',
-	"jukcy;":                           '\U00000454',
-	"kappa;":                           '\U000003BA',
-	"kappav;":                          '\U000003F0',
-	"kcedil;":                          '\U00000137',
-	"kcy;":                             '\U0000043A',
-	"kfr;":                             '\U0001D528',
-	"kgreen;":                          '\U00000138',
-	"khcy;":                            '\U00000445',
-	"kjcy;":                            '\U0000045C',
-	"kopf;":                            '\U0001D55C',
-	"kscr;":                            '\U0001D4C0',
-	"lAarr;":                           '\U000021DA',
-	"lArr;":                            '\U000021D0',
-	"lAtail;":                          '\U0000291B',
-	"lBarr;":                           '\U0000290E',
-	"lE;":                              '\U00002266',
-	"lEg;":                             '\U00002A8B',
-	"lHar;":                            '\U00002962',
-	"lacute;":                          '\U0000013A',
-	"laemptyv;":                        '\U000029B4',
-	"lagran;":                          '\U00002112',
-	"lambda;":                          '\U000003BB',
-	"lang;":                            '\U000027E8',
-	"langd;":                           '\U00002991',
-	"langle;":                          '\U000027E8',
-	"lap;":                             '\U00002A85',
-	"laquo;":                           '\U000000AB',
-	"larr;":                            '\U00002190',
-	"larrb;":                           '\U000021E4',
-	"larrbfs;":                         '\U0000291F',
-	"larrfs;":                          '\U0000291D',
-	"larrhk;":                          '\U000021A9',
-	"larrlp;":                          '\U000021AB',
-	"larrpl;":                          '\U00002939',
-	"larrsim;":                         '\U00002973',
-	"larrtl;":                          '\U000021A2',
-	"lat;":                             '\U00002AAB',
-	"latail;":                          '\U00002919',
-	"late;":                            '\U00002AAD',
-	"lbarr;":                           '\U0000290C',
-	"lbbrk;":                           '\U00002772',
-	"lbrace;":                          '\U0000007B',
-	"lbrack;":                          '\U0000005B',
-	"lbrke;":                           '\U0000298B',
-	"lbrksld;":                         '\U0000298F',
-	"lbrkslu;":                         '\U0000298D',
-	"lcaron;":                          '\U0000013E',
-	"lcedil;":                          '\U0000013C',
-	"lceil;":                           '\U00002308',
-	"lcub;":                            '\U0000007B',
-	"lcy;":                             '\U0000043B',
-	"ldca;":                            '\U00002936',
-	"ldquo;":                           '\U0000201C',
-	"ldquor;":                          '\U0000201E',
-	"ldrdhar;":                         '\U00002967',
-	"ldrushar;":                        '\U0000294B',
-	"ldsh;":                            '\U000021B2',
-	"le;":                              '\U00002264',
-	"leftarrow;":                       '\U00002190',
-	"leftarrowtail;":                   '\U000021A2',
-	"leftharpoondown;":                 '\U000021BD',
-	"leftharpoonup;":                   '\U000021BC',
-	"leftleftarrows;":                  '\U000021C7',
-	"leftrightarrow;":                  '\U00002194',
-	"leftrightarrows;":                 '\U000021C6',
-	"leftrightharpoons;":               '\U000021CB',
-	"leftrightsquigarrow;":             '\U000021AD',
-	"leftthreetimes;":                  '\U000022CB',
-	"leg;":                             '\U000022DA',
-	"leq;":                             '\U00002264',
-	"leqq;":                            '\U00002266',
-	"leqslant;":                        '\U00002A7D',
-	"les;":                             '\U00002A7D',
-	"lescc;":                           '\U00002AA8',
-	"lesdot;":                          '\U00002A7F',
-	"lesdoto;":                         '\U00002A81',
-	"lesdotor;":                        '\U00002A83',
-	"lesges;":                          '\U00002A93',
-	"lessapprox;":                      '\U00002A85',
-	"lessdot;":                         '\U000022D6',
-	"lesseqgtr;":                       '\U000022DA',
-	"lesseqqgtr;":                      '\U00002A8B',
-	"lessgtr;":                         '\U00002276',
-	"lesssim;":                         '\U00002272',
-	"lfisht;":                          '\U0000297C',
-	"lfloor;":                          '\U0000230A',
-	"lfr;":                             '\U0001D529',
-	"lg;":                              '\U00002276',
-	"lgE;":                             '\U00002A91',
-	"lhard;":                           '\U000021BD',
-	"lharu;":                           '\U000021BC',
-	"lharul;":                          '\U0000296A',
-	"lhblk;":                           '\U00002584',
-	"ljcy;":                            '\U00000459',
-	"ll;":                              '\U0000226A',
-	"llarr;":                           '\U000021C7',
-	"llcorner;":                        '\U0000231E',
-	"llhard;":                          '\U0000296B',
-	"lltri;":                           '\U000025FA',
-	"lmidot;":                          '\U00000140',
-	"lmoust;":                          '\U000023B0',
-	"lmoustache;":                      '\U000023B0',
-	"lnE;":                             '\U00002268',
-	"lnap;":                            '\U00002A89',
-	"lnapprox;":                        '\U00002A89',
-	"lne;":                             '\U00002A87',
-	"lneq;":                            '\U00002A87',
-	"lneqq;":                           '\U00002268',
-	"lnsim;":                           '\U000022E6',
-	"loang;":                           '\U000027EC',
-	"loarr;":                           '\U000021FD',
-	"lobrk;":                           '\U000027E6',
-	"longleftarrow;":                   '\U000027F5',
-	"longleftrightarrow;":              '\U000027F7',
-	"longmapsto;":                      '\U000027FC',
-	"longrightarrow;":                  '\U000027F6',
-	"looparrowleft;":                   '\U000021AB',
-	"looparrowright;":                  '\U000021AC',
-	"lopar;":                           '\U00002985',
-	"lopf;":                            '\U0001D55D',
-	"loplus;":                          '\U00002A2D',
-	"lotimes;":                         '\U00002A34',
-	"lowast;":                          '\U00002217',
-	"lowbar;":                          '\U0000005F',
-	"loz;":                             '\U000025CA',
-	"lozenge;":                         '\U000025CA',
-	"lozf;":                            '\U000029EB',
-	"lpar;":                            '\U00000028',
-	"lparlt;":                          '\U00002993',
-	"lrarr;":                           '\U000021C6',
-	"lrcorner;":                        '\U0000231F',
-	"lrhar;":                           '\U000021CB',
-	"lrhard;":                          '\U0000296D',
-	"lrm;":                             '\U0000200E',
-	"lrtri;":                           '\U000022BF',
-	"lsaquo;":                          '\U00002039',
-	"lscr;":                            '\U0001D4C1',
-	"lsh;":                             '\U000021B0',
-	"lsim;":                            '\U00002272',
-	"lsime;":                           '\U00002A8D',
-	"lsimg;":                           '\U00002A8F',
-	"lsqb;":                            '\U0000005B',
-	"lsquo;":                           '\U00002018',
-	"lsquor;":                          '\U0000201A',
-	"lstrok;":                          '\U00000142',
-	"lt;":                              '\U0000003C',
-	"ltcc;":                            '\U00002AA6',
-	"ltcir;":                           '\U00002A79',
-	"ltdot;":                           '\U000022D6',
-	"lthree;":                          '\U000022CB',
-	"ltimes;":                          '\U000022C9',
-	"ltlarr;":                          '\U00002976',
-	"ltquest;":                         '\U00002A7B',
-	"ltrPar;":                          '\U00002996',
-	"ltri;":                            '\U000025C3',
-	"ltrie;":                           '\U000022B4',
-	"ltrif;":                           '\U000025C2',
-	"lurdshar;":                        '\U0000294A',
-	"luruhar;":                         '\U00002966',
-	"mDDot;":                           '\U0000223A',
-	"macr;":                            '\U000000AF',
-	"male;":                            '\U00002642',
-	"malt;":                            '\U00002720',
-	"maltese;":                         '\U00002720',
-	"map;":                             '\U000021A6',
-	"mapsto;":                          '\U000021A6',
-	"mapstodown;":                      '\U000021A7',
-	"mapstoleft;":                      '\U000021A4',
-	"mapstoup;":                        '\U000021A5',
-	"marker;":                          '\U000025AE',
-	"mcomma;":                          '\U00002A29',
-	"mcy;":                             '\U0000043C',
-	"mdash;":                           '\U00002014',
-	"measuredangle;":                   '\U00002221',
-	"mfr;":                             '\U0001D52A',
-	"mho;":                             '\U00002127',
-	"micro;":                           '\U000000B5',
-	"mid;":                             '\U00002223',
-	"midast;":                          '\U0000002A',
-	"midcir;":                          '\U00002AF0',
-	"middot;":                          '\U000000B7',
-	"minus;":                           '\U00002212',
-	"minusb;":                          '\U0000229F',
-	"minusd;":                          '\U00002238',
-	"minusdu;":                         '\U00002A2A',
-	"mlcp;":                            '\U00002ADB',
-	"mldr;":                            '\U00002026',
-	"mnplus;":                          '\U00002213',
-	"models;":                          '\U000022A7',
-	"mopf;":                            '\U0001D55E',
-	"mp;":                              '\U00002213',
-	"mscr;":                            '\U0001D4C2',
-	"mstpos;":                          '\U0000223E',
-	"mu;":                              '\U000003BC',
-	"multimap;":                        '\U000022B8',
-	"mumap;":                           '\U000022B8',
-	"nLeftarrow;":                      '\U000021CD',
-	"nLeftrightarrow;":                 '\U000021CE',
-	"nRightarrow;":                     '\U000021CF',
-	"nVDash;":                          '\U000022AF',
-	"nVdash;":                          '\U000022AE',
-	"nabla;":                           '\U00002207',
-	"nacute;":                          '\U00000144',
-	"nap;":                             '\U00002249',
-	"napos;":                           '\U00000149',
-	"napprox;":                         '\U00002249',
-	"natur;":                           '\U0000266E',
-	"natural;":                         '\U0000266E',
-	"naturals;":                        '\U00002115',
-	"nbsp;":                            '\U000000A0',
-	"ncap;":                            '\U00002A43',
-	"ncaron;":                          '\U00000148',
-	"ncedil;":                          '\U00000146',
-	"ncong;":                           '\U00002247',
-	"ncup;":                            '\U00002A42',
-	"ncy;":                             '\U0000043D',
-	"ndash;":                           '\U00002013',
-	"ne;":                              '\U00002260',
-	"neArr;":                           '\U000021D7',
-	"nearhk;":                          '\U00002924',
-	"nearr;":                           '\U00002197',
-	"nearrow;":                         '\U00002197',
-	"nequiv;":                          '\U00002262',
-	"nesear;":                          '\U00002928',
-	"nexist;":                          '\U00002204',
-	"nexists;":                         '\U00002204',
-	"nfr;":                             '\U0001D52B',
-	"nge;":                             '\U00002271',
-	"ngeq;":                            '\U00002271',
-	"ngsim;":                           '\U00002275',
-	"ngt;":                             '\U0000226F',
-	"ngtr;":                            '\U0000226F',
-	"nhArr;":                           '\U000021CE',
-	"nharr;":                           '\U000021AE',
-	"nhpar;":                           '\U00002AF2',
-	"ni;":                              '\U0000220B',
-	"nis;":                             '\U000022FC',
-	"nisd;":                            '\U000022FA',
-	"niv;":                             '\U0000220B',
-	"njcy;":                            '\U0000045A',
-	"nlArr;":                           '\U000021CD',
-	"nlarr;":                           '\U0000219A',
-	"nldr;":                            '\U00002025',
-	"nle;":                             '\U00002270',
-	"nleftarrow;":                      '\U0000219A',
-	"nleftrightarrow;":                 '\U000021AE',
-	"nleq;":                            '\U00002270',
-	"nless;":                           '\U0000226E',
-	"nlsim;":                           '\U00002274',
-	"nlt;":                             '\U0000226E',
-	"nltri;":                           '\U000022EA',
-	"nltrie;":                          '\U000022EC',
-	"nmid;":                            '\U00002224',
-	"nopf;":                            '\U0001D55F',
-	"not;":                             '\U000000AC',
-	"notin;":                           '\U00002209',
-	"notinva;":                         '\U00002209',
-	"notinvb;":                         '\U000022F7',
-	"notinvc;":                         '\U000022F6',
-	"notni;":                           '\U0000220C',
-	"notniva;":                         '\U0000220C',
-	"notnivb;":                         '\U000022FE',
-	"notnivc;":                         '\U000022FD',
-	"npar;":                            '\U00002226',
-	"nparallel;":                       '\U00002226',
-	"npolint;":                         '\U00002A14',
-	"npr;":                             '\U00002280',
-	"nprcue;":                          '\U000022E0',
-	"nprec;":                           '\U00002280',
-	"nrArr;":                           '\U000021CF',
-	"nrarr;":                           '\U0000219B',
-	"nrightarrow;":                     '\U0000219B',
-	"nrtri;":                           '\U000022EB',
-	"nrtrie;":                          '\U000022ED',
-	"nsc;":                             '\U00002281',
-	"nsccue;":                          '\U000022E1',
-	"nscr;":                            '\U0001D4C3',
-	"nshortmid;":                       '\U00002224',
-	"nshortparallel;":                  '\U00002226',
-	"nsim;":                            '\U00002241',
-	"nsime;":                           '\U00002244',
-	"nsimeq;":                          '\U00002244',
-	"nsmid;":                           '\U00002224',
-	"nspar;":                           '\U00002226',
-	"nsqsube;":                         '\U000022E2',
-	"nsqsupe;":                         '\U000022E3',
-	"nsub;":                            '\U00002284',
-	"nsube;":                           '\U00002288',
-	"nsubseteq;":                       '\U00002288',
-	"nsucc;":                           '\U00002281',
-	"nsup;":                            '\U00002285',
-	"nsupe;":                           '\U00002289',
-	"nsupseteq;":                       '\U00002289',
-	"ntgl;":                            '\U00002279',
-	"ntilde;":                          '\U000000F1',
-	"ntlg;":                            '\U00002278',
-	"ntriangleleft;":                   '\U000022EA',
-	"ntrianglelefteq;":                 '\U000022EC',
-	"ntriangleright;":                  '\U000022EB',
-	"ntrianglerighteq;":                '\U000022ED',
-	"nu;":                              '\U000003BD',
-	"num;":                             '\U00000023',
-	"numero;":                          '\U00002116',
-	"numsp;":                           '\U00002007',
-	"nvDash;":                          '\U000022AD',
-	"nvHarr;":                          '\U00002904',
-	"nvdash;":                          '\U000022AC',
-	"nvinfin;":                         '\U000029DE',
-	"nvlArr;":                          '\U00002902',
-	"nvrArr;":                          '\U00002903',
-	"nwArr;":                           '\U000021D6',
-	"nwarhk;":                          '\U00002923',
-	"nwarr;":                           '\U00002196',
-	"nwarrow;":                         '\U00002196',
-	"nwnear;":                          '\U00002927',
-	"oS;":                              '\U000024C8',
-	"oacute;":                          '\U000000F3',
-	"oast;":                            '\U0000229B',
-	"ocir;":                            '\U0000229A',
-	"ocirc;":                           '\U000000F4',
-	"ocy;":                             '\U0000043E',
-	"odash;":                           '\U0000229D',
-	"odblac;":                          '\U00000151',
-	"odiv;":                            '\U00002A38',
-	"odot;":                            '\U00002299',
-	"odsold;":                          '\U000029BC',
-	"oelig;":                           '\U00000153',
-	"ofcir;":                           '\U000029BF',
-	"ofr;":                             '\U0001D52C',
-	"ogon;":                            '\U000002DB',
-	"ograve;":                          '\U000000F2',
-	"ogt;":                             '\U000029C1',
-	"ohbar;":                           '\U000029B5',
-	"ohm;":                             '\U000003A9',
-	"oint;":                            '\U0000222E',
-	"olarr;":                           '\U000021BA',
-	"olcir;":                           '\U000029BE',
-	"olcross;":                         '\U000029BB',
-	"oline;":                           '\U0000203E',
-	"olt;":                             '\U000029C0',
-	"omacr;":                           '\U0000014D',
-	"omega;":                           '\U000003C9',
-	"omicron;":                         '\U000003BF',
-	"omid;":                            '\U000029B6',
-	"ominus;":                          '\U00002296',
-	"oopf;":                            '\U0001D560',
-	"opar;":                            '\U000029B7',
-	"operp;":                           '\U000029B9',
-	"oplus;":                           '\U00002295',
-	"or;":                              '\U00002228',
-	"orarr;":                           '\U000021BB',
-	"ord;":                             '\U00002A5D',
-	"order;":                           '\U00002134',
-	"orderof;":                         '\U00002134',
-	"ordf;":                            '\U000000AA',
-	"ordm;":                            '\U000000BA',
-	"origof;":                          '\U000022B6',
-	"oror;":                            '\U00002A56',
-	"orslope;":                         '\U00002A57',
-	"orv;":                             '\U00002A5B',
-	"oscr;":                            '\U00002134',
-	"oslash;":                          '\U000000F8',
-	"osol;":                            '\U00002298',
-	"otilde;":                          '\U000000F5',
-	"otimes;":                          '\U00002297',
-	"otimesas;":                        '\U00002A36',
-	"ouml;":                            '\U000000F6',
-	"ovbar;":                           '\U0000233D',
-	"par;":                             '\U00002225',
-	"para;":                            '\U000000B6',
-	"parallel;":                        '\U00002225',
-	"parsim;":                          '\U00002AF3',
-	"parsl;":                           '\U00002AFD',
-	"part;":                            '\U00002202',
-	"pcy;":                             '\U0000043F',
-	"percnt;":                          '\U00000025',
-	"period;":                          '\U0000002E',
-	"permil;":                          '\U00002030',
-	"perp;":                            '\U000022A5',
-	"pertenk;":                         '\U00002031',
-	"pfr;":                             '\U0001D52D',
-	"phi;":                             '\U000003C6',
-	"phiv;":                            '\U000003D5',
-	"phmmat;":                          '\U00002133',
-	"phone;":                           '\U0000260E',
-	"pi;":                              '\U000003C0',
-	"pitchfork;":                       '\U000022D4',
-	"piv;":                             '\U000003D6',
-	"planck;":                          '\U0000210F',
-	"planckh;":                         '\U0000210E',
-	"plankv;":                          '\U0000210F',
-	"plus;":                            '\U0000002B',
-	"plusacir;":                        '\U00002A23',
-	"plusb;":                           '\U0000229E',
-	"pluscir;":                         '\U00002A22',
-	"plusdo;":                          '\U00002214',
-	"plusdu;":                          '\U00002A25',
-	"pluse;":                           '\U00002A72',
-	"plusmn;":                          '\U000000B1',
-	"plussim;":                         '\U00002A26',
-	"plustwo;":                         '\U00002A27',
-	"pm;":                              '\U000000B1',
-	"pointint;":                        '\U00002A15',
-	"popf;":                            '\U0001D561',
-	"pound;":                           '\U000000A3',
-	"pr;":                              '\U0000227A',
-	"prE;":                             '\U00002AB3',
-	"prap;":                            '\U00002AB7',
-	"prcue;":                           '\U0000227C',
-	"pre;":                             '\U00002AAF',
-	"prec;":                            '\U0000227A',
-	"precapprox;":                      '\U00002AB7',
-	"preccurlyeq;":                     '\U0000227C',
-	"preceq;":                          '\U00002AAF',
-	"precnapprox;":                     '\U00002AB9',
-	"precneqq;":                        '\U00002AB5',
-	"precnsim;":                        '\U000022E8',
-	"precsim;":                         '\U0000227E',
-	"prime;":                           '\U00002032',
-	"primes;":                          '\U00002119',
-	"prnE;":                            '\U00002AB5',
-	"prnap;":                           '\U00002AB9',
-	"prnsim;":                          '\U000022E8',
-	"prod;":                            '\U0000220F',
-	"profalar;":                        '\U0000232E',
-	"profline;":                        '\U00002312',
-	"profsurf;":                        '\U00002313',
-	"prop;":                            '\U0000221D',
-	"propto;":                          '\U0000221D',
-	"prsim;":                           '\U0000227E',
-	"prurel;":                          '\U000022B0',
-	"pscr;":                            '\U0001D4C5',
-	"psi;":                             '\U000003C8',
-	"puncsp;":                          '\U00002008',
-	"qfr;":                             '\U0001D52E',
-	"qint;":                            '\U00002A0C',
-	"qopf;":                            '\U0001D562',
-	"qprime;":                          '\U00002057',
-	"qscr;":                            '\U0001D4C6',
-	"quaternions;":                     '\U0000210D',
-	"quatint;":                         '\U00002A16',
-	"quest;":                           '\U0000003F',
-	"questeq;":                         '\U0000225F',
-	"quot;":                            '\U00000022',
-	"rAarr;":                           '\U000021DB',
-	"rArr;":                            '\U000021D2',
-	"rAtail;":                          '\U0000291C',
-	"rBarr;":                           '\U0000290F',
-	"rHar;":                            '\U00002964',
-	"racute;":                          '\U00000155',
-	"radic;":                           '\U0000221A',
-	"raemptyv;":                        '\U000029B3',
-	"rang;":                            '\U000027E9',
-	"rangd;":                           '\U00002992',
-	"range;":                           '\U000029A5',
-	"rangle;":                          '\U000027E9',
-	"raquo;":                           '\U000000BB',
-	"rarr;":                            '\U00002192',
-	"rarrap;":                          '\U00002975',
-	"rarrb;":                           '\U000021E5',
-	"rarrbfs;":                         '\U00002920',
-	"rarrc;":                           '\U00002933',
-	"rarrfs;":                          '\U0000291E',
-	"rarrhk;":                          '\U000021AA',
-	"rarrlp;":                          '\U000021AC',
-	"rarrpl;":                          '\U00002945',
-	"rarrsim;":                         '\U00002974',
-	"rarrtl;":                          '\U000021A3',
-	"rarrw;":                           '\U0000219D',
-	"ratail;":                          '\U0000291A',
-	"ratio;":                           '\U00002236',
-	"rationals;":                       '\U0000211A',
-	"rbarr;":                           '\U0000290D',
-	"rbbrk;":                           '\U00002773',
-	"rbrace;":                          '\U0000007D',
-	"rbrack;":                          '\U0000005D',
-	"rbrke;":                           '\U0000298C',
-	"rbrksld;":                         '\U0000298E',
-	"rbrkslu;":                         '\U00002990',
-	"rcaron;":                          '\U00000159',
-	"rcedil;":                          '\U00000157',
-	"rceil;":                           '\U00002309',
-	"rcub;":                            '\U0000007D',
-	"rcy;":                             '\U00000440',
-	"rdca;":                            '\U00002937',
-	"rdldhar;":                         '\U00002969',
-	"rdquo;":                           '\U0000201D',
-	"rdquor;":                          '\U0000201D',
-	"rdsh;":                            '\U000021B3',
-	"real;":                            '\U0000211C',
-	"realine;":                         '\U0000211B',
-	"realpart;":                        '\U0000211C',
-	"reals;":                           '\U0000211D',
-	"rect;":                            '\U000025AD',
-	"reg;":                             '\U000000AE',
-	"rfisht;":                          '\U0000297D',
-	"rfloor;":                          '\U0000230B',
-	"rfr;":                             '\U0001D52F',
-	"rhard;":                           '\U000021C1',
-	"rharu;":                           '\U000021C0',
-	"rharul;":                          '\U0000296C',
-	"rho;":                             '\U000003C1',
-	"rhov;":                            '\U000003F1',
-	"rightarrow;":                      '\U00002192',
-	"rightarrowtail;":                  '\U000021A3',
-	"rightharpoondown;":                '\U000021C1',
-	"rightharpoonup;":                  '\U000021C0',
-	"rightleftarrows;":                 '\U000021C4',
-	"rightleftharpoons;":               '\U000021CC',
-	"rightrightarrows;":                '\U000021C9',
-	"rightsquigarrow;":                 '\U0000219D',
-	"rightthreetimes;":                 '\U000022CC',
-	"ring;":                            '\U000002DA',
-	"risingdotseq;":                    '\U00002253',
-	"rlarr;":                           '\U000021C4',
-	"rlhar;":                           '\U000021CC',
-	"rlm;":                             '\U0000200F',
-	"rmoust;":                          '\U000023B1',
-	"rmoustache;":                      '\U000023B1',
-	"rnmid;":                           '\U00002AEE',
-	"roang;":                           '\U000027ED',
-	"roarr;":                           '\U000021FE',
-	"robrk;":                           '\U000027E7',
-	"ropar;":                           '\U00002986',
-	"ropf;":                            '\U0001D563',
-	"roplus;":                          '\U00002A2E',
-	"rotimes;":                         '\U00002A35',
-	"rpar;":                            '\U00000029',
-	"rpargt;":                          '\U00002994',
-	"rppolint;":                        '\U00002A12',
-	"rrarr;":                           '\U000021C9',
-	"rsaquo;":                          '\U0000203A',
-	"rscr;":                            '\U0001D4C7',
-	"rsh;":                             '\U000021B1',
-	"rsqb;":                            '\U0000005D',
-	"rsquo;":                           '\U00002019',
-	"rsquor;":                          '\U00002019',
-	"rthree;":                          '\U000022CC',
-	"rtimes;":                          '\U000022CA',
-	"rtri;":                            '\U000025B9',
-	"rtrie;":                           '\U000022B5',
-	"rtrif;":                           '\U000025B8',
-	"rtriltri;":                        '\U000029CE',
-	"ruluhar;":                         '\U00002968',
-	"rx;":                              '\U0000211E',
-	"sacute;":                          '\U0000015B',
-	"sbquo;":                           '\U0000201A',
-	"sc;":                              '\U0000227B',
-	"scE;":                             '\U00002AB4',
-	"scap;":                            '\U00002AB8',
-	"scaron;":                          '\U00000161',
-	"sccue;":                           '\U0000227D',
-	"sce;":                             '\U00002AB0',
-	"scedil;":                          '\U0000015F',
-	"scirc;":                           '\U0000015D',
-	"scnE;":                            '\U00002AB6',
-	"scnap;":                           '\U00002ABA',
-	"scnsim;":                          '\U000022E9',
-	"scpolint;":                        '\U00002A13',
-	"scsim;":                           '\U0000227F',
-	"scy;":                             '\U00000441',
-	"sdot;":                            '\U000022C5',
-	"sdotb;":                           '\U000022A1',
-	"sdote;":                           '\U00002A66',
-	"seArr;":                           '\U000021D8',
-	"searhk;":                          '\U00002925',
-	"searr;":                           '\U00002198',
-	"searrow;":                         '\U00002198',
-	"sect;":                            '\U000000A7',
-	"semi;":                            '\U0000003B',
-	"seswar;":                          '\U00002929',
-	"setminus;":                        '\U00002216',
-	"setmn;":                           '\U00002216',
-	"sext;":                            '\U00002736',
-	"sfr;":                             '\U0001D530',
-	"sfrown;":                          '\U00002322',
-	"sharp;":                           '\U0000266F',
-	"shchcy;":                          '\U00000449',
-	"shcy;":                            '\U00000448',
-	"shortmid;":                        '\U00002223',
-	"shortparallel;":                   '\U00002225',
-	"shy;":                             '\U000000AD',
-	"sigma;":                           '\U000003C3',
-	"sigmaf;":                          '\U000003C2',
-	"sigmav;":                          '\U000003C2',
-	"sim;":                             '\U0000223C',
-	"simdot;":                          '\U00002A6A',
-	"sime;":                            '\U00002243',
-	"simeq;":                           '\U00002243',
-	"simg;":                            '\U00002A9E',
-	"simgE;":                           '\U00002AA0',
-	"siml;":                            '\U00002A9D',
-	"simlE;":                           '\U00002A9F',
-	"simne;":                           '\U00002246',
-	"simplus;":                         '\U00002A24',
-	"simrarr;":                         '\U00002972',
-	"slarr;":                           '\U00002190',
-	"smallsetminus;":                   '\U00002216',
-	"smashp;":                          '\U00002A33',
-	"smeparsl;":                        '\U000029E4',
-	"smid;":                            '\U00002223',
-	"smile;":                           '\U00002323',
-	"smt;":                             '\U00002AAA',
-	"smte;":                            '\U00002AAC',
-	"softcy;":                          '\U0000044C',
-	"sol;":                             '\U0000002F',
-	"solb;":                            '\U000029C4',
-	"solbar;":                          '\U0000233F',
-	"sopf;":                            '\U0001D564',
-	"spades;":                          '\U00002660',
-	"spadesuit;":                       '\U00002660',
-	"spar;":                            '\U00002225',
-	"sqcap;":                           '\U00002293',
-	"sqcup;":                           '\U00002294',
-	"sqsub;":                           '\U0000228F',
-	"sqsube;":                          '\U00002291',
-	"sqsubset;":                        '\U0000228F',
-	"sqsubseteq;":                      '\U00002291',
-	"sqsup;":                           '\U00002290',
-	"sqsupe;":                          '\U00002292',
-	"sqsupset;":                        '\U00002290',
-	"sqsupseteq;":                      '\U00002292',
-	"squ;":                             '\U000025A1',
-	"square;":                          '\U000025A1',
-	"squarf;":                          '\U000025AA',
-	"squf;":                            '\U000025AA',
-	"srarr;":                           '\U00002192',
-	"sscr;":                            '\U0001D4C8',
-	"ssetmn;":                          '\U00002216',
-	"ssmile;":                          '\U00002323',
-	"sstarf;":                          '\U000022C6',
-	"star;":                            '\U00002606',
-	"starf;":                           '\U00002605',
-	"straightepsilon;":                 '\U000003F5',
-	"straightphi;":                     '\U000003D5',
-	"strns;":                           '\U000000AF',
-	"sub;":                             '\U00002282',
-	"subE;":                            '\U00002AC5',
-	"subdot;":                          '\U00002ABD',
-	"sube;":                            '\U00002286',
-	"subedot;":                         '\U00002AC3',
-	"submult;":                         '\U00002AC1',
-	"subnE;":                           '\U00002ACB',
-	"subne;":                           '\U0000228A',
-	"subplus;":                         '\U00002ABF',
-	"subrarr;":                         '\U00002979',
-	"subset;":                          '\U00002282',
-	"subseteq;":                        '\U00002286',
-	"subseteqq;":                       '\U00002AC5',
-	"subsetneq;":                       '\U0000228A',
-	"subsetneqq;":                      '\U00002ACB',
-	"subsim;":                          '\U00002AC7',
-	"subsub;":                          '\U00002AD5',
-	"subsup;":                          '\U00002AD3',
-	"succ;":                            '\U0000227B',
-	"succapprox;":                      '\U00002AB8',
-	"succcurlyeq;":                     '\U0000227D',
-	"succeq;":                          '\U00002AB0',
-	"succnapprox;":                     '\U00002ABA',
-	"succneqq;":                        '\U00002AB6',
-	"succnsim;":                        '\U000022E9',
-	"succsim;":                         '\U0000227F',
-	"sum;":                             '\U00002211',
-	"sung;":                            '\U0000266A',
-	"sup;":                             '\U00002283',
-	"sup1;":                            '\U000000B9',
-	"sup2;":                            '\U000000B2',
-	"sup3;":                            '\U000000B3',
-	"supE;":                            '\U00002AC6',
-	"supdot;":                          '\U00002ABE',
-	"supdsub;":                         '\U00002AD8',
-	"supe;":                            '\U00002287',
-	"supedot;":                         '\U00002AC4',
-	"suphsol;":                         '\U000027C9',
-	"suphsub;":                         '\U00002AD7',
-	"suplarr;":                         '\U0000297B',
-	"supmult;":                         '\U00002AC2',
-	"supnE;":                           '\U00002ACC',
-	"supne;":                           '\U0000228B',
-	"supplus;":                         '\U00002AC0',
-	"supset;":                          '\U00002283',
-	"supseteq;":                        '\U00002287',
-	"supseteqq;":                       '\U00002AC6',
-	"supsetneq;":                       '\U0000228B',
-	"supsetneqq;":                      '\U00002ACC',
-	"supsim;":                          '\U00002AC8',
-	"supsub;":                          '\U00002AD4',
-	"supsup;":                          '\U00002AD6',
-	"swArr;":                           '\U000021D9',
-	"swarhk;":                          '\U00002926',
-	"swarr;":                           '\U00002199',
-	"swarrow;":                         '\U00002199',
-	"swnwar;":                          '\U0000292A',
-	"szlig;":                           '\U000000DF',
-	"target;":                          '\U00002316',
-	"tau;":                             '\U000003C4',
-	"tbrk;":                            '\U000023B4',
-	"tcaron;":                          '\U00000165',
-	"tcedil;":                          '\U00000163',
-	"tcy;":                             '\U00000442',
-	"tdot;":                            '\U000020DB',
-	"telrec;":                          '\U00002315',
-	"tfr;":                             '\U0001D531',
-	"there4;":                          '\U00002234',
-	"therefore;":                       '\U00002234',
-	"theta;":                           '\U000003B8',
-	"thetasym;":                        '\U000003D1',
-	"thetav;":                          '\U000003D1',
-	"thickapprox;":                     '\U00002248',
-	"thicksim;":                        '\U0000223C',
-	"thinsp;":                          '\U00002009',
-	"thkap;":                           '\U00002248',
-	"thksim;":                          '\U0000223C',
-	"thorn;":                           '\U000000FE',
-	"tilde;":                           '\U000002DC',
-	"times;":                           '\U000000D7',
-	"timesb;":                          '\U000022A0',
-	"timesbar;":                        '\U00002A31',
-	"timesd;":                          '\U00002A30',
-	"tint;":                            '\U0000222D',
-	"toea;":                            '\U00002928',
-	"top;":                             '\U000022A4',
-	"topbot;":                          '\U00002336',
-	"topcir;":                          '\U00002AF1',
-	"topf;":                            '\U0001D565',
-	"topfork;":                         '\U00002ADA',
-	"tosa;":                            '\U00002929',
-	"tprime;":                          '\U00002034',
-	"trade;":                           '\U00002122',
-	"triangle;":                        '\U000025B5',
-	"triangledown;":                    '\U000025BF',
-	"triangleleft;":                    '\U000025C3',
-	"trianglelefteq;":                  '\U000022B4',
-	"triangleq;":                       '\U0000225C',
-	"triangleright;":                   '\U000025B9',
-	"trianglerighteq;":                 '\U000022B5',
-	"tridot;":                          '\U000025EC',
-	"trie;":                            '\U0000225C',
-	"triminus;":                        '\U00002A3A',
-	"triplus;":                         '\U00002A39',
-	"trisb;":                           '\U000029CD',
-	"tritime;":                         '\U00002A3B',
-	"trpezium;":                        '\U000023E2',
-	"tscr;":                            '\U0001D4C9',
-	"tscy;":                            '\U00000446',
-	"tshcy;":                           '\U0000045B',
-	"tstrok;":                          '\U00000167',
-	"twixt;":                           '\U0000226C',
-	"twoheadleftarrow;":                '\U0000219E',
-	"twoheadrightarrow;":               '\U000021A0',
-	"uArr;":                            '\U000021D1',
-	"uHar;":                            '\U00002963',
-	"uacute;":                          '\U000000FA',
-	"uarr;":                            '\U00002191',
-	"ubrcy;":                           '\U0000045E',
-	"ubreve;":                          '\U0000016D',
-	"ucirc;":                           '\U000000FB',
-	"ucy;":                             '\U00000443',
-	"udarr;":                           '\U000021C5',
-	"udblac;":                          '\U00000171',
-	"udhar;":                           '\U0000296E',
-	"ufisht;":                          '\U0000297E',
-	"ufr;":                             '\U0001D532',
-	"ugrave;":                          '\U000000F9',
-	"uharl;":                           '\U000021BF',
-	"uharr;":                           '\U000021BE',
-	"uhblk;":                           '\U00002580',
-	"ulcorn;":                          '\U0000231C',
-	"ulcorner;":                        '\U0000231C',
-	"ulcrop;":                          '\U0000230F',
-	"ultri;":                           '\U000025F8',
-	"umacr;":                           '\U0000016B',
-	"uml;":                             '\U000000A8',
-	"uogon;":                           '\U00000173',
-	"uopf;":                            '\U0001D566',
-	"uparrow;":                         '\U00002191',
-	"updownarrow;":                     '\U00002195',
-	"upharpoonleft;":                   '\U000021BF',
-	"upharpoonright;":                  '\U000021BE',
-	"uplus;":                           '\U0000228E',
-	"upsi;":                            '\U000003C5',
-	"upsih;":                           '\U000003D2',
-	"upsilon;":                         '\U000003C5',
-	"upuparrows;":                      '\U000021C8',
-	"urcorn;":                          '\U0000231D',
-	"urcorner;":                        '\U0000231D',
-	"urcrop;":                          '\U0000230E',
-	"uring;":                           '\U0000016F',
-	"urtri;":                           '\U000025F9',
-	"uscr;":                            '\U0001D4CA',
-	"utdot;":                           '\U000022F0',
-	"utilde;":                          '\U00000169',
-	"utri;":                            '\U000025B5',
-	"utrif;":                           '\U000025B4',
-	"uuarr;":                           '\U000021C8',
-	"uuml;":                            '\U000000FC',
-	"uwangle;":                         '\U000029A7',
-	"vArr;":                            '\U000021D5',
-	"vBar;":                            '\U00002AE8',
-	"vBarv;":                           '\U00002AE9',
-	"vDash;":                           '\U000022A8',
-	"vangrt;":                          '\U0000299C',
-	"varepsilon;":                      '\U000003F5',
-	"varkappa;":                        '\U000003F0',
-	"varnothing;":                      '\U00002205',
-	"varphi;":                          '\U000003D5',
-	"varpi;":                           '\U000003D6',
-	"varpropto;":                       '\U0000221D',
-	"varr;":                            '\U00002195',
-	"varrho;":                          '\U000003F1',
-	"varsigma;":                        '\U000003C2',
-	"vartheta;":                        '\U000003D1',
-	"vartriangleleft;":                 '\U000022B2',
-	"vartriangleright;":                '\U000022B3',
-	"vcy;":                             '\U00000432',
-	"vdash;":                           '\U000022A2',
-	"vee;":                             '\U00002228',
-	"veebar;":                          '\U000022BB',
-	"veeeq;":                           '\U0000225A',
-	"vellip;":                          '\U000022EE',
-	"verbar;":                          '\U0000007C',
-	"vert;":                            '\U0000007C',
-	"vfr;":                             '\U0001D533',
-	"vltri;":                           '\U000022B2',
-	"vopf;":                            '\U0001D567',
-	"vprop;":                           '\U0000221D',
-	"vrtri;":                           '\U000022B3',
-	"vscr;":                            '\U0001D4CB',
-	"vzigzag;":                         '\U0000299A',
-	"wcirc;":                           '\U00000175',
-	"wedbar;":                          '\U00002A5F',
-	"wedge;":                           '\U00002227',
-	"wedgeq;":                          '\U00002259',
-	"weierp;":                          '\U00002118',
-	"wfr;":                             '\U0001D534',
-	"wopf;":                            '\U0001D568',
-	"wp;":                              '\U00002118',
-	"wr;":                              '\U00002240',
-	"wreath;":                          '\U00002240',
-	"wscr;":                            '\U0001D4CC',
-	"xcap;":                            '\U000022C2',
-	"xcirc;":                           '\U000025EF',
-	"xcup;":                            '\U000022C3',
-	"xdtri;":                           '\U000025BD',
-	"xfr;":                             '\U0001D535',
-	"xhArr;":                           '\U000027FA',
-	"xharr;":                           '\U000027F7',
-	"xi;":                              '\U000003BE',
-	"xlArr;":                           '\U000027F8',
-	"xlarr;":                           '\U000027F5',
-	"xmap;":                            '\U000027FC',
-	"xnis;":                            '\U000022FB',
-	"xodot;":                           '\U00002A00',
-	"xopf;":                            '\U0001D569',
-	"xoplus;":                          '\U00002A01',
-	"xotime;":                          '\U00002A02',
-	"xrArr;":                           '\U000027F9',
-	"xrarr;":                           '\U000027F6',
-	"xscr;":                            '\U0001D4CD',
-	"xsqcup;":                          '\U00002A06',
-	"xuplus;":                          '\U00002A04',
-	"xutri;":                           '\U000025B3',
-	"xvee;":                            '\U000022C1',
-	"xwedge;":                          '\U000022C0',
-	"yacute;":                          '\U000000FD',
-	"yacy;":                            '\U0000044F',
-	"ycirc;":                           '\U00000177',
-	"ycy;":                             '\U0000044B',
-	"yen;":                             '\U000000A5',
-	"yfr;":                             '\U0001D536',
-	"yicy;":                            '\U00000457',
-	"yopf;":                            '\U0001D56A',
-	"yscr;":                            '\U0001D4CE',
-	"yucy;":                            '\U0000044E',
-	"yuml;":                            '\U000000FF',
-	"zacute;":                          '\U0000017A',
-	"zcaron;":                          '\U0000017E',
-	"zcy;":                             '\U00000437',
-	"zdot;":                            '\U0000017C',
-	"zeetrf;":                          '\U00002128',
-	"zeta;":                            '\U000003B6',
-	"zfr;":                             '\U0001D537',
-	"zhcy;":                            '\U00000436',
-	"zigrarr;":                         '\U000021DD',
-	"zopf;":                            '\U0001D56B',
-	"zscr;":                            '\U0001D4CF',
-	"zwj;":                             '\U0000200D',
-	"zwnj;":                            '\U0000200C',
-	"AElig":                            '\U000000C6',
-	"AMP":                              '\U00000026',
-	"Aacute":                           '\U000000C1',
-	"Acirc":                            '\U000000C2',
-	"Agrave":                           '\U000000C0',
-	"Aring":                            '\U000000C5',
-	"Atilde":                           '\U000000C3',
-	"Auml":                             '\U000000C4',
-	"COPY":                             '\U000000A9',
-	"Ccedil":                           '\U000000C7',
-	"ETH":                              '\U000000D0',
-	"Eacute":                           '\U000000C9',
-	"Ecirc":                            '\U000000CA',
-	"Egrave":                           '\U000000C8',
-	"Euml":                             '\U000000CB',
-	"GT":                               '\U0000003E',
-	"Iacute":                           '\U000000CD',
-	"Icirc":                            '\U000000CE',
-	"Igrave":                           '\U000000CC',
-	"Iuml":                             '\U000000CF',
-	"LT":                               '\U0000003C',
-	"Ntilde":                           '\U000000D1',
-	"Oacute":                           '\U000000D3',
-	"Ocirc":                            '\U000000D4',
-	"Ograve":                           '\U000000D2',
-	"Oslash":                           '\U000000D8',
-	"Otilde":                           '\U000000D5',
-	"Ouml":                             '\U000000D6',
-	"QUOT":                             '\U00000022',
-	"REG":                              '\U000000AE',
-	"THORN":                            '\U000000DE',
-	"Uacute":                           '\U000000DA',
-	"Ucirc":                            '\U000000DB',
-	"Ugrave":                           '\U000000D9',
-	"Uuml":                             '\U000000DC',
-	"Yacute":                           '\U000000DD',
-	"aacute":                           '\U000000E1',
-	"acirc":                            '\U000000E2',
-	"acute":                            '\U000000B4',
-	"aelig":                            '\U000000E6',
-	"agrave":                           '\U000000E0',
-	"amp":                              '\U00000026',
-	"aring":                            '\U000000E5',
-	"atilde":                           '\U000000E3',
-	"auml":                             '\U000000E4',
-	"brvbar":                           '\U000000A6',
-	"ccedil":                           '\U000000E7',
-	"cedil":                            '\U000000B8',
-	"cent":                             '\U000000A2',
-	"copy":                             '\U000000A9',
-	"curren":                           '\U000000A4',
-	"deg":                              '\U000000B0',
-	"divide":                           '\U000000F7',
-	"eacute":                           '\U000000E9',
-	"ecirc":                            '\U000000EA',
-	"egrave":                           '\U000000E8',
-	"eth":                              '\U000000F0',
-	"euml":                             '\U000000EB',
-	"frac12":                           '\U000000BD',
-	"frac14":                           '\U000000BC',
-	"frac34":                           '\U000000BE',
-	"gt":                               '\U0000003E',
-	"iacute":                           '\U000000ED',
-	"icirc":                            '\U000000EE',
-	"iexcl":                            '\U000000A1',
-	"igrave":                           '\U000000EC',
-	"iquest":                           '\U000000BF',
-	"iuml":                             '\U000000EF',
-	"laquo":                            '\U000000AB',
-	"lt":                               '\U0000003C',
-	"macr":                             '\U000000AF',
-	"micro":                            '\U000000B5',
-	"middot":                           '\U000000B7',
-	"nbsp":                             '\U000000A0',
-	"not":                              '\U000000AC',
-	"ntilde":                           '\U000000F1',
-	"oacute":                           '\U000000F3',
-	"ocirc":                            '\U000000F4',
-	"ograve":                           '\U000000F2',
-	"ordf":                             '\U000000AA',
-	"ordm":                             '\U000000BA',
-	"oslash":                           '\U000000F8',
-	"otilde":                           '\U000000F5',
-	"ouml":                             '\U000000F6',
-	"para":                             '\U000000B6',
-	"plusmn":                           '\U000000B1',
-	"pound":                            '\U000000A3',
-	"quot":                             '\U00000022',
-	"raquo":                            '\U000000BB',
-	"reg":                              '\U000000AE',
-	"sect":                             '\U000000A7',
-	"shy":                              '\U000000AD',
-	"sup1":                             '\U000000B9',
-	"sup2":                             '\U000000B2',
-	"sup3":                             '\U000000B3',
-	"szlig":                            '\U000000DF',
-	"thorn":                            '\U000000FE',
-	"times":                            '\U000000D7',
-	"uacute":                           '\U000000FA',
-	"ucirc":                            '\U000000FB',
-	"ugrave":                           '\U000000F9',
-	"uml":                              '\U000000A8',
-	"uuml":                             '\U000000FC',
-	"yacute":                           '\U000000FD',
-	"yen":                              '\U000000A5',
-	"yuml":                             '\U000000FF',
-}
-
-// HTML entities that are two unicode codepoints.
-var entity2 = map[string][2]rune{
-	// TODO(nigeltao): Handle replacements that are wider than their names.
-	// "nLt;":                     {'\u226A', '\u20D2'},
-	// "nGt;":                     {'\u226B', '\u20D2'},
-	"NotEqualTilde;":           {'\u2242', '\u0338'},
-	"NotGreaterFullEqual;":     {'\u2267', '\u0338'},
-	"NotGreaterGreater;":       {'\u226B', '\u0338'},
-	"NotGreaterSlantEqual;":    {'\u2A7E', '\u0338'},
-	"NotHumpDownHump;":         {'\u224E', '\u0338'},
-	"NotHumpEqual;":            {'\u224F', '\u0338'},
-	"NotLeftTriangleBar;":      {'\u29CF', '\u0338'},
-	"NotLessLess;":             {'\u226A', '\u0338'},
-	"NotLessSlantEqual;":       {'\u2A7D', '\u0338'},
-	"NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
-	"NotNestedLessLess;":       {'\u2AA1', '\u0338'},
-	"NotPrecedesEqual;":        {'\u2AAF', '\u0338'},
-	"NotRightTriangleBar;":     {'\u29D0', '\u0338'},
-	"NotSquareSubset;":         {'\u228F', '\u0338'},
-	"NotSquareSuperset;":       {'\u2290', '\u0338'},
-	"NotSubset;":               {'\u2282', '\u20D2'},
-	"NotSucceedsEqual;":        {'\u2AB0', '\u0338'},
-	"NotSucceedsTilde;":        {'\u227F', '\u0338'},
-	"NotSuperset;":             {'\u2283', '\u20D2'},
-	"ThickSpace;":              {'\u205F', '\u200A'},
-	"acE;":                     {'\u223E', '\u0333'},
-	"bne;":                     {'\u003D', '\u20E5'},
-	"bnequiv;":                 {'\u2261', '\u20E5'},
-	"caps;":                    {'\u2229', '\uFE00'},
-	"cups;":                    {'\u222A', '\uFE00'},
-	"fjlig;":                   {'\u0066', '\u006A'},
-	"gesl;":                    {'\u22DB', '\uFE00'},
-	"gvertneqq;":               {'\u2269', '\uFE00'},
-	"gvnE;":                    {'\u2269', '\uFE00'},
-	"lates;":                   {'\u2AAD', '\uFE00'},
-	"lesg;":                    {'\u22DA', '\uFE00'},
-	"lvertneqq;":               {'\u2268', '\uFE00'},
-	"lvnE;":                    {'\u2268', '\uFE00'},
-	"nGg;":                     {'\u22D9', '\u0338'},
-	"nGtv;":                    {'\u226B', '\u0338'},
-	"nLl;":                     {'\u22D8', '\u0338'},
-	"nLtv;":                    {'\u226A', '\u0338'},
-	"nang;":                    {'\u2220', '\u20D2'},
-	"napE;":                    {'\u2A70', '\u0338'},
-	"napid;":                   {'\u224B', '\u0338'},
-	"nbump;":                   {'\u224E', '\u0338'},
-	"nbumpe;":                  {'\u224F', '\u0338'},
-	"ncongdot;":                {'\u2A6D', '\u0338'},
-	"nedot;":                   {'\u2250', '\u0338'},
-	"nesim;":                   {'\u2242', '\u0338'},
-	"ngE;":                     {'\u2267', '\u0338'},
-	"ngeqq;":                   {'\u2267', '\u0338'},
-	"ngeqslant;":               {'\u2A7E', '\u0338'},
-	"nges;":                    {'\u2A7E', '\u0338'},
-	"nlE;":                     {'\u2266', '\u0338'},
-	"nleqq;":                   {'\u2266', '\u0338'},
-	"nleqslant;":               {'\u2A7D', '\u0338'},
-	"nles;":                    {'\u2A7D', '\u0338'},
-	"notinE;":                  {'\u22F9', '\u0338'},
-	"notindot;":                {'\u22F5', '\u0338'},
-	"nparsl;":                  {'\u2AFD', '\u20E5'},
-	"npart;":                   {'\u2202', '\u0338'},
-	"npre;":                    {'\u2AAF', '\u0338'},
-	"npreceq;":                 {'\u2AAF', '\u0338'},
-	"nrarrc;":                  {'\u2933', '\u0338'},
-	"nrarrw;":                  {'\u219D', '\u0338'},
-	"nsce;":                    {'\u2AB0', '\u0338'},
-	"nsubE;":                   {'\u2AC5', '\u0338'},
-	"nsubset;":                 {'\u2282', '\u20D2'},
-	"nsubseteqq;":              {'\u2AC5', '\u0338'},
-	"nsucceq;":                 {'\u2AB0', '\u0338'},
-	"nsupE;":                   {'\u2AC6', '\u0338'},
-	"nsupset;":                 {'\u2283', '\u20D2'},
-	"nsupseteqq;":              {'\u2AC6', '\u0338'},
-	"nvap;":                    {'\u224D', '\u20D2'},
-	"nvge;":                    {'\u2265', '\u20D2'},
-	"nvgt;":                    {'\u003E', '\u20D2'},
-	"nvle;":                    {'\u2264', '\u20D2'},
-	"nvlt;":                    {'\u003C', '\u20D2'},
-	"nvltrie;":                 {'\u22B4', '\u20D2'},
-	"nvrtrie;":                 {'\u22B5', '\u20D2'},
-	"nvsim;":                   {'\u223C', '\u20D2'},
-	"race;":                    {'\u223D', '\u0331'},
-	"smtes;":                   {'\u2AAC', '\uFE00'},
-	"sqcaps;":                  {'\u2293', '\uFE00'},
-	"sqcups;":                  {'\u2294', '\uFE00'},
-	"varsubsetneq;":            {'\u228A', '\uFE00'},
-	"varsubsetneqq;":           {'\u2ACB', '\uFE00'},
-	"varsupsetneq;":            {'\u228B', '\uFE00'},
-	"varsupsetneqq;":           {'\u2ACC', '\uFE00'},
-	"vnsub;":                   {'\u2282', '\u20D2'},
-	"vnsup;":                   {'\u2283', '\u20D2'},
-	"vsubnE;":                  {'\u2ACB', '\uFE00'},
-	"vsubne;":                  {'\u228A', '\uFE00'},
-	"vsupnE;":                  {'\u2ACC', '\uFE00'},
-	"vsupne;":                  {'\u228B', '\uFE00'},
-}
diff --git a/application/source/vendor/golang.org/x/net/html/escape.go b/application/source/vendor/golang.org/x/net/html/escape.go
deleted file mode 100644
index 04c6bec21073773d3e975f35d62b2c6599b709d0..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/escape.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"bytes"
-	"strings"
-	"unicode/utf8"
-)
-
-// These replacements permit compatibility with old numeric entities that
-// assumed Windows-1252 encoding.
-// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-var replacementTable = [...]rune{
-	'\u20AC', // First entry is what 0x80 should be replaced with.
-	'\u0081',
-	'\u201A',
-	'\u0192',
-	'\u201E',
-	'\u2026',
-	'\u2020',
-	'\u2021',
-	'\u02C6',
-	'\u2030',
-	'\u0160',
-	'\u2039',
-	'\u0152',
-	'\u008D',
-	'\u017D',
-	'\u008F',
-	'\u0090',
-	'\u2018',
-	'\u2019',
-	'\u201C',
-	'\u201D',
-	'\u2022',
-	'\u2013',
-	'\u2014',
-	'\u02DC',
-	'\u2122',
-	'\u0161',
-	'\u203A',
-	'\u0153',
-	'\u009D',
-	'\u017E',
-	'\u0178', // Last entry is 0x9F.
-	// 0x00->'\uFFFD' is handled programmatically.
-	// 0x0D->'\u000D' is a no-op.
-}
-
-// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
-// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
-// Precondition: b[src] == '&' && dst <= src.
-// attribute should be true if parsing an attribute value.
-func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
-	// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-
-	// i starts at 1 because we already know that s[0] == '&'.
-	i, s := 1, b[src:]
-
-	if len(s) <= 1 {
-		b[dst] = b[src]
-		return dst + 1, src + 1
-	}
-
-	if s[i] == '#' {
-		if len(s) <= 3 { // We need to have at least "&#.".
-			b[dst] = b[src]
-			return dst + 1, src + 1
-		}
-		i++
-		c := s[i]
-		hex := false
-		if c == 'x' || c == 'X' {
-			hex = true
-			i++
-		}
-
-		x := '\x00'
-		for i < len(s) {
-			c = s[i]
-			i++
-			if hex {
-				if '0' <= c && c <= '9' {
-					x = 16*x + rune(c) - '0'
-					continue
-				} else if 'a' <= c && c <= 'f' {
-					x = 16*x + rune(c) - 'a' + 10
-					continue
-				} else if 'A' <= c && c <= 'F' {
-					x = 16*x + rune(c) - 'A' + 10
-					continue
-				}
-			} else if '0' <= c && c <= '9' {
-				x = 10*x + rune(c) - '0'
-				continue
-			}
-			if c != ';' {
-				i--
-			}
-			break
-		}
-
-		if i <= 3 { // No characters matched.
-			b[dst] = b[src]
-			return dst + 1, src + 1
-		}
-
-		if 0x80 <= x && x <= 0x9F {
-			// Replace characters from Windows-1252 with UTF-8 equivalents.
-			x = replacementTable[x-0x80]
-		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
-			// Replace invalid characters with the replacement character.
-			x = '\uFFFD'
-		}
-
-		return dst + utf8.EncodeRune(b[dst:], x), src + i
-	}
-
-	// Consume the maximum number of characters possible, with the
-	// consumed characters matching one of the named references.
-
-	for i < len(s) {
-		c := s[i]
-		i++
-		// Lower-cased characters are more common in entities, so we check for them first.
-		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
-			continue
-		}
-		if c != ';' {
-			i--
-		}
-		break
-	}
-
-	entityName := string(s[1:i])
-	if entityName == "" {
-		// No-op.
-	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
-		// No-op.
-	} else if x := entity[entityName]; x != 0 {
-		return dst + utf8.EncodeRune(b[dst:], x), src + i
-	} else if x := entity2[entityName]; x[0] != 0 {
-		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
-		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
-	} else if !attribute {
-		maxLen := len(entityName) - 1
-		if maxLen > longestEntityWithoutSemicolon {
-			maxLen = longestEntityWithoutSemicolon
-		}
-		for j := maxLen; j > 1; j-- {
-			if x := entity[entityName[:j]]; x != 0 {
-				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
-			}
-		}
-	}
-
-	dst1, src1 = dst+i, src+i
-	copy(b[dst:dst1], b[src:src1])
-	return dst1, src1
-}
-
-// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
-// attribute should be true if parsing an attribute value.
-func unescape(b []byte, attribute bool) []byte {
-	for i, c := range b {
-		if c == '&' {
-			dst, src := unescapeEntity(b, i, i, attribute)
-			for src < len(b) {
-				c := b[src]
-				if c == '&' {
-					dst, src = unescapeEntity(b, dst, src, attribute)
-				} else {
-					b[dst] = c
-					dst, src = dst+1, src+1
-				}
-			}
-			return b[0:dst]
-		}
-	}
-	return b
-}
-
-// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
-func lower(b []byte) []byte {
-	for i, c := range b {
-		if 'A' <= c && c <= 'Z' {
-			b[i] = c + 'a' - 'A'
-		}
-	}
-	return b
-}
-
-// escapeComment is like func escape but escapes its input bytes less often.
-// Per https://github.com/golang/go/issues/58246 some HTML comments are (1)
-// meaningful and (2) contain angle brackets that we'd like to avoid escaping
-// unless we have to.
-//
-// "We have to" includes the '&' byte, since that introduces other escapes.
-//
-// It also includes those bytes (not including EOF) that would otherwise end
-// the comment. Per the summary table at the bottom of comment_test.go, this is
-// the '>' byte that, per above, we'd like to avoid escaping unless we have to.
-//
-// Studying the summary table (and T actions in its '>' column) closely, we
-// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
-// start of the comment data. State 52 is after a '!'. The other three states
-// are after a '-'.
-//
-// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
-//   - The '>' is after a '!' or '-' (in the unescaped data) or
-//   - The '>' is at the start of the comment data (after the opening "<!--").
-func escapeComment(w writer, s string) error {
-	// When modifying this function, consider manually increasing the
-	// maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more.
-	// That increase should only be temporary, not committed, as it
-	// exponentially affects the test running time.
-
-	if len(s) == 0 {
-		return nil
-	}
-
-	// Loop:
-	//   - Grow j such that s[i:j] does not need escaping.
-	//   - If s[j] does need escaping, output s[i:j] and an escaped s[j],
-	//     resetting i and j to point past that s[j] byte.
-	i := 0
-	for j := 0; j < len(s); j++ {
-		escaped := ""
-		switch s[j] {
-		case '&':
-			escaped = "&amp;"
-
-		case '>':
-			if j > 0 {
-				if prev := s[j-1]; (prev != '!') && (prev != '-') {
-					continue
-				}
-			}
-			escaped = "&gt;"
-
-		default:
-			continue
-		}
-
-		if i < j {
-			if _, err := w.WriteString(s[i:j]); err != nil {
-				return err
-			}
-		}
-		if _, err := w.WriteString(escaped); err != nil {
-			return err
-		}
-		i = j + 1
-	}
-
-	if i < len(s) {
-		if _, err := w.WriteString(s[i:]); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// escapeCommentString is to EscapeString as escapeComment is to escape.
-func escapeCommentString(s string) string {
-	if strings.IndexAny(s, "&>") == -1 {
-		return s
-	}
-	var buf bytes.Buffer
-	escapeComment(&buf, s)
-	return buf.String()
-}
-
-const escapedChars = "&'<>\"\r"
-
-func escape(w writer, s string) error {
-	i := strings.IndexAny(s, escapedChars)
-	for i != -1 {
-		if _, err := w.WriteString(s[:i]); err != nil {
-			return err
-		}
-		var esc string
-		switch s[i] {
-		case '&':
-			esc = "&amp;"
-		case '\'':
-			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
-			esc = "&#39;"
-		case '<':
-			esc = "&lt;"
-		case '>':
-			esc = "&gt;"
-		case '"':
-			// "&#34;" is shorter than "&quot;".
-			esc = "&#34;"
-		case '\r':
-			esc = "&#13;"
-		default:
-			panic("unrecognized escape character")
-		}
-		s = s[i+1:]
-		if _, err := w.WriteString(esc); err != nil {
-			return err
-		}
-		i = strings.IndexAny(s, escapedChars)
-	}
-	_, err := w.WriteString(s)
-	return err
-}
-
-// EscapeString escapes special characters like "<" to become "&lt;". It
-// escapes only five such characters: <, >, &, ' and ".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func EscapeString(s string) string {
-	if strings.IndexAny(s, escapedChars) == -1 {
-		return s
-	}
-	var buf bytes.Buffer
-	escape(&buf, s)
-	return buf.String()
-}
-
-// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
-// larger range of entities than EscapeString escapes. For example, "&aacute;"
-// unescapes to "á", as does "&#225;" and "&#xE1;".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func UnescapeString(s string) string {
-	for _, c := range s {
-		if c == '&' {
-			return string(unescape([]byte(s), false))
-		}
-	}
-	return s
-}
diff --git a/application/source/vendor/golang.org/x/net/html/foreign.go b/application/source/vendor/golang.org/x/net/html/foreign.go
deleted file mode 100644
index 9da9e9dc4246323836f27dca93d039732e88bd7d..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/foreign.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"strings"
-)
-
-func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
-	for i := range aa {
-		if newName, ok := nameMap[aa[i].Key]; ok {
-			aa[i].Key = newName
-		}
-	}
-}
-
-func adjustForeignAttributes(aa []Attribute) {
-	for i, a := range aa {
-		if a.Key == "" || a.Key[0] != 'x' {
-			continue
-		}
-		switch a.Key {
-		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
-			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
-			j := strings.Index(a.Key, ":")
-			aa[i].Namespace = a.Key[:j]
-			aa[i].Key = a.Key[j+1:]
-		}
-	}
-}
-
-func htmlIntegrationPoint(n *Node) bool {
-	if n.Type != ElementNode {
-		return false
-	}
-	switch n.Namespace {
-	case "math":
-		if n.Data == "annotation-xml" {
-			for _, a := range n.Attr {
-				if a.Key == "encoding" {
-					val := strings.ToLower(a.Val)
-					if val == "text/html" || val == "application/xhtml+xml" {
-						return true
-					}
-				}
-			}
-		}
-	case "svg":
-		switch n.Data {
-		case "desc", "foreignObject", "title":
-			return true
-		}
-	}
-	return false
-}
-
-func mathMLTextIntegrationPoint(n *Node) bool {
-	if n.Namespace != "math" {
-		return false
-	}
-	switch n.Data {
-	case "mi", "mo", "mn", "ms", "mtext":
-		return true
-	}
-	return false
-}
-
-// Section 12.2.6.5.
-var breakout = map[string]bool{
-	"b":          true,
-	"big":        true,
-	"blockquote": true,
-	"body":       true,
-	"br":         true,
-	"center":     true,
-	"code":       true,
-	"dd":         true,
-	"div":        true,
-	"dl":         true,
-	"dt":         true,
-	"em":         true,
-	"embed":      true,
-	"h1":         true,
-	"h2":         true,
-	"h3":         true,
-	"h4":         true,
-	"h5":         true,
-	"h6":         true,
-	"head":       true,
-	"hr":         true,
-	"i":          true,
-	"img":        true,
-	"li":         true,
-	"listing":    true,
-	"menu":       true,
-	"meta":       true,
-	"nobr":       true,
-	"ol":         true,
-	"p":          true,
-	"pre":        true,
-	"ruby":       true,
-	"s":          true,
-	"small":      true,
-	"span":       true,
-	"strong":     true,
-	"strike":     true,
-	"sub":        true,
-	"sup":        true,
-	"table":      true,
-	"tt":         true,
-	"u":          true,
-	"ul":         true,
-	"var":        true,
-}
-
-// Section 12.2.6.5.
-var svgTagNameAdjustments = map[string]string{
-	"altglyph":            "altGlyph",
-	"altglyphdef":         "altGlyphDef",
-	"altglyphitem":        "altGlyphItem",
-	"animatecolor":        "animateColor",
-	"animatemotion":       "animateMotion",
-	"animatetransform":    "animateTransform",
-	"clippath":            "clipPath",
-	"feblend":             "feBlend",
-	"fecolormatrix":       "feColorMatrix",
-	"fecomponenttransfer": "feComponentTransfer",
-	"fecomposite":         "feComposite",
-	"feconvolvematrix":    "feConvolveMatrix",
-	"fediffuselighting":   "feDiffuseLighting",
-	"fedisplacementmap":   "feDisplacementMap",
-	"fedistantlight":      "feDistantLight",
-	"feflood":             "feFlood",
-	"fefunca":             "feFuncA",
-	"fefuncb":             "feFuncB",
-	"fefuncg":             "feFuncG",
-	"fefuncr":             "feFuncR",
-	"fegaussianblur":      "feGaussianBlur",
-	"feimage":             "feImage",
-	"femerge":             "feMerge",
-	"femergenode":         "feMergeNode",
-	"femorphology":        "feMorphology",
-	"feoffset":            "feOffset",
-	"fepointlight":        "fePointLight",
-	"fespecularlighting":  "feSpecularLighting",
-	"fespotlight":         "feSpotLight",
-	"fetile":              "feTile",
-	"feturbulence":        "feTurbulence",
-	"foreignobject":       "foreignObject",
-	"glyphref":            "glyphRef",
-	"lineargradient":      "linearGradient",
-	"radialgradient":      "radialGradient",
-	"textpath":            "textPath",
-}
-
-// Section 12.2.6.1
-var mathMLAttributeAdjustments = map[string]string{
-	"definitionurl": "definitionURL",
-}
-
-var svgAttributeAdjustments = map[string]string{
-	"attributename":       "attributeName",
-	"attributetype":       "attributeType",
-	"basefrequency":       "baseFrequency",
-	"baseprofile":         "baseProfile",
-	"calcmode":            "calcMode",
-	"clippathunits":       "clipPathUnits",
-	"diffuseconstant":     "diffuseConstant",
-	"edgemode":            "edgeMode",
-	"filterunits":         "filterUnits",
-	"glyphref":            "glyphRef",
-	"gradienttransform":   "gradientTransform",
-	"gradientunits":       "gradientUnits",
-	"kernelmatrix":        "kernelMatrix",
-	"kernelunitlength":    "kernelUnitLength",
-	"keypoints":           "keyPoints",
-	"keysplines":          "keySplines",
-	"keytimes":            "keyTimes",
-	"lengthadjust":        "lengthAdjust",
-	"limitingconeangle":   "limitingConeAngle",
-	"markerheight":        "markerHeight",
-	"markerunits":         "markerUnits",
-	"markerwidth":         "markerWidth",
-	"maskcontentunits":    "maskContentUnits",
-	"maskunits":           "maskUnits",
-	"numoctaves":          "numOctaves",
-	"pathlength":          "pathLength",
-	"patterncontentunits": "patternContentUnits",
-	"patterntransform":    "patternTransform",
-	"patternunits":        "patternUnits",
-	"pointsatx":           "pointsAtX",
-	"pointsaty":           "pointsAtY",
-	"pointsatz":           "pointsAtZ",
-	"preservealpha":       "preserveAlpha",
-	"preserveaspectratio": "preserveAspectRatio",
-	"primitiveunits":      "primitiveUnits",
-	"refx":                "refX",
-	"refy":                "refY",
-	"repeatcount":         "repeatCount",
-	"repeatdur":           "repeatDur",
-	"requiredextensions":  "requiredExtensions",
-	"requiredfeatures":    "requiredFeatures",
-	"specularconstant":    "specularConstant",
-	"specularexponent":    "specularExponent",
-	"spreadmethod":        "spreadMethod",
-	"startoffset":         "startOffset",
-	"stddeviation":        "stdDeviation",
-	"stitchtiles":         "stitchTiles",
-	"surfacescale":        "surfaceScale",
-	"systemlanguage":      "systemLanguage",
-	"tablevalues":         "tableValues",
-	"targetx":             "targetX",
-	"targety":             "targetY",
-	"textlength":          "textLength",
-	"viewbox":             "viewBox",
-	"viewtarget":          "viewTarget",
-	"xchannelselector":    "xChannelSelector",
-	"ychannelselector":    "yChannelSelector",
-	"zoomandpan":          "zoomAndPan",
-}
diff --git a/application/source/vendor/golang.org/x/net/html/node.go b/application/source/vendor/golang.org/x/net/html/node.go
deleted file mode 100644
index 1350eef22c3ce3846e92656abce8dfa979adbde0..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/node.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"golang.org/x/net/html/atom"
-)
-
-// A NodeType is the type of a Node.
-type NodeType uint32
-
-const (
-	ErrorNode NodeType = iota
-	TextNode
-	DocumentNode
-	ElementNode
-	CommentNode
-	DoctypeNode
-	// RawNode nodes are not returned by the parser, but can be part of the
-	// Node tree passed to func Render to insert raw HTML (without escaping).
-	// If so, this package makes no guarantee that the rendered HTML is secure
-	// (from e.g. Cross Site Scripting attacks) or well-formed.
-	RawNode
-	scopeMarkerNode
-)
-
-// Section 12.2.4.3 says "The markers are inserted when entering applet,
-// object, marquee, template, td, th, and caption elements, and are used
-// to prevent formatting from "leaking" into applet, object, marquee,
-// template, td, th, and caption elements".
-var scopeMarker = Node{Type: scopeMarkerNode}
-
-// A Node consists of a NodeType and some Data (tag name for element nodes,
-// content for text) and are part of a tree of Nodes. Element nodes may also
-// have a Namespace and contain a slice of Attributes. Data is unescaped, so
-// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
-// is the atom for Data, or zero if Data is not a known tag name.
-//
-// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
-// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
-// "svg" is short for "http://www.w3.org/2000/svg".
-type Node struct {
-	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
-
-	Type      NodeType
-	DataAtom  atom.Atom
-	Data      string
-	Namespace string
-	Attr      []Attribute
-}
-
-// InsertBefore inserts newChild as a child of n, immediately before oldChild
-// in the sequence of n's children. oldChild may be nil, in which case newChild
-// is appended to the end of n's children.
-//
-// It will panic if newChild already has a parent or siblings.
-func (n *Node) InsertBefore(newChild, oldChild *Node) {
-	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
-		panic("html: InsertBefore called for an attached child Node")
-	}
-	var prev, next *Node
-	if oldChild != nil {
-		prev, next = oldChild.PrevSibling, oldChild
-	} else {
-		prev = n.LastChild
-	}
-	if prev != nil {
-		prev.NextSibling = newChild
-	} else {
-		n.FirstChild = newChild
-	}
-	if next != nil {
-		next.PrevSibling = newChild
-	} else {
-		n.LastChild = newChild
-	}
-	newChild.Parent = n
-	newChild.PrevSibling = prev
-	newChild.NextSibling = next
-}
-
-// AppendChild adds a node c as a child of n.
-//
-// It will panic if c already has a parent or siblings.
-func (n *Node) AppendChild(c *Node) {
-	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
-		panic("html: AppendChild called for an attached child Node")
-	}
-	last := n.LastChild
-	if last != nil {
-		last.NextSibling = c
-	} else {
-		n.FirstChild = c
-	}
-	n.LastChild = c
-	c.Parent = n
-	c.PrevSibling = last
-}
-
-// RemoveChild removes a node c that is a child of n. Afterwards, c will have
-// no parent and no siblings.
-//
-// It will panic if c's parent is not n.
-func (n *Node) RemoveChild(c *Node) {
-	if c.Parent != n {
-		panic("html: RemoveChild called for a non-child Node")
-	}
-	if n.FirstChild == c {
-		n.FirstChild = c.NextSibling
-	}
-	if c.NextSibling != nil {
-		c.NextSibling.PrevSibling = c.PrevSibling
-	}
-	if n.LastChild == c {
-		n.LastChild = c.PrevSibling
-	}
-	if c.PrevSibling != nil {
-		c.PrevSibling.NextSibling = c.NextSibling
-	}
-	c.Parent = nil
-	c.PrevSibling = nil
-	c.NextSibling = nil
-}
-
-// reparentChildren reparents all of src's child nodes to dst.
-func reparentChildren(dst, src *Node) {
-	for {
-		child := src.FirstChild
-		if child == nil {
-			break
-		}
-		src.RemoveChild(child)
-		dst.AppendChild(child)
-	}
-}
-
-// clone returns a new node with the same type, data and attributes.
-// The clone has no parent, no siblings and no children.
-func (n *Node) clone() *Node {
-	m := &Node{
-		Type:     n.Type,
-		DataAtom: n.DataAtom,
-		Data:     n.Data,
-		Attr:     make([]Attribute, len(n.Attr)),
-	}
-	copy(m.Attr, n.Attr)
-	return m
-}
-
-// nodeStack is a stack of nodes.
-type nodeStack []*Node
-
-// pop pops the stack. It will panic if s is empty.
-func (s *nodeStack) pop() *Node {
-	i := len(*s)
-	n := (*s)[i-1]
-	*s = (*s)[:i-1]
-	return n
-}
-
-// top returns the most recently pushed node, or nil if s is empty.
-func (s *nodeStack) top() *Node {
-	if i := len(*s); i > 0 {
-		return (*s)[i-1]
-	}
-	return nil
-}
-
-// index returns the index of the top-most occurrence of n in the stack, or -1
-// if n is not present.
-func (s *nodeStack) index(n *Node) int {
-	for i := len(*s) - 1; i >= 0; i-- {
-		if (*s)[i] == n {
-			return i
-		}
-	}
-	return -1
-}
-
-// contains returns whether a is within s.
-func (s *nodeStack) contains(a atom.Atom) bool {
-	for _, n := range *s {
-		if n.DataAtom == a && n.Namespace == "" {
-			return true
-		}
-	}
-	return false
-}
-
-// insert inserts a node at the given index.
-func (s *nodeStack) insert(i int, n *Node) {
-	(*s) = append(*s, nil)
-	copy((*s)[i+1:], (*s)[i:])
-	(*s)[i] = n
-}
-
-// remove removes a node from the stack. It is a no-op if n is not present.
-func (s *nodeStack) remove(n *Node) {
-	i := s.index(n)
-	if i == -1 {
-		return
-	}
-	copy((*s)[i:], (*s)[i+1:])
-	j := len(*s) - 1
-	(*s)[j] = nil
-	*s = (*s)[:j]
-}
-
-type insertionModeStack []insertionMode
-
-func (s *insertionModeStack) pop() (im insertionMode) {
-	i := len(*s)
-	im = (*s)[i-1]
-	*s = (*s)[:i-1]
-	return im
-}
-
-func (s *insertionModeStack) top() insertionMode {
-	if i := len(*s); i > 0 {
-		return (*s)[i-1]
-	}
-	return nil
-}
diff --git a/application/source/vendor/golang.org/x/net/html/parse.go b/application/source/vendor/golang.org/x/net/html/parse.go
deleted file mode 100644
index 46a89eda6c19954b7fccde101a6c87bd225ded83..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/parse.go
+++ /dev/null
@@ -1,2460 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-
-	a "golang.org/x/net/html/atom"
-)
-
-// A parser implements the HTML5 parsing algorithm:
-// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
-type parser struct {
-	// tokenizer provides the tokens for the parser.
-	tokenizer *Tokenizer
-	// tok is the most recently read token.
-	tok Token
-	// Self-closing tags like <hr/> are treated as start tags, except that
-	// hasSelfClosingToken is set while they are being processed.
-	hasSelfClosingToken bool
-	// doc is the document root element.
-	doc *Node
-	// The stack of open elements (section 12.2.4.2) and active formatting
-	// elements (section 12.2.4.3).
-	oe, afe nodeStack
-	// Element pointers (section 12.2.4.4).
-	head, form *Node
-	// Other parsing state flags (section 12.2.4.5).
-	scripting, framesetOK bool
-	// The stack of template insertion modes
-	templateStack insertionModeStack
-	// im is the current insertion mode.
-	im insertionMode
-	// originalIM is the insertion mode to go back to after completing a text
-	// or inTableText insertion mode.
-	originalIM insertionMode
-	// fosterParenting is whether new elements should be inserted according to
-	// the foster parenting rules (section 12.2.6.1).
-	fosterParenting bool
-	// quirks is whether the parser is operating in "quirks mode."
-	quirks bool
-	// fragment is whether the parser is parsing an HTML fragment.
-	fragment bool
-	// context is the context element when parsing an HTML fragment
-	// (section 12.4).
-	context *Node
-}
-
-func (p *parser) top() *Node {
-	if n := p.oe.top(); n != nil {
-		return n
-	}
-	return p.doc
-}
-
-// Stop tags for use in popUntil. These come from section 12.2.4.2.
-var (
-	defaultScopeStopTags = map[string][]a.Atom{
-		"":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
-		"math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
-		"svg":  {a.Desc, a.ForeignObject, a.Title},
-	}
-)
-
-type scope int
-
-const (
-	defaultScope scope = iota
-	listItemScope
-	buttonScope
-	tableScope
-	tableRowScope
-	tableBodyScope
-	selectScope
-)
-
-// popUntil pops the stack of open elements at the highest element whose tag
-// is in matchTags, provided there is no higher element in the scope's stop
-// tags (as defined in section 12.2.4.2). It returns whether or not there was
-// such an element. If there was not, popUntil leaves the stack unchanged.
-//
-// For example, the set of stop tags for table scope is: "html", "table". If
-// the stack was:
-// ["html", "body", "font", "table", "b", "i", "u"]
-// then popUntil(tableScope, "font") would return false, but
-// popUntil(tableScope, "i") would return true and the stack would become:
-// ["html", "body", "font", "table", "b"]
-//
-// If an element's tag is in both the stop tags and matchTags, then the stack
-// will be popped and the function returns true (provided, of course, there was
-// no higher element in the stack that was also in the stop tags). For example,
-// popUntil(tableScope, "table") returns true and leaves:
-// ["html", "body", "font"]
-func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
-	if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
-		p.oe = p.oe[:i]
-		return true
-	}
-	return false
-}
-
-// indexOfElementInScope returns the index in p.oe of the highest element whose
-// tag is in matchTags that is in scope. If no matching element is in scope, it
-// returns -1.
-func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		tagAtom := p.oe[i].DataAtom
-		if p.oe[i].Namespace == "" {
-			for _, t := range matchTags {
-				if t == tagAtom {
-					return i
-				}
-			}
-			switch s {
-			case defaultScope:
-				// No-op.
-			case listItemScope:
-				if tagAtom == a.Ol || tagAtom == a.Ul {
-					return -1
-				}
-			case buttonScope:
-				if tagAtom == a.Button {
-					return -1
-				}
-			case tableScope:
-				if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template {
-					return -1
-				}
-			case selectScope:
-				if tagAtom != a.Optgroup && tagAtom != a.Option {
-					return -1
-				}
-			default:
-				panic("unreachable")
-			}
-		}
-		switch s {
-		case defaultScope, listItemScope, buttonScope:
-			for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
-				if t == tagAtom {
-					return -1
-				}
-			}
-		}
-	}
-	return -1
-}
-
-// elementInScope is like popUntil, except that it doesn't modify the stack of
-// open elements.
-func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
-	return p.indexOfElementInScope(s, matchTags...) != -1
-}
-
-// clearStackToContext pops elements off the stack of open elements until a
-// scope-defined element is found.
-func (p *parser) clearStackToContext(s scope) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		tagAtom := p.oe[i].DataAtom
-		switch s {
-		case tableScope:
-			if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template {
-				p.oe = p.oe[:i+1]
-				return
-			}
-		case tableRowScope:
-			if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template {
-				p.oe = p.oe[:i+1]
-				return
-			}
-		case tableBodyScope:
-			if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template {
-				p.oe = p.oe[:i+1]
-				return
-			}
-		default:
-			panic("unreachable")
-		}
-	}
-}
-
-// parseGenericRawTextElement implements the generic raw text element parsing
-// algorithm defined in 12.2.6.2.
-// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
-// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part
-// officially, need to make tokenizer consider both states.
-func (p *parser) parseGenericRawTextElement() {
-	p.addElement()
-	p.originalIM = p.im
-	p.im = textIM
-}
-
-// generateImpliedEndTags pops nodes off the stack of open elements as long as
-// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc.
-// If exceptions are specified, nodes with that name will not be popped off.
-func (p *parser) generateImpliedEndTags(exceptions ...string) {
-	var i int
-loop:
-	for i = len(p.oe) - 1; i >= 0; i-- {
-		n := p.oe[i]
-		if n.Type != ElementNode {
-			break
-		}
-		switch n.DataAtom {
-		case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc:
-			for _, except := range exceptions {
-				if n.Data == except {
-					break loop
-				}
-			}
-			continue
-		}
-		break
-	}
-
-	p.oe = p.oe[:i+1]
-}
-
-// addChild adds a child node n to the top element, and pushes n onto the stack
-// of open elements if it is an element node.
-func (p *parser) addChild(n *Node) {
-	if p.shouldFosterParent() {
-		p.fosterParent(n)
-	} else {
-		p.top().AppendChild(n)
-	}
-
-	if n.Type == ElementNode {
-		p.oe = append(p.oe, n)
-	}
-}
-
-// shouldFosterParent returns whether the next node to be added should be
-// foster parented.
-func (p *parser) shouldFosterParent() bool {
-	if p.fosterParenting {
-		switch p.top().DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			return true
-		}
-	}
-	return false
-}
-
-// fosterParent adds a child node according to the foster parenting rules.
-// Section 12.2.6.1, "foster parenting".
-func (p *parser) fosterParent(n *Node) {
-	var table, parent, prev, template *Node
-	var i int
-	for i = len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == a.Table {
-			table = p.oe[i]
-			break
-		}
-	}
-
-	var j int
-	for j = len(p.oe) - 1; j >= 0; j-- {
-		if p.oe[j].DataAtom == a.Template {
-			template = p.oe[j]
-			break
-		}
-	}
-
-	if template != nil && (table == nil || j > i) {
-		template.AppendChild(n)
-		return
-	}
-
-	if table == nil {
-		// The foster parent is the html element.
-		parent = p.oe[0]
-	} else {
-		parent = table.Parent
-	}
-	if parent == nil {
-		parent = p.oe[i-1]
-	}
-
-	if table != nil {
-		prev = table.PrevSibling
-	} else {
-		prev = parent.LastChild
-	}
-	if prev != nil && prev.Type == TextNode && n.Type == TextNode {
-		prev.Data += n.Data
-		return
-	}
-
-	parent.InsertBefore(n, table)
-}
-
-// addText adds text to the preceding node if it is a text node, or else it
-// calls addChild with a new text node.
-func (p *parser) addText(text string) {
-	if text == "" {
-		return
-	}
-
-	if p.shouldFosterParent() {
-		p.fosterParent(&Node{
-			Type: TextNode,
-			Data: text,
-		})
-		return
-	}
-
-	t := p.top()
-	if n := t.LastChild; n != nil && n.Type == TextNode {
-		n.Data += text
-		return
-	}
-	p.addChild(&Node{
-		Type: TextNode,
-		Data: text,
-	})
-}
-
-// addElement adds a child element based on the current token.
-func (p *parser) addElement() {
-	p.addChild(&Node{
-		Type:     ElementNode,
-		DataAtom: p.tok.DataAtom,
-		Data:     p.tok.Data,
-		Attr:     p.tok.Attr,
-	})
-}
-
-// Section 12.2.4.3.
-func (p *parser) addFormattingElement() {
-	tagAtom, attr := p.tok.DataAtom, p.tok.Attr
-	p.addElement()
-
-	// Implement the Noah's Ark clause, but with three per family instead of two.
-	identicalElements := 0
-findIdenticalElements:
-	for i := len(p.afe) - 1; i >= 0; i-- {
-		n := p.afe[i]
-		if n.Type == scopeMarkerNode {
-			break
-		}
-		if n.Type != ElementNode {
-			continue
-		}
-		if n.Namespace != "" {
-			continue
-		}
-		if n.DataAtom != tagAtom {
-			continue
-		}
-		if len(n.Attr) != len(attr) {
-			continue
-		}
-	compareAttributes:
-		for _, t0 := range n.Attr {
-			for _, t1 := range attr {
-				if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
-					// Found a match for this attribute, continue with the next attribute.
-					continue compareAttributes
-				}
-			}
-			// If we get here, there is no attribute that matches a.
-			// Therefore the element is not identical to the new one.
-			continue findIdenticalElements
-		}
-
-		identicalElements++
-		if identicalElements >= 3 {
-			p.afe.remove(n)
-		}
-	}
-
-	p.afe = append(p.afe, p.top())
-}
-
-// Section 12.2.4.3.
-func (p *parser) clearActiveFormattingElements() {
-	for {
-		if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode {
-			return
-		}
-	}
-}
-
-// Section 12.2.4.3.
-func (p *parser) reconstructActiveFormattingElements() {
-	n := p.afe.top()
-	if n == nil {
-		return
-	}
-	if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
-		return
-	}
-	i := len(p.afe) - 1
-	for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
-		if i == 0 {
-			i = -1
-			break
-		}
-		i--
-		n = p.afe[i]
-	}
-	for {
-		i++
-		clone := p.afe[i].clone()
-		p.addChild(clone)
-		p.afe[i] = clone
-		if i == len(p.afe)-1 {
-			break
-		}
-	}
-}
-
-// Section 12.2.5.
-func (p *parser) acknowledgeSelfClosingTag() {
-	p.hasSelfClosingToken = false
-}
-
-// An insertion mode (section 12.2.4.1) is the state transition function from
-// a particular state in the HTML5 parser's state machine. It updates the
-// parser's fields depending on parser.tok (where ErrorToken means EOF).
-// It returns whether the token was consumed.
-type insertionMode func(*parser) bool
-
-// setOriginalIM sets the insertion mode to return to after completing a text or
-// inTableText insertion mode.
-// Section 12.2.4.1, "using the rules for".
-func (p *parser) setOriginalIM() {
-	if p.originalIM != nil {
-		panic("html: bad parser state: originalIM was set twice")
-	}
-	p.originalIM = p.im
-}
-
-// Section 12.2.4.1, "reset the insertion mode".
-func (p *parser) resetInsertionMode() {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		n := p.oe[i]
-		last := i == 0
-		if last && p.context != nil {
-			n = p.context
-		}
-
-		switch n.DataAtom {
-		case a.Select:
-			if !last {
-				for ancestor, first := n, p.oe[0]; ancestor != first; {
-					ancestor = p.oe[p.oe.index(ancestor)-1]
-					switch ancestor.DataAtom {
-					case a.Template:
-						p.im = inSelectIM
-						return
-					case a.Table:
-						p.im = inSelectInTableIM
-						return
-					}
-				}
-			}
-			p.im = inSelectIM
-		case a.Td, a.Th:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-			p.im = inCellIM
-		case a.Tr:
-			p.im = inRowIM
-		case a.Tbody, a.Thead, a.Tfoot:
-			p.im = inTableBodyIM
-		case a.Caption:
-			p.im = inCaptionIM
-		case a.Colgroup:
-			p.im = inColumnGroupIM
-		case a.Table:
-			p.im = inTableIM
-		case a.Template:
-			// TODO: remove this divergence from the HTML5 spec.
-			if n.Namespace != "" {
-				continue
-			}
-			p.im = p.templateStack.top()
-		case a.Head:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-			p.im = inHeadIM
-		case a.Body:
-			p.im = inBodyIM
-		case a.Frameset:
-			p.im = inFramesetIM
-		case a.Html:
-			if p.head == nil {
-				p.im = beforeHeadIM
-			} else {
-				p.im = afterHeadIM
-			}
-		default:
-			if last {
-				p.im = inBodyIM
-				return
-			}
-			continue
-		}
-		return
-	}
-}
-
-const whitespace = " \t\r\n\f"
-
-// Section 12.2.6.4.1.
-func initialIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
-		if len(p.tok.Data) == 0 {
-			// It was all whitespace, so ignore it.
-			return true
-		}
-	case CommentToken:
-		p.doc.AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		n, quirks := parseDoctype(p.tok.Data)
-		p.doc.AppendChild(n)
-		p.quirks = quirks
-		p.im = beforeHTMLIM
-		return true
-	}
-	p.quirks = true
-	p.im = beforeHTMLIM
-	return false
-}
-
-// Section 12.2.6.4.2.
-func beforeHTMLIM(p *parser) bool {
-	switch p.tok.Type {
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	case TextToken:
-		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
-		if len(p.tok.Data) == 0 {
-			// It was all whitespace, so ignore it.
-			return true
-		}
-	case StartTagToken:
-		if p.tok.DataAtom == a.Html {
-			p.addElement()
-			p.im = beforeHeadIM
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Head, a.Body, a.Html, a.Br:
-			p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
-			return false
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.doc.AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	}
-	p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
-	return false
-}
-
-// Section 12.2.6.4.3.
-func beforeHeadIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
-		if len(p.tok.Data) == 0 {
-			// It was all whitespace, so ignore it.
-			return true
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Head:
-			p.addElement()
-			p.head = p.top()
-			p.im = inHeadIM
-			return true
-		case a.Html:
-			return inBodyIM(p)
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Head, a.Body, a.Html, a.Br:
-			p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
-			return false
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	}
-
-	p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
-	return false
-}
-
-// Section 12.2.6.4.4.
-func inHeadIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) < len(p.tok.Data) {
-			// Add the initial whitespace to the current node.
-			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
-			if s == "" {
-				return true
-			}
-			p.tok.Data = s
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			return true
-		case a.Noscript:
-			if p.scripting {
-				p.parseGenericRawTextElement()
-				return true
-			}
-			p.addElement()
-			p.im = inHeadNoscriptIM
-			// Don't let the tokenizer go into raw text mode when scripting is disabled.
-			p.tokenizer.NextIsNotRawText()
-			return true
-		case a.Script, a.Title:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-			return true
-		case a.Noframes, a.Style:
-			p.parseGenericRawTextElement()
-			return true
-		case a.Head:
-			// Ignore the token.
-			return true
-		case a.Template:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// We don't handle all of the corner cases when mixing foreign
-			// content (i.e. <math> or <svg>) with <template>. Without this
-			// early return, we can get into an infinite loop, possibly because
-			// of the "TODO... further divergence" a little below.
-			//
-			// As a workaround, if we are mixing foreign content and templates,
-			// just ignore the rest of the HTML. Foreign content is rare and a
-			// relatively old HTML feature. Templates are also rare and a
-			// relatively new HTML feature. Their combination is very rare.
-			for _, e := range p.oe {
-				if e.Namespace != "" {
-					p.im = ignoreTheRemainingTokens
-					return true
-				}
-			}
-
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-			p.im = inTemplateIM
-			p.templateStack = append(p.templateStack, inTemplateIM)
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Head:
-			p.oe.pop()
-			p.im = afterHeadIM
-			return true
-		case a.Body, a.Html, a.Br:
-			p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
-			return false
-		case a.Template:
-			if !p.oe.contains(a.Template) {
-				return true
-			}
-			// TODO: remove this further divergence from the HTML5 spec.
-			//
-			// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-			p.generateImpliedEndTags()
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
-					p.oe = p.oe[:i]
-					break
-				}
-			}
-			p.clearActiveFormattingElements()
-			p.templateStack.pop()
-			p.resetInsertionMode()
-			return true
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	}
-
-	p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
-	return false
-}
-
-// Section 12.2.6.4.5.
-func inHeadNoscriptIM(p *parser) bool {
-	switch p.tok.Type {
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style:
-			return inHeadIM(p)
-		case a.Head:
-			// Ignore the token.
-			return true
-		case a.Noscript:
-			// Don't let the tokenizer go into raw text mode even when a <noscript>
-			// tag is in "in head noscript" insertion mode.
-			p.tokenizer.NextIsNotRawText()
-			// Ignore the token.
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Noscript, a.Br:
-		default:
-			// Ignore the token.
-			return true
-		}
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) == 0 {
-			// It was all whitespace.
-			return inHeadIM(p)
-		}
-	case CommentToken:
-		return inHeadIM(p)
-	}
-	p.oe.pop()
-	if p.top().DataAtom != a.Head {
-		panic("html: the new current node will be a head element.")
-	}
-	p.im = inHeadIM
-	if p.tok.DataAtom == a.Noscript {
-		return true
-	}
-	return false
-}
-
-// Section 12.2.6.4.6.
-func afterHeadIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) < len(p.tok.Data) {
-			// Add the initial whitespace to the current node.
-			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
-			if s == "" {
-				return true
-			}
-			p.tok.Data = s
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Body:
-			p.addElement()
-			p.framesetOK = false
-			p.im = inBodyIM
-			return true
-		case a.Frameset:
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
-			p.oe = append(p.oe, p.head)
-			defer p.oe.remove(p.head)
-			return inHeadIM(p)
-		case a.Head:
-			// Ignore the token.
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body, a.Html, a.Br:
-			// Drop down to creating an implied <body> tag.
-		case a.Template:
-			return inHeadIM(p)
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	}
-
-	p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
-	p.framesetOK = true
-	return false
-}
-
-// copyAttributes copies attributes of src not found on dst to dst.
-func copyAttributes(dst *Node, src Token) {
-	if len(src.Attr) == 0 {
-		return
-	}
-	attr := map[string]string{}
-	for _, t := range dst.Attr {
-		attr[t.Key] = t.Val
-	}
-	for _, t := range src.Attr {
-		if _, ok := attr[t.Key]; !ok {
-			dst.Attr = append(dst.Attr, t)
-			attr[t.Key] = t.Val
-		}
-	}
-}
-
-// Section 12.2.6.4.7.
-func inBodyIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		d := p.tok.Data
-		switch n := p.oe.top(); n.DataAtom {
-		case a.Pre, a.Listing:
-			if n.FirstChild == nil {
-				// Ignore a newline at the start of a <pre> block.
-				if d != "" && d[0] == '\r' {
-					d = d[1:]
-				}
-				if d != "" && d[0] == '\n' {
-					d = d[1:]
-				}
-			}
-		}
-		d = strings.Replace(d, "\x00", "", -1)
-		if d == "" {
-			return true
-		}
-		p.reconstructActiveFormattingElements()
-		p.addText(d)
-		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
-			// There were non-whitespace characters inserted.
-			p.framesetOK = false
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			if p.oe.contains(a.Template) {
-				return true
-			}
-			copyAttributes(p.oe[0], p.tok)
-		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
-			return inHeadIM(p)
-		case a.Body:
-			if p.oe.contains(a.Template) {
-				return true
-			}
-			if len(p.oe) >= 2 {
-				body := p.oe[1]
-				if body.Type == ElementNode && body.DataAtom == a.Body {
-					p.framesetOK = false
-					copyAttributes(body, p.tok)
-				}
-			}
-		case a.Frameset:
-			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
-				// Ignore the token.
-				return true
-			}
-			body := p.oe[1]
-			if body.Parent != nil {
-				body.Parent.RemoveChild(body)
-			}
-			p.oe = p.oe[:1]
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(buttonScope, a.P)
-			switch n := p.top(); n.DataAtom {
-			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Pre, a.Listing:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			// The newline, if any, will be dealt with by the TextToken case.
-			p.framesetOK = false
-		case a.Form:
-			if p.form != nil && !p.oe.contains(a.Template) {
-				// Ignore the token.
-				return true
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			if !p.oe.contains(a.Template) {
-				p.form = p.top()
-			}
-		case a.Li:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Li:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Dd, a.Dt:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Dd, a.Dt:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Plaintext:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Button:
-			p.popUntil(defaultScope, a.Button)
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-		case a.A:
-			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
-				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
-					p.inBodyEndTagFormatting(a.A, "a")
-					p.oe.remove(n)
-					p.afe.remove(n)
-					break
-				}
-			}
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.Nobr:
-			p.reconstructActiveFormattingElements()
-			if p.elementInScope(defaultScope, a.Nobr) {
-				p.inBodyEndTagFormatting(a.Nobr, "nobr")
-				p.reconstructActiveFormattingElements()
-			}
-			p.addFormattingElement()
-		case a.Applet, a.Marquee, a.Object:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-		case a.Table:
-			if !p.quirks {
-				p.popUntil(buttonScope, a.P)
-			}
-			p.addElement()
-			p.framesetOK = false
-			p.im = inTableIM
-			return true
-		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			if p.tok.DataAtom == a.Input {
-				for _, t := range p.tok.Attr {
-					if t.Key == "type" {
-						if strings.ToLower(t.Val) == "hidden" {
-							// Skip setting framesetOK = false
-							return true
-						}
-					}
-				}
-			}
-			p.framesetOK = false
-		case a.Param, a.Source, a.Track:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Hr:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			p.framesetOK = false
-		case a.Image:
-			p.tok.DataAtom = a.Img
-			p.tok.Data = a.Img.String()
-			return false
-		case a.Textarea:
-			p.addElement()
-			p.setOriginalIM()
-			p.framesetOK = false
-			p.im = textIM
-		case a.Xmp:
-			p.popUntil(buttonScope, a.P)
-			p.reconstructActiveFormattingElements()
-			p.framesetOK = false
-			p.parseGenericRawTextElement()
-		case a.Iframe:
-			p.framesetOK = false
-			p.parseGenericRawTextElement()
-		case a.Noembed:
-			p.parseGenericRawTextElement()
-		case a.Noscript:
-			if p.scripting {
-				p.parseGenericRawTextElement()
-				return true
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			// Don't let the tokenizer go into raw text mode when scripting is disabled.
-			p.tokenizer.NextIsNotRawText()
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectIM
-			return true
-		case a.Optgroup, a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		case a.Rb, a.Rtc:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags()
-			}
-			p.addElement()
-		case a.Rp, a.Rt:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags("rtc")
-			}
-			p.addElement()
-		case a.Math, a.Svg:
-			p.reconstructActiveFormattingElements()
-			if p.tok.DataAtom == a.Math {
-				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-			} else {
-				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-			}
-			adjustForeignAttributes(p.tok.Attr)
-			p.addElement()
-			p.top().Namespace = p.tok.Data
-			if p.hasSelfClosingToken {
-				p.oe.pop()
-				p.acknowledgeSelfClosingTag()
-			}
-			return true
-		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-		default:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.im = afterBodyIM
-			}
-		case a.Html:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
-				return false
-			}
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.Form:
-			if p.oe.contains(a.Template) {
-				i := p.indexOfElementInScope(defaultScope, a.Form)
-				if i == -1 {
-					// Ignore the token.
-					return true
-				}
-				p.generateImpliedEndTags()
-				if p.oe[i].DataAtom != a.Form {
-					// Ignore the token.
-					return true
-				}
-				p.popUntil(defaultScope, a.Form)
-			} else {
-				node := p.form
-				p.form = nil
-				i := p.indexOfElementInScope(defaultScope, a.Form)
-				if node == nil || i == -1 || p.oe[i] != node {
-					// Ignore the token.
-					return true
-				}
-				p.generateImpliedEndTags()
-				p.oe.remove(node)
-			}
-		case a.P:
-			if !p.elementInScope(buttonScope, a.P) {
-				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
-			}
-			p.popUntil(buttonScope, a.P)
-		case a.Li:
-			p.popUntil(listItemScope, a.Li)
-		case a.Dd, a.Dt:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
-		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data)
-		case a.Applet, a.Marquee, a.Object:
-			if p.popUntil(defaultScope, p.tok.DataAtom) {
-				p.clearActiveFormattingElements()
-			}
-		case a.Br:
-			p.tok.Type = StartTagToken
-			return false
-		case a.Template:
-			return inHeadIM(p)
-		default:
-			p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case ErrorToken:
-		// TODO: remove this divergence from the HTML5 spec.
-		if len(p.templateStack) > 0 {
-			p.im = inTemplateIM
-			return false
-		}
-		for _, e := range p.oe {
-			switch e.DataAtom {
-			case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
-				a.Thead, a.Tr, a.Body, a.Html:
-			default:
-				return true
-			}
-		}
-	}
-
-	return true
-}
-
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
-	// This is the "adoption agency" algorithm, described at
-	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
-	// TODO: this is a fairly literal line-by-line translation of that algorithm.
-	// Once the code successfully parses the comprehensive test suite, we should
-	// refactor this code to be more idiomatic.
-
-	// Steps 1-2
-	if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 {
-		p.oe.pop()
-		return
-	}
-
-	// Steps 3-5. The outer loop.
-	for i := 0; i < 8; i++ {
-		// Step 6. Find the formatting element.
-		var formattingElement *Node
-		for j := len(p.afe) - 1; j >= 0; j-- {
-			if p.afe[j].Type == scopeMarkerNode {
-				break
-			}
-			if p.afe[j].DataAtom == tagAtom {
-				formattingElement = p.afe[j]
-				break
-			}
-		}
-		if formattingElement == nil {
-			p.inBodyEndTagOther(tagAtom, tagName)
-			return
-		}
-
-		// Step 7. Ignore the tag if formatting element is not in the stack of open elements.
-		feIndex := p.oe.index(formattingElement)
-		if feIndex == -1 {
-			p.afe.remove(formattingElement)
-			return
-		}
-		// Step 8. Ignore the tag if formatting element is not in the scope.
-		if !p.elementInScope(defaultScope, tagAtom) {
-			// Ignore the tag.
-			return
-		}
-
-		// Step 9. This step is omitted because it's just a parse error; there is no need to return.
-
-		// Steps 10-11. Find the furthest block.
-		var furthestBlock *Node
-		for _, e := range p.oe[feIndex:] {
-			if isSpecialElement(e) {
-				furthestBlock = e
-				break
-			}
-		}
-		if furthestBlock == nil {
-			e := p.oe.pop()
-			for e != formattingElement {
-				e = p.oe.pop()
-			}
-			p.afe.remove(e)
-			return
-		}
-
-		// Steps 12-13. Find the common ancestor and bookmark node.
-		commonAncestor := p.oe[feIndex-1]
-		bookmark := p.afe.index(formattingElement)
-
-		// Step 14. The inner loop. Find the lastNode to reparent.
-		lastNode := furthestBlock
-		node := furthestBlock
-		x := p.oe.index(node)
-		// Step 14.1.
-		j := 0
-		for {
-			// Step 14.2.
-			j++
-			// Step 14.3.
-			x--
-			node = p.oe[x]
-			// Step 14.4. Go to the next step if node is the formatting element.
-			if node == formattingElement {
-				break
-			}
-			// Step 14.5. Remove node from the list of active formatting elements if
-			// inner loop counter is greater than three and node is in the list of
-			// active formatting elements.
-			if ni := p.afe.index(node); j > 3 && ni > -1 {
-				p.afe.remove(node)
-				// If an element is removed from the list of active formatting elements,
-				// we need to decide whether the bookmark should be decremented,
-				// because removing elements can leave the bookmark pointing past the
-				// end of the list.
-				if ni <= bookmark {
-					bookmark--
-				}
-				continue
-			}
-			// Step 14.6. Continue to the next iteration of the inner loop if node is
-			// not in the list of active formatting elements.
-			if p.afe.index(node) == -1 {
-				p.oe.remove(node)
-				continue
-			}
-			// Step 14.7.
-			clone := node.clone()
-			p.afe[p.afe.index(node)] = clone
-			p.oe[p.oe.index(node)] = clone
-			node = clone
-			// Step 14.8.
-			if lastNode == furthestBlock {
-				bookmark = p.afe.index(node) + 1
-			}
-			// Step 14.9.
-			if lastNode.Parent != nil {
-				lastNode.Parent.RemoveChild(lastNode)
-			}
-			node.AppendChild(lastNode)
-			// Step 14.10.
-			lastNode = node
-		}
-
-		// Step 15. Reparent lastNode to the common ancestor,
-		// or for misnested table nodes, to the foster parent.
-		if lastNode.Parent != nil {
-			lastNode.Parent.RemoveChild(lastNode)
-		}
-		switch commonAncestor.DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			p.fosterParent(lastNode)
-		default:
-			commonAncestor.AppendChild(lastNode)
-		}
-
-		// Steps 16-18. Reparent nodes from the furthest block's children
-		// to a clone of the formatting element.
-		clone := formattingElement.clone()
-		reparentChildren(clone, furthestBlock)
-		furthestBlock.AppendChild(clone)
-
-		// Step 19. Fix up the list of active formatting elements.
-		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
-			// Move the bookmark with the rest of the list.
-			bookmark--
-		}
-		p.afe.remove(formattingElement)
-		p.afe.insert(bookmark, clone)
-
-		// Step 20. Fix up the stack of open elements.
-		p.oe.remove(formattingElement)
-		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
-	}
-}
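A minimal, self-contained sketch (assuming only this package's exported Parse and Render functions; the input string is an arbitrary illustration) of the kind of misnested formatting markup that the adoption agency algorithm above repairs:

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// "3" is opened inside both <b> and <i>, but </b> arrives before </i>,
	// so the formatting elements are misnested.
	doc, err := html.Parse(strings.NewReader("<p>1<b>2<i>3</b>4</i>5</p>"))
	if err != nil {
		panic(err)
	}
	// The adoption agency algorithm reparents the offending nodes; rendering
	// the repaired tree shows properly nested <b> and <i> elements.
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}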
-
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
-// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		// Two element nodes have the same tag if they have the same Data (a
-		// string-typed field). As an optimization, for common HTML tags, each
-		// Data string is assigned a unique, non-zero DataAtom (a uint32-typed
-		// field), since integer comparison is faster than string comparison.
-		// Uncommon (custom) tags get a zero DataAtom.
-		//
-		// The if condition here is equivalent to (p.oe[i].Data == tagName).
-		if (p.oe[i].DataAtom == tagAtom) &&
-			((tagAtom != 0) || (p.oe[i].Data == tagName)) {
-			p.oe = p.oe[:i]
-			break
-		}
-		if isSpecialElement(p.oe[i]) {
-			break
-		}
-	}
-}
-
-// Section 12.2.6.4.8.
-func textIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		p.oe.pop()
-	case TextToken:
-		d := p.tok.Data
-		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a <textarea> block.
-			if d != "" && d[0] == '\r' {
-				d = d[1:]
-			}
-			if d != "" && d[0] == '\n' {
-				d = d[1:]
-			}
-		}
-		if d == "" {
-			return true
-		}
-		p.addText(d)
-		return true
-	case EndTagToken:
-		p.oe.pop()
-	}
-	p.im = p.originalIM
-	p.originalIM = nil
-	return p.tok.Type == EndTagToken
-}
-
-// Section 12.2.6.4.9.
-func inTableIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
-		switch p.oe.top().DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			if strings.Trim(p.tok.Data, whitespace) == "" {
-				p.addText(p.tok.Data)
-				return true
-			}
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Caption:
-			p.clearStackToContext(tableScope)
-			p.afe = append(p.afe, &scopeMarker)
-			p.addElement()
-			p.im = inCaptionIM
-			return true
-		case a.Colgroup:
-			p.clearStackToContext(tableScope)
-			p.addElement()
-			p.im = inColumnGroupIM
-			return true
-		case a.Col:
-			p.parseImpliedToken(StartTagToken, a.Colgroup, a.Colgroup.String())
-			return false
-		case a.Tbody, a.Tfoot, a.Thead:
-			p.clearStackToContext(tableScope)
-			p.addElement()
-			p.im = inTableBodyIM
-			return true
-		case a.Td, a.Th, a.Tr:
-			p.parseImpliedToken(StartTagToken, a.Tbody, a.Tbody.String())
-			return false
-		case a.Table:
-			if p.popUntil(tableScope, a.Table) {
-				p.resetInsertionMode()
-				return false
-			}
-			// Ignore the token.
-			return true
-		case a.Style, a.Script, a.Template:
-			return inHeadIM(p)
-		case a.Input:
-			for _, t := range p.tok.Attr {
-				if t.Key == "type" && strings.ToLower(t.Val) == "hidden" {
-					p.addElement()
-					p.oe.pop()
-					return true
-				}
-			}
-			// Otherwise drop down to the default action.
-		case a.Form:
-			if p.oe.contains(a.Template) || p.form != nil {
-				// Ignore the token.
-				return true
-			}
-			p.addElement()
-			p.form = p.oe.pop()
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			switch p.top().DataAtom {
-			case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-				p.fosterParenting = true
-			}
-			p.addElement()
-			p.fosterParenting = false
-			p.framesetOK = false
-			p.im = inSelectInTableIM
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Table:
-			if p.popUntil(tableScope, a.Table) {
-				p.resetInsertionMode()
-				return true
-			}
-			// Ignore the token.
-			return true
-		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-			return true
-		case a.Template:
-			return inHeadIM(p)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	case ErrorToken:
-		return inBodyIM(p)
-	}
-
-	p.fosterParenting = true
-	defer func() { p.fosterParenting = false }()
-
-	return inBodyIM(p)
-}
-
-// Section 12.2.6.4.11.
-func inCaptionIM(p *parser) bool {
-	switch p.tok.Type {
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:
-			if !p.popUntil(tableScope, a.Caption) {
-				// Ignore the token.
-				return true
-			}
-			p.clearActiveFormattingElements()
-			p.im = inTableIM
-			return false
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectInTableIM
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Caption:
-			if p.popUntil(tableScope, a.Caption) {
-				p.clearActiveFormattingElements()
-				p.im = inTableIM
-			}
-			return true
-		case a.Table:
-			if !p.popUntil(tableScope, a.Caption) {
-				// Ignore the token.
-				return true
-			}
-			p.clearActiveFormattingElements()
-			p.im = inTableIM
-			return false
-		case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-			return true
-		}
-	}
-	return inBodyIM(p)
-}
-
-// Section 12.2.6.4.12.
-func inColumnGroupIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) < len(p.tok.Data) {
-			// Add the initial whitespace to the current node.
-			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
-			if s == "" {
-				return true
-			}
-			p.tok.Data = s
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Col:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			return true
-		case a.Template:
-			return inHeadIM(p)
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Colgroup:
-			if p.oe.top().DataAtom == a.Colgroup {
-				p.oe.pop()
-				p.im = inTableIM
-			}
-			return true
-		case a.Col:
-			// Ignore the token.
-			return true
-		case a.Template:
-			return inHeadIM(p)
-		}
-	case ErrorToken:
-		return inBodyIM(p)
-	}
-	if p.oe.top().DataAtom != a.Colgroup {
-		return true
-	}
-	p.oe.pop()
-	p.im = inTableIM
-	return false
-}
-
-// Section 12.2.6.4.13.
-func inTableBodyIM(p *parser) bool {
-	switch p.tok.Type {
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Tr:
-			p.clearStackToContext(tableBodyScope)
-			p.addElement()
-			p.im = inRowIM
-			return true
-		case a.Td, a.Th:
-			p.parseImpliedToken(StartTagToken, a.Tr, a.Tr.String())
-			return false
-		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:
-			if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
-				p.im = inTableIM
-				return false
-			}
-			// Ignore the token.
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Tbody, a.Tfoot, a.Thead:
-			if p.elementInScope(tableScope, p.tok.DataAtom) {
-				p.clearStackToContext(tableBodyScope)
-				p.oe.pop()
-				p.im = inTableIM
-			}
-			return true
-		case a.Table:
-			if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
-				p.im = inTableIM
-				return false
-			}
-			// Ignore the token.
-			return true
-		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th, a.Tr:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	}
-
-	return inTableIM(p)
-}
-
-// Section 12.2.6.4.14.
-func inRowIM(p *parser) bool {
-	switch p.tok.Type {
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Td, a.Th:
-			p.clearStackToContext(tableRowScope)
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.im = inCellIM
-			return true
-		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			if p.popUntil(tableScope, a.Tr) {
-				p.im = inTableBodyIM
-				return false
-			}
-			// Ignore the token.
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Tr:
-			if p.popUntil(tableScope, a.Tr) {
-				p.im = inTableBodyIM
-				return true
-			}
-			// Ignore the token.
-			return true
-		case a.Table:
-			if p.popUntil(tableScope, a.Tr) {
-				p.im = inTableBodyIM
-				return false
-			}
-			// Ignore the token.
-			return true
-		case a.Tbody, a.Tfoot, a.Thead:
-			if p.elementInScope(tableScope, p.tok.DataAtom) {
-				p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
-				return false
-			}
-			// Ignore the token.
-			return true
-		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th:
-			// Ignore the token.
-			return true
-		}
-	}
-
-	return inTableIM(p)
-}
-
-// Section 12.2.6.4.15.
-func inCellIM(p *parser) bool {
-	switch p.tok.Type {
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			if p.popUntil(tableScope, a.Td, a.Th) {
-				// Close the cell and reprocess.
-				p.clearActiveFormattingElements()
-				p.im = inRowIM
-				return false
-			}
-			// Ignore the token.
-			return true
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectInTableIM
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Td, a.Th:
-			if !p.popUntil(tableScope, p.tok.DataAtom) {
-				// Ignore the token.
-				return true
-			}
-			p.clearActiveFormattingElements()
-			p.im = inRowIM
-			return true
-		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html:
-			// Ignore the token.
-			return true
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			if !p.elementInScope(tableScope, p.tok.DataAtom) {
-				// Ignore the token.
-				return true
-			}
-			// Close the cell and reprocess.
-			if p.popUntil(tableScope, a.Td, a.Th) {
-				p.clearActiveFormattingElements()
-			}
-			p.im = inRowIM
-			return false
-		}
-	}
-	return inBodyIM(p)
-}
-
-// Section 12.2.6.4.16.
-func inSelectIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Optgroup:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			if p.top().DataAtom == a.Optgroup {
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Select:
-			if !p.popUntil(selectScope, a.Select) {
-				// Ignore the token.
-				return true
-			}
-			p.resetInsertionMode()
-		case a.Input, a.Keygen, a.Textarea:
-			if p.elementInScope(selectScope, a.Select) {
-				p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
-				return false
-			}
-			// In order to properly ignore <textarea>, we need to change the tokenizer mode.
-			p.tokenizer.NextIsNotRawText()
-			// Ignore the token.
-			return true
-		case a.Script, a.Template:
-			return inHeadIM(p)
-		case a.Iframe, a.Noembed, a.Noframes, a.Noscript, a.Plaintext, a.Style, a.Title, a.Xmp:
-			// Don't let the tokenizer go into raw text mode when there are raw tags
-			// to be ignored. The tokenizer itself also needs to ignore these tags
-			// properly.
-			p.tokenizer.NextIsNotRawText()
-			// Ignore the token.
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-		case a.Optgroup:
-			i := len(p.oe) - 1
-			if p.oe[i].DataAtom == a.Option {
-				i--
-			}
-			if p.oe[i].DataAtom == a.Optgroup {
-				p.oe = p.oe[:i]
-			}
-		case a.Select:
-			if !p.popUntil(selectScope, a.Select) {
-				// Ignore the token.
-				return true
-			}
-			p.resetInsertionMode()
-		case a.Template:
-			return inHeadIM(p)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	case ErrorToken:
-		return inBodyIM(p)
-	}
-
-	return true
-}
-
-// Section 12.2.6.4.17.
-func inSelectInTableIM(p *parser) bool {
-	switch p.tok.Type {
-	case StartTagToken, EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:
-			if p.tok.Type == EndTagToken && !p.elementInScope(tableScope, p.tok.DataAtom) {
-				// Ignore the token.
-				return true
-			}
-			// This is like p.popUntil(selectScope, a.Select), but it also
-			// matches <math select>, not just <select>. Matching the MathML
-			// tag is arguably incorrect (conceptually), but it mimics what
-			// Chromium does.
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				if n := p.oe[i]; n.DataAtom == a.Select {
-					p.oe = p.oe[:i]
-					break
-				}
-			}
-			p.resetInsertionMode()
-			return false
-		}
-	}
-	return inSelectIM(p)
-}
-
-// Section 12.2.6.4.18.
-func inTemplateIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken, CommentToken, DoctypeToken:
-		return inBodyIM(p)
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
-			return inHeadIM(p)
-		case a.Caption, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:
-			p.templateStack.pop()
-			p.templateStack = append(p.templateStack, inTableIM)
-			p.im = inTableIM
-			return false
-		case a.Col:
-			p.templateStack.pop()
-			p.templateStack = append(p.templateStack, inColumnGroupIM)
-			p.im = inColumnGroupIM
-			return false
-		case a.Tr:
-			p.templateStack.pop()
-			p.templateStack = append(p.templateStack, inTableBodyIM)
-			p.im = inTableBodyIM
-			return false
-		case a.Td, a.Th:
-			p.templateStack.pop()
-			p.templateStack = append(p.templateStack, inRowIM)
-			p.im = inRowIM
-			return false
-		default:
-			p.templateStack.pop()
-			p.templateStack = append(p.templateStack, inBodyIM)
-			p.im = inBodyIM
-			return false
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Template:
-			return inHeadIM(p)
-		default:
-			// Ignore the token.
-			return true
-		}
-	case ErrorToken:
-		if !p.oe.contains(a.Template) {
-			// Ignore the token.
-			return true
-		}
-		// TODO: remove this divergence from the HTML5 spec.
-		//
-		// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-		p.generateImpliedEndTags()
-		for i := len(p.oe) - 1; i >= 0; i-- {
-			if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
-				p.oe = p.oe[:i]
-				break
-			}
-		}
-		p.clearActiveFormattingElements()
-		p.templateStack.pop()
-		p.resetInsertionMode()
-		return false
-	}
-	return false
-}
-
-// Section 12.2.6.4.19.
-func afterBodyIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		// Stop parsing.
-		return true
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) == 0 {
-			// It was all whitespace.
-			return inBodyIM(p)
-		}
-	case StartTagToken:
-		if p.tok.DataAtom == a.Html {
-			return inBodyIM(p)
-		}
-	case EndTagToken:
-		if p.tok.DataAtom == a.Html {
-			if !p.fragment {
-				p.im = afterAfterBodyIM
-			}
-			return true
-		}
-	case CommentToken:
-		// The comment is attached to the <html> element.
-		if len(p.oe) < 1 || p.oe[0].DataAtom != a.Html {
-			panic("html: bad parser state: <html> element not found, in the after-body insertion mode")
-		}
-		p.oe[0].AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	}
-	p.im = inBodyIM
-	return false
-}
-
-// Section 12.2.6.4.20.
-func inFramesetIM(p *parser) bool {
-	switch p.tok.Type {
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case TextToken:
-		// Ignore all text but whitespace.
-		s := strings.Map(func(c rune) rune {
-			switch c {
-			case ' ', '\t', '\n', '\f', '\r':
-				return c
-			}
-			return -1
-		}, p.tok.Data)
-		if s != "" {
-			p.addText(s)
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Frameset:
-			p.addElement()
-		case a.Frame:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Noframes:
-			return inHeadIM(p)
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Frameset:
-			if p.oe.top().DataAtom != a.Html {
-				p.oe.pop()
-				if p.oe.top().DataAtom != a.Frameset {
-					p.im = afterFramesetIM
-					return true
-				}
-			}
-		}
-	default:
-		// Ignore the token.
-	}
-	return true
-}
-
-// Section 12.2.6.4.21.
-func afterFramesetIM(p *parser) bool {
-	switch p.tok.Type {
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case TextToken:
-		// Ignore all text but whitespace.
-		s := strings.Map(func(c rune) rune {
-			switch c {
-			case ' ', '\t', '\n', '\f', '\r':
-				return c
-			}
-			return -1
-		}, p.tok.Data)
-		if s != "" {
-			p.addText(s)
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Noframes:
-			return inHeadIM(p)
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			p.im = afterAfterFramesetIM
-			return true
-		}
-	default:
-		// Ignore the token.
-	}
-	return true
-}
-
-// Section 12.2.6.4.22.
-func afterAfterBodyIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		// Stop parsing.
-		return true
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) == 0 {
-			// It was all whitespace.
-			return inBodyIM(p)
-		}
-	case StartTagToken:
-		if p.tok.DataAtom == a.Html {
-			return inBodyIM(p)
-		}
-	case CommentToken:
-		p.doc.AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		return inBodyIM(p)
-	}
-	p.im = inBodyIM
-	return false
-}
-
-// Section 12.2.6.4.23.
-func afterAfterFramesetIM(p *parser) bool {
-	switch p.tok.Type {
-	case CommentToken:
-		p.doc.AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case TextToken:
-		// Ignore all text but whitespace.
-		s := strings.Map(func(c rune) rune {
-			switch c {
-			case ' ', '\t', '\n', '\f', '\r':
-				return c
-			}
-			return -1
-		}, p.tok.Data)
-		if s != "" {
-			p.tok.Data = s
-			return inBodyIM(p)
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Noframes:
-			return inHeadIM(p)
-		}
-	case DoctypeToken:
-		return inBodyIM(p)
-	default:
-		// Ignore the token.
-	}
-	return true
-}
-
-func ignoreTheRemainingTokens(p *parser) bool {
-	return true
-}
-
-const whitespaceOrNUL = whitespace + "\x00"
-
-// Section 12.2.6.5
-func parseForeignContent(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		if p.framesetOK {
-			p.framesetOK = strings.TrimLeft(p.tok.Data, whitespaceOrNUL) == ""
-		}
-		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "\ufffd", -1)
-		p.addText(p.tok.Data)
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case StartTagToken:
-		if !p.fragment {
-			b := breakout[p.tok.Data]
-			if p.tok.DataAtom == a.Font {
-			loop:
-				for _, attr := range p.tok.Attr {
-					switch attr.Key {
-					case "color", "face", "size":
-						b = true
-						break loop
-					}
-				}
-			}
-			if b {
-				for i := len(p.oe) - 1; i >= 0; i-- {
-					n := p.oe[i]
-					if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
-						p.oe = p.oe[:i+1]
-						break
-					}
-				}
-				return false
-			}
-		}
-		current := p.adjustedCurrentNode()
-		switch current.Namespace {
-		case "math":
-			adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-		case "svg":
-			// Adjust SVG tag names. The tokenizer lower-cases tag names, but
-			// SVG wants e.g. "foreignObject" with a capital second "O".
-			if x := svgTagNameAdjustments[p.tok.Data]; x != "" {
-				p.tok.DataAtom = a.Lookup([]byte(x))
-				p.tok.Data = x
-			}
-			adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-		default:
-			panic("html: bad parser state: unexpected namespace")
-		}
-		adjustForeignAttributes(p.tok.Attr)
-		namespace := current.Namespace
-		p.addElement()
-		p.top().Namespace = namespace
-		if namespace != "" {
-			// Don't let the tokenizer go into raw text mode in foreign content
-			// (e.g. in an SVG <title> tag).
-			p.tokenizer.NextIsNotRawText()
-		}
-		if p.hasSelfClosingToken {
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		}
-	case EndTagToken:
-		for i := len(p.oe) - 1; i >= 0; i-- {
-			if p.oe[i].Namespace == "" {
-				return p.im(p)
-			}
-			if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
-				p.oe = p.oe[:i]
-				break
-			}
-		}
-		return true
-	default:
-		// Ignore the token.
-	}
-	return true
-}
-
-// Section 12.2.4.2.
-func (p *parser) adjustedCurrentNode() *Node {
-	if len(p.oe) == 1 && p.fragment && p.context != nil {
-		return p.context
-	}
-	return p.oe.top()
-}
-
-// Section 12.2.6.
-func (p *parser) inForeignContent() bool {
-	if len(p.oe) == 0 {
-		return false
-	}
-	n := p.adjustedCurrentNode()
-	if n.Namespace == "" {
-		return false
-	}
-	if mathMLTextIntegrationPoint(n) {
-		if p.tok.Type == StartTagToken && p.tok.DataAtom != a.Mglyph && p.tok.DataAtom != a.Malignmark {
-			return false
-		}
-		if p.tok.Type == TextToken {
-			return false
-		}
-	}
-	if n.Namespace == "math" && n.DataAtom == a.AnnotationXml && p.tok.Type == StartTagToken && p.tok.DataAtom == a.Svg {
-		return false
-	}
-	if htmlIntegrationPoint(n) && (p.tok.Type == StartTagToken || p.tok.Type == TextToken) {
-		return false
-	}
-	if p.tok.Type == ErrorToken {
-		return false
-	}
-	return true
-}
-
-// parseImpliedToken parses a token as though it had appeared in the parser's
-// input.
-func (p *parser) parseImpliedToken(t TokenType, dataAtom a.Atom, data string) {
-	realToken, selfClosing := p.tok, p.hasSelfClosingToken
-	p.tok = Token{
-		Type:     t,
-		DataAtom: dataAtom,
-		Data:     data,
-	}
-	p.hasSelfClosingToken = false
-	p.parseCurrentToken()
-	p.tok, p.hasSelfClosingToken = realToken, selfClosing
-}
-
-// parseCurrentToken runs the current token through the parsing routines
-// until it is consumed.
-func (p *parser) parseCurrentToken() {
-	if p.tok.Type == SelfClosingTagToken {
-		p.hasSelfClosingToken = true
-		p.tok.Type = StartTagToken
-	}
-
-	consumed := false
-	for !consumed {
-		if p.inForeignContent() {
-			consumed = parseForeignContent(p)
-		} else {
-			consumed = p.im(p)
-		}
-	}
-
-	if p.hasSelfClosingToken {
-		// This is a parse error, but ignore it.
-		p.hasSelfClosingToken = false
-	}
-}
-
-func (p *parser) parse() error {
-	// Iterate until EOF. Any other error will cause an early return.
-	var err error
-	for err != io.EOF {
-		// CDATA sections are allowed only in foreign content.
-		n := p.oe.top()
-		p.tokenizer.AllowCDATA(n != nil && n.Namespace != "")
-		// Read and parse the next token.
-		p.tokenizer.Next()
-		p.tok = p.tokenizer.Token()
-		if p.tok.Type == ErrorToken {
-			err = p.tokenizer.Err()
-			if err != nil && err != io.EOF {
-				return err
-			}
-		}
-		p.parseCurrentToken()
-	}
-	return nil
-}
-
-// Parse returns the parse tree for the HTML from the given Reader.
-//
-// It implements the HTML5 parsing algorithm
-// (https://html.spec.whatwg.org/multipage/syntax.html#tree-construction),
-// which is very complicated. The resultant tree can contain implicitly created
-// nodes that have no explicit <tag> listed in r's data, and nodes' parents can
-// differ from the nesting implied by a naive processing of start and end
-// <tag>s. Conversely, explicit <tag>s in r's data can be silently dropped,
-// with no corresponding node in the resulting tree.
-//
-// The input is assumed to be UTF-8 encoded.
-func Parse(r io.Reader) (*Node, error) {
-	return ParseWithOptions(r)
-}
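As a hedged illustration of the doc comment above, which notes that the tree can contain implicitly created nodes with no explicit tag in the input, here is a small sketch using only the exported Parse API; walking FirstChild/NextSibling is just one arbitrary way to show the synthesized <html>, <head> and <body> elements:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader("Hello, parser."))
	if err != nil {
		panic(err)
	}
	// Walk the implicitly created nodes: the document's child is <html>,
	// whose children are <head> and <body>.
	for c := doc.FirstChild; c != nil; c = c.NextSibling {
		fmt.Println("child of document:", c.Data)
		for gc := c.FirstChild; gc != nil; gc = gc.NextSibling {
			fmt.Println("  child of", c.Data+":", gc.Data)
		}
	}
}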
-
-// ParseFragment parses a fragment of HTML and returns the nodes that were
-// found. If the fragment is the InnerHTML for an existing element, pass that
-// element in context.
-//
-// It has the same intricacies as Parse.
-func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
-	return ParseFragmentWithOptions(r, context)
-}
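A short sketch of fragment parsing with a context element; the hand-built <tr> context node and the fragment string are illustrative assumptions, using only the exported Node, atom and ParseFragment APIs:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	// Pretend the fragment is the InnerHTML of an existing <tr> element.
	ctx := &html.Node{
		Type:     html.ElementNode,
		DataAtom: atom.Tr,
		Data:     "tr",
	}
	nodes, err := html.ParseFragment(strings.NewReader("<td>a</td><td>b</td>"), ctx)
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Data) // td, td
	}
}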
-
-// ParseOption configures a parser.
-type ParseOption func(p *parser)
-
-// ParseOptionEnableScripting configures the scripting flag.
-// https://html.spec.whatwg.org/multipage/webappapis.html#enabling-and-disabling-scripting
-//
-// By default, scripting is enabled.
-func ParseOptionEnableScripting(enable bool) ParseOption {
-	return func(p *parser) {
-		p.scripting = enable
-	}
-}
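A brief sketch of passing this option through ParseWithOptions (defined just below); the <noscript> input is an arbitrary example, chosen because the in-head handling above treats <noscript> as raw text only when scripting is enabled:

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	in := `<head><noscript><link rel="stylesheet" href="x.css"></noscript></head>`
	// With scripting disabled, the <noscript> contents are parsed as elements
	// rather than being swallowed as raw text.
	doc, err := html.ParseWithOptions(strings.NewReader(in),
		html.ParseOptionEnableScripting(false))
	if err != nil {
		panic(err)
	}
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}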
-
-// ParseWithOptions is like Parse, with options.
-func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) {
-	p := &parser{
-		tokenizer: NewTokenizer(r),
-		doc: &Node{
-			Type: DocumentNode,
-		},
-		scripting:  true,
-		framesetOK: true,
-		im:         initialIM,
-	}
-
-	for _, f := range opts {
-		f(p)
-	}
-
-	if err := p.parse(); err != nil {
-		return nil, err
-	}
-	return p.doc, nil
-}
-
-// ParseFragmentWithOptions is like ParseFragment, with options.
-func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) {
-	contextTag := ""
-	if context != nil {
-		if context.Type != ElementNode {
-			return nil, errors.New("html: ParseFragment of non-element Node")
-		}
-		// The next check isn't just context.DataAtom.String() == context.Data because
-		// it is valid to pass an element whose tag isn't a known atom. For example,
-		// DataAtom == 0 and Data = "tagfromthefuture" is perfectly consistent.
-		if context.DataAtom != a.Lookup([]byte(context.Data)) {
-			return nil, fmt.Errorf("html: inconsistent Node: DataAtom=%q, Data=%q", context.DataAtom, context.Data)
-		}
-		contextTag = context.DataAtom.String()
-	}
-	p := &parser{
-		doc: &Node{
-			Type: DocumentNode,
-		},
-		scripting: true,
-		fragment:  true,
-		context:   context,
-	}
-	if context != nil && context.Namespace != "" {
-		p.tokenizer = NewTokenizer(r)
-	} else {
-		p.tokenizer = NewTokenizerFragment(r, contextTag)
-	}
-
-	for _, f := range opts {
-		f(p)
-	}
-
-	root := &Node{
-		Type:     ElementNode,
-		DataAtom: a.Html,
-		Data:     a.Html.String(),
-	}
-	p.doc.AppendChild(root)
-	p.oe = nodeStack{root}
-	if context != nil && context.DataAtom == a.Template {
-		p.templateStack = append(p.templateStack, inTemplateIM)
-	}
-	p.resetInsertionMode()
-
-	for n := context; n != nil; n = n.Parent {
-		if n.Type == ElementNode && n.DataAtom == a.Form {
-			p.form = n
-			break
-		}
-	}
-
-	if err := p.parse(); err != nil {
-		return nil, err
-	}
-
-	parent := p.doc
-	if context != nil {
-		parent = root
-	}
-
-	var result []*Node
-	for c := parent.FirstChild; c != nil; {
-		next := c.NextSibling
-		parent.RemoveChild(c)
-		result = append(result, c)
-		c = next
-	}
-	return result, nil
-}
diff --git a/application/source/vendor/golang.org/x/net/html/render.go b/application/source/vendor/golang.org/x/net/html/render.go
deleted file mode 100644
index 8b28031905a4ecada583fbcf917c7ccc41532b69..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/render.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-)
-
-type writer interface {
-	io.Writer
-	io.ByteWriter
-	WriteString(string) (int, error)
-}
-
-// Render renders the parse tree n to the given writer.
-//
-// Rendering is done on a 'best effort' basis: calling Parse on the output of
-// Render will always result in something similar to the original tree, but it
-// is not necessarily an exact clone unless the original tree was 'well-formed'.
-// 'Well-formed' is not easily specified; the HTML5 specification is
-// complicated.
-//
-// Calling Parse on arbitrary input typically results in a 'well-formed' parse
-// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
-// For example, in a 'well-formed' parse tree, no <a> element is a child of
-// another <a> element: parsing "<a><a>" results in two sibling elements.
-// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
-// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
-// children; the <a> is reparented to the <table>'s parent. However, calling
-// Parse on "<a><table><a>" does not return an error, but the result has an <a>
-// element with an <a> child, and is therefore not 'well-formed'.
-//
-// Programmatically constructed trees are typically also 'well-formed', but it
-// is possible to construct a tree that looks innocuous but, when rendered and
-// re-parsed, results in a different tree. A simple example is that a solitary
-// text node would become a tree containing <html>, <head> and <body> elements.
-// Another example is that the programmatic equivalent of "a<head>b</head>c"
-// becomes "<html><head><head/><body>abc</body></html>".
-func Render(w io.Writer, n *Node) error {
-	if x, ok := w.(writer); ok {
-		return render(x, n)
-	}
-	buf := bufio.NewWriter(w)
-	if err := render(buf, n); err != nil {
-		return err
-	}
-	return buf.Flush()
-}
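A minimal sketch of the "programmatically constructed trees" case mentioned in the doc comment above, building a small tree by hand (the element and attribute values are arbitrary) and rendering it:

package main

import (
	"os"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	a := &html.Node{
		Type:     html.ElementNode,
		DataAtom: atom.A,
		Data:     "a",
		Attr:     []html.Attribute{{Key: "href", Val: "https://example.com"}},
	}
	a.AppendChild(&html.Node{Type: html.TextNode, Data: "a link"})
	// Prints: <a href="https://example.com">a link</a>
	if err := html.Render(os.Stdout, a); err != nil {
		panic(err)
	}
}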
-
-// plaintextAbort is returned from render1 when a <plaintext> element
-// has been rendered. No more end tags should be rendered after that.
-var plaintextAbort = errors.New("html: internal error (plaintext abort)")
-
-func render(w writer, n *Node) error {
-	err := render1(w, n)
-	if err == plaintextAbort {
-		err = nil
-	}
-	return err
-}
-
-func render1(w writer, n *Node) error {
-	// Render non-element nodes; these are the easy cases.
-	switch n.Type {
-	case ErrorNode:
-		return errors.New("html: cannot render an ErrorNode node")
-	case TextNode:
-		return escape(w, n.Data)
-	case DocumentNode:
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			if err := render1(w, c); err != nil {
-				return err
-			}
-		}
-		return nil
-	case ElementNode:
-		// No-op.
-	case CommentNode:
-		if _, err := w.WriteString("<!--"); err != nil {
-			return err
-		}
-		if err := escapeComment(w, n.Data); err != nil {
-			return err
-		}
-		if _, err := w.WriteString("-->"); err != nil {
-			return err
-		}
-		return nil
-	case DoctypeNode:
-		if _, err := w.WriteString("<!DOCTYPE "); err != nil {
-			return err
-		}
-		if err := escape(w, n.Data); err != nil {
-			return err
-		}
-		if n.Attr != nil {
-			var p, s string
-			for _, a := range n.Attr {
-				switch a.Key {
-				case "public":
-					p = a.Val
-				case "system":
-					s = a.Val
-				}
-			}
-			if p != "" {
-				if _, err := w.WriteString(" PUBLIC "); err != nil {
-					return err
-				}
-				if err := writeQuoted(w, p); err != nil {
-					return err
-				}
-				if s != "" {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-					if err := writeQuoted(w, s); err != nil {
-						return err
-					}
-				}
-			} else if s != "" {
-				if _, err := w.WriteString(" SYSTEM "); err != nil {
-					return err
-				}
-				if err := writeQuoted(w, s); err != nil {
-					return err
-				}
-			}
-		}
-		return w.WriteByte('>')
-	case RawNode:
-		_, err := w.WriteString(n.Data)
-		return err
-	default:
-		return errors.New("html: unknown node type")
-	}
-
-	// Render the <xxx> opening tag.
-	if err := w.WriteByte('<'); err != nil {
-		return err
-	}
-	if _, err := w.WriteString(n.Data); err != nil {
-		return err
-	}
-	for _, a := range n.Attr {
-		if err := w.WriteByte(' '); err != nil {
-			return err
-		}
-		if a.Namespace != "" {
-			if _, err := w.WriteString(a.Namespace); err != nil {
-				return err
-			}
-			if err := w.WriteByte(':'); err != nil {
-				return err
-			}
-		}
-		if _, err := w.WriteString(a.Key); err != nil {
-			return err
-		}
-		if _, err := w.WriteString(`="`); err != nil {
-			return err
-		}
-		if err := escape(w, a.Val); err != nil {
-			return err
-		}
-		if err := w.WriteByte('"'); err != nil {
-			return err
-		}
-	}
-	if voidElements[n.Data] {
-		if n.FirstChild != nil {
-			return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
-		}
-		_, err := w.WriteString("/>")
-		return err
-	}
-	if err := w.WriteByte('>'); err != nil {
-		return err
-	}
-
-	// Add an initial newline where there is danger of a newline being ignored.
-	if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
-		switch n.Data {
-		case "pre", "listing", "textarea":
-			if err := w.WriteByte('\n'); err != nil {
-				return err
-			}
-		}
-	}
-
-	// Render any child nodes.
-	switch n.Data {
-	case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			if c.Type == TextNode {
-				if _, err := w.WriteString(c.Data); err != nil {
-					return err
-				}
-			} else {
-				if err := render1(w, c); err != nil {
-					return err
-				}
-			}
-		}
-		if n.Data == "plaintext" {
-			// Don't render anything else. <plaintext> must be the
-			// last element in the file, with no closing tag.
-			return plaintextAbort
-		}
-	default:
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			if err := render1(w, c); err != nil {
-				return err
-			}
-		}
-	}
-
-	// Render the </xxx> closing tag.
-	if _, err := w.WriteString("</"); err != nil {
-		return err
-	}
-	if _, err := w.WriteString(n.Data); err != nil {
-		return err
-	}
-	return w.WriteByte('>')
-}
-
-// writeQuoted writes s to w surrounded by quotes. Normally it will use double
-// quotes, but if s contains a double quote, it will use single quotes.
-// It is used for writing the identifiers in a doctype declaration.
-// In valid HTML, they can't contain both types of quotes.
-func writeQuoted(w writer, s string) error {
-	var q byte = '"'
-	if strings.Contains(s, `"`) {
-		q = '\''
-	}
-	if err := w.WriteByte(q); err != nil {
-		return err
-	}
-	if _, err := w.WriteString(s); err != nil {
-		return err
-	}
-	if err := w.WriteByte(q); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Section 12.1.2, "Elements", gives this list of void elements. Void elements
-// are those that can't have any contents.
-var voidElements = map[string]bool{
-	"area":   true,
-	"base":   true,
-	"br":     true,
-	"col":    true,
-	"embed":  true,
-	"hr":     true,
-	"img":    true,
-	"input":  true,
-	"keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
-	"link":   true,
-	"meta":   true,
-	"param":  true,
-	"source": true,
-	"track":  true,
-	"wbr":    true,
-}
diff --git a/application/source/vendor/golang.org/x/net/html/token.go b/application/source/vendor/golang.org/x/net/html/token.go
deleted file mode 100644
index de67f938a14b4b089fac1e828fdeab35377d0b7c..0000000000000000000000000000000000000000
--- a/application/source/vendor/golang.org/x/net/html/token.go
+++ /dev/null
@@ -1,1268 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"strconv"
-	"strings"
-
-	"golang.org/x/net/html/atom"
-)
-
-// A TokenType is the type of a Token.
-type TokenType uint32
-
-const (
-	// ErrorToken means that an error occurred during tokenization.
-	ErrorToken TokenType = iota
-	// TextToken means a text node.
-	TextToken
-	// A StartTagToken looks like <a>.
-	StartTagToken
-	// An EndTagToken looks like </a>.
-	EndTagToken
-	// A SelfClosingTagToken tag looks like <br/>.
-	SelfClosingTagToken
-	// A CommentToken looks like <!--x-->.
-	CommentToken
-	// A DoctypeToken looks like <!DOCTYPE x>
-	DoctypeToken
-)
-
-// ErrBufferExceeded means that the buffering limit was exceeded.
-var ErrBufferExceeded = errors.New("max buffer exceeded")
-
-// String returns a string representation of the TokenType.
-func (t TokenType) String() string {
-	switch t {
-	case ErrorToken:
-		return "Error"
-	case TextToken:
-		return "Text"
-	case StartTagToken:
-		return "StartTag"
-	case EndTagToken:
-		return "EndTag"
-	case SelfClosingTagToken:
-		return "SelfClosingTag"
-	case CommentToken:
-		return "Comment"
-	case DoctypeToken:
-		return "Doctype"
-	}
-	return "Invalid(" + strconv.Itoa(int(t)) + ")"
-}
-
-// An Attribute is an attribute namespace-key-value triple. Namespace is
-// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
-// does not contain escapable characters like '&', '<' or '>'), and Val is
-// unescaped (it looks like "a<b" rather than "a&lt;b").
-//
-// Namespace is only used by the parser, not the tokenizer.
-type Attribute struct {
-	Namespace, Key, Val string
-}
-
-// A Token consists of a TokenType and some Data (tag name for start and end
-// tags, content for text, comments and doctypes). A tag Token may also contain
-// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
-// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
-// zero if Data is not a known tag name.
-type Token struct {
-	Type     TokenType
-	DataAtom atom.Atom
-	Data     string
-	Attr     []Attribute
-}
-
-// tagString returns a string representation of a tag Token's Data and Attr.
-func (t Token) tagString() string {
-	if len(t.Attr) == 0 {
-		return t.Data
-	}
-	buf := bytes.NewBufferString(t.Data)
-	for _, a := range t.Attr {
-		buf.WriteByte(' ')
-		buf.WriteString(a.Key)
-		buf.WriteString(`="`)
-		escape(buf, a.Val)
-		buf.WriteByte('"')
-	}
-	return buf.String()
-}
-
-// String returns a string representation of the Token.
-func (t Token) String() string {
-	switch t.Type {
-	case ErrorToken:
-		return ""
-	case TextToken:
-		return EscapeString(t.Data)
-	case StartTagToken:
-		return "<" + t.tagString() + ">"
-	case EndTagToken:
-		return "</" + t.tagString() + ">"
-	case SelfClosingTagToken:
-		return "<" + t.tagString() + "/>"
-	case CommentToken:
-		return "<!--" + escapeCommentString(t.Data) + "-->"
-	case DoctypeToken:
-		return "<!DOCTYPE " + EscapeString(t.Data) + ">"
-	}
-	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
-}
-
-// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
-// the end is exclusive.
-type span struct {
-	start, end int
-}
-
-// A Tokenizer returns a stream of HTML Tokens.
-type Tokenizer struct {
-	// r is the source of the HTML text.
-	r io.Reader
-	// tt is the TokenType of the current token.
-	tt TokenType
-	// err is the first error encountered during tokenization. It is possible
-	// for tt != Error && err != nil to hold: this means that Next returned a
-	// valid token but the subsequent Next call will return an error token.
-	// For example, if the HTML text input was just "plain", then the first
-	// Next call would set z.err to io.EOF but return a TextToken, and all
-	// subsequent Next calls would return an ErrorToken.
-	// err is never reset. Once it becomes non-nil, it stays non-nil.
-	err error
-	// readErr is the error returned by the io.Reader r. It is separate from
-	// err because it is valid for an io.Reader to return (n int, err1 error)
-	// such that n > 0 && err1 != nil, and callers should always process the
-	// n > 0 bytes before considering the error err1.
-	readErr error
-	// buf[raw.start:raw.end] holds the raw bytes of the current token.
-	// buf[raw.end:] is buffered input that will yield future tokens.
-	raw span
-	buf []byte
-	// maxBuf limits the data buffered in buf. A value of 0 means unlimited.
-	maxBuf int
-	// buf[data.start:data.end] holds the raw bytes of the current token's data:
-	// a text token's text, a tag token's tag name, etc.
-	data span
-	// pendingAttr is the attribute key and value currently being tokenized.
-	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
-	// incremented on each call to TagAttr.
-	pendingAttr   [2]span
-	attr          [][2]span
-	nAttrReturned int
-	// rawTag is the "script" in "</script>" that closes the next token. If
-	// non-empty, the subsequent call to Next will return a raw or RCDATA text
-	// token: one that treats "<p>" as text instead of an element.
-	// rawTag's contents are lower-cased.
-	rawTag string
-	// textIsRaw is whether the current text token's data is not escaped.
-	textIsRaw bool
-	// convertNUL is whether NUL bytes in the current token's data should
-	// be converted into \ufffd replacement characters.
-	convertNUL bool
-	// allowCDATA is whether CDATA sections are allowed in the current context.
-	allowCDATA bool
-}
-
-// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
-// the text "foo". The default value is false, which means to recognize it as
-// a bogus comment "<!-- [CDATA[foo]] -->" instead.
-//
-// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
-// only if tokenizing foreign content, such as MathML and SVG. However,
-// tracking foreign-contentness is difficult to do purely in the tokenizer,
-// as opposed to the parser, due to HTML integration points: an <svg> element
-// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
-// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
-// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
-// In practice, if using the tokenizer without caring whether MathML or SVG
-// CDATA is text or comments, such as tokenizing HTML to find all the anchor
-// text, it is acceptable to ignore this responsibility.
-func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
-	z.allowCDATA = allowCDATA
-}
-
-// NextIsNotRawText instructs the tokenizer that the next token should not be
-// considered as 'raw text'. Some elements, such as script and title elements,
-// normally require the next token after the opening tag to be 'raw text' that
-// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
-// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
-// an end tag token for "</title>". There are no distinct start tag or end tag
-// tokens for the "<b>" and "</b>".
-//
-// This tokenizer implementation will generally look for raw text at the right
-// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
-// raw text if in foreign content: <title> generally needs raw text, but a
-// <title> inside an <svg> does not. Another example is that a <textarea>
-// generally needs raw text, but a <textarea> is not allowed as an immediate
-// child of a <select>; in normal parsing, a <textarea> implies </select>, but
-// one cannot close the implicit element when parsing a <select>'s InnerHTML.
-// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
-// ness is difficult to do purely in the tokenizer, as opposed to the parser.
-// For strict compliance with the HTML5 tokenization algorithm, it is the
-// responsibility of the user of a tokenizer to call NextIsNotRawText as
-// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
-// responsibility for basic usage.
-//
-// Note that this 'raw text' concept is different from the one offered by the
-// Tokenizer.Raw method.
-func (z *Tokenizer) NextIsNotRawText() {
-	z.rawTag = ""
-}
-
-// Err returns the error associated with the most recent ErrorToken token.
-// This is typically io.EOF, meaning the end of tokenization.
-func (z *Tokenizer) Err() error {
-	if z.tt != ErrorToken {
-		return nil
-	}
-	return z.err
-}
-
-// readByte returns the next byte from the input stream, doing a buffered read
-// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
-// slice that holds all the bytes read so far for the current token.
-// It sets z.err if the underlying reader returns an error.
-// Pre-condition: z.err == nil.
-func (z *Tokenizer) readByte() byte {
-	if z.raw.end >= len(z.buf) {
-		// Our buffer is exhausted and we have to read from z.r. Check if the
-		// previous read resulted in an error.
-		if z.readErr != nil {
-			z.err = z.readErr
-			return 0
-		}
-		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
-		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
-		// allocate a new buffer before the copy.
-		c := cap(z.buf)
-		d := z.raw.end - z.raw.start
-		var buf1 []byte
-		if 2*d > c {
-			buf1 = make([]byte, d, 2*c)
-		} else {
-			buf1 = z.buf[:d]
-		}
-		copy(buf1, z.buf[z.raw.start:z.raw.end])
-		if x := z.raw.start; x != 0 {
-			// Adjust the data/attr spans to refer to the same contents after the copy.
-			z.data.start -= x
-			z.data.end -= x
-			z.pendingAttr[0].start -= x
-			z.pendingAttr[0].end -= x
-			z.pendingAttr[1].start -= x
-			z.pendingAttr[1].end -= x
-			for i := range z.attr {
-				z.attr[i][0].start -= x
-				z.attr[i][0].end -= x
-				z.attr[i][1].start -= x
-				z.attr[i][1].end -= x
-			}
-		}
-		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
-		// Now that we have copied the live bytes to the start of the buffer,
-		// we read from z.r into the remainder.
-		var n int
-		n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
-		if n == 0 {
-			z.err = z.readErr
-			return 0
-		}
-		z.buf = buf1[:d+n]
-	}
-	x := z.buf[z.raw.end]
-	z.raw.end++
-	if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
-		z.err = ErrBufferExceeded
-		return 0
-	}
-	return x
-}
-
-// Buffered returns a slice containing data buffered but not yet tokenized.
-func (z *Tokenizer) Buffered() []byte {
-	return z.buf[z.raw.end:]
-}
-
-// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
-// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
-// too many times in succession.
-func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
-	for i := 0; i < 100; i++ {
-		if n, err := r.Read(b); n != 0 || err != nil {
-			return n, err
-		}
-	}
-	return 0, io.ErrNoProgress
-}
-
-// skipWhiteSpace skips past any white space.
-func (z *Tokenizer) skipWhiteSpace() {
-	if z.err != nil {
-		return
-	}
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			return
-		}
-		switch c {
-		case ' ', '\n', '\r', '\t', '\f':
-			// No-op.
-		default:
-			z.raw.end--
-			return
-		}
-	}
-}
-
-// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
-// is typically something like "script" or "textarea".
-func (z *Tokenizer) readRawOrRCDATA() {
-	if z.rawTag == "script" {
-		z.readScript()
-		z.textIsRaw = true
-		z.rawTag = ""
-		return
-	}
-loop:
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			break loop
-		}
-		if c != '<' {
-			continue loop
-		}
-		c = z.readByte()
-		if z.err != nil {
-			break loop
-		}
-		if c != '/' {
-			z.raw.end--
-			continue loop
-		}
-		if z.readRawEndTag() || z.err != nil {
-			break loop
-		}
-	}
-	z.data.end = z.raw.end
-	// A textarea's or title's RCDATA can contain escaped entities.
-	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
-	z.rawTag = ""
-}
-
-// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
-// If it succeeds, it backs up the input position to reconsume the tag and
-// returns true. Otherwise it returns false. The opening "</" has already been
-// consumed.
-func (z *Tokenizer) readRawEndTag() bool {
-	for i := 0; i < len(z.rawTag); i++ {
-		c := z.readByte()
-		if z.err != nil {
-			return false
-		}
-		if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
-			z.raw.end--
-			return false
-		}
-	}
-	c := z.readByte()
-	if z.err != nil {
-		return false
-	}
-	switch c {
-	case ' ', '\n', '\r', '\t', '\f', '/', '>':
-		// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
-		z.raw.end -= 3 + len(z.rawTag)
-		return true
-	}
-	z.raw.end--
-	return false
-}
-
-// readScript reads until the next </script> tag, following the byzantine
-// rules for escaping/hiding the closing tag.
-func (z *Tokenizer) readScript() {
-	defer func() {
-		z.data.end = z.raw.end
-	}()
-	var c byte
-
-scriptData:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	if c == '<' {
-		goto scriptDataLessThanSign
-	}
-	goto scriptData
-
-scriptDataLessThanSign:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '/':
-		goto scriptDataEndTagOpen
-	case '!':
-		goto scriptDataEscapeStart
-	}
-	z.raw.end--
-	goto scriptData
-
-scriptDataEndTagOpen:
-	if z.readRawEndTag() || z.err != nil {
-		return
-	}
-	goto scriptData
-
-scriptDataEscapeStart:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	if c == '-' {
-		goto scriptDataEscapeStartDash
-	}
-	z.raw.end--
-	goto scriptData
-
-scriptDataEscapeStartDash:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	if c == '-' {
-		goto scriptDataEscapedDashDash
-	}
-	z.raw.end--
-	goto scriptData
-
-scriptDataEscaped:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '-':
-		goto scriptDataEscapedDash
-	case '<':
-		goto scriptDataEscapedLessThanSign
-	}
-	goto scriptDataEscaped
-
-scriptDataEscapedDash:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '-':
-		goto scriptDataEscapedDashDash
-	case '<':
-		goto scriptDataEscapedLessThanSign
-	}
-	goto scriptDataEscaped
-
-scriptDataEscapedDashDash:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '-':
-		goto scriptDataEscapedDashDash
-	case '<':
-		goto scriptDataEscapedLessThanSign
-	case '>':
-		goto scriptData
-	}
-	goto scriptDataEscaped
-
-scriptDataEscapedLessThanSign:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	if c == '/' {
-		goto scriptDataEscapedEndTagOpen
-	}
-	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
-		goto scriptDataDoubleEscapeStart
-	}
-	z.raw.end--
-	goto scriptData
-
-scriptDataEscapedEndTagOpen:
-	if z.readRawEndTag() || z.err != nil {
-		return
-	}
-	goto scriptDataEscaped
-
-scriptDataDoubleEscapeStart:
-	z.raw.end--
-	for i := 0; i < len("script"); i++ {
-		c = z.readByte()
-		if z.err != nil {
-			return
-		}
-		if c != "script"[i] && c != "SCRIPT"[i] {
-			z.raw.end--
-			goto scriptDataEscaped
-		}
-	}
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case ' ', '\n', '\r', '\t', '\f', '/', '>':
-		goto scriptDataDoubleEscaped
-	}
-	z.raw.end--
-	goto scriptDataEscaped
-
-scriptDataDoubleEscaped:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '-':
-		goto scriptDataDoubleEscapedDash
-	case '<':
-		goto scriptDataDoubleEscapedLessThanSign
-	}
-	goto scriptDataDoubleEscaped
-
-scriptDataDoubleEscapedDash:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '-':
-		goto scriptDataDoubleEscapedDashDash
-	case '<':
-		goto scriptDataDoubleEscapedLessThanSign
-	}
-	goto scriptDataDoubleEscaped
-
-scriptDataDoubleEscapedDashDash:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch c {
-	case '-':
-		goto scriptDataDoubleEscapedDashDash
-	case '<':
-		goto scriptDataDoubleEscapedLessThanSign
-	case '>':
-		goto scriptData
-	}
-	goto scriptDataDoubleEscaped
-
-scriptDataDoubleEscapedLessThanSign:
-	c = z.readByte()
-	if z.err != nil {
-		return
-	}
-	if c == '/' {
-		goto scriptDataDoubleEscapeEnd
-	}
-	z.raw.end--
-	goto scriptDataDoubleEscaped
-
-scriptDataDoubleEscapeEnd:
-	if z.readRawEndTag() {
-		z.raw.end += len("</script>")
-		goto scriptDataEscaped
-	}
-	if z.err != nil {
-		return
-	}
-	goto scriptDataDoubleEscaped
-}
-
-// readComment reads the next comment token starting with "<!--". The opening
-// "<!--" has already been consumed.
-func (z *Tokenizer) readComment() {
-	// When modifying this function, consider manually increasing the
-	// maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more.
-	// That increase should only be temporary, not committed, as it
-	// exponentially affects the test running time.
-
-	z.data.start = z.raw.end
-	defer func() {
-		if z.data.end < z.data.start {
-			// It's a comment with no data, like <!-->.
-			z.data.end = z.data.start
-		}
-	}()
-
-	var dashCount int
-	beginning := true
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			z.data.end = z.calculateAbruptCommentDataEnd()
-			return
-		}
-		switch c {
-		case '-':
-			dashCount++
-			continue
-		case '>':
-			if dashCount >= 2 || beginning {
-				z.data.end = z.raw.end - len("-->")
-				return
-			}
-		case '!':
-			if dashCount >= 2 {
-				c = z.readByte()
-				if z.err != nil {
-					z.data.end = z.calculateAbruptCommentDataEnd()
-					return
-				} else if c == '>' {
-					z.data.end = z.raw.end - len("--!>")
-					return
-				} else if c == '-' {
-					dashCount = 1
-					beginning = false
-					continue
-				}
-			}
-		}
-		dashCount = 0
-		beginning = false
-	}
-}
-
-func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
-	raw := z.Raw()
-	const prefixLen = len("<!--")
-	if len(raw) >= prefixLen {
-		raw = raw[prefixLen:]
-		if hasSuffix(raw, "--!") {
-			return z.raw.end - 3
-		} else if hasSuffix(raw, "--") {
-			return z.raw.end - 2
-		} else if hasSuffix(raw, "-") {
-			return z.raw.end - 1
-		}
-	}
-	return z.raw.end
-}
-
-func hasSuffix(b []byte, suffix string) bool {
-	if len(b) < len(suffix) {
-		return false
-	}
-	b = b[len(b)-len(suffix):]
-	for i := range b {
-		if b[i] != suffix[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// readUntilCloseAngle reads until the next ">".
-func (z *Tokenizer) readUntilCloseAngle() {
-	z.data.start = z.raw.end
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			z.data.end = z.raw.end
-			return
-		}
-		if c == '>' {
-			z.data.end = z.raw.end - len(">")
-			return
-		}
-	}
-}
-
-// readMarkupDeclaration reads the next token starting with "<!". It might be
-// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
-// "<!a bogus comment". The opening "<!" has already been consumed.
-func (z *Tokenizer) readMarkupDeclaration() TokenType {
-	z.data.start = z.raw.end
-	var c [2]byte
-	for i := 0; i < 2; i++ {
-		c[i] = z.readByte()
-		if z.err != nil {
-			z.data.end = z.raw.end
-			return CommentToken
-		}
-	}
-	if c[0] == '-' && c[1] == '-' {
-		z.readComment()
-		return CommentToken
-	}
-	z.raw.end -= 2
-	if z.readDoctype() {
-		return DoctypeToken
-	}
-	if z.allowCDATA && z.readCDATA() {
-		z.convertNUL = true
-		return TextToken
-	}
-	// It's a bogus comment.
-	z.readUntilCloseAngle()
-	return CommentToken
-}
-
-// readDoctype attempts to read a doctype declaration and returns true if
-// successful. The opening "<!" has already been consumed.
-func (z *Tokenizer) readDoctype() bool {
-	const s = "DOCTYPE"
-	for i := 0; i < len(s); i++ {
-		c := z.readByte()
-		if z.err != nil {
-			z.data.end = z.raw.end
-			return false
-		}
-		if c != s[i] && c != s[i]+('a'-'A') {
-			// Back up to read the fragment of "DOCTYPE" again.
-			z.raw.end = z.data.start
-			return false
-		}
-	}
-	if z.skipWhiteSpace(); z.err != nil {
-		z.data.start = z.raw.end
-		z.data.end = z.raw.end
-		return true
-	}
-	z.readUntilCloseAngle()
-	return true
-}
-
-// readCDATA attempts to read a CDATA section and returns true if
-// successful. The opening "<!" has already been consumed.
-func (z *Tokenizer) readCDATA() bool {
-	const s = "[CDATA["
-	for i := 0; i < len(s); i++ {
-		c := z.readByte()
-		if z.err != nil {
-			z.data.end = z.raw.end
-			return false
-		}
-		if c != s[i] {
-			// Back up to read the fragment of "[CDATA[" again.
-			z.raw.end = z.data.start
-			return false
-		}
-	}
-	z.data.start = z.raw.end
-	brackets := 0
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			z.data.end = z.raw.end
-			return true
-		}
-		switch c {
-		case ']':
-			brackets++
-		case '>':
-			if brackets >= 2 {
-				z.data.end = z.raw.end - len("]]>")
-				return true
-			}
-			brackets = 0
-		default:
-			brackets = 0
-		}
-	}
-}
-
-// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
-// case-insensitively matches any element of ss.
-func (z *Tokenizer) startTagIn(ss ...string) bool {
-loop:
-	for _, s := range ss {
-		if z.data.end-z.data.start != len(s) {
-			continue loop
-		}
-		for i := 0; i < len(s); i++ {
-			c := z.buf[z.data.start+i]
-			if 'A' <= c && c <= 'Z' {
-				c += 'a' - 'A'
-			}
-			if c != s[i] {
-				continue loop
-			}
-		}
-		return true
-	}
-	return false
-}
-
-// readStartTag reads the next start tag token. The opening "<a" has already
-// been consumed, where 'a' means anything in [A-Za-z].
-func (z *Tokenizer) readStartTag() TokenType {
-	z.readTag(true)
-	if z.err != nil {
-		return ErrorToken
-	}
-	// Several tags flag the tokenizer's next token as raw.
-	c, raw := z.buf[z.data.start], false
-	if 'A' <= c && c <= 'Z' {
-		c += 'a' - 'A'
-	}
-	switch c {
-	case 'i':
-		raw = z.startTagIn("iframe")
-	case 'n':
-		raw = z.startTagIn("noembed", "noframes", "noscript")
-	case 'p':
-		raw = z.startTagIn("plaintext")
-	case 's':
-		raw = z.startTagIn("script", "style")
-	case 't':
-		raw = z.startTagIn("textarea", "title")
-	case 'x':
-		raw = z.startTagIn("xmp")
-	}
-	if raw {
-		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
-	}
-	// Look for a self-closing token like "<br/>".
-	if z.err == nil && z.buf[z.raw.end-2] == '/' {
-		return SelfClosingTagToken
-	}
-	return StartTagToken
-}
-
-// readTag reads the next tag token and its attributes. If saveAttr, those
-// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
-// The opening "<a" or "</a" has already been consumed, where 'a' means anything
-// in [A-Za-z].
-func (z *Tokenizer) readTag(saveAttr bool) {
-	z.attr = z.attr[:0]
-	z.nAttrReturned = 0
-	// Read the tag name and attribute key/value pairs.
-	z.readTagName()
-	if z.skipWhiteSpace(); z.err != nil {
-		return
-	}
-	for {
-		c := z.readByte()
-		if z.err != nil || c == '>' {
-			break
-		}
-		z.raw.end--
-		z.readTagAttrKey()
-		z.readTagAttrVal()
-		// Save pendingAttr if saveAttr and that attribute has a non-empty key.
-		if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
-			z.attr = append(z.attr, z.pendingAttr)
-		}
-		if z.skipWhiteSpace(); z.err != nil {
-			break
-		}
-	}
-}
-
-// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
-// is positioned such that the first byte of the tag name (the "d" in "<div")
-// has already been consumed.
-func (z *Tokenizer) readTagName() {
-	z.data.start = z.raw.end - 1
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			z.data.end = z.raw.end
-			return
-		}
-		switch c {
-		case ' ', '\n', '\r', '\t', '\f':
-			z.data.end = z.raw.end - 1
-			return
-		case '/', '>':
-			z.raw.end--
-			z.data.end = z.raw.end
-			return
-		}
-	}
-}
-
-// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
-// Precondition: z.err == nil.
-func (z *Tokenizer) readTagAttrKey() {
-	z.pendingAttr[0].start = z.raw.end
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			z.pendingAttr[0].end = z.raw.end
-			return
-		}
-		switch c {
-		case ' ', '\n', '\r', '\t', '\f', '/':
-			z.pendingAttr[0].end = z.raw.end - 1
-			return
-		case '=':
-			if z.pendingAttr[0].start+1 == z.raw.end {
-				// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
-				// begins, we treat it as a character in the attribute name and continue.
-				continue
-			}
-			fallthrough
-		case '>':
-			z.raw.end--
-			z.pendingAttr[0].end = z.raw.end
-			return
-		}
-	}
-}
-
-// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
-func (z *Tokenizer) readTagAttrVal() {
-	z.pendingAttr[1].start = z.raw.end
-	z.pendingAttr[1].end = z.raw.end
-	if z.skipWhiteSpace(); z.err != nil {
-		return
-	}
-	c := z.readByte()
-	if z.err != nil {
-		return
-	}
-	if c != '=' {
-		z.raw.end--
-		return
-	}
-	if z.skipWhiteSpace(); z.err != nil {
-		return
-	}
-	quote := z.readByte()
-	if z.err != nil {
-		return
-	}
-	switch quote {
-	case '>':
-		z.raw.end--
-		return
-
-	case '\'', '"':
-		z.pendingAttr[1].start = z.raw.end
-		for {
-			c := z.readByte()
-			if z.err != nil {
-				z.pendingAttr[1].end = z.raw.end
-				return
-			}
-			if c == quote {
-				z.pendingAttr[1].end = z.raw.end - 1
-				return
-			}
-		}
-
-	default:
-		z.pendingAttr[1].start = z.raw.end - 1
-		for {
-			c := z.readByte()
-			if z.err != nil {
-				z.pendingAttr[1].end = z.raw.end
-				return
-			}
-			switch c {
-			case ' ', '\n', '\r', '\t', '\f':
-				z.pendingAttr[1].end = z.raw.end - 1
-				return
-			case '>':
-				z.raw.end--
-				z.pendingAttr[1].end = z.raw.end
-				return
-			}
-		}
-	}
-}
-
-// Next scans the next token and returns its type.
-func (z *Tokenizer) Next() TokenType {
-	z.raw.start = z.raw.end
-	z.data.start = z.raw.end
-	z.data.end = z.raw.end
-	if z.err != nil {
-		z.tt = ErrorToken
-		return z.tt
-	}
-	if z.rawTag != "" {
-		if z.rawTag == "plaintext" {
-			// Read everything up to EOF.
-			for z.err == nil {
-				z.readByte()
-			}
-			z.data.end = z.raw.end
-			z.textIsRaw = true
-		} else {
-			z.readRawOrRCDATA()
-		}
-		if z.data.end > z.data.start {
-			z.tt = TextToken
-			z.convertNUL = true
-			return z.tt
-		}
-	}
-	z.textIsRaw = false
-	z.convertNUL = false
-
-loop:
-	for {
-		c := z.readByte()
-		if z.err != nil {
-			break loop
-		}
-		if c != '<' {
-			continue loop
-		}
-
-		// Check if the '<' we have just read is part of a tag, comment
-		// or doctype. If not, it's part of the accumulated text token.
-		c = z.readByte()
-		if z.err != nil {
-			break loop
-		}
-		var tokenType TokenType
-		switch {
-		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
-			tokenType = StartTagToken
-		case c == '/':
-			tokenType = EndTagToken
-		case c == '!' || c == '?':
-			// We use CommentToken to mean any of "<!--actual comments-->",
-			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
-			tokenType = CommentToken
-		default:
-			// Reconsume the current character.
-			z.raw.end--
-			continue
-		}
-
-		// We have a non-text token, but we might have accumulated some text
-		// before that. If so, we return the text first, and return the non-
-		// text token on the subsequent call to Next.
-		if x := z.raw.end - len("<a"); z.raw.start < x {
-			z.raw.end = x
-			z.data.end = x
-			z.tt = TextToken
-			return z.tt
-		}
-		switch tokenType {
-		case StartTagToken:
-			z.tt = z.readStartTag()
-			return z.tt
-		case EndTagToken:
-			c = z.readByte()
-			if z.err != nil {
-				break loop
-			}
-			if c == '>' {
-				// "</>" does not generate a token at all. Generate an empty comment
-				// to allow passthrough clients to pick up the data using Raw.
-				// Reset the tokenizer state and start again.
-				z.tt = CommentToken
-				return z.tt
-			}
-			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
-				z.readTag(false)
-				if z.err != nil {
-					z.tt = ErrorToken
-				} else {
-					z.tt = EndTagToken
-				}
-				return z.tt
-			}
-			z.raw.end--
-			z.readUntilCloseAngle()
-			z.tt = CommentToken
-			return z.tt
-		case CommentToken:
-			if c == '!' {
-				z.tt = z.readMarkupDeclaration()
-				return z.tt
-			}
-			z.raw.end--
-			z.readUntilCloseAngle()
-			z.tt = CommentToken
-			return z.tt
-		}
-	}
-	if z.raw.start < z.raw.end {
-		z.data.end = z.raw.end
-		z.tt = TextToken
-		return z.tt
-	}
-	z.tt = ErrorToken
-	return z.tt
-}
-
-// Raw returns the unmodified text of the current token. Calling Next, Token,
-// Text, TagName or TagAttr may change the contents of the returned slice.
-//
-// The token stream's raw bytes partition the byte stream (up until an
-// ErrorToken). There are no overlaps or gaps between two consecutive token's
-// raw bytes. One implication is that the byte offset of the current token is
-// the sum of the lengths of all previous tokens' raw bytes.
-func (z *Tokenizer) Raw() []byte {
-	return z.buf[z.raw.start:z.raw.end]
-}
-
-// convertNewlines converts "\r" and "\r\n" in s to "\n".
-// The conversion happens in place, but the resulting slice may be shorter.
-func convertNewlines(s []byte) []byte {
-	for i, c := range s {
-		if c != '\r' {
-			continue
-		}
-
-		src := i + 1
-		if src >= len(s) || s[src] != '\n' {
-			s[i] = '\n'
-			continue
-		}
-
-		dst := i
-		for src < len(s) {
-			if s[src] == '\r' {
-				if src+1 < len(s) && s[src+1] == '\n' {
-					src++
-				}
-				s[dst] = '\n'
-			} else {
-				s[dst] = s[src]
-			}
-			src++
-			dst++
-		}
-		return s[:dst]
-	}
-	return s
-}
-
-var (
-	nul         = []byte("\x00")
-	replacement = []byte("\ufffd")
-)
-
-// Text returns the unescaped text of a text, comment or doctype token. The
-// contents of the returned slice may change on the next call to Next.
-func (z *Tokenizer) Text() []byte {
-	switch z.tt {
-	case TextToken, CommentToken, DoctypeToken:
-		s := z.buf[z.data.start:z.data.end]
-		z.data.start = z.raw.end
-		z.data.end = z.raw.end
-		s = convertNewlines(s)
-		if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
-			s = bytes.Replace(s, nul, replacement, -1)
-		}
-		if !z.textIsRaw {
-			s = unescape(s, false)
-		}
-		return s
-	}
-	return nil
-}
-
-// TagName returns the lower-cased name of a tag token (the `img` out of
-// `<IMG SRC="foo">`) and whether the tag has attributes.
-// The contents of the returned slice may change on the next call to Next.
-func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
-	if z.data.start < z.data.end {
-		switch z.tt {
-		case StartTagToken, EndTagToken, SelfClosingTagToken:
-			s := z.buf[z.data.start:z.data.end]
-			z.data.start = z.raw.end
-			z.data.end = z.raw.end
-			return lower(s), z.nAttrReturned < len(z.attr)
-		}
-	}
-	return nil, false
-}
-
-// TagAttr returns the lower-cased key and unescaped value of the next unparsed
-// attribute for the current tag token and whether there are more attributes.
-// The contents of the returned slices may change on the next call to Next.
-func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
-	if z.nAttrReturned < len(z.attr) {
-		switch z.tt {
-		case StartTagToken, SelfClosingTagToken:
-			x := z.attr[z.nAttrReturned]
-			z.nAttrReturned++
-			key = z.buf[x[0].start:x[0].end]
-			val = z.buf[x[1].start:x[1].end]
-			return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
-		}
-	}
-	return nil, nil, false
-}
-
-// Token returns the current Token. The result's Data and Attr values remain
-// valid after subsequent Next calls.
-func (z *Tokenizer) Token() Token {
-	t := Token{Type: z.tt}
-	switch z.tt {
-	case TextToken, CommentToken, DoctypeToken:
-		t.Data = string(z.Text())
-	case StartTagToken, SelfClosingTagToken, EndTagToken:
-		name, moreAttr := z.TagName()
-		for moreAttr {
-			var key, val []byte
-			key, val, moreAttr = z.TagAttr()
-			t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
-		}
-		if a := atom.Lookup(name); a != 0 {
-			t.DataAtom, t.Data = a, a.String()
-		} else {
-			t.DataAtom, t.Data = 0, string(name)
-		}
-	}
-	return t
-}
-
-// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
-// A value of 0 means unlimited.
-func (z *Tokenizer) SetMaxBuf(n int) {
-	z.maxBuf = n
-}
-
-// NewTokenizer returns a new HTML Tokenizer for the given Reader.
-// The input is assumed to be UTF-8 encoded.
-func NewTokenizer(r io.Reader) *Tokenizer {
-	return NewTokenizerFragment(r, "")
-}
-
-// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
-// tokenizing an existing element's InnerHTML fragment. contextTag is that
-// element's tag, such as "div" or "iframe".
-//
-// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
-// for a <p> tag or a <script> tag.
-//
-// The input is assumed to be UTF-8 encoded.
-func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
-	z := &Tokenizer{
-		r:   r,
-		buf: make([]byte, 0, 4096),
-	}
-	if contextTag != "" {
-		switch s := strings.ToLower(contextTag); s {
-		case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
-			z.rawTag = s
-		}
-	}
-	return z
-}
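The Tokenizer removed above is normally driven with a Next/Token loop, with Err reporting io.EOF at the ordinary end of input. A short, self-contained sketch against the upstream golang.org/x/net/html package (not this vendored copy):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p class="x">Hello <b>world</b></p>`))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			// io.EOF is the normal end of input; anything else is a real error.
			if err := z.Err(); err != io.EOF {
				fmt.Println("tokenize error:", err)
			}
			return
		}
		// Token copies Data and Attr out of the tokenizer's buffer, so the
		// values stay valid after the next call to Next.
		t := z.Token()
		fmt.Printf("%-14v %q\n", tt, t.Data)
	}
}
```

For large or untrusted inputs, SetMaxBuf bounds how much the tokenizer may buffer for a single token; once the limit is exceeded, Next returns an ErrorToken and Err reports ErrBufferExceeded.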
diff --git a/application/source/vendor/gopkg.in/yaml.v3/LICENSE b/application/source/vendor/gopkg.in/yaml.v3/LICENSE
deleted file mode 100644
index 2683e4bb1f24c14aa2791e6d48ce0ecf3d8ab756..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/LICENSE
+++ /dev/null
@@ -1,50 +0,0 @@
-
-This project is covered by two different licenses: MIT and Apache.
-
-#### MIT License ####
-
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original MIT license, with the additional
-copyright starting in 2011 when the project was ported over:
-
-    apic.go emitterc.go parserc.go readerc.go scannerc.go
-    writerc.go yamlh.go yamlprivateh.go
-
-Copyright (c) 2006-2010 Kirill Simonov
-Copyright (c) 2006-2011 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-#### Apache License ####
-
-All the remaining project files are covered by the Apache license:
-
-Copyright (c) 2011-2019 Canonical Ltd
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/application/source/vendor/gopkg.in/yaml.v3/NOTICE b/application/source/vendor/gopkg.in/yaml.v3/NOTICE
deleted file mode 100644
index 866d74a7ad79165312a2ce3904b4bdb53e6aedf7..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/NOTICE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2011-2016 Canonical Ltd.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/application/source/vendor/gopkg.in/yaml.v3/README.md b/application/source/vendor/gopkg.in/yaml.v3/README.md
deleted file mode 100644
index 08eb1babddfac3d8f4e006448496d0e0d1f8d720..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/README.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.2, but preserves some behavior
-from 1.1 for backwards compatibility.
-
-Specifically, as of v3 of the yaml package:
-
- - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
-   decoded into a typed bool value. Otherwise they behave as a string. Booleans
-   in YAML 1.2 are _true/false_ only.
- - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
-   as specified in YAML 1.2, because most parsers still use the old format.
-   Octals in the _0o777_ format are supported though, so new files work.
- - Does not support base-60 floats. These are gone from YAML 1.2, and were
-   actually never supported by this package as it's clearly a poor choice.
-
-The package also supports anchors, tags, map merging and the other common
-YAML constructs. Multi-document unmarshalling is not yet implemented.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v3*.
-
-To install it, run:
-
-    go get gopkg.in/yaml.v3
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
-  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
-
-API stability
--------------
-
-The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the MIT and Apache License 2.0 licenses.
-Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
-        "fmt"
-        "log"
-
-        "gopkg.in/yaml.v3"
-)
-
-var data = `
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-`
-
-// Note: struct fields must be public in order for unmarshal to
-// correctly populate the data.
-type T struct {
-        A string
-        B struct {
-                RenamedC int   `yaml:"c"`
-                D        []int `yaml:",flow"`
-        }
-}
-
-func main() {
-        t := T{}
-    
-        err := yaml.Unmarshal([]byte(data), &t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t:\n%v\n\n", t)
-    
-        d, err := yaml.Marshal(&t)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- t dump:\n%s\n\n", string(d))
-    
-        m := make(map[interface{}]interface{})
-    
-        err = yaml.Unmarshal([]byte(data), &m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m:\n%v\n\n", m)
-    
-        d, err = yaml.Marshal(&m)
-        if err != nil {
-                log.Fatalf("error: %v", err)
-        }
-        fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
-
-This example will generate the following output:
-
-```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
-  c: 2
-  d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
-  c: 2
-  d:
-  - 3
-  - 4
-```
-
diff --git a/application/source/vendor/gopkg.in/yaml.v3/apic.go b/application/source/vendor/gopkg.in/yaml.v3/apic.go
deleted file mode 100644
index ae7d049f182ae2419ded608e4c763487c99dff52..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/apic.go
+++ /dev/null
@@ -1,747 +0,0 @@
-// 
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-// 
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-// 
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-// 
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
-	"io"
-)
-
-func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
-	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
-
-	// Check if we can move the queue at the beginning of the buffer.
-	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
-		if parser.tokens_head != len(parser.tokens) {
-			copy(parser.tokens, parser.tokens[parser.tokens_head:])
-		}
-		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
-		parser.tokens_head = 0
-	}
-	parser.tokens = append(parser.tokens, *token)
-	if pos < 0 {
-		return
-	}
-	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
-	parser.tokens[parser.tokens_head+pos] = *token
-}
-
-// Create a new parser object.
-func yaml_parser_initialize(parser *yaml_parser_t) bool {
-	*parser = yaml_parser_t{
-		raw_buffer: make([]byte, 0, input_raw_buffer_size),
-		buffer:     make([]byte, 0, input_buffer_size),
-	}
-	return true
-}
-
-// Destroy a parser object.
-func yaml_parser_delete(parser *yaml_parser_t) {
-	*parser = yaml_parser_t{}
-}
-
-// String read handler.
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
-	if parser.input_pos == len(parser.input) {
-		return 0, io.EOF
-	}
-	n = copy(buffer, parser.input[parser.input_pos:])
-	parser.input_pos += n
-	return n, nil
-}
-
-// Reader read handler.
-func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
-	return parser.input_reader.Read(buffer)
-}
-
-// Set a string input.
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
-	if parser.read_handler != nil {
-		panic("must set the input source only once")
-	}
-	parser.read_handler = yaml_string_read_handler
-	parser.input = input
-	parser.input_pos = 0
-}
-
-// Set a file input.
-func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
-	if parser.read_handler != nil {
-		panic("must set the input source only once")
-	}
-	parser.read_handler = yaml_reader_read_handler
-	parser.input_reader = r
-}
-
-// Set the source encoding.
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
-	if parser.encoding != yaml_ANY_ENCODING {
-		panic("must set the encoding only once")
-	}
-	parser.encoding = encoding
-}
-
-// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) {
-	*emitter = yaml_emitter_t{
-		buffer:     make([]byte, output_buffer_size),
-		raw_buffer: make([]byte, 0, output_raw_buffer_size),
-		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
-		events:     make([]yaml_event_t, 0, initial_queue_size),
-		best_width: -1,
-	}
-}
-
-// Destroy an emitter object.
-func yaml_emitter_delete(emitter *yaml_emitter_t) {
-	*emitter = yaml_emitter_t{}
-}
-
-// String write handler.
-func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
-	return nil
-}
-
-// yaml_writer_write_handler uses emitter.output_writer to write the
-// emitted text.
-func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	_, err := emitter.output_writer.Write(buffer)
-	return err
-}
-
-// Set a string output.
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
-	if emitter.write_handler != nil {
-		panic("must set the output target only once")
-	}
-	emitter.write_handler = yaml_string_write_handler
-	emitter.output_buffer = output_buffer
-}
-
-// Set a file output.
-func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
-	if emitter.write_handler != nil {
-		panic("must set the output target only once")
-	}
-	emitter.write_handler = yaml_writer_write_handler
-	emitter.output_writer = w
-}
-
-// Set the output encoding.
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
-	if emitter.encoding != yaml_ANY_ENCODING {
-		panic("must set the output encoding only once")
-	}
-	emitter.encoding = encoding
-}
-
-// Set the canonical output style.
-func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
-	emitter.canonical = canonical
-}
-
-// Set the indentation increment.
-func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
-	if indent < 2 || indent > 9 {
-		indent = 2
-	}
-	emitter.best_indent = indent
-}
-
-// Set the preferred line width.
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
-	if width < 0 {
-		width = -1
-	}
-	emitter.best_width = width
-}
-
-// Set if unescaped non-ASCII characters are allowed.
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
-	emitter.unicode = unicode
-}
-
-// Set the preferred line break character.
-func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
-	emitter.line_break = line_break
-}
-
-///*
-// * Destroy a token object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_token_delete(yaml_token_t *token)
-//{
-//    assert(token);  // Non-NULL token object expected.
-//
-//    switch (token.type)
-//    {
-//        case YAML_TAG_DIRECTIVE_TOKEN:
-//            yaml_free(token.data.tag_directive.handle);
-//            yaml_free(token.data.tag_directive.prefix);
-//            break;
-//
-//        case YAML_ALIAS_TOKEN:
-//            yaml_free(token.data.alias.value);
-//            break;
-//
-//        case YAML_ANCHOR_TOKEN:
-//            yaml_free(token.data.anchor.value);
-//            break;
-//
-//        case YAML_TAG_TOKEN:
-//            yaml_free(token.data.tag.handle);
-//            yaml_free(token.data.tag.suffix);
-//            break;
-//
-//        case YAML_SCALAR_TOKEN:
-//            yaml_free(token.data.scalar.value);
-//            break;
-//
-//        default:
-//            break;
-//    }
-//
-//    memset(token, 0, sizeof(yaml_token_t));
-//}
-//
-///*
-// * Check if a string is a valid UTF-8 sequence.
-// *
-// * Check 'reader.c' for more details on UTF-8 encoding.
-// */
-//
-//static int
-//yaml_check_utf8(yaml_char_t *start, size_t length)
-//{
-//    yaml_char_t *end = start+length;
-//    yaml_char_t *pointer = start;
-//
-//    while (pointer < end) {
-//        unsigned char octet;
-//        unsigned int width;
-//        unsigned int value;
-//        size_t k;
-//
-//        octet = pointer[0];
-//        width = (octet & 0x80) == 0x00 ? 1 :
-//                (octet & 0xE0) == 0xC0 ? 2 :
-//                (octet & 0xF0) == 0xE0 ? 3 :
-//                (octet & 0xF8) == 0xF0 ? 4 : 0;
-//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
-//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
-//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
-//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
-//        if (!width) return 0;
-//        if (pointer+width > end) return 0;
-//        for (k = 1; k < width; k ++) {
-//            octet = pointer[k];
-//            if ((octet & 0xC0) != 0x80) return 0;
-//            value = (value << 6) + (octet & 0x3F);
-//        }
-//        if (!((width == 1) ||
-//            (width == 2 && value >= 0x80) ||
-//            (width == 3 && value >= 0x800) ||
-//            (width == 4 && value >= 0x10000))) return 0;
-//
-//        pointer += width;
-//    }
-//
-//    return 1;
-//}
-//
-
-// Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
-	*event = yaml_event_t{
-		typ:      yaml_STREAM_START_EVENT,
-		encoding: encoding,
-	}
-}
-
-// Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) {
-	*event = yaml_event_t{
-		typ: yaml_STREAM_END_EVENT,
-	}
-}
-
-// Create DOCUMENT-START.
-func yaml_document_start_event_initialize(
-	event *yaml_event_t,
-	version_directive *yaml_version_directive_t,
-	tag_directives []yaml_tag_directive_t,
-	implicit bool,
-) {
-	*event = yaml_event_t{
-		typ:               yaml_DOCUMENT_START_EVENT,
-		version_directive: version_directive,
-		tag_directives:    tag_directives,
-		implicit:          implicit,
-	}
-}
-
-// Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
-	*event = yaml_event_t{
-		typ:      yaml_DOCUMENT_END_EVENT,
-		implicit: implicit,
-	}
-}
-
-// Create ALIAS.
-func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
-	*event = yaml_event_t{
-		typ:    yaml_ALIAS_EVENT,
-		anchor: anchor,
-	}
-	return true
-}
-
-// Create SCALAR.
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
-	*event = yaml_event_t{
-		typ:             yaml_SCALAR_EVENT,
-		anchor:          anchor,
-		tag:             tag,
-		value:           value,
-		implicit:        plain_implicit,
-		quoted_implicit: quoted_implicit,
-		style:           yaml_style_t(style),
-	}
-	return true
-}
-
-// Create SEQUENCE-START.
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
-	*event = yaml_event_t{
-		typ:      yaml_SEQUENCE_START_EVENT,
-		anchor:   anchor,
-		tag:      tag,
-		implicit: implicit,
-		style:    yaml_style_t(style),
-	}
-	return true
-}
-
-// Create SEQUENCE-END.
-func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
-	*event = yaml_event_t{
-		typ: yaml_SEQUENCE_END_EVENT,
-	}
-	return true
-}
-
-// Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
-	*event = yaml_event_t{
-		typ:      yaml_MAPPING_START_EVENT,
-		anchor:   anchor,
-		tag:      tag,
-		implicit: implicit,
-		style:    yaml_style_t(style),
-	}
-}
-
-// Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) {
-	*event = yaml_event_t{
-		typ: yaml_MAPPING_END_EVENT,
-	}
-}
-
-// Destroy an event object.
-func yaml_event_delete(event *yaml_event_t) {
-	*event = yaml_event_t{}
-}
-
-///*
-// * Create a document object.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_initialize(document *yaml_document_t,
-//        version_directive *yaml_version_directive_t,
-//        tag_directives_start *yaml_tag_directive_t,
-//        tag_directives_end *yaml_tag_directive_t,
-//        start_implicit int, end_implicit int)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//    struct {
-//        start *yaml_node_t
-//        end *yaml_node_t
-//        top *yaml_node_t
-//    } nodes = { NULL, NULL, NULL }
-//    version_directive_copy *yaml_version_directive_t = NULL
-//    struct {
-//        start *yaml_tag_directive_t
-//        end *yaml_tag_directive_t
-//        top *yaml_tag_directive_t
-//    } tag_directives_copy = { NULL, NULL, NULL }
-//    value yaml_tag_directive_t = { NULL, NULL }
-//    mark yaml_mark_t = { 0, 0, 0 }
-//
-//    assert(document) // Non-NULL document object is expected.
-//    assert((tag_directives_start && tag_directives_end) ||
-//            (tag_directives_start == tag_directives_end))
-//                            // Valid tag directives are expected.
-//
-//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
-//
-//    if (version_directive) {
-//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
-//        if (!version_directive_copy) goto error
-//        version_directive_copy.major = version_directive.major
-//        version_directive_copy.minor = version_directive.minor
-//    }
-//
-//    if (tag_directives_start != tag_directives_end) {
-//        tag_directive *yaml_tag_directive_t
-//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
-//            goto error
-//        for (tag_directive = tag_directives_start
-//                tag_directive != tag_directives_end; tag_directive ++) {
-//            assert(tag_directive.handle)
-//            assert(tag_directive.prefix)
-//            if (!yaml_check_utf8(tag_directive.handle,
-//                        strlen((char *)tag_directive.handle)))
-//                goto error
-//            if (!yaml_check_utf8(tag_directive.prefix,
-//                        strlen((char *)tag_directive.prefix)))
-//                goto error
-//            value.handle = yaml_strdup(tag_directive.handle)
-//            value.prefix = yaml_strdup(tag_directive.prefix)
-//            if (!value.handle || !value.prefix) goto error
-//            if (!PUSH(&context, tag_directives_copy, value))
-//                goto error
-//            value.handle = NULL
-//            value.prefix = NULL
-//        }
-//    }
-//
-//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
-//            tag_directives_copy.start, tag_directives_copy.top,
-//            start_implicit, end_implicit, mark, mark)
-//
-//    return 1
-//
-//error:
-//    STACK_DEL(&context, nodes)
-//    yaml_free(version_directive_copy)
-//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
-//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
-//        yaml_free(value.handle)
-//        yaml_free(value.prefix)
-//    }
-//    STACK_DEL(&context, tag_directives_copy)
-//    yaml_free(value.handle)
-//    yaml_free(value.prefix)
-//
-//    return 0
-//}
-//
-///*
-// * Destroy a document object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_document_delete(document *yaml_document_t)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//    tag_directive *yaml_tag_directive_t
-//
-//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
-//
-//    assert(document) // Non-NULL document object is expected.
-//
-//    while (!STACK_EMPTY(&context, document.nodes)) {
-//        node yaml_node_t = POP(&context, document.nodes)
-//        yaml_free(node.tag)
-//        switch (node.type) {
-//            case YAML_SCALAR_NODE:
-//                yaml_free(node.data.scalar.value)
-//                break
-//            case YAML_SEQUENCE_NODE:
-//                STACK_DEL(&context, node.data.sequence.items)
-//                break
-//            case YAML_MAPPING_NODE:
-//                STACK_DEL(&context, node.data.mapping.pairs)
-//                break
-//            default:
-//                assert(0) // Should not happen.
-//        }
-//    }
-//    STACK_DEL(&context, document.nodes)
-//
-//    yaml_free(document.version_directive)
-//    for (tag_directive = document.tag_directives.start
-//            tag_directive != document.tag_directives.end
-//            tag_directive++) {
-//        yaml_free(tag_directive.handle)
-//        yaml_free(tag_directive.prefix)
-//    }
-//    yaml_free(document.tag_directives.start)
-//
-//    memset(document, 0, sizeof(yaml_document_t))
-//}
-//
-///**
-// * Get a document node.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_node(document *yaml_document_t, index int)
-//{
-//    assert(document) // Non-NULL document object is expected.
-//
-//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
-//        return document.nodes.start + index - 1
-//    }
-//    return NULL
-//}
-//
-///**
-// * Get the root object.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_root_node(document *yaml_document_t)
-//{
-//    assert(document) // Non-NULL document object is expected.
-//
-//    if (document.nodes.top != document.nodes.start) {
-//        return document.nodes.start
-//    }
-//    return NULL
-//}
-//
-///*
-// * Add a scalar node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_scalar(document *yaml_document_t,
-//        tag *yaml_char_t, value *yaml_char_t, length int,
-//        style yaml_scalar_style_t)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//    mark yaml_mark_t = { 0, 0, 0 }
-//    tag_copy *yaml_char_t = NULL
-//    value_copy *yaml_char_t = NULL
-//    node yaml_node_t
-//
-//    assert(document) // Non-NULL document object is expected.
-//    assert(value) // Non-NULL value is expected.
-//
-//    if (!tag) {
-//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
-//    }
-//
-//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-//    tag_copy = yaml_strdup(tag)
-//    if (!tag_copy) goto error
-//
-//    if (length < 0) {
-//        length = strlen((char *)value)
-//    }
-//
-//    if (!yaml_check_utf8(value, length)) goto error
-//    value_copy = yaml_malloc(length+1)
-//    if (!value_copy) goto error
-//    memcpy(value_copy, value, length)
-//    value_copy[length] = '\0'
-//
-//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
-//    if (!PUSH(&context, document.nodes, node)) goto error
-//
-//    return document.nodes.top - document.nodes.start
-//
-//error:
-//    yaml_free(tag_copy)
-//    yaml_free(value_copy)
-//
-//    return 0
-//}
-//
-///*
-// * Add a sequence node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_sequence(document *yaml_document_t,
-//        tag *yaml_char_t, style yaml_sequence_style_t)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//    mark yaml_mark_t = { 0, 0, 0 }
-//    tag_copy *yaml_char_t = NULL
-//    struct {
-//        start *yaml_node_item_t
-//        end *yaml_node_item_t
-//        top *yaml_node_item_t
-//    } items = { NULL, NULL, NULL }
-//    node yaml_node_t
-//
-//    assert(document) // Non-NULL document object is expected.
-//
-//    if (!tag) {
-//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
-//    }
-//
-//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-//    tag_copy = yaml_strdup(tag)
-//    if (!tag_copy) goto error
-//
-//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
-//
-//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
-//            style, mark, mark)
-//    if (!PUSH(&context, document.nodes, node)) goto error
-//
-//    return document.nodes.top - document.nodes.start
-//
-//error:
-//    STACK_DEL(&context, items)
-//    yaml_free(tag_copy)
-//
-//    return 0
-//}
-//
-///*
-// * Add a mapping node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_mapping(document *yaml_document_t,
-//        tag *yaml_char_t, style yaml_mapping_style_t)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//    mark yaml_mark_t = { 0, 0, 0 }
-//    tag_copy *yaml_char_t = NULL
-//    struct {
-//        start *yaml_node_pair_t
-//        end *yaml_node_pair_t
-//        top *yaml_node_pair_t
-//    } pairs = { NULL, NULL, NULL }
-//    node yaml_node_t
-//
-//    assert(document) // Non-NULL document object is expected.
-//
-//    if (!tag) {
-//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
-//    }
-//
-//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-//    tag_copy = yaml_strdup(tag)
-//    if (!tag_copy) goto error
-//
-//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
-//
-//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
-//            style, mark, mark)
-//    if (!PUSH(&context, document.nodes, node)) goto error
-//
-//    return document.nodes.top - document.nodes.start
-//
-//error:
-//    STACK_DEL(&context, pairs)
-//    yaml_free(tag_copy)
-//
-//    return 0
-//}
-//
-///*
-// * Append an item to a sequence node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_sequence_item(document *yaml_document_t,
-//        sequence int, item int)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//
-//    assert(document) // Non-NULL document is required.
-//    assert(sequence > 0
-//            && document.nodes.start + sequence <= document.nodes.top)
-//                            // Valid sequence id is required.
-//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
-//                            // A sequence node is required.
-//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
-//                            // Valid item id is required.
-//
-//    if (!PUSH(&context,
-//                document.nodes.start[sequence-1].data.sequence.items, item))
-//        return 0
-//
-//    return 1
-//}
-//
-///*
-// * Append a pair of a key and a value to a mapping node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_mapping_pair(document *yaml_document_t,
-//        mapping int, key int, value int)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//
-//    pair yaml_node_pair_t
-//
-//    assert(document) // Non-NULL document is required.
-//    assert(mapping > 0
-//            && document.nodes.start + mapping <= document.nodes.top)
-//                            // Valid mapping id is required.
-//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
-//                            // A mapping node is required.
-//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
-//                            // Valid key id is required.
-//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
-//                            // Valid value id is required.
-//
-//    pair.key = key
-//    pair.value = value
-//
-//    if (!PUSH(&context,
-//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
-//        return 0
-//
-//    return 1
-//}
-//
-//
diff --git a/application/source/vendor/gopkg.in/yaml.v3/decode.go b/application/source/vendor/gopkg.in/yaml.v3/decode.go
deleted file mode 100644
index 0173b6982e8437ee6b74c2708fc6c2f082ae650e..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/decode.go
+++ /dev/null
@@ -1,1000 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"math"
-	"reflect"
-	"strconv"
-	"time"
-)
-
-// ----------------------------------------------------------------------------
-// Parser, produces a node tree out of a libyaml event stream.
-
-type parser struct {
-	parser   yaml_parser_t
-	event    yaml_event_t
-	doc      *Node
-	anchors  map[string]*Node
-	doneInit bool
-	textless bool
-}
-
-func newParser(b []byte) *parser {
-	p := parser{}
-	if !yaml_parser_initialize(&p.parser) {
-		panic("failed to initialize YAML emitter")
-	}
-	if len(b) == 0 {
-		b = []byte{'\n'}
-	}
-	yaml_parser_set_input_string(&p.parser, b)
-	return &p
-}
-
-func newParserFromReader(r io.Reader) *parser {
-	p := parser{}
-	if !yaml_parser_initialize(&p.parser) {
-		panic("failed to initialize YAML emitter")
-	}
-	yaml_parser_set_input_reader(&p.parser, r)
-	return &p
-}
-
-func (p *parser) init() {
-	if p.doneInit {
-		return
-	}
-	p.anchors = make(map[string]*Node)
-	p.expect(yaml_STREAM_START_EVENT)
-	p.doneInit = true
-}
-
-func (p *parser) destroy() {
-	if p.event.typ != yaml_NO_EVENT {
-		yaml_event_delete(&p.event)
-	}
-	yaml_parser_delete(&p.parser)
-}
-
-// expect consumes an event from the event stream and
-// checks that it's of the expected type.
-func (p *parser) expect(e yaml_event_type_t) {
-	if p.event.typ == yaml_NO_EVENT {
-		if !yaml_parser_parse(&p.parser, &p.event) {
-			p.fail()
-		}
-	}
-	if p.event.typ == yaml_STREAM_END_EVENT {
-		failf("attempted to go past the end of stream; corrupted value?")
-	}
-	if p.event.typ != e {
-		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
-		p.fail()
-	}
-	yaml_event_delete(&p.event)
-	p.event.typ = yaml_NO_EVENT
-}
-
-// peek peeks at the next event in the event stream,
-// puts the results into p.event and returns the event type.
-func (p *parser) peek() yaml_event_type_t {
-	if p.event.typ != yaml_NO_EVENT {
-		return p.event.typ
-	}
-	// It's curious choice from the underlying API to generally return a
-	// positive result on success, but on this case return true in an error
-	// scenario. This was the source of bugs in the past (issue #666).
-	if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
-		p.fail()
-	}
-	return p.event.typ
-}
-
-func (p *parser) fail() {
-	var where string
-	var line int
-	if p.parser.context_mark.line != 0 {
-		line = p.parser.context_mark.line
-		// Scanner errors don't iterate line before returning error
-		if p.parser.error == yaml_SCANNER_ERROR {
-			line++
-		}
-	} else if p.parser.problem_mark.line != 0 {
-		line = p.parser.problem_mark.line
-		// Scanner errors don't iterate line before returning error
-		if p.parser.error == yaml_SCANNER_ERROR {
-			line++
-		}
-	}
-	if line != 0 {
-		where = "line " + strconv.Itoa(line) + ": "
-	}
-	var msg string
-	if len(p.parser.problem) > 0 {
-		msg = p.parser.problem
-	} else {
-		msg = "unknown problem parsing YAML content"
-	}
-	failf("%s%s", where, msg)
-}
-
-func (p *parser) anchor(n *Node, anchor []byte) {
-	if anchor != nil {
-		n.Anchor = string(anchor)
-		p.anchors[n.Anchor] = n
-	}
-}
-
-func (p *parser) parse() *Node {
-	p.init()
-	switch p.peek() {
-	case yaml_SCALAR_EVENT:
-		return p.scalar()
-	case yaml_ALIAS_EVENT:
-		return p.alias()
-	case yaml_MAPPING_START_EVENT:
-		return p.mapping()
-	case yaml_SEQUENCE_START_EVENT:
-		return p.sequence()
-	case yaml_DOCUMENT_START_EVENT:
-		return p.document()
-	case yaml_STREAM_END_EVENT:
-		// Happens when attempting to decode an empty buffer.
-		return nil
-	case yaml_TAIL_COMMENT_EVENT:
-		panic("internal error: unexpected tail comment event (please report)")
-	default:
-		panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
-	}
-}
-
-func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
-	var style Style
-	if tag != "" && tag != "!" {
-		tag = shortTag(tag)
-		style = TaggedStyle
-	} else if defaultTag != "" {
-		tag = defaultTag
-	} else if kind == ScalarNode {
-		tag, _ = resolve("", value)
-	}
-	n := &Node{
-		Kind:  kind,
-		Tag:   tag,
-		Value: value,
-		Style: style,
-	}
-	if !p.textless {
-		n.Line = p.event.start_mark.line + 1
-		n.Column = p.event.start_mark.column + 1
-		n.HeadComment = string(p.event.head_comment)
-		n.LineComment = string(p.event.line_comment)
-		n.FootComment = string(p.event.foot_comment)
-	}
-	return n
-}
-
-func (p *parser) parseChild(parent *Node) *Node {
-	child := p.parse()
-	parent.Content = append(parent.Content, child)
-	return child
-}
-
-func (p *parser) document() *Node {
-	n := p.node(DocumentNode, "", "", "")
-	p.doc = n
-	p.expect(yaml_DOCUMENT_START_EVENT)
-	p.parseChild(n)
-	if p.peek() == yaml_DOCUMENT_END_EVENT {
-		n.FootComment = string(p.event.foot_comment)
-	}
-	p.expect(yaml_DOCUMENT_END_EVENT)
-	return n
-}
-
-func (p *parser) alias() *Node {
-	n := p.node(AliasNode, "", "", string(p.event.anchor))
-	n.Alias = p.anchors[n.Value]
-	if n.Alias == nil {
-		failf("unknown anchor '%s' referenced", n.Value)
-	}
-	p.expect(yaml_ALIAS_EVENT)
-	return n
-}
-
-func (p *parser) scalar() *Node {
-	var parsedStyle = p.event.scalar_style()
-	var nodeStyle Style
-	switch {
-	case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
-		nodeStyle = DoubleQuotedStyle
-	case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
-		nodeStyle = SingleQuotedStyle
-	case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
-		nodeStyle = LiteralStyle
-	case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
-		nodeStyle = FoldedStyle
-	}
-	var nodeValue = string(p.event.value)
-	var nodeTag = string(p.event.tag)
-	var defaultTag string
-	if nodeStyle == 0 {
-		if nodeValue == "<<" {
-			defaultTag = mergeTag
-		}
-	} else {
-		defaultTag = strTag
-	}
-	n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
-	n.Style |= nodeStyle
-	p.anchor(n, p.event.anchor)
-	p.expect(yaml_SCALAR_EVENT)
-	return n
-}
-
-func (p *parser) sequence() *Node {
-	n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
-	if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
-		n.Style |= FlowStyle
-	}
-	p.anchor(n, p.event.anchor)
-	p.expect(yaml_SEQUENCE_START_EVENT)
-	for p.peek() != yaml_SEQUENCE_END_EVENT {
-		p.parseChild(n)
-	}
-	n.LineComment = string(p.event.line_comment)
-	n.FootComment = string(p.event.foot_comment)
-	p.expect(yaml_SEQUENCE_END_EVENT)
-	return n
-}
-
-func (p *parser) mapping() *Node {
-	n := p.node(MappingNode, mapTag, string(p.event.tag), "")
-	block := true
-	if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
-		block = false
-		n.Style |= FlowStyle
-	}
-	p.anchor(n, p.event.anchor)
-	p.expect(yaml_MAPPING_START_EVENT)
-	for p.peek() != yaml_MAPPING_END_EVENT {
-		k := p.parseChild(n)
-		if block && k.FootComment != "" {
-			// Must be a foot comment for the prior value when being dedented.
-			if len(n.Content) > 2 {
-				n.Content[len(n.Content)-3].FootComment = k.FootComment
-				k.FootComment = ""
-			}
-		}
-		v := p.parseChild(n)
-		if k.FootComment == "" && v.FootComment != "" {
-			k.FootComment = v.FootComment
-			v.FootComment = ""
-		}
-		if p.peek() == yaml_TAIL_COMMENT_EVENT {
-			if k.FootComment == "" {
-				k.FootComment = string(p.event.foot_comment)
-			}
-			p.expect(yaml_TAIL_COMMENT_EVENT)
-		}
-	}
-	n.LineComment = string(p.event.line_comment)
-	n.FootComment = string(p.event.foot_comment)
-	if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
-		n.Content[len(n.Content)-2].FootComment = n.FootComment
-		n.FootComment = ""
-	}
-	p.expect(yaml_MAPPING_END_EVENT)
-	return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
-
-type decoder struct {
-	doc     *Node
-	aliases map[*Node]bool
-	terrors []string
-
-	stringMapType  reflect.Type
-	generalMapType reflect.Type
-
-	knownFields bool
-	uniqueKeys  bool
-	decodeCount int
-	aliasCount  int
-	aliasDepth  int
-
-	mergedFields map[interface{}]bool
-}
-
-var (
-	nodeType       = reflect.TypeOf(Node{})
-	durationType   = reflect.TypeOf(time.Duration(0))
-	stringMapType  = reflect.TypeOf(map[string]interface{}{})
-	generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
-	ifaceType      = generalMapType.Elem()
-	timeType       = reflect.TypeOf(time.Time{})
-	ptrTimeType    = reflect.TypeOf(&time.Time{})
-)
-
-func newDecoder() *decoder {
-	d := &decoder{
-		stringMapType:  stringMapType,
-		generalMapType: generalMapType,
-		uniqueKeys:     true,
-	}
-	d.aliases = make(map[*Node]bool)
-	return d
-}
-
-func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
-	if n.Tag != "" {
-		tag = n.Tag
-	}
-	value := n.Value
-	if tag != seqTag && tag != mapTag {
-		if len(value) > 10 {
-			value = " `" + value[:7] + "...`"
-		} else {
-			value = " `" + value + "`"
-		}
-	}
-	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
-}
-
-func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
-	err := u.UnmarshalYAML(n)
-	if e, ok := err.(*TypeError); ok {
-		d.terrors = append(d.terrors, e.Errors...)
-		return false
-	}
-	if err != nil {
-		fail(err)
-	}
-	return true
-}
-
-func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
-	terrlen := len(d.terrors)
-	err := u.UnmarshalYAML(func(v interface{}) (err error) {
-		defer handleErr(&err)
-		d.unmarshal(n, reflect.ValueOf(v))
-		if len(d.terrors) > terrlen {
-			issues := d.terrors[terrlen:]
-			d.terrors = d.terrors[:terrlen]
-			return &TypeError{issues}
-		}
-		return nil
-	})
-	if e, ok := err.(*TypeError); ok {
-		d.terrors = append(d.terrors, e.Errors...)
-		return false
-	}
-	if err != nil {
-		fail(err)
-	}
-	return true
-}
-
-// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
-// if a value is found to implement it.
-// It returns the initialized and dereferenced out value, whether
-// unmarshalling was already done by UnmarshalYAML, and if so whether
-// its types unmarshalled appropriately.
-//
-// If n holds a null value, prepare returns before doing anything.
-func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
-	if n.ShortTag() == nullTag {
-		return out, false, false
-	}
-	again := true
-	for again {
-		again = false
-		if out.Kind() == reflect.Ptr {
-			if out.IsNil() {
-				out.Set(reflect.New(out.Type().Elem()))
-			}
-			out = out.Elem()
-			again = true
-		}
-		if out.CanAddr() {
-			outi := out.Addr().Interface()
-			if u, ok := outi.(Unmarshaler); ok {
-				good = d.callUnmarshaler(n, u)
-				return out, true, good
-			}
-			if u, ok := outi.(obsoleteUnmarshaler); ok {
-				good = d.callObsoleteUnmarshaler(n, u)
-				return out, true, good
-			}
-		}
-	}
-	return out, false, false
-}
-
-func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
-	if n.ShortTag() == nullTag {
-		return reflect.Value{}
-	}
-	for _, num := range index {
-		for {
-			if v.Kind() == reflect.Ptr {
-				if v.IsNil() {
-					v.Set(reflect.New(v.Type().Elem()))
-				}
-				v = v.Elem()
-				continue
-			}
-			break
-		}
-		v = v.Field(num)
-	}
-	return v
-}
-
-const (
-	// 400,000 decode operations is ~500kb of dense object declarations, or
-	// ~5kb of dense object declarations with 10000% alias expansion
-	alias_ratio_range_low = 400000
-
-	// 4,000,000 decode operations is ~5MB of dense object declarations, or
-	// ~4.5MB of dense object declarations with 10% alias expansion
-	alias_ratio_range_high = 4000000
-
-	// alias_ratio_range is the range over which we scale allowed alias ratios
-	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
-)
-
-func allowedAliasRatio(decodeCount int) float64 {
-	switch {
-	case decodeCount <= alias_ratio_range_low:
-		// allow 99% to come from alias expansion for small-to-medium documents
-		return 0.99
-	case decodeCount >= alias_ratio_range_high:
-		// allow 10% to come from alias expansion for very large documents
-		return 0.10
-	default:
-		// scale smoothly from 99% down to 10% over the range.
-		// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
-		// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
-		return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
-	}
-}
-
-func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
-	d.decodeCount++
-	if d.aliasDepth > 0 {
-		d.aliasCount++
-	}
-	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
-		failf("document contains excessive aliasing")
-	}
-	if out.Type() == nodeType {
-		out.Set(reflect.ValueOf(n).Elem())
-		return true
-	}
-	switch n.Kind {
-	case DocumentNode:
-		return d.document(n, out)
-	case AliasNode:
-		return d.alias(n, out)
-	}
-	out, unmarshaled, good := d.prepare(n, out)
-	if unmarshaled {
-		return good
-	}
-	switch n.Kind {
-	case ScalarNode:
-		good = d.scalar(n, out)
-	case MappingNode:
-		good = d.mapping(n, out)
-	case SequenceNode:
-		good = d.sequence(n, out)
-	case 0:
-		if n.IsZero() {
-			return d.null(out)
-		}
-		fallthrough
-	default:
-		failf("cannot decode node with unknown kind %d", n.Kind)
-	}
-	return good
-}
-
-func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
-	if len(n.Content) == 1 {
-		d.doc = n
-		d.unmarshal(n.Content[0], out)
-		return true
-	}
-	return false
-}
-
-func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
-	if d.aliases[n] {
-		// TODO this could actually be allowed in some circumstances.
-		failf("anchor '%s' value contains itself", n.Value)
-	}
-	d.aliases[n] = true
-	d.aliasDepth++
-	good = d.unmarshal(n.Alias, out)
-	d.aliasDepth--
-	delete(d.aliases, n)
-	return good
-}
-
-var zeroValue reflect.Value
-
-func resetMap(out reflect.Value) {
-	for _, k := range out.MapKeys() {
-		out.SetMapIndex(k, zeroValue)
-	}
-}
-
-func (d *decoder) null(out reflect.Value) bool {
-	if out.CanAddr() {
-		switch out.Kind() {
-		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
-			out.Set(reflect.Zero(out.Type()))
-			return true
-		}
-	}
-	return false
-}
-
-func (d *decoder) scalar(n *Node, out reflect.Value) bool {
-	var tag string
-	var resolved interface{}
-	if n.indicatedString() {
-		tag = strTag
-		resolved = n.Value
-	} else {
-		tag, resolved = resolve(n.Tag, n.Value)
-		if tag == binaryTag {
-			data, err := base64.StdEncoding.DecodeString(resolved.(string))
-			if err != nil {
-				failf("!!binary value contains invalid base64 data")
-			}
-			resolved = string(data)
-		}
-	}
-	if resolved == nil {
-		return d.null(out)
-	}
-	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
-		// We've resolved to exactly the type we want, so use that.
-		out.Set(resolvedv)
-		return true
-	}
-	// Perhaps we can use the value as a TextUnmarshaler to
-	// set its value.
-	if out.CanAddr() {
-		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
-		if ok {
-			var text []byte
-			if tag == binaryTag {
-				text = []byte(resolved.(string))
-			} else {
-				// We let any value be unmarshaled into TextUnmarshaler.
-				// That might be more lax than we'd like, but the
-				// TextUnmarshaler itself should bowl out any dubious values.
-				text = []byte(n.Value)
-			}
-			err := u.UnmarshalText(text)
-			if err != nil {
-				fail(err)
-			}
-			return true
-		}
-	}
-	switch out.Kind() {
-	case reflect.String:
-		if tag == binaryTag {
-			out.SetString(resolved.(string))
-			return true
-		}
-		out.SetString(n.Value)
-		return true
-	case reflect.Interface:
-		out.Set(reflect.ValueOf(resolved))
-		return true
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		// This used to work in v2, but it's very unfriendly.
-		isDuration := out.Type() == durationType
-
-		switch resolved := resolved.(type) {
-		case int:
-			if !isDuration && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				return true
-			}
-		case int64:
-			if !isDuration && !out.OverflowInt(resolved) {
-				out.SetInt(resolved)
-				return true
-			}
-		case uint64:
-			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				return true
-			}
-		case float64:
-			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				return true
-			}
-		case string:
-			if out.Type() == durationType {
-				d, err := time.ParseDuration(resolved)
-				if err == nil {
-					out.SetInt(int64(d))
-					return true
-				}
-			}
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		switch resolved := resolved.(type) {
-		case int:
-			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		case int64:
-			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		case uint64:
-			if !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		case float64:
-			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		}
-	case reflect.Bool:
-		switch resolved := resolved.(type) {
-		case bool:
-			out.SetBool(resolved)
-			return true
-		case string:
-			// This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
-			// It only works if explicitly attempting to unmarshal into a typed bool value.
-			switch resolved {
-			case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
-				out.SetBool(true)
-				return true
-			case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
-				out.SetBool(false)
-				return true
-			}
-		}
-	case reflect.Float32, reflect.Float64:
-		switch resolved := resolved.(type) {
-		case int:
-			out.SetFloat(float64(resolved))
-			return true
-		case int64:
-			out.SetFloat(float64(resolved))
-			return true
-		case uint64:
-			out.SetFloat(float64(resolved))
-			return true
-		case float64:
-			out.SetFloat(resolved)
-			return true
-		}
-	case reflect.Struct:
-		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
-			out.Set(resolvedv)
-			return true
-		}
-	case reflect.Ptr:
-		panic("yaml internal error: please report the issue")
-	}
-	d.terror(n, tag, out)
-	return false
-}
-
-func settableValueOf(i interface{}) reflect.Value {
-	v := reflect.ValueOf(i)
-	sv := reflect.New(v.Type()).Elem()
-	sv.Set(v)
-	return sv
-}
-
-func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
-	l := len(n.Content)
-
-	var iface reflect.Value
-	switch out.Kind() {
-	case reflect.Slice:
-		out.Set(reflect.MakeSlice(out.Type(), l, l))
-	case reflect.Array:
-		if l != out.Len() {
-			failf("invalid array: want %d elements but got %d", out.Len(), l)
-		}
-	case reflect.Interface:
-		// No type hints. Will have to use a generic sequence.
-		iface = out
-		out = settableValueOf(make([]interface{}, l))
-	default:
-		d.terror(n, seqTag, out)
-		return false
-	}
-	et := out.Type().Elem()
-
-	j := 0
-	for i := 0; i < l; i++ {
-		e := reflect.New(et).Elem()
-		if ok := d.unmarshal(n.Content[i], e); ok {
-			out.Index(j).Set(e)
-			j++
-		}
-	}
-	if out.Kind() != reflect.Array {
-		out.Set(out.Slice(0, j))
-	}
-	if iface.IsValid() {
-		iface.Set(out)
-	}
-	return true
-}
-
-func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
-	l := len(n.Content)
-	if d.uniqueKeys {
-		nerrs := len(d.terrors)
-		for i := 0; i < l; i += 2 {
-			ni := n.Content[i]
-			for j := i + 2; j < l; j += 2 {
-				nj := n.Content[j]
-				if ni.Kind == nj.Kind && ni.Value == nj.Value {
-					d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
-				}
-			}
-		}
-		if len(d.terrors) > nerrs {
-			return false
-		}
-	}
-	switch out.Kind() {
-	case reflect.Struct:
-		return d.mappingStruct(n, out)
-	case reflect.Map:
-		// okay
-	case reflect.Interface:
-		iface := out
-		if isStringMap(n) {
-			out = reflect.MakeMap(d.stringMapType)
-		} else {
-			out = reflect.MakeMap(d.generalMapType)
-		}
-		iface.Set(out)
-	default:
-		d.terror(n, mapTag, out)
-		return false
-	}
-
-	outt := out.Type()
-	kt := outt.Key()
-	et := outt.Elem()
-
-	stringMapType := d.stringMapType
-	generalMapType := d.generalMapType
-	if outt.Elem() == ifaceType {
-		if outt.Key().Kind() == reflect.String {
-			d.stringMapType = outt
-		} else if outt.Key() == ifaceType {
-			d.generalMapType = outt
-		}
-	}
-
-	mergedFields := d.mergedFields
-	d.mergedFields = nil
-
-	var mergeNode *Node
-
-	mapIsNew := false
-	if out.IsNil() {
-		out.Set(reflect.MakeMap(outt))
-		mapIsNew = true
-	}
-	for i := 0; i < l; i += 2 {
-		if isMerge(n.Content[i]) {
-			mergeNode = n.Content[i+1]
-			continue
-		}
-		k := reflect.New(kt).Elem()
-		if d.unmarshal(n.Content[i], k) {
-			if mergedFields != nil {
-				ki := k.Interface()
-				if mergedFields[ki] {
-					continue
-				}
-				mergedFields[ki] = true
-			}
-			kkind := k.Kind()
-			if kkind == reflect.Interface {
-				kkind = k.Elem().Kind()
-			}
-			if kkind == reflect.Map || kkind == reflect.Slice {
-				failf("invalid map key: %#v", k.Interface())
-			}
-			e := reflect.New(et).Elem()
-			if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
-				out.SetMapIndex(k, e)
-			}
-		}
-	}
-
-	d.mergedFields = mergedFields
-	if mergeNode != nil {
-		d.merge(n, mergeNode, out)
-	}
-
-	d.stringMapType = stringMapType
-	d.generalMapType = generalMapType
-	return true
-}
-
-func isStringMap(n *Node) bool {
-	if n.Kind != MappingNode {
-		return false
-	}
-	l := len(n.Content)
-	for i := 0; i < l; i += 2 {
-		shortTag := n.Content[i].ShortTag()
-		if shortTag != strTag && shortTag != mergeTag {
-			return false
-		}
-	}
-	return true
-}
-
-func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
-	sinfo, err := getStructInfo(out.Type())
-	if err != nil {
-		panic(err)
-	}
-
-	var inlineMap reflect.Value
-	var elemType reflect.Type
-	if sinfo.InlineMap != -1 {
-		inlineMap = out.Field(sinfo.InlineMap)
-		elemType = inlineMap.Type().Elem()
-	}
-
-	for _, index := range sinfo.InlineUnmarshalers {
-		field := d.fieldByIndex(n, out, index)
-		d.prepare(n, field)
-	}
-
-	mergedFields := d.mergedFields
-	d.mergedFields = nil
-	var mergeNode *Node
-	var doneFields []bool
-	if d.uniqueKeys {
-		doneFields = make([]bool, len(sinfo.FieldsList))
-	}
-	name := settableValueOf("")
-	l := len(n.Content)
-	for i := 0; i < l; i += 2 {
-		ni := n.Content[i]
-		if isMerge(ni) {
-			mergeNode = n.Content[i+1]
-			continue
-		}
-		if !d.unmarshal(ni, name) {
-			continue
-		}
-		sname := name.String()
-		if mergedFields != nil {
-			if mergedFields[sname] {
-				continue
-			}
-			mergedFields[sname] = true
-		}
-		if info, ok := sinfo.FieldsMap[sname]; ok {
-			if d.uniqueKeys {
-				if doneFields[info.Id] {
-					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
-					continue
-				}
-				doneFields[info.Id] = true
-			}
-			var field reflect.Value
-			if info.Inline == nil {
-				field = out.Field(info.Num)
-			} else {
-				field = d.fieldByIndex(n, out, info.Inline)
-			}
-			d.unmarshal(n.Content[i+1], field)
-		} else if sinfo.InlineMap != -1 {
-			if inlineMap.IsNil() {
-				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
-			}
-			value := reflect.New(elemType).Elem()
-			d.unmarshal(n.Content[i+1], value)
-			inlineMap.SetMapIndex(name, value)
-		} else if d.knownFields {
-			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
-		}
-	}
-
-	d.mergedFields = mergedFields
-	if mergeNode != nil {
-		d.merge(n, mergeNode, out)
-	}
-	return true
-}
-
-func failWantMap() {
-	failf("map merge requires map or sequence of maps as the value")
-}
-
-func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
-	mergedFields := d.mergedFields
-	if mergedFields == nil {
-		d.mergedFields = make(map[interface{}]bool)
-		for i := 0; i < len(parent.Content); i += 2 {
-			k := reflect.New(ifaceType).Elem()
-			if d.unmarshal(parent.Content[i], k) {
-				d.mergedFields[k.Interface()] = true
-			}
-		}
-	}
-
-	switch merge.Kind {
-	case MappingNode:
-		d.unmarshal(merge, out)
-	case AliasNode:
-		if merge.Alias != nil && merge.Alias.Kind != MappingNode {
-			failWantMap()
-		}
-		d.unmarshal(merge, out)
-	case SequenceNode:
-		for i := 0; i < len(merge.Content); i++ {
-			ni := merge.Content[i]
-			if ni.Kind == AliasNode {
-				if ni.Alias != nil && ni.Alias.Kind != MappingNode {
-					failWantMap()
-				}
-			} else if ni.Kind != MappingNode {
-				failWantMap()
-			}
-			d.unmarshal(ni, out)
-		}
-	default:
-		failWantMap()
-	}
-
-	d.mergedFields = mergedFields
-}
-
-func isMerge(n *Node) bool {
-	return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/emitterc.go b/application/source/vendor/gopkg.in/yaml.v3/emitterc.go
deleted file mode 100644
index 0f47c9ca8addf8e9d2e454e02842927ae825d0e9..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/emitterc.go
+++ /dev/null
@@ -1,2020 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Flush the buffer if needed.
-func flush(emitter *yaml_emitter_t) bool {
-	if emitter.buffer_pos+5 >= len(emitter.buffer) {
-		return yaml_emitter_flush(emitter)
-	}
-	return true
-}
-
-// Put a character to the output buffer.
-func put(emitter *yaml_emitter_t, value byte) bool {
-	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
-		return false
-	}
-	emitter.buffer[emitter.buffer_pos] = value
-	emitter.buffer_pos++
-	emitter.column++
-	return true
-}
-
-// Put a line break to the output buffer.
-func put_break(emitter *yaml_emitter_t) bool {
-	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
-		return false
-	}
-	switch emitter.line_break {
-	case yaml_CR_BREAK:
-		emitter.buffer[emitter.buffer_pos] = '\r'
-		emitter.buffer_pos += 1
-	case yaml_LN_BREAK:
-		emitter.buffer[emitter.buffer_pos] = '\n'
-		emitter.buffer_pos += 1
-	case yaml_CRLN_BREAK:
-		emitter.buffer[emitter.buffer_pos+0] = '\r'
-		emitter.buffer[emitter.buffer_pos+1] = '\n'
-		emitter.buffer_pos += 2
-	default:
-		panic("unknown line break setting")
-	}
-	if emitter.column == 0 {
-		emitter.space_above = true
-	}
-	emitter.column = 0
-	emitter.line++
-	// [Go] Do this here and below and drop from everywhere else (see commented lines).
-	emitter.indention = true
-	return true
-}
-
-// Copy a character from a string into buffer.
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
-	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
-		return false
-	}
-	p := emitter.buffer_pos
-	w := width(s[*i])
-	switch w {
-	case 4:
-		emitter.buffer[p+3] = s[*i+3]
-		fallthrough
-	case 3:
-		emitter.buffer[p+2] = s[*i+2]
-		fallthrough
-	case 2:
-		emitter.buffer[p+1] = s[*i+1]
-		fallthrough
-	case 1:
-		emitter.buffer[p+0] = s[*i+0]
-	default:
-		panic("unknown character width")
-	}
-	emitter.column++
-	emitter.buffer_pos += w
-	*i += w
-	return true
-}
-
-// Write a whole string into buffer.
-func write_all(emitter *yaml_emitter_t, s []byte) bool {
-	for i := 0; i < len(s); {
-		if !write(emitter, s, &i) {
-			return false
-		}
-	}
-	return true
-}
-
-// Copy a line break character from a string into buffer.
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
-	if s[*i] == '\n' {
-		if !put_break(emitter) {
-			return false
-		}
-		*i++
-	} else {
-		if !write(emitter, s, i) {
-			return false
-		}
-		if emitter.column == 0 {
-			emitter.space_above = true
-		}
-		emitter.column = 0
-		emitter.line++
-		// [Go] Do this here and above and drop from everywhere else (see commented lines).
-		emitter.indention = true
-	}
-	return true
-}
-
-// Set an emitter error and return false.
-func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
-	emitter.error = yaml_EMITTER_ERROR
-	emitter.problem = problem
-	return false
-}
-
-// Emit an event.
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	emitter.events = append(emitter.events, *event)
-	for !yaml_emitter_need_more_events(emitter) {
-		event := &emitter.events[emitter.events_head]
-		if !yaml_emitter_analyze_event(emitter, event) {
-			return false
-		}
-		if !yaml_emitter_state_machine(emitter, event) {
-			return false
-		}
-		yaml_event_delete(event)
-		emitter.events_head++
-	}
-	return true
-}
-
-// Check if we need to accumulate more events before emitting.
-//
-// We accumulate extra
-//  - 1 event for DOCUMENT-START
-//  - 2 events for SEQUENCE-START
-//  - 3 events for MAPPING-START
-//
-func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
-	if emitter.events_head == len(emitter.events) {
-		return true
-	}
-	var accumulate int
-	switch emitter.events[emitter.events_head].typ {
-	case yaml_DOCUMENT_START_EVENT:
-		accumulate = 1
-		break
-	case yaml_SEQUENCE_START_EVENT:
-		accumulate = 2
-		break
-	case yaml_MAPPING_START_EVENT:
-		accumulate = 3
-		break
-	default:
-		return false
-	}
-	if len(emitter.events)-emitter.events_head > accumulate {
-		return false
-	}
-	var level int
-	for i := emitter.events_head; i < len(emitter.events); i++ {
-		switch emitter.events[i].typ {
-		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
-			level++
-		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
-			level--
-		}
-		if level == 0 {
-			return false
-		}
-	}
-	return true
-}
-
-// Append a directive to the directives stack.
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
-	for i := 0; i < len(emitter.tag_directives); i++ {
-		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
-			if allow_duplicates {
-				return true
-			}
-			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
-		}
-	}
-
-	// [Go] Do we actually need to copy this given garbage collection
-	// and the lack of deallocating destructors?
-	tag_copy := yaml_tag_directive_t{
-		handle: make([]byte, len(value.handle)),
-		prefix: make([]byte, len(value.prefix)),
-	}
-	copy(tag_copy.handle, value.handle)
-	copy(tag_copy.prefix, value.prefix)
-	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
-	return true
-}
-
-// Increase the indentation level.
-func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
-	emitter.indents = append(emitter.indents, emitter.indent)
-	if emitter.indent < 0 {
-		if flow {
-			emitter.indent = emitter.best_indent
-		} else {
-			emitter.indent = 0
-		}
-	} else if !indentless {
-		// [Go] This was changed so that indentations are more regular.
-		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
-			// The first indent inside a sequence will just skip the "- " indicator.
-			emitter.indent += 2
-		} else {
-			// Everything else aligns to the chosen indentation.
-			emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
-		}
-	}
-	return true
-}
-
-// State dispatcher.
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	switch emitter.state {
-	default:
-	case yaml_EMIT_STREAM_START_STATE:
-		return yaml_emitter_emit_stream_start(emitter, event)
-
-	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
-		return yaml_emitter_emit_document_start(emitter, event, true)
-
-	case yaml_EMIT_DOCUMENT_START_STATE:
-		return yaml_emitter_emit_document_start(emitter, event, false)
-
-	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
-		return yaml_emitter_emit_document_content(emitter, event)
-
-	case yaml_EMIT_DOCUMENT_END_STATE:
-		return yaml_emitter_emit_document_end(emitter, event)
-
-	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
-		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
-
-	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
-		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
-
-	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
-		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
-
-	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
-		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
-
-	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
-		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
-
-	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
-		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
-
-	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
-		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
-
-	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
-		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
-
-	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
-		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
-
-	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
-		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
-
-	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
-		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
-
-	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
-		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
-
-	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
-		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
-
-	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
-		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
-
-	case yaml_EMIT_END_STATE:
-		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
-	}
-	panic("invalid emitter state")
-}
-
-// Expect STREAM-START.
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	if event.typ != yaml_STREAM_START_EVENT {
-		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
-	}
-	if emitter.encoding == yaml_ANY_ENCODING {
-		emitter.encoding = event.encoding
-		if emitter.encoding == yaml_ANY_ENCODING {
-			emitter.encoding = yaml_UTF8_ENCODING
-		}
-	}
-	if emitter.best_indent < 2 || emitter.best_indent > 9 {
-		emitter.best_indent = 2
-	}
-	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
-		emitter.best_width = 80
-	}
-	if emitter.best_width < 0 {
-		emitter.best_width = 1<<31 - 1
-	}
-	if emitter.line_break == yaml_ANY_BREAK {
-		emitter.line_break = yaml_LN_BREAK
-	}
-
-	emitter.indent = -1
-	emitter.line = 0
-	emitter.column = 0
-	emitter.whitespace = true
-	emitter.indention = true
-	emitter.space_above = true
-	emitter.foot_indent = -1
-
-	if emitter.encoding != yaml_UTF8_ENCODING {
-		if !yaml_emitter_write_bom(emitter) {
-			return false
-		}
-	}
-	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
-	return true
-}
-
-// Expect DOCUMENT-START or STREAM-END.
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-
-	if event.typ == yaml_DOCUMENT_START_EVENT {
-
-		if event.version_directive != nil {
-			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
-				return false
-			}
-		}
-
-		for i := 0; i < len(event.tag_directives); i++ {
-			tag_directive := &event.tag_directives[i]
-			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
-				return false
-			}
-			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
-				return false
-			}
-		}
-
-		for i := 0; i < len(default_tag_directives); i++ {
-			tag_directive := &default_tag_directives[i]
-			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
-				return false
-			}
-		}
-
-		implicit := event.implicit
-		if !first || emitter.canonical {
-			implicit = false
-		}
-
-		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
-			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
-				return false
-			}
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-		}
-
-		if event.version_directive != nil {
-			implicit = false
-			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
-				return false
-			}
-			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
-				return false
-			}
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-		}
-
-		if len(event.tag_directives) > 0 {
-			implicit = false
-			for i := 0; i < len(event.tag_directives); i++ {
-				tag_directive := &event.tag_directives[i]
-				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
-					return false
-				}
-				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
-					return false
-				}
-				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
-					return false
-				}
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-			}
-		}
-
-		if yaml_emitter_check_empty_document(emitter) {
-			implicit = false
-		}
-		if !implicit {
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
-				return false
-			}
-			if emitter.canonical || true {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-			}
-		}
-
-		if len(emitter.head_comment) > 0 {
-			if !yaml_emitter_process_head_comment(emitter) {
-				return false
-			}
-			if !put_break(emitter) {
-				return false
-			}
-		}
-
-		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
-		return true
-	}
-
-	if event.typ == yaml_STREAM_END_EVENT {
-		if emitter.open_ended {
-			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
-				return false
-			}
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-		}
-		if !yaml_emitter_flush(emitter) {
-			return false
-		}
-		emitter.state = yaml_EMIT_END_STATE
-		return true
-	}
-
-	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
-}
-
-// Expect the root node.
-func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
-
-	if !yaml_emitter_process_head_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
-		return false
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_foot_comment(emitter) {
-		return false
-	}
-	return true
-}
-
-// Expect DOCUMENT-END.
-func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	if event.typ != yaml_DOCUMENT_END_EVENT {
-		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
-	}
-	// [Go] Force document foot separation.
-	emitter.foot_indent = 0
-	if !yaml_emitter_process_foot_comment(emitter) {
-		return false
-	}
-	emitter.foot_indent = -1
-	if !yaml_emitter_write_indent(emitter) {
-		return false
-	}
-	if !event.implicit {
-		// [Go] Allocate the slice elsewhere.
-		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
-			return false
-		}
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-	}
-	if !yaml_emitter_flush(emitter) {
-		return false
-	}
-	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
-	emitter.tag_directives = emitter.tag_directives[:0]
-	return true
-}
-
-// Expect a flow item node.
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
-	if first {
-		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
-			return false
-		}
-		if !yaml_emitter_increase_indent(emitter, true, false) {
-			return false
-		}
-		emitter.flow_level++
-	}
-
-	if event.typ == yaml_SEQUENCE_END_EVENT {
-		if emitter.canonical && !first && !trail {
-			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-				return false
-			}
-		}
-		emitter.flow_level--
-		emitter.indent = emitter.indents[len(emitter.indents)-1]
-		emitter.indents = emitter.indents[:len(emitter.indents)-1]
-		if emitter.column == 0 || emitter.canonical && !first {
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-		}
-		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
-			return false
-		}
-		if !yaml_emitter_process_line_comment(emitter) {
-			return false
-		}
-		if !yaml_emitter_process_foot_comment(emitter) {
-			return false
-		}
-		emitter.state = emitter.states[len(emitter.states)-1]
-		emitter.states = emitter.states[:len(emitter.states)-1]
-
-		return true
-	}
-
-	if !first && !trail {
-		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-			return false
-		}
-	}
-
-	if !yaml_emitter_process_head_comment(emitter) {
-		return false
-	}
-	if emitter.column == 0 {
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-	}
-
-	if emitter.canonical || emitter.column > emitter.best_width {
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-	}
-	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
-		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
-	} else {
-		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
-	}
-	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
-		return false
-	}
-	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
-		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-			return false
-		}
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_foot_comment(emitter) {
-		return false
-	}
-	return true
-}
-
-// Expect a flow key node.
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
-	if first {
-		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
-			return false
-		}
-		if !yaml_emitter_increase_indent(emitter, true, false) {
-			return false
-		}
-		emitter.flow_level++
-	}
-
-	if event.typ == yaml_MAPPING_END_EVENT {
-		if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
-			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-				return false
-			}
-		}
-		if !yaml_emitter_process_head_comment(emitter) {
-			return false
-		}
-		emitter.flow_level--
-		emitter.indent = emitter.indents[len(emitter.indents)-1]
-		emitter.indents = emitter.indents[:len(emitter.indents)-1]
-		if emitter.canonical && !first {
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-		}
-		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
-			return false
-		}
-		if !yaml_emitter_process_line_comment(emitter) {
-			return false
-		}
-		if !yaml_emitter_process_foot_comment(emitter) {
-			return false
-		}
-		emitter.state = emitter.states[len(emitter.states)-1]
-		emitter.states = emitter.states[:len(emitter.states)-1]
-		return true
-	}
-
-	if !first && !trail {
-		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-			return false
-		}
-	}
-
-	if !yaml_emitter_process_head_comment(emitter) {
-		return false
-	}
-
-	if emitter.column == 0 {
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-	}
-
-	if emitter.canonical || emitter.column > emitter.best_width {
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-	}
-
-	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
-		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
-		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
-	}
-	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
-		return false
-	}
-	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
-	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a flow value node.
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
-	if simple {
-		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
-			return false
-		}
-	} else {
-		if emitter.canonical || emitter.column > emitter.best_width {
-			if !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-		}
-		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
-			return false
-		}
-	}
-	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
-		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
-	} else {
-		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
-	}
-	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
-		return false
-	}
-	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
-		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-			return false
-		}
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_foot_comment(emitter) {
-		return false
-	}
-	return true
-}
-
-// Expect a block item node.
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-	if first {
-		if !yaml_emitter_increase_indent(emitter, false, false) {
-			return false
-		}
-	}
-	if event.typ == yaml_SEQUENCE_END_EVENT {
-		emitter.indent = emitter.indents[len(emitter.indents)-1]
-		emitter.indents = emitter.indents[:len(emitter.indents)-1]
-		emitter.state = emitter.states[len(emitter.states)-1]
-		emitter.states = emitter.states[:len(emitter.states)-1]
-		return true
-	}
-	if !yaml_emitter_process_head_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_write_indent(emitter) {
-		return false
-	}
-	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
-		return false
-	}
-	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
-	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
-		return false
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_foot_comment(emitter) {
-		return false
-	}
-	return true
-}
-
-// Expect a block key node.
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-	if first {
-		if !yaml_emitter_increase_indent(emitter, false, false) {
-			return false
-		}
-	}
-	if !yaml_emitter_process_head_comment(emitter) {
-		return false
-	}
-	if event.typ == yaml_MAPPING_END_EVENT {
-		emitter.indent = emitter.indents[len(emitter.indents)-1]
-		emitter.indents = emitter.indents[:len(emitter.indents)-1]
-		emitter.state = emitter.states[len(emitter.states)-1]
-		emitter.states = emitter.states[:len(emitter.states)-1]
-		return true
-	}
-	if !yaml_emitter_write_indent(emitter) {
-		return false
-	}
-	if len(emitter.line_comment) > 0 {
-		// [Go] A line comment was provided for the key. That's unusual as the
-		//      scanner associates line comments with the value. Either way,
-		//      save the line comment and render it appropriately later.
-		emitter.key_line_comment = emitter.line_comment
-		emitter.line_comment = nil
-	}
-	if yaml_emitter_check_simple_key(emitter) {
-		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
-		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
-	}
-	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
-		return false
-	}
-	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
-	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block value node.
-func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
-	if simple {
-		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
-			return false
-		}
-	} else {
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
-			return false
-		}
-	}
-	if len(emitter.key_line_comment) > 0 {
-		// [Go] Line comments are generally associated with the value, but when there's
-		//      no value on the same line as a mapping key they end up attached to the
-		//      key itself.
-		if event.typ == yaml_SCALAR_EVENT {
-			if len(emitter.line_comment) == 0 {
-				// A scalar is coming and it has no line comments by itself yet,
-				// so just let it handle the line comment as usual. If it has a
-				// line comment, we can't have both so the one from the key is lost.
-				emitter.line_comment = emitter.key_line_comment
-				emitter.key_line_comment = nil
-			}
-		} else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
-			// An indented block follows, so write the comment right now.
-			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
-			if !yaml_emitter_process_line_comment(emitter) {
-				return false
-			}
-			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
-		}
-	}
-	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
-	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
-		return false
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_foot_comment(emitter) {
-		return false
-	}
-	return true
-}
-
-func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
-}
-
-// Expect a node.
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
-	root bool, sequence bool, mapping bool, simple_key bool) bool {
-
-	emitter.root_context = root
-	emitter.sequence_context = sequence
-	emitter.mapping_context = mapping
-	emitter.simple_key_context = simple_key
-
-	switch event.typ {
-	case yaml_ALIAS_EVENT:
-		return yaml_emitter_emit_alias(emitter, event)
-	case yaml_SCALAR_EVENT:
-		return yaml_emitter_emit_scalar(emitter, event)
-	case yaml_SEQUENCE_START_EVENT:
-		return yaml_emitter_emit_sequence_start(emitter, event)
-	case yaml_MAPPING_START_EVENT:
-		return yaml_emitter_emit_mapping_start(emitter, event)
-	default:
-		return yaml_emitter_set_emitter_error(emitter,
-			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
-	}
-}
-
-// Expect ALIAS.
-func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	if !yaml_emitter_process_anchor(emitter) {
-		return false
-	}
-	emitter.state = emitter.states[len(emitter.states)-1]
-	emitter.states = emitter.states[:len(emitter.states)-1]
-	return true
-}
-
-// Expect SCALAR.
-func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	if !yaml_emitter_select_scalar_style(emitter, event) {
-		return false
-	}
-	if !yaml_emitter_process_anchor(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_tag(emitter) {
-		return false
-	}
-	if !yaml_emitter_increase_indent(emitter, true, false) {
-		return false
-	}
-	if !yaml_emitter_process_scalar(emitter) {
-		return false
-	}
-	emitter.indent = emitter.indents[len(emitter.indents)-1]
-	emitter.indents = emitter.indents[:len(emitter.indents)-1]
-	emitter.state = emitter.states[len(emitter.states)-1]
-	emitter.states = emitter.states[:len(emitter.states)-1]
-	return true
-}
-
-// Expect SEQUENCE-START.
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	if !yaml_emitter_process_anchor(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_tag(emitter) {
-		return false
-	}
-	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
-		yaml_emitter_check_empty_sequence(emitter) {
-		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
-	} else {
-		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
-	}
-	return true
-}
-
-// Expect MAPPING-START.
-func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	if !yaml_emitter_process_anchor(emitter) {
-		return false
-	}
-	if !yaml_emitter_process_tag(emitter) {
-		return false
-	}
-	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
-		yaml_emitter_check_empty_mapping(emitter) {
-		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
-	} else {
-		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
-	}
-	return true
-}
-
-// Check if the document content is an empty scalar.
-func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
-	return false // [Go] Huh?
-}
-
-// Check if the next events represent an empty sequence.
-func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
-	if len(emitter.events)-emitter.events_head < 2 {
-		return false
-	}
-	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
-		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
-}
-
-// Check if the next events represent an empty mapping.
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
-	if len(emitter.events)-emitter.events_head < 2 {
-		return false
-	}
-	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
-		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
-}
-
-// Check if the next node can be expressed as a simple key.
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
-	length := 0
-	switch emitter.events[emitter.events_head].typ {
-	case yaml_ALIAS_EVENT:
-		length += len(emitter.anchor_data.anchor)
-	case yaml_SCALAR_EVENT:
-		if emitter.scalar_data.multiline {
-			return false
-		}
-		length += len(emitter.anchor_data.anchor) +
-			len(emitter.tag_data.handle) +
-			len(emitter.tag_data.suffix) +
-			len(emitter.scalar_data.value)
-	case yaml_SEQUENCE_START_EVENT:
-		if !yaml_emitter_check_empty_sequence(emitter) {
-			return false
-		}
-		length += len(emitter.anchor_data.anchor) +
-			len(emitter.tag_data.handle) +
-			len(emitter.tag_data.suffix)
-	case yaml_MAPPING_START_EVENT:
-		if !yaml_emitter_check_empty_mapping(emitter) {
-			return false
-		}
-		length += len(emitter.anchor_data.anchor) +
-			len(emitter.tag_data.handle) +
-			len(emitter.tag_data.suffix)
-	default:
-		return false
-	}
-	return length <= 128
-}
-
-// Determine an acceptable scalar style.
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
-	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
-	if no_tag && !event.implicit && !event.quoted_implicit {
-		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
-	}
-
-	style := event.scalar_style()
-	if style == yaml_ANY_SCALAR_STYLE {
-		style = yaml_PLAIN_SCALAR_STYLE
-	}
-	if emitter.canonical {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-	if emitter.simple_key_context && emitter.scalar_data.multiline {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-
-	if style == yaml_PLAIN_SCALAR_STYLE {
-		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
-			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		}
-		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		}
-		if no_tag && !event.implicit {
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		}
-	}
-	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
-		if !emitter.scalar_data.single_quoted_allowed {
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		}
-	}
-	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
-		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		}
-	}
-
-	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
-		emitter.tag_data.handle = []byte{'!'}
-	}
-	emitter.scalar_data.style = style
-	return true
-}
-
-// Write an anchor.
-func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
-	if emitter.anchor_data.anchor == nil {
-		return true
-	}
-	c := []byte{'&'}
-	if emitter.anchor_data.alias {
-		c[0] = '*'
-	}
-	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
-		return false
-	}
-	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
-}
-
-// Write a tag.
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
-	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
-		return true
-	}
-	if len(emitter.tag_data.handle) > 0 {
-		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
-			return false
-		}
-		if len(emitter.tag_data.suffix) > 0 {
-			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
-				return false
-			}
-		}
-	} else {
-		// [Go] Allocate these slices elsewhere.
-		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
-			return false
-		}
-		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
-			return false
-		}
-		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
-			return false
-		}
-	}
-	return true
-}
-
-// Write a scalar.
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
-	switch emitter.scalar_data.style {
-	case yaml_PLAIN_SCALAR_STYLE:
-		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
-	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
-		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
-	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
-		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
-	case yaml_LITERAL_SCALAR_STYLE:
-		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
-
-	case yaml_FOLDED_SCALAR_STYLE:
-		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
-	}
-	panic("unknown scalar style")
-}
-
-// Write a head comment.
-func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
-	if len(emitter.tail_comment) > 0 {
-		if !yaml_emitter_write_indent(emitter) {
-			return false
-		}
-		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
-			return false
-		}
-		emitter.tail_comment = emitter.tail_comment[:0]
-		emitter.foot_indent = emitter.indent
-		if emitter.foot_indent < 0 {
-			emitter.foot_indent = 0
-		}
-	}
-
-	if len(emitter.head_comment) == 0 {
-		return true
-	}
-	if !yaml_emitter_write_indent(emitter) {
-		return false
-	}
-	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
-		return false
-	}
-	emitter.head_comment = emitter.head_comment[:0]
-	return true
-}
-
-// Write a line comment.
-func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
-	if len(emitter.line_comment) == 0 {
-		return true
-	}
-	if !emitter.whitespace {
-		if !put(emitter, ' ') {
-			return false
-		}
-	}
-	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
-		return false
-	}
-	emitter.line_comment = emitter.line_comment[:0]
-	return true
-}
-
-// Write a foot comment.
-func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
-	if len(emitter.foot_comment) == 0 {
-		return true
-	}
-	if !yaml_emitter_write_indent(emitter) {
-		return false
-	}
-	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
-		return false
-	}
-	emitter.foot_comment = emitter.foot_comment[:0]
-	emitter.foot_indent = emitter.indent
-	if emitter.foot_indent < 0 {
-		emitter.foot_indent = 0
-	}
-	return true
-}
-
-// Check if a %YAML directive is valid.
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
-	if version_directive.major != 1 || version_directive.minor != 1 {
-		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
-	}
-	return true
-}
-
-// Check if a %TAG directive is valid.
-func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
-	handle := tag_directive.handle
-	prefix := tag_directive.prefix
-	if len(handle) == 0 {
-		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
-	}
-	if handle[0] != '!' {
-		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
-	}
-	if handle[len(handle)-1] != '!' {
-		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
-	}
-	for i := 1; i < len(handle)-1; i += width(handle[i]) {
-		if !is_alpha(handle, i) {
-			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
-		}
-	}
-	if len(prefix) == 0 {
-		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
-	}
-	return true
-}
-
-// Check if an anchor is valid.
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
-	if len(anchor) == 0 {
-		problem := "anchor value must not be empty"
-		if alias {
-			problem = "alias value must not be empty"
-		}
-		return yaml_emitter_set_emitter_error(emitter, problem)
-	}
-	for i := 0; i < len(anchor); i += width(anchor[i]) {
-		if !is_alpha(anchor, i) {
-			problem := "anchor value must contain alphanumerical characters only"
-			if alias {
-				problem = "alias value must contain alphanumerical characters only"
-			}
-			return yaml_emitter_set_emitter_error(emitter, problem)
-		}
-	}
-	emitter.anchor_data.anchor = anchor
-	emitter.anchor_data.alias = alias
-	return true
-}
-
-// Check if a tag is valid.
-func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
-	if len(tag) == 0 {
-		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
-	}
-	for i := 0; i < len(emitter.tag_directives); i++ {
-		tag_directive := &emitter.tag_directives[i]
-		if bytes.HasPrefix(tag, tag_directive.prefix) {
-			emitter.tag_data.handle = tag_directive.handle
-			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
-			return true
-		}
-	}
-	emitter.tag_data.suffix = tag
-	return true
-}
-
-// Check if a scalar is valid.
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
-	var (
-		block_indicators   = false
-		flow_indicators    = false
-		line_breaks        = false
-		special_characters = false
-		tab_characters     = false
-
-		leading_space  = false
-		leading_break  = false
-		trailing_space = false
-		trailing_break = false
-		break_space    = false
-		space_break    = false
-
-		preceded_by_whitespace = false
-		followed_by_whitespace = false
-		previous_space         = false
-		previous_break         = false
-	)
-
-	emitter.scalar_data.value = value
-
-	if len(value) == 0 {
-		emitter.scalar_data.multiline = false
-		emitter.scalar_data.flow_plain_allowed = false
-		emitter.scalar_data.block_plain_allowed = true
-		emitter.scalar_data.single_quoted_allowed = true
-		emitter.scalar_data.block_allowed = false
-		return true
-	}
-
-	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
-		block_indicators = true
-		flow_indicators = true
-	}
-
-	preceded_by_whitespace = true
-	for i, w := 0, 0; i < len(value); i += w {
-		w = width(value[i])
-		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
-
-		if i == 0 {
-			switch value[i] {
-			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
-				flow_indicators = true
-				block_indicators = true
-			case '?', ':':
-				flow_indicators = true
-				if followed_by_whitespace {
-					block_indicators = true
-				}
-			case '-':
-				if followed_by_whitespace {
-					flow_indicators = true
-					block_indicators = true
-				}
-			}
-		} else {
-			switch value[i] {
-			case ',', '?', '[', ']', '{', '}':
-				flow_indicators = true
-			case ':':
-				flow_indicators = true
-				if followed_by_whitespace {
-					block_indicators = true
-				}
-			case '#':
-				if preceded_by_whitespace {
-					flow_indicators = true
-					block_indicators = true
-				}
-			}
-		}
-
-		if value[i] == '\t' {
-			tab_characters = true
-		} else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
-			special_characters = true
-		}
-		if is_space(value, i) {
-			if i == 0 {
-				leading_space = true
-			}
-			if i+width(value[i]) == len(value) {
-				trailing_space = true
-			}
-			if previous_break {
-				break_space = true
-			}
-			previous_space = true
-			previous_break = false
-		} else if is_break(value, i) {
-			line_breaks = true
-			if i == 0 {
-				leading_break = true
-			}
-			if i+width(value[i]) == len(value) {
-				trailing_break = true
-			}
-			if previous_space {
-				space_break = true
-			}
-			previous_space = false
-			previous_break = true
-		} else {
-			previous_space = false
-			previous_break = false
-		}
-
-		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
-		preceded_by_whitespace = is_blankz(value, i)
-	}
-
-	emitter.scalar_data.multiline = line_breaks
-	emitter.scalar_data.flow_plain_allowed = true
-	emitter.scalar_data.block_plain_allowed = true
-	emitter.scalar_data.single_quoted_allowed = true
-	emitter.scalar_data.block_allowed = true
-
-	if leading_space || leading_break || trailing_space || trailing_break {
-		emitter.scalar_data.flow_plain_allowed = false
-		emitter.scalar_data.block_plain_allowed = false
-	}
-	if trailing_space {
-		emitter.scalar_data.block_allowed = false
-	}
-	if break_space {
-		emitter.scalar_data.flow_plain_allowed = false
-		emitter.scalar_data.block_plain_allowed = false
-		emitter.scalar_data.single_quoted_allowed = false
-	}
-	if space_break || tab_characters || special_characters {
-		emitter.scalar_data.flow_plain_allowed = false
-		emitter.scalar_data.block_plain_allowed = false
-		emitter.scalar_data.single_quoted_allowed = false
-	}
-	if space_break || special_characters {
-		emitter.scalar_data.block_allowed = false
-	}
-	if line_breaks {
-		emitter.scalar_data.flow_plain_allowed = false
-		emitter.scalar_data.block_plain_allowed = false
-	}
-	if flow_indicators {
-		emitter.scalar_data.flow_plain_allowed = false
-	}
-	if block_indicators {
-		emitter.scalar_data.block_plain_allowed = false
-	}
-	return true
-}
-
-// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
-	emitter.anchor_data.anchor = nil
-	emitter.tag_data.handle = nil
-	emitter.tag_data.suffix = nil
-	emitter.scalar_data.value = nil
-
-	if len(event.head_comment) > 0 {
-		emitter.head_comment = event.head_comment
-	}
-	if len(event.line_comment) > 0 {
-		emitter.line_comment = event.line_comment
-	}
-	if len(event.foot_comment) > 0 {
-		emitter.foot_comment = event.foot_comment
-	}
-	if len(event.tail_comment) > 0 {
-		emitter.tail_comment = event.tail_comment
-	}
-
-	switch event.typ {
-	case yaml_ALIAS_EVENT:
-		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
-			return false
-		}
-
-	case yaml_SCALAR_EVENT:
-		if len(event.anchor) > 0 {
-			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
-				return false
-			}
-		}
-		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
-			if !yaml_emitter_analyze_tag(emitter, event.tag) {
-				return false
-			}
-		}
-		if !yaml_emitter_analyze_scalar(emitter, event.value) {
-			return false
-		}
-
-	case yaml_SEQUENCE_START_EVENT:
-		if len(event.anchor) > 0 {
-			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
-				return false
-			}
-		}
-		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
-			if !yaml_emitter_analyze_tag(emitter, event.tag) {
-				return false
-			}
-		}
-
-	case yaml_MAPPING_START_EVENT:
-		if len(event.anchor) > 0 {
-			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
-				return false
-			}
-		}
-		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
-			if !yaml_emitter_analyze_tag(emitter, event.tag) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// Write the BOM character.
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
-	if !flush(emitter) {
-		return false
-	}
-	pos := emitter.buffer_pos
-	emitter.buffer[pos+0] = '\xEF'
-	emitter.buffer[pos+1] = '\xBB'
-	emitter.buffer[pos+2] = '\xBF'
-	emitter.buffer_pos += 3
-	return true
-}
-
-func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
-	indent := emitter.indent
-	if indent < 0 {
-		indent = 0
-	}
-	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
-		if !put_break(emitter) {
-			return false
-		}
-	}
-	if emitter.foot_indent == indent {
-		if !put_break(emitter) {
-			return false
-		}
-	}
-	for emitter.column < indent {
-		if !put(emitter, ' ') {
-			return false
-		}
-	}
-	emitter.whitespace = true
-	//emitter.indention = true
-	emitter.space_above = false
-	emitter.foot_indent = -1
-	return true
-}
-
-func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
-	if need_whitespace && !emitter.whitespace {
-		if !put(emitter, ' ') {
-			return false
-		}
-	}
-	if !write_all(emitter, indicator) {
-		return false
-	}
-	emitter.whitespace = is_whitespace
-	emitter.indention = (emitter.indention && is_indention)
-	emitter.open_ended = false
-	return true
-}
-
-func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
-	if !write_all(emitter, value) {
-		return false
-	}
-	emitter.whitespace = false
-	emitter.indention = false
-	return true
-}
-
-func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
-	if !emitter.whitespace {
-		if !put(emitter, ' ') {
-			return false
-		}
-	}
-	if !write_all(emitter, value) {
-		return false
-	}
-	emitter.whitespace = false
-	emitter.indention = false
-	return true
-}
-
-func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
-	if need_whitespace && !emitter.whitespace {
-		if !put(emitter, ' ') {
-			return false
-		}
-	}
-	for i := 0; i < len(value); {
-		var must_write bool
-		switch value[i] {
-		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
-			must_write = true
-		default:
-			must_write = is_alpha(value, i)
-		}
-		if must_write {
-			if !write(emitter, value, &i) {
-				return false
-			}
-		} else {
-			w := width(value[i])
-			for k := 0; k < w; k++ {
-				octet := value[i]
-				i++
-				if !put(emitter, '%') {
-					return false
-				}
-
-				c := octet >> 4
-				if c < 10 {
-					c += '0'
-				} else {
-					c += 'A' - 10
-				}
-				if !put(emitter, c) {
-					return false
-				}
-
-				c = octet & 0x0f
-				if c < 10 {
-					c += '0'
-				} else {
-					c += 'A' - 10
-				}
-				if !put(emitter, c) {
-					return false
-				}
-			}
-		}
-	}
-	emitter.whitespace = false
-	emitter.indention = false
-	return true
-}
-
-func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-	if len(value) > 0 && !emitter.whitespace {
-		if !put(emitter, ' ') {
-			return false
-		}
-	}
-
-	spaces := false
-	breaks := false
-	for i := 0; i < len(value); {
-		if is_space(value, i) {
-			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-				i += width(value[i])
-			} else {
-				if !write(emitter, value, &i) {
-					return false
-				}
-			}
-			spaces = true
-		} else if is_break(value, i) {
-			if !breaks && value[i] == '\n' {
-				if !put_break(emitter) {
-					return false
-				}
-			}
-			if !write_break(emitter, value, &i) {
-				return false
-			}
-			//emitter.indention = true
-			breaks = true
-		} else {
-			if breaks {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-			}
-			if !write(emitter, value, &i) {
-				return false
-			}
-			emitter.indention = false
-			spaces = false
-			breaks = false
-		}
-	}
-
-	if len(value) > 0 {
-		emitter.whitespace = false
-	}
-	emitter.indention = false
-	if emitter.root_context {
-		emitter.open_ended = true
-	}
-
-	return true
-}
-
-func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-
-	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
-		return false
-	}
-
-	spaces := false
-	breaks := false
-	for i := 0; i < len(value); {
-		if is_space(value, i) {
-			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-				i += width(value[i])
-			} else {
-				if !write(emitter, value, &i) {
-					return false
-				}
-			}
-			spaces = true
-		} else if is_break(value, i) {
-			if !breaks && value[i] == '\n' {
-				if !put_break(emitter) {
-					return false
-				}
-			}
-			if !write_break(emitter, value, &i) {
-				return false
-			}
-			//emitter.indention = true
-			breaks = true
-		} else {
-			if breaks {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-			}
-			if value[i] == '\'' {
-				if !put(emitter, '\'') {
-					return false
-				}
-			}
-			if !write(emitter, value, &i) {
-				return false
-			}
-			emitter.indention = false
-			spaces = false
-			breaks = false
-		}
-	}
-	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
-		return false
-	}
-	emitter.whitespace = false
-	emitter.indention = false
-	return true
-}
-
-func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-	spaces := false
-	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
-		return false
-	}
-
-	for i := 0; i < len(value); {
-		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
-			is_bom(value, i) || is_break(value, i) ||
-			value[i] == '"' || value[i] == '\\' {
-
-			octet := value[i]
-
-			var w int
-			var v rune
-			switch {
-			case octet&0x80 == 0x00:
-				w, v = 1, rune(octet&0x7F)
-			case octet&0xE0 == 0xC0:
-				w, v = 2, rune(octet&0x1F)
-			case octet&0xF0 == 0xE0:
-				w, v = 3, rune(octet&0x0F)
-			case octet&0xF8 == 0xF0:
-				w, v = 4, rune(octet&0x07)
-			}
-			for k := 1; k < w; k++ {
-				octet = value[i+k]
-				v = (v << 6) + (rune(octet) & 0x3F)
-			}
-			i += w
-
-			if !put(emitter, '\\') {
-				return false
-			}
-
-			var ok bool
-			switch v {
-			case 0x00:
-				ok = put(emitter, '0')
-			case 0x07:
-				ok = put(emitter, 'a')
-			case 0x08:
-				ok = put(emitter, 'b')
-			case 0x09:
-				ok = put(emitter, 't')
-			case 0x0A:
-				ok = put(emitter, 'n')
-			case 0x0b:
-				ok = put(emitter, 'v')
-			case 0x0c:
-				ok = put(emitter, 'f')
-			case 0x0d:
-				ok = put(emitter, 'r')
-			case 0x1b:
-				ok = put(emitter, 'e')
-			case 0x22:
-				ok = put(emitter, '"')
-			case 0x5c:
-				ok = put(emitter, '\\')
-			case 0x85:
-				ok = put(emitter, 'N')
-			case 0xA0:
-				ok = put(emitter, '_')
-			case 0x2028:
-				ok = put(emitter, 'L')
-			case 0x2029:
-				ok = put(emitter, 'P')
-			default:
-				if v <= 0xFF {
-					ok = put(emitter, 'x')
-					w = 2
-				} else if v <= 0xFFFF {
-					ok = put(emitter, 'u')
-					w = 4
-				} else {
-					ok = put(emitter, 'U')
-					w = 8
-				}
-				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
-					digit := byte((v >> uint(k)) & 0x0F)
-					if digit < 10 {
-						ok = put(emitter, digit+'0')
-					} else {
-						ok = put(emitter, digit+'A'-10)
-					}
-				}
-			}
-			if !ok {
-				return false
-			}
-			spaces = false
-		} else if is_space(value, i) {
-			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-				if is_space(value, i+1) {
-					if !put(emitter, '\\') {
-						return false
-					}
-				}
-				i += width(value[i])
-			} else if !write(emitter, value, &i) {
-				return false
-			}
-			spaces = true
-		} else {
-			if !write(emitter, value, &i) {
-				return false
-			}
-			spaces = false
-		}
-	}
-	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
-		return false
-	}
-	emitter.whitespace = false
-	emitter.indention = false
-	return true
-}
-
-func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
-	if is_space(value, 0) || is_break(value, 0) {
-		indent_hint := []byte{'0' + byte(emitter.best_indent)}
-		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
-			return false
-		}
-	}
-
-	emitter.open_ended = false
-
-	var chomp_hint [1]byte
-	if len(value) == 0 {
-		chomp_hint[0] = '-'
-	} else {
-		i := len(value) - 1
-		for value[i]&0xC0 == 0x80 {
-			i--
-		}
-		if !is_break(value, i) {
-			chomp_hint[0] = '-'
-		} else if i == 0 {
-			chomp_hint[0] = '+'
-			emitter.open_ended = true
-		} else {
-			i--
-			for value[i]&0xC0 == 0x80 {
-				i--
-			}
-			if is_break(value, i) {
-				chomp_hint[0] = '+'
-				emitter.open_ended = true
-			}
-		}
-	}
-	if chomp_hint[0] != 0 {
-		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
-			return false
-		}
-	}
-	return true
-}
-
-func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
-	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
-		return false
-	}
-	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
-		return false
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-	//emitter.indention = true
-	emitter.whitespace = true
-	breaks := true
-	for i := 0; i < len(value); {
-		if is_break(value, i) {
-			if !write_break(emitter, value, &i) {
-				return false
-			}
-			//emitter.indention = true
-			breaks = true
-		} else {
-			if breaks {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-			}
-			if !write(emitter, value, &i) {
-				return false
-			}
-			emitter.indention = false
-			breaks = false
-		}
-	}
-
-	return true
-}
-
-func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
-	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
-		return false
-	}
-	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
-		return false
-	}
-	if !yaml_emitter_process_line_comment(emitter) {
-		return false
-	}
-
-	//emitter.indention = true
-	emitter.whitespace = true
-
-	breaks := true
-	leading_spaces := true
-	for i := 0; i < len(value); {
-		if is_break(value, i) {
-			if !breaks && !leading_spaces && value[i] == '\n' {
-				k := 0
-				for is_break(value, k) {
-					k += width(value[k])
-				}
-				if !is_blankz(value, k) {
-					if !put_break(emitter) {
-						return false
-					}
-				}
-			}
-			if !write_break(emitter, value, &i) {
-				return false
-			}
-			//emitter.indention = true
-			breaks = true
-		} else {
-			if breaks {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-				leading_spaces = is_blank(value, i)
-			}
-			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
-				if !yaml_emitter_write_indent(emitter) {
-					return false
-				}
-				i += width(value[i])
-			} else {
-				if !write(emitter, value, &i) {
-					return false
-				}
-			}
-			emitter.indention = false
-			breaks = false
-		}
-	}
-	return true
-}
-
-func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
-	breaks := false
-	pound := false
-	for i := 0; i < len(comment); {
-		if is_break(comment, i) {
-			if !write_break(emitter, comment, &i) {
-				return false
-			}
-			//emitter.indention = true
-			breaks = true
-			pound = false
-		} else {
-			if breaks && !yaml_emitter_write_indent(emitter) {
-				return false
-			}
-			if !pound {
-				if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
-					return false
-				}
-				pound = true
-			}
-			if !write(emitter, comment, &i) {
-				return false
-			}
-			emitter.indention = false
-			breaks = false
-		}
-	}
-	if !breaks && !put_break(emitter) {
-		return false
-	}
-
-	emitter.whitespace = true
-	//emitter.indention = true
-	return true
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/encode.go b/application/source/vendor/gopkg.in/yaml.v3/encode.go
deleted file mode 100644
index de9e72a3e638d166e96ceab3d77ce59afe6e6f8a..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/encode.go
+++ /dev/null
@@ -1,577 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
-	"encoding"
-	"fmt"
-	"io"
-	"reflect"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-	"unicode/utf8"
-)
-
-type encoder struct {
-	emitter  yaml_emitter_t
-	event    yaml_event_t
-	out      []byte
-	flow     bool
-	indent   int
-	doneInit bool
-}
-
-func newEncoder() *encoder {
-	e := &encoder{}
-	yaml_emitter_initialize(&e.emitter)
-	yaml_emitter_set_output_string(&e.emitter, &e.out)
-	yaml_emitter_set_unicode(&e.emitter, true)
-	return e
-}
-
-func newEncoderWithWriter(w io.Writer) *encoder {
-	e := &encoder{}
-	yaml_emitter_initialize(&e.emitter)
-	yaml_emitter_set_output_writer(&e.emitter, w)
-	yaml_emitter_set_unicode(&e.emitter, true)
-	return e
-}
-
-func (e *encoder) init() {
-	if e.doneInit {
-		return
-	}
-	if e.indent == 0 {
-		e.indent = 4
-	}
-	e.emitter.best_indent = e.indent
-	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
-	e.emit()
-	e.doneInit = true
-}
-
-func (e *encoder) finish() {
-	e.emitter.open_ended = false
-	yaml_stream_end_event_initialize(&e.event)
-	e.emit()
-}
-
-func (e *encoder) destroy() {
-	yaml_emitter_delete(&e.emitter)
-}
-
-func (e *encoder) emit() {
-	// This will internally delete the e.event value.
-	e.must(yaml_emitter_emit(&e.emitter, &e.event))
-}
-
-func (e *encoder) must(ok bool) {
-	if !ok {
-		msg := e.emitter.problem
-		if msg == "" {
-			msg = "unknown problem generating YAML content"
-		}
-		failf("%s", msg)
-	}
-}
-
-func (e *encoder) marshalDoc(tag string, in reflect.Value) {
-	e.init()
-	var node *Node
-	if in.IsValid() {
-		node, _ = in.Interface().(*Node)
-	}
-	if node != nil && node.Kind == DocumentNode {
-		e.nodev(in)
-	} else {
-		yaml_document_start_event_initialize(&e.event, nil, nil, true)
-		e.emit()
-		e.marshal(tag, in)
-		yaml_document_end_event_initialize(&e.event, true)
-		e.emit()
-	}
-}
-
-func (e *encoder) marshal(tag string, in reflect.Value) {
-	tag = shortTag(tag)
-	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
-		e.nilv()
-		return
-	}
-	iface := in.Interface()
-	switch value := iface.(type) {
-	case *Node:
-		e.nodev(in)
-		return
-	case Node:
-		if !in.CanAddr() {
-			var n = reflect.New(in.Type()).Elem()
-			n.Set(in)
-			in = n
-		}
-		e.nodev(in.Addr())
-		return
-	case time.Time:
-		e.timev(tag, in)
-		return
-	case *time.Time:
-		e.timev(tag, in.Elem())
-		return
-	case time.Duration:
-		e.stringv(tag, reflect.ValueOf(value.String()))
-		return
-	case Marshaler:
-		v, err := value.MarshalYAML()
-		if err != nil {
-			fail(err)
-		}
-		if v == nil {
-			e.nilv()
-			return
-		}
-		e.marshal(tag, reflect.ValueOf(v))
-		return
-	case encoding.TextMarshaler:
-		text, err := value.MarshalText()
-		if err != nil {
-			fail(err)
-		}
-		in = reflect.ValueOf(string(text))
-	case nil:
-		e.nilv()
-		return
-	}
-	switch in.Kind() {
-	case reflect.Interface:
-		e.marshal(tag, in.Elem())
-	case reflect.Map:
-		e.mapv(tag, in)
-	case reflect.Ptr:
-		e.marshal(tag, in.Elem())
-	case reflect.Struct:
-		e.structv(tag, in)
-	case reflect.Slice, reflect.Array:
-		e.slicev(tag, in)
-	case reflect.String:
-		e.stringv(tag, in)
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		e.intv(tag, in)
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		e.uintv(tag, in)
-	case reflect.Float32, reflect.Float64:
-		e.floatv(tag, in)
-	case reflect.Bool:
-		e.boolv(tag, in)
-	default:
-		panic("cannot marshal type: " + in.Type().String())
-	}
-}
-
-func (e *encoder) mapv(tag string, in reflect.Value) {
-	e.mappingv(tag, func() {
-		keys := keyList(in.MapKeys())
-		sort.Sort(keys)
-		for _, k := range keys {
-			e.marshal("", k)
-			e.marshal("", in.MapIndex(k))
-		}
-	})
-}
-
-func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
-	for _, num := range index {
-		for {
-			if v.Kind() == reflect.Ptr {
-				if v.IsNil() {
-					return reflect.Value{}
-				}
-				v = v.Elem()
-				continue
-			}
-			break
-		}
-		v = v.Field(num)
-	}
-	return v
-}
-
-func (e *encoder) structv(tag string, in reflect.Value) {
-	sinfo, err := getStructInfo(in.Type())
-	if err != nil {
-		panic(err)
-	}
-	e.mappingv(tag, func() {
-		for _, info := range sinfo.FieldsList {
-			var value reflect.Value
-			if info.Inline == nil {
-				value = in.Field(info.Num)
-			} else {
-				value = e.fieldByIndex(in, info.Inline)
-				if !value.IsValid() {
-					continue
-				}
-			}
-			if info.OmitEmpty && isZero(value) {
-				continue
-			}
-			e.marshal("", reflect.ValueOf(info.Key))
-			e.flow = info.Flow
-			e.marshal("", value)
-		}
-		if sinfo.InlineMap >= 0 {
-			m := in.Field(sinfo.InlineMap)
-			if m.Len() > 0 {
-				e.flow = false
-				keys := keyList(m.MapKeys())
-				sort.Sort(keys)
-				for _, k := range keys {
-					if _, found := sinfo.FieldsMap[k.String()]; found {
-						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
-					}
-					e.marshal("", k)
-					e.flow = false
-					e.marshal("", m.MapIndex(k))
-				}
-			}
-		}
-	})
-}
-
-func (e *encoder) mappingv(tag string, f func()) {
-	implicit := tag == ""
-	style := yaml_BLOCK_MAPPING_STYLE
-	if e.flow {
-		e.flow = false
-		style = yaml_FLOW_MAPPING_STYLE
-	}
-	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
-	e.emit()
-	f()
-	yaml_mapping_end_event_initialize(&e.event)
-	e.emit()
-}
-
-func (e *encoder) slicev(tag string, in reflect.Value) {
-	implicit := tag == ""
-	style := yaml_BLOCK_SEQUENCE_STYLE
-	if e.flow {
-		e.flow = false
-		style = yaml_FLOW_SEQUENCE_STYLE
-	}
-	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
-	e.emit()
-	n := in.Len()
-	for i := 0; i < n; i++ {
-		e.marshal("", in.Index(i))
-	}
-	e.must(yaml_sequence_end_event_initialize(&e.event))
-	e.emit()
-}
-
-// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
-//
-// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
-// in YAML 1.2 and by this package, but these should be marshalled quoted for
-// the time being for compatibility with other parsers.
-func isBase60Float(s string) (result bool) {
-	// Fast path.
-	if s == "" {
-		return false
-	}
-	c := s[0]
-	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
-		return false
-	}
-	// Do the full match.
-	return base60float.MatchString(s)
-}
-
-// From http://yaml.org/type/float.html, except the regular expression there
-// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
-var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
-
-// isOldBool returns whether s is bool notation as defined in YAML 1.1.
-//
-// We continue to force strings that YAML 1.1 would interpret as booleans to be
-// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
-// parsing.
-func isOldBool(s string) (result bool) {
-	switch s {
-	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
-		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
-		return true
-	default:
-		return false
-	}
-}
-
-func (e *encoder) stringv(tag string, in reflect.Value) {
-	var style yaml_scalar_style_t
-	s := in.String()
-	canUsePlain := true
-	switch {
-	case !utf8.ValidString(s):
-		if tag == binaryTag {
-			failf("explicitly tagged !!binary data must be base64-encoded")
-		}
-		if tag != "" {
-			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
-		}
-		// It can't be encoded directly as YAML so use a binary tag
-		// and encode it as base64.
-		tag = binaryTag
-		s = encodeBase64(s)
-	case tag == "":
-		// Check to see if it would resolve to a specific
-		// tag when encoded unquoted. If it doesn't,
-		// there's no need to quote it.
-		rtag, _ := resolve("", s)
-		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
-	}
-	// Note: it's possible for user code to emit invalid YAML
-	// if they explicitly specify a tag and a string containing
-	// text that's incompatible with that tag.
-	switch {
-	case strings.Contains(s, "\n"):
-		if e.flow {
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		} else {
-			style = yaml_LITERAL_SCALAR_STYLE
-		}
-	case canUsePlain:
-		style = yaml_PLAIN_SCALAR_STYLE
-	default:
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
-}
-
-func (e *encoder) boolv(tag string, in reflect.Value) {
-	var s string
-	if in.Bool() {
-		s = "true"
-	} else {
-		s = "false"
-	}
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) intv(tag string, in reflect.Value) {
-	s := strconv.FormatInt(in.Int(), 10)
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) uintv(tag string, in reflect.Value) {
-	s := strconv.FormatUint(in.Uint(), 10)
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) timev(tag string, in reflect.Value) {
-	t := in.Interface().(time.Time)
-	s := t.Format(time.RFC3339Nano)
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) floatv(tag string, in reflect.Value) {
-	// Issue #352: When formatting, use the precision of the underlying value
-	precision := 64
-	if in.Kind() == reflect.Float32 {
-		precision = 32
-	}
-
-	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
-	switch s {
-	case "+Inf":
-		s = ".inf"
-	case "-Inf":
-		s = "-.inf"
-	case "NaN":
-		s = ".nan"
-	}
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) nilv() {
-	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
-}
-
-func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
-	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
-	implicit := tag == ""
-	if !implicit {
-		tag = longTag(tag)
-	}
-	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
-	e.event.head_comment = head
-	e.event.line_comment = line
-	e.event.foot_comment = foot
-	e.event.tail_comment = tail
-	e.emit()
-}
-
-func (e *encoder) nodev(in reflect.Value) {
-	e.node(in.Interface().(*Node), "")
-}
-
-func (e *encoder) node(node *Node, tail string) {
-	// Zero nodes behave as nil.
-	if node.Kind == 0 && node.IsZero() {
-		e.nilv()
-		return
-	}
-
-	// If the tag was not explicitly requested, and dropping it won't change the
-	// implicit tag of the value, don't include it in the presentation.
-	var tag = node.Tag
-	var stag = shortTag(tag)
-	var forceQuoting bool
-	if tag != "" && node.Style&TaggedStyle == 0 {
-		if node.Kind == ScalarNode {
-			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
-				tag = ""
-			} else {
-				rtag, _ := resolve("", node.Value)
-				if rtag == stag {
-					tag = ""
-				} else if stag == strTag {
-					tag = ""
-					forceQuoting = true
-				}
-			}
-		} else {
-			var rtag string
-			switch node.Kind {
-			case MappingNode:
-				rtag = mapTag
-			case SequenceNode:
-				rtag = seqTag
-			}
-			if rtag == stag {
-				tag = ""
-			}
-		}
-	}
-
-	switch node.Kind {
-	case DocumentNode:
-		yaml_document_start_event_initialize(&e.event, nil, nil, true)
-		e.event.head_comment = []byte(node.HeadComment)
-		e.emit()
-		for _, node := range node.Content {
-			e.node(node, "")
-		}
-		yaml_document_end_event_initialize(&e.event, true)
-		e.event.foot_comment = []byte(node.FootComment)
-		e.emit()
-
-	case SequenceNode:
-		style := yaml_BLOCK_SEQUENCE_STYLE
-		if node.Style&FlowStyle != 0 {
-			style = yaml_FLOW_SEQUENCE_STYLE
-		}
-		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
-		e.event.head_comment = []byte(node.HeadComment)
-		e.emit()
-		for _, node := range node.Content {
-			e.node(node, "")
-		}
-		e.must(yaml_sequence_end_event_initialize(&e.event))
-		e.event.line_comment = []byte(node.LineComment)
-		e.event.foot_comment = []byte(node.FootComment)
-		e.emit()
-
-	case MappingNode:
-		style := yaml_BLOCK_MAPPING_STYLE
-		if node.Style&FlowStyle != 0 {
-			style = yaml_FLOW_MAPPING_STYLE
-		}
-		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
-		e.event.tail_comment = []byte(tail)
-		e.event.head_comment = []byte(node.HeadComment)
-		e.emit()
-
-		// The tail logic below moves the foot comment of prior keys to the following key,
-		// since the value for each key may be a nested structure and the foot needs to be
-		// processed only after the entirety of the value is streamed. The last tail is processed
-		// with the mapping end event.
-		var tail string
-		for i := 0; i+1 < len(node.Content); i += 2 {
-			k := node.Content[i]
-			foot := k.FootComment
-			if foot != "" {
-				kopy := *k
-				kopy.FootComment = ""
-				k = &kopy
-			}
-			e.node(k, tail)
-			tail = foot
-
-			v := node.Content[i+1]
-			e.node(v, "")
-		}
-
-		yaml_mapping_end_event_initialize(&e.event)
-		e.event.tail_comment = []byte(tail)
-		e.event.line_comment = []byte(node.LineComment)
-		e.event.foot_comment = []byte(node.FootComment)
-		e.emit()
-
-	case AliasNode:
-		yaml_alias_event_initialize(&e.event, []byte(node.Value))
-		e.event.head_comment = []byte(node.HeadComment)
-		e.event.line_comment = []byte(node.LineComment)
-		e.event.foot_comment = []byte(node.FootComment)
-		e.emit()
-
-	case ScalarNode:
-		value := node.Value
-		if !utf8.ValidString(value) {
-			if stag == binaryTag {
-				failf("explicitly tagged !!binary data must be base64-encoded")
-			}
-			if stag != "" {
-				failf("cannot marshal invalid UTF-8 data as %s", stag)
-			}
-			// It can't be encoded directly as YAML so use a binary tag
-			// and encode it as base64.
-			tag = binaryTag
-			value = encodeBase64(value)
-		}
-
-		style := yaml_PLAIN_SCALAR_STYLE
-		switch {
-		case node.Style&DoubleQuotedStyle != 0:
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		case node.Style&SingleQuotedStyle != 0:
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		case node.Style&LiteralStyle != 0:
-			style = yaml_LITERAL_SCALAR_STYLE
-		case node.Style&FoldedStyle != 0:
-			style = yaml_FOLDED_SCALAR_STYLE
-		case strings.Contains(value, "\n"):
-			style = yaml_LITERAL_SCALAR_STYLE
-		case forceQuoting:
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		}
-
-		e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
-	default:
-		failf("cannot encode node with unknown kind %d", node.Kind)
-	}
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/parserc.go b/application/source/vendor/gopkg.in/yaml.v3/parserc.go
deleted file mode 100644
index 268558a0d6328a00db835512ca6d72369e2051d0..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/parserc.go
+++ /dev/null
@@ -1,1258 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
-	"bytes"
-)
-
-// The parser implements the following grammar:
-//
-// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// implicit_document    ::= block_node DOCUMENT-END*
-// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// block_node_or_indentless_sequence    ::=
-//                          ALIAS
-//                          | properties (block_content | indentless_block_sequence)?
-//                          | block_content
-//                          | indentless_block_sequence
-// block_node           ::= ALIAS
-//                          | properties block_content?
-//                          | block_content
-// flow_node            ::= ALIAS
-//                          | properties flow_content?
-//                          | flow_content
-// properties           ::= TAG ANCHOR? | ANCHOR TAG?
-// block_content        ::= block_collection | flow_collection | SCALAR
-// flow_content         ::= flow_collection | SCALAR
-// block_collection     ::= block_sequence | block_mapping
-// flow_collection      ::= flow_sequence | flow_mapping
-// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
-// block_mapping        ::= BLOCK-MAPPING_START
-//                          ((KEY block_node_or_indentless_sequence?)?
-//                          (VALUE block_node_or_indentless_sequence?)?)*
-//                          BLOCK-END
-// flow_sequence        ::= FLOW-SEQUENCE-START
-//                          (flow_sequence_entry FLOW-ENTRY)*
-//                          flow_sequence_entry?
-//                          FLOW-SEQUENCE-END
-// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// flow_mapping         ::= FLOW-MAPPING-START
-//                          (flow_mapping_entry FLOW-ENTRY)*
-//                          flow_mapping_entry?
-//                          FLOW-MAPPING-END
-// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-// Peek the next token in the token queue.
-func peek_token(parser *yaml_parser_t) *yaml_token_t {
-	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
-		token := &parser.tokens[parser.tokens_head]
-		yaml_parser_unfold_comments(parser, token)
-		return token
-	}
-	return nil
-}
-
-// yaml_parser_unfold_comments walks through the comments queue and joins all
-// comments behind the position of the provided token into the respective
-// top-level comment slices in the parser.
-func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
-	for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
-		comment := &parser.comments[parser.comments_head]
-		if len(comment.head) > 0 {
-			if token.typ == yaml_BLOCK_END_TOKEN {
-				// No heads on ends, so keep comment.head for a follow up token.
-				break
-			}
-			if len(parser.head_comment) > 0 {
-				parser.head_comment = append(parser.head_comment, '\n')
-			}
-			parser.head_comment = append(parser.head_comment, comment.head...)
-		}
-		if len(comment.foot) > 0 {
-			if len(parser.foot_comment) > 0 {
-				parser.foot_comment = append(parser.foot_comment, '\n')
-			}
-			parser.foot_comment = append(parser.foot_comment, comment.foot...)
-		}
-		if len(comment.line) > 0 {
-			if len(parser.line_comment) > 0 {
-				parser.line_comment = append(parser.line_comment, '\n')
-			}
-			parser.line_comment = append(parser.line_comment, comment.line...)
-		}
-		*comment = yaml_comment_t{}
-		parser.comments_head++
-	}
-}
-
-// Remove the next token from the queue (must be called after peek_token).
-func skip_token(parser *yaml_parser_t) {
-	parser.token_available = false
-	parser.tokens_parsed++
-	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
-	parser.tokens_head++
-}
-
-// Get the next event.
-func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
-	// Erase the event object.
-	*event = yaml_event_t{}
-
-	// No events after the end of the stream or error.
-	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
-		return true
-	}
-
-	// Generate the next event.
-	return yaml_parser_state_machine(parser, event)
-}
-
-// Set parser error.
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
-	parser.error = yaml_PARSER_ERROR
-	parser.problem = problem
-	parser.problem_mark = problem_mark
-	return false
-}
-
-func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
-	parser.error = yaml_PARSER_ERROR
-	parser.context = context
-	parser.context_mark = context_mark
-	parser.problem = problem
-	parser.problem_mark = problem_mark
-	return false
-}
-
-// State dispatcher.
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
-	//trace("yaml_parser_state_machine", "state:", parser.state.String())
-
-	switch parser.state {
-	case yaml_PARSE_STREAM_START_STATE:
-		return yaml_parser_parse_stream_start(parser, event)
-
-	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
-		return yaml_parser_parse_document_start(parser, event, true)
-
-	case yaml_PARSE_DOCUMENT_START_STATE:
-		return yaml_parser_parse_document_start(parser, event, false)
-
-	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
-		return yaml_parser_parse_document_content(parser, event)
-
-	case yaml_PARSE_DOCUMENT_END_STATE:
-		return yaml_parser_parse_document_end(parser, event)
-
-	case yaml_PARSE_BLOCK_NODE_STATE:
-		return yaml_parser_parse_node(parser, event, true, false)
-
-	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
-		return yaml_parser_parse_node(parser, event, true, true)
-
-	case yaml_PARSE_FLOW_NODE_STATE:
-		return yaml_parser_parse_node(parser, event, false, false)
-
-	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
-		return yaml_parser_parse_block_sequence_entry(parser, event, true)
-
-	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
-		return yaml_parser_parse_block_sequence_entry(parser, event, false)
-
-	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
-		return yaml_parser_parse_indentless_sequence_entry(parser, event)
-
-	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
-		return yaml_parser_parse_block_mapping_key(parser, event, true)
-
-	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
-		return yaml_parser_parse_block_mapping_key(parser, event, false)
-
-	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
-		return yaml_parser_parse_block_mapping_value(parser, event)
-
-	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
-		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
-
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
-		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
-
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
-		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
-
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
-		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
-
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
-		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
-
-	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
-		return yaml_parser_parse_flow_mapping_key(parser, event, true)
-
-	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
-		return yaml_parser_parse_flow_mapping_key(parser, event, false)
-
-	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
-		return yaml_parser_parse_flow_mapping_value(parser, event, false)
-
-	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
-		return yaml_parser_parse_flow_mapping_value(parser, event, true)
-
-	default:
-		panic("invalid parser state")
-	}
-}
-
-// Parse the production:
-// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
-//              ************
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if token.typ != yaml_STREAM_START_TOKEN {
-		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
-	}
-	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
-	*event = yaml_event_t{
-		typ:        yaml_STREAM_START_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.end_mark,
-		encoding:   token.encoding,
-	}
-	skip_token(parser)
-	return true
-}
-
-// Parse the productions:
-// implicit_document    ::= block_node DOCUMENT-END*
-//                          *
-// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//                          *************************
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	// Parse extra document end indicators.
-	if !implicit {
-		for token.typ == yaml_DOCUMENT_END_TOKEN {
-			skip_token(parser)
-			token = peek_token(parser)
-			if token == nil {
-				return false
-			}
-		}
-	}
-
-	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
-		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
-		token.typ != yaml_DOCUMENT_START_TOKEN &&
-		token.typ != yaml_STREAM_END_TOKEN {
-		// Parse an implicit document.
-		if !yaml_parser_process_directives(parser, nil, nil) {
-			return false
-		}
-		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
-		parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
-		var head_comment []byte
-		if len(parser.head_comment) > 0 {
-			// [Go] Scan the header comment backwards, and if an empty line is found, break
-			//      the header so the part before the last empty line goes into the
-			//      document header, while the bottom of it goes into a follow-up event.
-			for i := len(parser.head_comment) - 1; i > 0; i-- {
-				if parser.head_comment[i] == '\n' {
-					if i == len(parser.head_comment)-1 {
-						head_comment = parser.head_comment[:i]
-						parser.head_comment = parser.head_comment[i+1:]
-						break
-					} else if parser.head_comment[i-1] == '\n' {
-						head_comment = parser.head_comment[:i-1]
-						parser.head_comment = parser.head_comment[i+1:]
-						break
-					}
-				}
-			}
-		}
-
-		*event = yaml_event_t{
-			typ:        yaml_DOCUMENT_START_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-
-			head_comment: head_comment,
-		}
-
-	} else if token.typ != yaml_STREAM_END_TOKEN {
-		// Parse an explicit document.
-		var version_directive *yaml_version_directive_t
-		var tag_directives []yaml_tag_directive_t
-		start_mark := token.start_mark
-		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
-			return false
-		}
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_DOCUMENT_START_TOKEN {
-			yaml_parser_set_parser_error(parser,
-				"did not find expected <document start>", token.start_mark)
-			return false
-		}
-		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
-		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
-		end_mark := token.end_mark
-
-		*event = yaml_event_t{
-			typ:               yaml_DOCUMENT_START_EVENT,
-			start_mark:        start_mark,
-			end_mark:          end_mark,
-			version_directive: version_directive,
-			tag_directives:    tag_directives,
-			implicit:          false,
-		}
-		skip_token(parser)
-
-	} else {
-		// Parse the stream end.
-		parser.state = yaml_PARSE_END_STATE
-		*event = yaml_event_t{
-			typ:        yaml_STREAM_END_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-		skip_token(parser)
-	}
-
-	return true
-}
-
-// Parse the productions:
-// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//                                                    ***********
-//
-func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
-		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
-		token.typ == yaml_DOCUMENT_START_TOKEN ||
-		token.typ == yaml_DOCUMENT_END_TOKEN ||
-		token.typ == yaml_STREAM_END_TOKEN {
-		parser.state = parser.states[len(parser.states)-1]
-		parser.states = parser.states[:len(parser.states)-1]
-		return yaml_parser_process_empty_scalar(parser, event,
-			token.start_mark)
-	}
-	return yaml_parser_parse_node(parser, event, true, false)
-}
-
-// Parse the productions:
-// implicit_document    ::= block_node DOCUMENT-END*
-//                                     *************
-// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//
-func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	start_mark := token.start_mark
-	end_mark := token.start_mark
-
-	implicit := true
-	if token.typ == yaml_DOCUMENT_END_TOKEN {
-		end_mark = token.end_mark
-		skip_token(parser)
-		implicit = false
-	}
-
-	parser.tag_directives = parser.tag_directives[:0]
-
-	parser.state = yaml_PARSE_DOCUMENT_START_STATE
-	*event = yaml_event_t{
-		typ:        yaml_DOCUMENT_END_EVENT,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-		implicit:   implicit,
-	}
-	yaml_parser_set_event_comments(parser, event)
-	if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
-		event.foot_comment = event.head_comment
-		event.head_comment = nil
-	}
-	return true
-}
-
-func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
-	event.head_comment = parser.head_comment
-	event.line_comment = parser.line_comment
-	event.foot_comment = parser.foot_comment
-	parser.head_comment = nil
-	parser.line_comment = nil
-	parser.foot_comment = nil
-	parser.tail_comment = nil
-	parser.stem_comment = nil
-}
-
-// Parse the productions:
-// block_node_or_indentless_sequence    ::=
-//                          ALIAS
-//                          *****
-//                          | properties (block_content | indentless_block_sequence)?
-//                            **********  *
-//                          | block_content | indentless_block_sequence
-//                            *
-// block_node           ::= ALIAS
-//                          *****
-//                          | properties block_content?
-//                            ********** *
-//                          | block_content
-//                            *
-// flow_node            ::= ALIAS
-//                          *****
-//                          | properties flow_content?
-//                            ********** *
-//                          | flow_content
-//                            *
-// properties           ::= TAG ANCHOR? | ANCHOR TAG?
-//                          *************************
-// block_content        ::= block_collection | flow_collection | SCALAR
-//                                                               ******
-// flow_content         ::= flow_collection | SCALAR
-//                                            ******
-func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
-	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	if token.typ == yaml_ALIAS_TOKEN {
-		parser.state = parser.states[len(parser.states)-1]
-		parser.states = parser.states[:len(parser.states)-1]
-		*event = yaml_event_t{
-			typ:        yaml_ALIAS_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-			anchor:     token.value,
-		}
-		yaml_parser_set_event_comments(parser, event)
-		skip_token(parser)
-		return true
-	}
-
-	start_mark := token.start_mark
-	end_mark := token.start_mark
-
-	var tag_token bool
-	var tag_handle, tag_suffix, anchor []byte
-	var tag_mark yaml_mark_t
-	if token.typ == yaml_ANCHOR_TOKEN {
-		anchor = token.value
-		start_mark = token.start_mark
-		end_mark = token.end_mark
-		skip_token(parser)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ == yaml_TAG_TOKEN {
-			tag_token = true
-			tag_handle = token.value
-			tag_suffix = token.suffix
-			tag_mark = token.start_mark
-			end_mark = token.end_mark
-			skip_token(parser)
-			token = peek_token(parser)
-			if token == nil {
-				return false
-			}
-		}
-	} else if token.typ == yaml_TAG_TOKEN {
-		tag_token = true
-		tag_handle = token.value
-		tag_suffix = token.suffix
-		start_mark = token.start_mark
-		tag_mark = token.start_mark
-		end_mark = token.end_mark
-		skip_token(parser)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ == yaml_ANCHOR_TOKEN {
-			anchor = token.value
-			end_mark = token.end_mark
-			skip_token(parser)
-			token = peek_token(parser)
-			if token == nil {
-				return false
-			}
-		}
-	}
-
-	var tag []byte
-	if tag_token {
-		if len(tag_handle) == 0 {
-			tag = tag_suffix
-			tag_suffix = nil
-		} else {
-			for i := range parser.tag_directives {
-				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
-					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
-					tag = append(tag, tag_suffix...)
-					break
-				}
-			}
-			if len(tag) == 0 {
-				yaml_parser_set_parser_error_context(parser,
-					"while parsing a node", start_mark,
-					"found undefined tag handle", tag_mark)
-				return false
-			}
-		}
-	}
-
-	implicit := len(tag) == 0
-	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
-		end_mark = token.end_mark
-		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
-		*event = yaml_event_t{
-			typ:        yaml_SEQUENCE_START_EVENT,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			anchor:     anchor,
-			tag:        tag,
-			implicit:   implicit,
-			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
-		}
-		return true
-	}
-	if token.typ == yaml_SCALAR_TOKEN {
-		var plain_implicit, quoted_implicit bool
-		end_mark = token.end_mark
-		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
-			plain_implicit = true
-		} else if len(tag) == 0 {
-			quoted_implicit = true
-		}
-		parser.state = parser.states[len(parser.states)-1]
-		parser.states = parser.states[:len(parser.states)-1]
-
-		*event = yaml_event_t{
-			typ:             yaml_SCALAR_EVENT,
-			start_mark:      start_mark,
-			end_mark:        end_mark,
-			anchor:          anchor,
-			tag:             tag,
-			value:           token.value,
-			implicit:        plain_implicit,
-			quoted_implicit: quoted_implicit,
-			style:           yaml_style_t(token.style),
-		}
-		yaml_parser_set_event_comments(parser, event)
-		skip_token(parser)
-		return true
-	}
-	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
-		// [Go] Some of the events below can be merged as they differ only in style.
-		end_mark = token.end_mark
-		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
-		*event = yaml_event_t{
-			typ:        yaml_SEQUENCE_START_EVENT,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			anchor:     anchor,
-			tag:        tag,
-			implicit:   implicit,
-			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
-		}
-		yaml_parser_set_event_comments(parser, event)
-		return true
-	}
-	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
-		end_mark = token.end_mark
-		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
-		*event = yaml_event_t{
-			typ:        yaml_MAPPING_START_EVENT,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			anchor:     anchor,
-			tag:        tag,
-			implicit:   implicit,
-			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
-		}
-		yaml_parser_set_event_comments(parser, event)
-		return true
-	}
-	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
-		end_mark = token.end_mark
-		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
-		*event = yaml_event_t{
-			typ:        yaml_SEQUENCE_START_EVENT,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			anchor:     anchor,
-			tag:        tag,
-			implicit:   implicit,
-			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
-		}
-		if parser.stem_comment != nil {
-			event.head_comment = parser.stem_comment
-			parser.stem_comment = nil
-		}
-		return true
-	}
-	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
-		end_mark = token.end_mark
-		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
-		*event = yaml_event_t{
-			typ:        yaml_MAPPING_START_EVENT,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			anchor:     anchor,
-			tag:        tag,
-			implicit:   implicit,
-			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
-		}
-		if parser.stem_comment != nil {
-			event.head_comment = parser.stem_comment
-			parser.stem_comment = nil
-		}
-		return true
-	}
-	if len(anchor) > 0 || len(tag) > 0 {
-		parser.state = parser.states[len(parser.states)-1]
-		parser.states = parser.states[:len(parser.states)-1]
-
-		*event = yaml_event_t{
-			typ:             yaml_SCALAR_EVENT,
-			start_mark:      start_mark,
-			end_mark:        end_mark,
-			anchor:          anchor,
-			tag:             tag,
-			implicit:        implicit,
-			quoted_implicit: false,
-			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
-		}
-		return true
-	}
-
-	context := "while parsing a flow node"
-	if block {
-		context = "while parsing a block node"
-	}
-	yaml_parser_set_parser_error_context(parser, context, start_mark,
-		"did not find expected node content", token.start_mark)
-	return false
-}
-
-// Parse the productions:
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-//                    ********************  *********** *             *********
-//
-func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
-	if first {
-		token := peek_token(parser)
-		if token == nil {
-			return false
-		}
-		parser.marks = append(parser.marks, token.start_mark)
-		skip_token(parser)
-	}
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
-		mark := token.end_mark
-		prior_head_len := len(parser.head_comment)
-		skip_token(parser)
-		yaml_parser_split_stem_comment(parser, prior_head_len)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
-			return yaml_parser_parse_node(parser, event, true, false)
-		} else {
-			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
-			return yaml_parser_process_empty_scalar(parser, event, mark)
-		}
-	}
-	if token.typ == yaml_BLOCK_END_TOKEN {
-		parser.state = parser.states[len(parser.states)-1]
-		parser.states = parser.states[:len(parser.states)-1]
-		parser.marks = parser.marks[:len(parser.marks)-1]
-
-		*event = yaml_event_t{
-			typ:        yaml_SEQUENCE_END_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-
-		skip_token(parser)
-		return true
-	}
-
-	context_mark := parser.marks[len(parser.marks)-1]
-	parser.marks = parser.marks[:len(parser.marks)-1]
-	return yaml_parser_set_parser_error_context(parser,
-		"while parsing a block collection", context_mark,
-		"did not find expected '-' indicator", token.start_mark)
-}
-
-// Parse the productions:
-// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
-//                           *********** *
-func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
-		mark := token.end_mark
-		prior_head_len := len(parser.head_comment)
-		skip_token(parser)
-		yaml_parser_split_stem_comment(parser, prior_head_len)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
-			token.typ != yaml_KEY_TOKEN &&
-			token.typ != yaml_VALUE_TOKEN &&
-			token.typ != yaml_BLOCK_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
-			return yaml_parser_parse_node(parser, event, true, false)
-		}
-		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
-		return yaml_parser_process_empty_scalar(parser, event, mark)
-	}
-	parser.state = parser.states[len(parser.states)-1]
-	parser.states = parser.states[:len(parser.states)-1]
-
-	*event = yaml_event_t{
-		typ:        yaml_SEQUENCE_END_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
-	}
-	return true
-}
-
-// Split stem comment from head comment.
-//
-// When a sequence or map is found under a sequence entry, the former head comment
-// is assigned to the underlying sequence or map as a whole, not the individual
-// sequence or map entry as would be expected otherwise. To handle this case the
-// previous head comment is moved aside as the stem comment.
-func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
-	if stem_len == 0 {
-		return
-	}
-
-	token := peek_token(parser)
-	if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
-		return
-	}
-
-	parser.stem_comment = parser.head_comment[:stem_len]
-	if len(parser.head_comment) == stem_len {
-		parser.head_comment = nil
-	} else {
-		// Copy suffix to prevent very strange bugs if someone ever appends
-		// further bytes to the prefix in the stem_comment slice above.
-		parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
-	}
-}
-
-// Parse the productions:
-// block_mapping        ::= BLOCK-MAPPING_START
-//                          *******************
-//                          ((KEY block_node_or_indentless_sequence?)?
-//                            *** *
-//                          (VALUE block_node_or_indentless_sequence?)?)*
-//
-//                          BLOCK-END
-//                          *********
-//
-func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
-	if first {
-		token := peek_token(parser)
-		if token == nil {
-			return false
-		}
-		parser.marks = append(parser.marks, token.start_mark)
-		skip_token(parser)
-	}
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	// [Go] A tail comment was left from the prior mapping value processed. Emit an event
-	//      as it needs to be processed with that value and not the following key.
-	if len(parser.tail_comment) > 0 {
-		*event = yaml_event_t{
-			typ:          yaml_TAIL_COMMENT_EVENT,
-			start_mark:   token.start_mark,
-			end_mark:     token.end_mark,
-			foot_comment: parser.tail_comment,
-		}
-		parser.tail_comment = nil
-		return true
-	}
-
-	if token.typ == yaml_KEY_TOKEN {
-		mark := token.end_mark
-		skip_token(parser)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_KEY_TOKEN &&
-			token.typ != yaml_VALUE_TOKEN &&
-			token.typ != yaml_BLOCK_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
-			return yaml_parser_parse_node(parser, event, true, true)
-		} else {
-			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
-			return yaml_parser_process_empty_scalar(parser, event, mark)
-		}
-	} else if token.typ == yaml_BLOCK_END_TOKEN {
-		parser.state = parser.states[len(parser.states)-1]
-		parser.states = parser.states[:len(parser.states)-1]
-		parser.marks = parser.marks[:len(parser.marks)-1]
-		*event = yaml_event_t{
-			typ:        yaml_MAPPING_END_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-		yaml_parser_set_event_comments(parser, event)
-		skip_token(parser)
-		return true
-	}
-
-	context_mark := parser.marks[len(parser.marks)-1]
-	parser.marks = parser.marks[:len(parser.marks)-1]
-	return yaml_parser_set_parser_error_context(parser,
-		"while parsing a block mapping", context_mark,
-		"did not find expected key", token.start_mark)
-}
-
-// Parse the productions:
-// block_mapping        ::= BLOCK-MAPPING_START
-//
-//                          ((KEY block_node_or_indentless_sequence?)?
-//
-//                          (VALUE block_node_or_indentless_sequence?)?)*
-//                           ***** *
-//                          BLOCK-END
-//
-//
-func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if token.typ == yaml_VALUE_TOKEN {
-		mark := token.end_mark
-		skip_token(parser)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_KEY_TOKEN &&
-			token.typ != yaml_VALUE_TOKEN &&
-			token.typ != yaml_BLOCK_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
-			return yaml_parser_parse_node(parser, event, true, true)
-		}
-		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
-		return yaml_parser_process_empty_scalar(parser, event, mark)
-	}
-	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
-	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence        ::= FLOW-SEQUENCE-START
-//                          *******************
-//                          (flow_sequence_entry FLOW-ENTRY)*
-//                           *                   **********
-//                          flow_sequence_entry?
-//                          *
-//                          FLOW-SEQUENCE-END
-//                          *****************
-// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-//                          *
-//
-func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
-	if first {
-		token := peek_token(parser)
-		if token == nil {
-			return false
-		}
-		parser.marks = append(parser.marks, token.start_mark)
-		skip_token(parser)
-	}
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
-		if !first {
-			if token.typ == yaml_FLOW_ENTRY_TOKEN {
-				skip_token(parser)
-				token = peek_token(parser)
-				if token == nil {
-					return false
-				}
-			} else {
-				context_mark := parser.marks[len(parser.marks)-1]
-				parser.marks = parser.marks[:len(parser.marks)-1]
-				return yaml_parser_set_parser_error_context(parser,
-					"while parsing a flow sequence", context_mark,
-					"did not find expected ',' or ']'", token.start_mark)
-			}
-		}
-
-		if token.typ == yaml_KEY_TOKEN {
-			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
-			*event = yaml_event_t{
-				typ:        yaml_MAPPING_START_EVENT,
-				start_mark: token.start_mark,
-				end_mark:   token.end_mark,
-				implicit:   true,
-				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
-			}
-			skip_token(parser)
-			return true
-		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
-			return yaml_parser_parse_node(parser, event, false, false)
-		}
-	}
-
-	parser.state = parser.states[len(parser.states)-1]
-	parser.states = parser.states[:len(parser.states)-1]
-	parser.marks = parser.marks[:len(parser.marks)-1]
-
-	*event = yaml_event_t{
-		typ:        yaml_SEQUENCE_END_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.end_mark,
-	}
-	yaml_parser_set_event_comments(parser, event)
-
-	skip_token(parser)
-	return true
-}
-
-//
-// Parse the productions:
-// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-//                                      *** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if token.typ != yaml_VALUE_TOKEN &&
-		token.typ != yaml_FLOW_ENTRY_TOKEN &&
-		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
-		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
-		return yaml_parser_parse_node(parser, event, false, false)
-	}
-	mark := token.end_mark
-	skip_token(parser)
-	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
-	return yaml_parser_process_empty_scalar(parser, event, mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-//                                                      ***** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if token.typ == yaml_VALUE_TOKEN {
-		skip_token(parser)
-		token := peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
-			return yaml_parser_parse_node(parser, event, false, false)
-		}
-	}
-	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
-	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-//                                                                      *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
-	*event = yaml_event_t{
-		typ:        yaml_MAPPING_END_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
-	}
-	return true
-}
-
-// Parse the productions:
-// flow_mapping         ::= FLOW-MAPPING-START
-//                          ******************
-//                          (flow_mapping_entry FLOW-ENTRY)*
-//                           *                  **********
-//                          flow_mapping_entry?
-//                          ******************
-//                          FLOW-MAPPING-END
-//                          ****************
-// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-//                          *           *** *
-//
-func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
-	if first {
-		token := peek_token(parser)
-		parser.marks = append(parser.marks, token.start_mark)
-		skip_token(parser)
-	}
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
-		if !first {
-			if token.typ == yaml_FLOW_ENTRY_TOKEN {
-				skip_token(parser)
-				token = peek_token(parser)
-				if token == nil {
-					return false
-				}
-			} else {
-				context_mark := parser.marks[len(parser.marks)-1]
-				parser.marks = parser.marks[:len(parser.marks)-1]
-				return yaml_parser_set_parser_error_context(parser,
-					"while parsing a flow mapping", context_mark,
-					"did not find expected ',' or '}'", token.start_mark)
-			}
-		}
-
-		if token.typ == yaml_KEY_TOKEN {
-			skip_token(parser)
-			token = peek_token(parser)
-			if token == nil {
-				return false
-			}
-			if token.typ != yaml_VALUE_TOKEN &&
-				token.typ != yaml_FLOW_ENTRY_TOKEN &&
-				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
-				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
-				return yaml_parser_parse_node(parser, event, false, false)
-			} else {
-				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
-				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-			}
-		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
-			return yaml_parser_parse_node(parser, event, false, false)
-		}
-	}
-
-	parser.state = parser.states[len(parser.states)-1]
-	parser.states = parser.states[:len(parser.states)-1]
-	parser.marks = parser.marks[:len(parser.marks)-1]
-	*event = yaml_event_t{
-		typ:        yaml_MAPPING_END_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.end_mark,
-	}
-	yaml_parser_set_event_comments(parser, event)
-	skip_token(parser)
-	return true
-}
-
-// Parse the productions:
-// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-//                                   *                  ***** *
-//
-func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-	if empty {
-		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
-		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-	}
-	if token.typ == yaml_VALUE_TOKEN {
-		skip_token(parser)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
-			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
-			return yaml_parser_parse_node(parser, event, false, false)
-		}
-	}
-	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
-	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Generate an empty scalar event.
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
-	*event = yaml_event_t{
-		typ:        yaml_SCALAR_EVENT,
-		start_mark: mark,
-		end_mark:   mark,
-		value:      nil, // Empty
-		implicit:   true,
-		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
-	}
-	return true
-}
-
-var default_tag_directives = []yaml_tag_directive_t{
-	{[]byte("!"), []byte("!")},
-	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
-}
-
-// Parse directives.
-func yaml_parser_process_directives(parser *yaml_parser_t,
-	version_directive_ref **yaml_version_directive_t,
-	tag_directives_ref *[]yaml_tag_directive_t) bool {
-
-	var version_directive *yaml_version_directive_t
-	var tag_directives []yaml_tag_directive_t
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
-		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
-			if version_directive != nil {
-				yaml_parser_set_parser_error(parser,
-					"found duplicate %YAML directive", token.start_mark)
-				return false
-			}
-			if token.major != 1 || token.minor != 1 {
-				yaml_parser_set_parser_error(parser,
-					"found incompatible YAML document", token.start_mark)
-				return false
-			}
-			version_directive = &yaml_version_directive_t{
-				major: token.major,
-				minor: token.minor,
-			}
-		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
-			value := yaml_tag_directive_t{
-				handle: token.value,
-				prefix: token.prefix,
-			}
-			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
-				return false
-			}
-			tag_directives = append(tag_directives, value)
-		}
-
-		skip_token(parser)
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-	}
-
-	for i := range default_tag_directives {
-		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
-			return false
-		}
-	}
-
-	if version_directive_ref != nil {
-		*version_directive_ref = version_directive
-	}
-	if tag_directives_ref != nil {
-		*tag_directives_ref = tag_directives
-	}
-	return true
-}
-
-// Append a tag directive to the directives stack.
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
-	for i := range parser.tag_directives {
-		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
-			if allow_duplicates {
-				return true
-			}
-			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
-		}
-	}
-
-	// [Go] I suspect the copy is unnecessary. This was likely done
-	// because there was no way to track ownership of the data.
-	value_copy := yaml_tag_directive_t{
-		handle: make([]byte, len(value.handle)),
-		prefix: make([]byte, len(value.prefix)),
-	}
-	copy(value_copy.handle, value.handle)
-	copy(value_copy.prefix, value.prefix)
-	parser.tag_directives = append(parser.tag_directives, value_copy)
-	return true
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/readerc.go b/application/source/vendor/gopkg.in/yaml.v3/readerc.go
deleted file mode 100644
index b7de0a89c462af605f889bc46ce165e5d4238add..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/readerc.go
+++ /dev/null
@@ -1,434 +0,0 @@
-// 
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-// 
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-// 
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-// 
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
-	"io"
-)
-
-// Set the reader error and return false.
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
-	parser.error = yaml_READER_ERROR
-	parser.problem = problem
-	parser.problem_offset = offset
-	parser.problem_value = value
-	return false
-}
-
-// Byte order marks.
-const (
-	bom_UTF8    = "\xef\xbb\xbf"
-	bom_UTF16LE = "\xff\xfe"
-	bom_UTF16BE = "\xfe\xff"
-)
-
-// Determine the input stream encoding by checking the BOM symbol. If no BOM is
-// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
-func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
-	// Ensure that we had enough bytes in the raw buffer.
-	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
-		if !yaml_parser_update_raw_buffer(parser) {
-			return false
-		}
-	}
-
-	// Determine the encoding.
-	buf := parser.raw_buffer
-	pos := parser.raw_buffer_pos
-	avail := len(buf) - pos
-	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
-		parser.encoding = yaml_UTF16LE_ENCODING
-		parser.raw_buffer_pos += 2
-		parser.offset += 2
-	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
-		parser.encoding = yaml_UTF16BE_ENCODING
-		parser.raw_buffer_pos += 2
-		parser.offset += 2
-	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
-		parser.encoding = yaml_UTF8_ENCODING
-		parser.raw_buffer_pos += 3
-		parser.offset += 3
-	} else {
-		parser.encoding = yaml_UTF8_ENCODING
-	}
-	return true
-}
-
-// Update the raw buffer.
-func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
-	size_read := 0
-
-	// Return if the raw buffer is full.
-	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
-		return true
-	}
-
-	// Return on EOF.
-	if parser.eof {
-		return true
-	}
-
-	// Move the remaining bytes in the raw buffer to the beginning.
-	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
-		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
-	}
-	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
-	parser.raw_buffer_pos = 0
-
-	// Call the read handler to fill the buffer.
-	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
-	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
-	if err == io.EOF {
-		parser.eof = true
-	} else if err != nil {
-		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
-	}
-	return true
-}
-
-// Ensure that the buffer contains at least `length` characters.
-// Return true on success, false on failure.
-//
-// The length is supposed to be significantly less than the buffer size.
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
-	if parser.read_handler == nil {
-		panic("read handler must be set")
-	}
-
-	// [Go] This function was changed to guarantee the requested length size at EOF.
-	// The fact we need to do this is pretty awful, but the description above implies
-	// that to be the case, and there are tests that rely on it.
-
-	// If the EOF flag is set and the raw buffer is empty, do nothing.
-	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
-		// [Go] ACTUALLY! Read the documentation of this function above.
-		// This is just broken. To return true, we need to have the
-		// given length in the buffer. Not doing that means every single
-		// check that calls this function to make sure the buffer has a
-		// given length ends up panicking (in Go) or, as in C, accessing invalid memory.
-		//return true
-	}
-
-	// Return if the buffer contains enough characters.
-	if parser.unread >= length {
-		return true
-	}
-
-	// Determine the input encoding if it is not known yet.
-	if parser.encoding == yaml_ANY_ENCODING {
-		if !yaml_parser_determine_encoding(parser) {
-			return false
-		}
-	}
-
-	// Move the unread characters to the beginning of the buffer.
-	buffer_len := len(parser.buffer)
-	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
-		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
-		buffer_len -= parser.buffer_pos
-		parser.buffer_pos = 0
-	} else if parser.buffer_pos == buffer_len {
-		buffer_len = 0
-		parser.buffer_pos = 0
-	}
-
-	// Open the whole buffer for writing, and cut it before returning.
-	parser.buffer = parser.buffer[:cap(parser.buffer)]
-
-	// Fill the buffer until it has enough characters.
-	first := true
-	for parser.unread < length {
-
-		// Fill the raw buffer if necessary.
-		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
-			if !yaml_parser_update_raw_buffer(parser) {
-				parser.buffer = parser.buffer[:buffer_len]
-				return false
-			}
-		}
-		first = false
-
-		// Decode the raw buffer.
-	inner:
-		for parser.raw_buffer_pos != len(parser.raw_buffer) {
-			var value rune
-			var width int
-
-			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
-
-			// Decode the next character.
-			switch parser.encoding {
-			case yaml_UTF8_ENCODING:
-				// Decode a UTF-8 character.  Check RFC 3629
-				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
-				//
-				// The following table (taken from the RFC) is used for
-				// decoding.
-				//
-				//    Char. number range |        UTF-8 octet sequence
-				//      (hexadecimal)    |              (binary)
-				//   --------------------+------------------------------------
-				//   0000 0000-0000 007F | 0xxxxxxx
-				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
-				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
-				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
-				//
-				// Additionally, the characters in the range 0xD800-0xDFFF
-				// are prohibited as they are reserved for use with UTF-16
-				// surrogate pairs.
-
-				// Determine the length of the UTF-8 sequence.
-				octet := parser.raw_buffer[parser.raw_buffer_pos]
-				switch {
-				case octet&0x80 == 0x00:
-					width = 1
-				case octet&0xE0 == 0xC0:
-					width = 2
-				case octet&0xF0 == 0xE0:
-					width = 3
-				case octet&0xF8 == 0xF0:
-					width = 4
-				default:
-					// The leading octet is invalid.
-					return yaml_parser_set_reader_error(parser,
-						"invalid leading UTF-8 octet",
-						parser.offset, int(octet))
-				}
-
-				// Check if the raw buffer contains an incomplete character.
-				if width > raw_unread {
-					if parser.eof {
-						return yaml_parser_set_reader_error(parser,
-							"incomplete UTF-8 octet sequence",
-							parser.offset, -1)
-					}
-					break inner
-				}
-
-				// Decode the leading octet.
-				switch {
-				case octet&0x80 == 0x00:
-					value = rune(octet & 0x7F)
-				case octet&0xE0 == 0xC0:
-					value = rune(octet & 0x1F)
-				case octet&0xF0 == 0xE0:
-					value = rune(octet & 0x0F)
-				case octet&0xF8 == 0xF0:
-					value = rune(octet & 0x07)
-				default:
-					value = 0
-				}
-
-				// Check and decode the trailing octets.
-				for k := 1; k < width; k++ {
-					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
-
-					// Check if the octet is valid.
-					if (octet & 0xC0) != 0x80 {
-						return yaml_parser_set_reader_error(parser,
-							"invalid trailing UTF-8 octet",
-							parser.offset+k, int(octet))
-					}
-
-					// Decode the octet.
-					value = (value << 6) + rune(octet&0x3F)
-				}
-
-				// Check the length of the sequence against the value.
-				switch {
-				case width == 1:
-				case width == 2 && value >= 0x80:
-				case width == 3 && value >= 0x800:
-				case width == 4 && value >= 0x10000:
-				default:
-					return yaml_parser_set_reader_error(parser,
-						"invalid length of a UTF-8 sequence",
-						parser.offset, -1)
-				}
-
-				// Check the range of the value.
-				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
-					return yaml_parser_set_reader_error(parser,
-						"invalid Unicode character",
-						parser.offset, int(value))
-				}
-
-			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
-				var low, high int
-				if parser.encoding == yaml_UTF16LE_ENCODING {
-					low, high = 0, 1
-				} else {
-					low, high = 1, 0
-				}
-
-				// The UTF-16 encoding is not as simple as one might
-				// naively think.  Check RFC 2781
-				// (http://www.ietf.org/rfc/rfc2781.txt).
-				//
-				// Normally, two subsequent bytes describe a Unicode
-				// character.  However a special technique (called a
-				// surrogate pair) is used for specifying character
-				// values larger than 0xFFFF.
-				//
-				// A surrogate pair consists of two pseudo-characters:
-				//      high surrogate area (0xD800-0xDBFF)
-				//      low surrogate area (0xDC00-0xDFFF)
-				//
-				// The following formulas are used for decoding
-				// and encoding characters using surrogate pairs:
-				//
-				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
-				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
-				//  W1 = 110110yyyyyyyyyy
-				//  W2 = 110111xxxxxxxxxx
-				//
-				// where U is the character value, W1 is the high surrogate
-				// area, W2 is the low surrogate area.
-
-				// Check for incomplete UTF-16 character.
-				if raw_unread < 2 {
-					if parser.eof {
-						return yaml_parser_set_reader_error(parser,
-							"incomplete UTF-16 character",
-							parser.offset, -1)
-					}
-					break inner
-				}
-
-				// Get the character.
-				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
-					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
-
-				// Check for unexpected low surrogate area.
-				if value&0xFC00 == 0xDC00 {
-					return yaml_parser_set_reader_error(parser,
-						"unexpected low surrogate area",
-						parser.offset, int(value))
-				}
-
-				// Check for a high surrogate area.
-				if value&0xFC00 == 0xD800 {
-					width = 4
-
-					// Check for incomplete surrogate pair.
-					if raw_unread < 4 {
-						if parser.eof {
-							return yaml_parser_set_reader_error(parser,
-								"incomplete UTF-16 surrogate pair",
-								parser.offset, -1)
-						}
-						break inner
-					}
-
-					// Get the next character.
-					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
-						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
-
-					// Check for a low surrogate area.
-					if value2&0xFC00 != 0xDC00 {
-						return yaml_parser_set_reader_error(parser,
-							"expected low surrogate area",
-							parser.offset+2, int(value2))
-					}
-
-					// Generate the value of the surrogate pair.
-					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
-				} else {
-					width = 2
-				}
-
-			default:
-				panic("impossible")
-			}
-
-			// Check if the character is in the allowed range:
-			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
-			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
-			//      | [#x10000-#x10FFFF]                        (32 bit)
-			switch {
-			case value == 0x09:
-			case value == 0x0A:
-			case value == 0x0D:
-			case value >= 0x20 && value <= 0x7E:
-			case value == 0x85:
-			case value >= 0xA0 && value <= 0xD7FF:
-			case value >= 0xE000 && value <= 0xFFFD:
-			case value >= 0x10000 && value <= 0x10FFFF:
-			default:
-				return yaml_parser_set_reader_error(parser,
-					"control characters are not allowed",
-					parser.offset, int(value))
-			}
-
-			// Move the raw pointers.
-			parser.raw_buffer_pos += width
-			parser.offset += width
-
-			// Finally put the character into the buffer.
-			if value <= 0x7F {
-				// 0000 0000-0000 007F . 0xxxxxxx
-				parser.buffer[buffer_len+0] = byte(value)
-				buffer_len += 1
-			} else if value <= 0x7FF {
-				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
-				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
-				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
-				buffer_len += 2
-			} else if value <= 0xFFFF {
-				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
-				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
-				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
-				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
-				buffer_len += 3
-			} else {
-				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
-				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
-				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
-				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
-				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
-				buffer_len += 4
-			}
-
-			parser.unread++
-		}
-
-		// On EOF, put NUL into the buffer and return.
-		if parser.eof {
-			parser.buffer[buffer_len] = 0
-			buffer_len++
-			parser.unread++
-			break
-		}
-	}
-	// [Go] Read the documentation of this function above. To return true,
-	// we need to have the given length in the buffer. Not doing that means
-	// every single check that calls this function to make sure the buffer
-	// has a given length ends up panicking (in Go) or, as in C, accessing
-	// invalid memory. This happens here due to the EOF above breaking early.
-	for buffer_len < length {
-		parser.buffer[buffer_len] = 0
-		buffer_len++
-	}
-	parser.buffer = parser.buffer[:buffer_len]
-	return true
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/resolve.go b/application/source/vendor/gopkg.in/yaml.v3/resolve.go
deleted file mode 100644
index 64ae888057a5aa24c5a3a6ca0fcb08a06269e3ad..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/resolve.go
+++ /dev/null
@@ -1,326 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
-	"encoding/base64"
-	"math"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-)
-
-type resolveMapItem struct {
-	value interface{}
-	tag   string
-}
-
-var resolveTable = make([]byte, 256)
-var resolveMap = make(map[string]resolveMapItem)
-
-func init() {
-	t := resolveTable
-	t[int('+')] = 'S' // Sign
-	t[int('-')] = 'S'
-	for _, c := range "0123456789" {
-		t[int(c)] = 'D' // Digit
-	}
-	for _, c := range "yYnNtTfFoO~" {
-		t[int(c)] = 'M' // In map
-	}
-	t[int('.')] = '.' // Float (potentially in map)
-
-	var resolveMapList = []struct {
-		v   interface{}
-		tag string
-		l   []string
-	}{
-		{true, boolTag, []string{"true", "True", "TRUE"}},
-		{false, boolTag, []string{"false", "False", "FALSE"}},
-		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
-		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
-		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
-		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
-		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
-		{"<<", mergeTag, []string{"<<"}},
-	}
-
-	m := resolveMap
-	for _, item := range resolveMapList {
-		for _, s := range item.l {
-			m[s] = resolveMapItem{item.v, item.tag}
-		}
-	}
-}
-
-const (
-	nullTag      = "!!null"
-	boolTag      = "!!bool"
-	strTag       = "!!str"
-	intTag       = "!!int"
-	floatTag     = "!!float"
-	timestampTag = "!!timestamp"
-	seqTag       = "!!seq"
-	mapTag       = "!!map"
-	binaryTag    = "!!binary"
-	mergeTag     = "!!merge"
-)
-
-var longTags = make(map[string]string)
-var shortTags = make(map[string]string)
-
-func init() {
-	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
-		ltag := longTag(stag)
-		longTags[stag] = ltag
-		shortTags[ltag] = stag
-	}
-}
-
-const longTagPrefix = "tag:yaml.org,2002:"
-
-func shortTag(tag string) string {
-	if strings.HasPrefix(tag, longTagPrefix) {
-		if stag, ok := shortTags[tag]; ok {
-			return stag
-		}
-		return "!!" + tag[len(longTagPrefix):]
-	}
-	return tag
-}
-
-func longTag(tag string) string {
-	if strings.HasPrefix(tag, "!!") {
-		if ltag, ok := longTags[tag]; ok {
-			return ltag
-		}
-		return longTagPrefix + tag[2:]
-	}
-	return tag
-}
-
-func resolvableTag(tag string) bool {
-	switch tag {
-	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
-		return true
-	}
-	return false
-}
-
-var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
-
-func resolve(tag string, in string) (rtag string, out interface{}) {
-	tag = shortTag(tag)
-	if !resolvableTag(tag) {
-		return tag, in
-	}
-
-	defer func() {
-		switch tag {
-		case "", rtag, strTag, binaryTag:
-			return
-		case floatTag:
-			if rtag == intTag {
-				switch v := out.(type) {
-				case int64:
-					rtag = floatTag
-					out = float64(v)
-					return
-				case int:
-					rtag = floatTag
-					out = float64(v)
-					return
-				}
-			}
-		}
-		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
-	}()
-
-	// Any data is accepted as a !!str or !!binary.
-	// Otherwise, the prefix is enough of a hint about what it might be.
-	hint := byte('N')
-	if in != "" {
-		hint = resolveTable[in[0]]
-	}
-	if hint != 0 && tag != strTag && tag != binaryTag {
-		// Handle things we can lookup in a map.
-		if item, ok := resolveMap[in]; ok {
-			return item.tag, item.value
-		}
-
-		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
-		// are purposefully unsupported here. They're still quoted on
-		// the way out for compatibility with other parsers, though.
-
-		switch hint {
-		case 'M':
-			// We've already checked the map above.
-
-		case '.':
-			// Not in the map, so maybe a normal float.
-			floatv, err := strconv.ParseFloat(in, 64)
-			if err == nil {
-				return floatTag, floatv
-			}
-
-		case 'D', 'S':
-			// Int, float, or timestamp.
-			// Only try values as a timestamp if the value is unquoted or there's an explicit
-			// !!timestamp tag.
-			if tag == "" || tag == timestampTag {
-				t, ok := parseTimestamp(in)
-				if ok {
-					return timestampTag, t
-				}
-			}
-
-			plain := strings.Replace(in, "_", "", -1)
-			intv, err := strconv.ParseInt(plain, 0, 64)
-			if err == nil {
-				if intv == int64(int(intv)) {
-					return intTag, int(intv)
-				} else {
-					return intTag, intv
-				}
-			}
-			uintv, err := strconv.ParseUint(plain, 0, 64)
-			if err == nil {
-				return intTag, uintv
-			}
-			if yamlStyleFloat.MatchString(plain) {
-				floatv, err := strconv.ParseFloat(plain, 64)
-				if err == nil {
-					return floatTag, floatv
-				}
-			}
-			if strings.HasPrefix(plain, "0b") {
-				intv, err := strconv.ParseInt(plain[2:], 2, 64)
-				if err == nil {
-					if intv == int64(int(intv)) {
-						return intTag, int(intv)
-					} else {
-						return intTag, intv
-					}
-				}
-				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
-				if err == nil {
-					return intTag, uintv
-				}
-			} else if strings.HasPrefix(plain, "-0b") {
-				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
-				if err == nil {
-					if true || intv == int64(int(intv)) {
-						return intTag, int(intv)
-					} else {
-						return intTag, intv
-					}
-				}
-			}
-			// Octals as introduced in version 1.2 of the spec.
-			// Octals from the 1.1 spec, spelled as 0777, are still
-			// decoded by default in v3 as well for compatibility.
-			// May be dropped in v4 depending on how usage evolves.
-			if strings.HasPrefix(plain, "0o") {
-				intv, err := strconv.ParseInt(plain[2:], 8, 64)
-				if err == nil {
-					if intv == int64(int(intv)) {
-						return intTag, int(intv)
-					} else {
-						return intTag, intv
-					}
-				}
-				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
-				if err == nil {
-					return intTag, uintv
-				}
-			} else if strings.HasPrefix(plain, "-0o") {
-				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
-				if err == nil {
-					if true || intv == int64(int(intv)) {
-						return intTag, int(intv)
-					} else {
-						return intTag, intv
-					}
-				}
-			}
-		default:
-			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
-		}
-	}
-	return strTag, in
-}
-
-// encodeBase64 encodes s as base64 that is broken up into multiple lines
-// as appropriate for the resulting length.
-func encodeBase64(s string) string {
-	const lineLen = 70
-	encLen := base64.StdEncoding.EncodedLen(len(s))
-	lines := encLen/lineLen + 1
-	buf := make([]byte, encLen*2+lines)
-	in := buf[0:encLen]
-	out := buf[encLen:]
-	base64.StdEncoding.Encode(in, []byte(s))
-	k := 0
-	for i := 0; i < len(in); i += lineLen {
-		j := i + lineLen
-		if j > len(in) {
-			j = len(in)
-		}
-		k += copy(out[k:], in[i:j])
-		if lines > 1 {
-			out[k] = '\n'
-			k++
-		}
-	}
-	return string(out[:k])
-}
-
-// This is a subset of the formats allowed by the regular expression
-// defined at http://yaml.org/type/timestamp.html.
-var allowedTimestampFormats = []string{
-	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
-	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
-	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
-	"2006-1-2",                        // date only
-	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
-	// from the set of examples.
-}
-
-// parseTimestamp parses s as a timestamp string and
-// returns the timestamp and reports whether it succeeded.
-// Timestamp formats are defined at http://yaml.org/type/timestamp.html
-func parseTimestamp(s string) (time.Time, bool) {
-	// TODO write code to check all the formats supported by
-	// http://yaml.org/type/timestamp.html instead of using time.Parse.
-
-	// Quick check: all date formats start with YYYY-.
-	i := 0
-	for ; i < len(s); i++ {
-		if c := s[i]; c < '0' || c > '9' {
-			break
-		}
-	}
-	if i != 4 || i == len(s) || s[i] != '-' {
-		return time.Time{}, false
-	}
-	for _, format := range allowedTimestampFormats {
-		if t, err := time.Parse(format, s); err == nil {
-			return t, true
-		}
-	}
-	return time.Time{}, false
-}
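For context on the resolution code removed above: these plain-scalar rules surface through the public yaml.v3 API when decoding into an untyped value. Below is a minimal, hypothetical sketch (not part of this change; the YAML input and program are illustrative only) of how hex/octal integers, floats, and timestamps may come back as distinct Go types.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    func main() {
    	// Plain scalars; the resolver decides which tag (and Go type) each one gets.
    	src := []byte("hex: 0x1A\noct: 0o17\nratio: 3.5\nwhen: 2001-12-14 21:59:43.10\n")

    	var doc map[string]interface{}
    	if err := yaml.Unmarshal(src, &doc); err != nil {
    		panic(err)
    	}
    	for k, v := range doc {
    		// Expected (per the resolver above): hex/oct as int, ratio as float64,
    		// and "when" likely as time.Time via parseTimestamp.
    		fmt.Printf("%s: %v (%T)\n", k, v, v)
    	}
    }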
diff --git a/application/source/vendor/gopkg.in/yaml.v3/scannerc.go b/application/source/vendor/gopkg.in/yaml.v3/scannerc.go
deleted file mode 100644
index ca0070108f4ebe6a09a222075267e0ffca996e72..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/scannerc.go
+++ /dev/null
@@ -1,3038 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// Parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive descent parser (or an
-// LL(1) parser, as it is usually called).
-//
-// Actually there are two issues of Scanning that might be called "clever"; the
-// rest is quite straightforward.  The issues are "block collection start" and
-// "simple keys".  Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented.  We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-//      STREAM-START(encoding)          # The stream start.
-//      STREAM-END                      # The stream end.
-//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
-//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
-//      DOCUMENT-START                  # '---'
-//      DOCUMENT-END                    # '...'
-//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
-//      BLOCK-MAPPING-START             # sequence or a block mapping.
-//      BLOCK-END                       # Indentation decrease.
-//      FLOW-SEQUENCE-START             # '['
-//      FLOW-SEQUENCE-END               # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-//      BLOCK-ENTRY                     # '-'
-//      FLOW-ENTRY                      # ','
-//      KEY                             # '?' or nothing (simple keys).
-//      VALUE                           # ':'
-//      ALIAS(anchor)                   # '*anchor'
-//      ANCHOR(anchor)                  # '&anchor'
-//      TAG(handle,suffix)              # '!handle!suffix'
-//      SCALAR(value,style)             # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-//      STREAM-START(encoding)
-//      STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for tags:
-//
-//      VERSION-DIRECTIVE(major,minor)
-//      TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-//      %YAML   1.1
-//      %TAG    !   !foo
-//      %TAG    !yaml!  tag:yaml.org,2002:
-//      ---
-//
-// The corresponding sequence of tokens:
-//
-//      STREAM-START(utf-8)
-//      VERSION-DIRECTIVE(1,1)
-//      TAG-DIRECTIVE("!","!foo")
-//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-//      DOCUMENT-START
-//      STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-//      DOCUMENT-START
-//      DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-//      1. An implicit document:
-//
-//          'a scalar'
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          SCALAR("a scalar",single-quoted)
-//          STREAM-END
-//
-//      2. An explicit document:
-//
-//          ---
-//          'a scalar'
-//          ...
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          DOCUMENT-START
-//          SCALAR("a scalar",single-quoted)
-//          DOCUMENT-END
-//          STREAM-END
-//
-//      3. Several documents in a stream:
-//
-//          'a scalar'
-//          ---
-//          'another scalar'
-//          ---
-//          'yet another scalar'
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          SCALAR("a scalar",single-quoted)
-//          DOCUMENT-START
-//          SCALAR("another scalar",single-quoted)
-//          DOCUMENT-START
-//          SCALAR("yet another scalar",single-quoted)
-//          STREAM-END
-//
-// We have already introduced the SCALAR token above.  The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-//      ALIAS(anchor)
-//      ANCHOR(anchor)
-//      TAG(handle,suffix)
-//      SCALAR(value,style)
-//
-// The following series of examples illustrate the usage of these tokens:
-//
-//      1. A recursive sequence:
-//
-//          &A [ *A ]
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          ANCHOR("A")
-//          FLOW-SEQUENCE-START
-//          ALIAS("A")
-//          FLOW-SEQUENCE-END
-//          STREAM-END
-//
-//      2. A tagged scalar:
-//
-//          !!float "3.14"  # A good approximation.
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          TAG("!!","float")
-//          SCALAR("3.14",double-quoted)
-//          STREAM-END
-//
-//      3. Various scalar styles:
-//
-//          --- # Implicit empty plain scalars do not produce tokens.
-//          --- a plain scalar
-//          --- 'a single-quoted scalar'
-//          --- "a double-quoted scalar"
-//          --- |-
-//            a literal scalar
-//          --- >-
-//            a folded
-//            scalar
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          DOCUMENT-START
-//          DOCUMENT-START
-//          SCALAR("a plain scalar",plain)
-//          DOCUMENT-START
-//          SCALAR("a single-quoted scalar",single-quoted)
-//          DOCUMENT-START
-//          SCALAR("a double-quoted scalar",double-quoted)
-//          DOCUMENT-START
-//          SCALAR("a literal scalar",literal)
-//          DOCUMENT-START
-//          SCALAR("a folded scalar",folded)
-//          STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-//      FLOW-SEQUENCE-START
-//      FLOW-SEQUENCE-END
-//      FLOW-MAPPING-START
-//      FLOW-MAPPING-END
-//      FLOW-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly.  FLOW-ENTRY represents the ',' indicator.  Finally, the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-//      1. A flow sequence:
-//
-//          [item 1, item 2, item 3]
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          FLOW-SEQUENCE-START
-//          SCALAR("item 1",plain)
-//          FLOW-ENTRY
-//          SCALAR("item 2",plain)
-//          FLOW-ENTRY
-//          SCALAR("item 3",plain)
-//          FLOW-SEQUENCE-END
-//          STREAM-END
-//
-//      2. A flow mapping:
-//
-//          {
-//              a simple key: a value,  # Note that the KEY token is produced.
-//              ? a complex key: another value,
-//          }
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          FLOW-MAPPING-START
-//          KEY
-//          SCALAR("a simple key",plain)
-//          VALUE
-//          SCALAR("a value",plain)
-//          FLOW-ENTRY
-//          KEY
-//          SCALAR("a complex key",plain)
-//          VALUE
-//          SCALAR("another value",plain)
-//          FLOW-ENTRY
-//          FLOW-MAPPING-END
-//          STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator.  Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-//      BLOCK-SEQUENCE-START
-//      BLOCK-MAPPING-START
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes an indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python).  However, YAML has some syntax peculiarities
-// that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-//      1. Block sequences:
-//
-//          - item 1
-//          - item 2
-//          -
-//            - item 3.1
-//            - item 3.2
-//          -
-//            key 1: value 1
-//            key 2: value 2
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          BLOCK-SEQUENCE-START
-//          BLOCK-ENTRY
-//          SCALAR("item 1",plain)
-//          BLOCK-ENTRY
-//          SCALAR("item 2",plain)
-//          BLOCK-ENTRY
-//          BLOCK-SEQUENCE-START
-//          BLOCK-ENTRY
-//          SCALAR("item 3.1",plain)
-//          BLOCK-ENTRY
-//          SCALAR("item 3.2",plain)
-//          BLOCK-END
-//          BLOCK-ENTRY
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("key 1",plain)
-//          VALUE
-//          SCALAR("value 1",plain)
-//          KEY
-//          SCALAR("key 2",plain)
-//          VALUE
-//          SCALAR("value 2",plain)
-//          BLOCK-END
-//          BLOCK-END
-//          STREAM-END
-//
-//      2. Block mappings:
-//
-//          a simple key: a value   # The KEY token is produced here.
-//          ? a complex key
-//          : another value
-//          a mapping:
-//            key 1: value 1
-//            key 2: value 2
-//          a sequence:
-//            - item 1
-//            - item 2
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("a simple key",plain)
-//          VALUE
-//          SCALAR("a value",plain)
-//          KEY
-//          SCALAR("a complex key",plain)
-//          VALUE
-//          SCALAR("another value",plain)
-//          KEY
-//          SCALAR("a mapping",plain)
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("key 1",plain)
-//          VALUE
-//          SCALAR("value 1",plain)
-//          KEY
-//          SCALAR("key 2",plain)
-//          VALUE
-//          SCALAR("value 2",plain)
-//          BLOCK-END
-//          KEY
-//          SCALAR("a sequence",plain)
-//          VALUE
-//          BLOCK-SEQUENCE-START
-//          BLOCK-ENTRY
-//          SCALAR("item 1",plain)
-//          BLOCK-ENTRY
-//          SCALAR("item 2",plain)
-//          BLOCK-END
-//          BLOCK-END
-//          STREAM-END
-//
-// YAML does not always require a new block collection to start on a new
-// line.  If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line.  The following examples
-// illustrate this case:
-//
-//      1. Collections in a sequence:
-//
-//          - - item 1
-//            - item 2
-//          - key 1: value 1
-//            key 2: value 2
-//          - ? complex key
-//            : complex value
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          BLOCK-SEQUENCE-START
-//          BLOCK-ENTRY
-//          BLOCK-SEQUENCE-START
-//          BLOCK-ENTRY
-//          SCALAR("item 1",plain)
-//          BLOCK-ENTRY
-//          SCALAR("item 2",plain)
-//          BLOCK-END
-//          BLOCK-ENTRY
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("key 1",plain)
-//          VALUE
-//          SCALAR("value 1",plain)
-//          KEY
-//          SCALAR("key 2",plain)
-//          VALUE
-//          SCALAR("value 2",plain)
-//          BLOCK-END
-//          BLOCK-ENTRY
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("complex key")
-//          VALUE
-//          SCALAR("complex value")
-//          BLOCK-END
-//          BLOCK-END
-//          STREAM-END
-//
-//      2. Collections in a mapping:
-//
-//          ? a sequence
-//          : - item 1
-//            - item 2
-//          ? a mapping
-//          : key 1: value 1
-//            key 2: value 2
-//
-//      Tokens:
-//
-//          STREAM-START(utf-8)
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("a sequence",plain)
-//          VALUE
-//          BLOCK-SEQUENCE-START
-//          BLOCK-ENTRY
-//          SCALAR("item 1",plain)
-//          BLOCK-ENTRY
-//          SCALAR("item 2",plain)
-//          BLOCK-END
-//          KEY
-//          SCALAR("a mapping",plain)
-//          VALUE
-//          BLOCK-MAPPING-START
-//          KEY
-//          SCALAR("key 1",plain)
-//          VALUE
-//          SCALAR("value 1",plain)
-//          KEY
-//          SCALAR("key 2",plain)
-//          VALUE
-//          SCALAR("value 2",plain)
-//          BLOCK-END
-//          BLOCK-END
-//          STREAM-END
-//
-// YAML also permits non-indented sequences if they are included in a block
-// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-//      key:
-//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
-//      - item 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key",plain)
-//      VALUE
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//
-
-// Ensure that the buffer contains the required number of characters.
-// Return true on success, false on failure (reader error or memory error).
-func cache(parser *yaml_parser_t, length int) bool {
-	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
-	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
-}
-
-// Advance the buffer pointer.
-func skip(parser *yaml_parser_t) {
-	if !is_blank(parser.buffer, parser.buffer_pos) {
-		parser.newlines = 0
-	}
-	parser.mark.index++
-	parser.mark.column++
-	parser.unread--
-	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-}
-
-func skip_line(parser *yaml_parser_t) {
-	if is_crlf(parser.buffer, parser.buffer_pos) {
-		parser.mark.index += 2
-		parser.mark.column = 0
-		parser.mark.line++
-		parser.unread -= 2
-		parser.buffer_pos += 2
-		parser.newlines++
-	} else if is_break(parser.buffer, parser.buffer_pos) {
-		parser.mark.index++
-		parser.mark.column = 0
-		parser.mark.line++
-		parser.unread--
-		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-		parser.newlines++
-	}
-}
-
-// Copy a character to a string buffer and advance pointers.
-func read(parser *yaml_parser_t, s []byte) []byte {
-	if !is_blank(parser.buffer, parser.buffer_pos) {
-		parser.newlines = 0
-	}
-	w := width(parser.buffer[parser.buffer_pos])
-	if w == 0 {
-		panic("invalid character sequence")
-	}
-	if len(s) == 0 {
-		s = make([]byte, 0, 32)
-	}
-	if w == 1 && len(s)+w <= cap(s) {
-		s = s[:len(s)+1]
-		s[len(s)-1] = parser.buffer[parser.buffer_pos]
-		parser.buffer_pos++
-	} else {
-		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
-		parser.buffer_pos += w
-	}
-	parser.mark.index++
-	parser.mark.column++
-	parser.unread--
-	return s
-}
-
-// Copy a line break character to a string buffer and advance pointers.
-func read_line(parser *yaml_parser_t, s []byte) []byte {
-	buf := parser.buffer
-	pos := parser.buffer_pos
-	switch {
-	case buf[pos] == '\r' && buf[pos+1] == '\n':
-		// CR LF . LF
-		s = append(s, '\n')
-		parser.buffer_pos += 2
-		parser.mark.index++
-		parser.unread--
-	case buf[pos] == '\r' || buf[pos] == '\n':
-		// CR|LF . LF
-		s = append(s, '\n')
-		parser.buffer_pos += 1
-	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
-		// NEL . LF
-		s = append(s, '\n')
-		parser.buffer_pos += 2
-	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
-		// LS|PS . LS|PS
-		s = append(s, buf[parser.buffer_pos:pos+3]...)
-		parser.buffer_pos += 3
-	default:
-		return s
-	}
-	parser.mark.index++
-	parser.mark.column = 0
-	parser.mark.line++
-	parser.unread--
-	parser.newlines++
-	return s
-}
-
-// Get the next token.
-func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
-	// Erase the token object.
-	*token = yaml_token_t{} // [Go] Is this necessary?
-
-	// No tokens after STREAM-END or error.
-	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
-		return true
-	}
-
-	// Ensure that the tokens queue contains enough tokens.
-	if !parser.token_available {
-		if !yaml_parser_fetch_more_tokens(parser) {
-			return false
-		}
-	}
-
-	// Fetch the next token from the queue.
-	*token = parser.tokens[parser.tokens_head]
-	parser.tokens_head++
-	parser.tokens_parsed++
-	parser.token_available = false
-
-	if token.typ == yaml_STREAM_END_TOKEN {
-		parser.stream_end_produced = true
-	}
-	return true
-}
-
-// Set the scanner error and return false.
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
-	parser.error = yaml_SCANNER_ERROR
-	parser.context = context
-	parser.context_mark = context_mark
-	parser.problem = problem
-	parser.problem_mark = parser.mark
-	return false
-}
-
-func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
-	context := "while parsing a tag"
-	if directive {
-		context = "while parsing a %TAG directive"
-	}
-	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
-}
-
-func trace(args ...interface{}) func() {
-	pargs := append([]interface{}{"+++"}, args...)
-	fmt.Println(pargs...)
-	pargs = append([]interface{}{"---"}, args...)
-	return func() { fmt.Println(pargs...) }
-}
-
-// Ensure that the tokens queue contains at least one token which can be
-// returned to the Parser.
-func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
-	// While we need more tokens to fetch, do it.
-	for {
-		// [Go] The comment parsing logic requires a lookahead of two tokens
-		// so that foot comments may be parsed in time to associate them
-		// with the tokens that were parsed before them, and also so that line
-		// comments may be transformed into head comments in some edge cases.
-		if parser.tokens_head < len(parser.tokens)-2 {
-			// If a potential simple key is at the head position, we need to fetch
-			// the next token to disambiguate it.
-			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
-			if !ok {
-				break
-			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
-				return false
-			} else if !valid {
-				break
-			}
-		}
-		// Fetch the next token.
-		if !yaml_parser_fetch_next_token(parser) {
-			return false
-		}
-	}
-
-	parser.token_available = true
-	return true
-}
-
-// The dispatcher for token fetchers.
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
-	// Ensure that the buffer is initialized.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	// Check if we just started scanning.  Fetch STREAM-START then.
-	if !parser.stream_start_produced {
-		return yaml_parser_fetch_stream_start(parser)
-	}
-
-	scan_mark := parser.mark
-
-	// Eat whitespaces and comments until we reach the next token.
-	if !yaml_parser_scan_to_next_token(parser) {
-		return false
-	}
-
-	// [Go] While unrolling indents, transform the head comments of prior
-	// indentation levels observed after scan_start into foot comments at
-	// the respective indexes.
-
-	// Check the indentation level against the current column.
-	if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
-		return false
-	}
-
-	// Ensure that the buffer contains at least 4 characters.  4 is the length
-	// of the longest indicators ('--- ' and '... ').
-	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
-		return false
-	}
-
-	// Is it the end of the stream?
-	if is_z(parser.buffer, parser.buffer_pos) {
-		return yaml_parser_fetch_stream_end(parser)
-	}
-
-	// Is it a directive?
-	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
-		return yaml_parser_fetch_directive(parser)
-	}
-
-	buf := parser.buffer
-	pos := parser.buffer_pos
-
-	// Is it the document start indicator?
-	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
-		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
-	}
-
-	// Is it the document end indicator?
-	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
-		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
-	}
-
-	comment_mark := parser.mark
-	if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
-		// Associate any following comments with the prior token.
-		comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
-	}
-	defer func() {
-		if !ok {
-			return
-		}
-		if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
-			// Sequence indicators alone have no line comments; the comment
-			// becomes a head comment for whatever follows.
-			return
-		}
-		if !yaml_parser_scan_line_comment(parser, comment_mark) {
-			ok = false
-			return
-		}
-	}()
-
-	// Is it the flow sequence start indicator?
-	if buf[pos] == '[' {
-		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
-	}
-
-	// Is it the flow mapping start indicator?
-	if parser.buffer[parser.buffer_pos] == '{' {
-		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
-	}
-
-	// Is it the flow sequence end indicator?
-	if parser.buffer[parser.buffer_pos] == ']' {
-		return yaml_parser_fetch_flow_collection_end(parser,
-			yaml_FLOW_SEQUENCE_END_TOKEN)
-	}
-
-	// Is it the flow mapping end indicator?
-	if parser.buffer[parser.buffer_pos] == '}' {
-		return yaml_parser_fetch_flow_collection_end(parser,
-			yaml_FLOW_MAPPING_END_TOKEN)
-	}
-
-	// Is it the flow entry indicator?
-	if parser.buffer[parser.buffer_pos] == ',' {
-		return yaml_parser_fetch_flow_entry(parser)
-	}
-
-	// Is it the block entry indicator?
-	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
-		return yaml_parser_fetch_block_entry(parser)
-	}
-
-	// Is it the key indicator?
-	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
-		return yaml_parser_fetch_key(parser)
-	}
-
-	// Is it the value indicator?
-	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
-		return yaml_parser_fetch_value(parser)
-	}
-
-	// Is it an alias?
-	if parser.buffer[parser.buffer_pos] == '*' {
-		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
-	}
-
-	// Is it an anchor?
-	if parser.buffer[parser.buffer_pos] == '&' {
-		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
-	}
-
-	// Is it a tag?
-	if parser.buffer[parser.buffer_pos] == '!' {
-		return yaml_parser_fetch_tag(parser)
-	}
-
-	// Is it a literal scalar?
-	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
-		return yaml_parser_fetch_block_scalar(parser, true)
-	}
-
-	// Is it a folded scalar?
-	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
-		return yaml_parser_fetch_block_scalar(parser, false)
-	}
-
-	// Is it a single-quoted scalar?
-	if parser.buffer[parser.buffer_pos] == '\'' {
-		return yaml_parser_fetch_flow_scalar(parser, true)
-	}
-
-	// Is it a double-quoted scalar?
-	if parser.buffer[parser.buffer_pos] == '"' {
-		return yaml_parser_fetch_flow_scalar(parser, false)
-	}
-
-	// Is it a plain scalar?
-	//
-	// A plain scalar may start with any non-blank characters except
-	//
-	//      '-', '?', ':', ',', '[', ']', '{', '}',
-	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
-	//      '%', '@', '`'.
-	//
-	// In the block context (and, for the '-' indicator, in the flow context
-	// too), it may also start with the characters
-	//
-	//      '-', '?', ':'
-	//
-	// if it is followed by a non-space character.
-	//
-	// The last rule is more restrictive than the specification requires.
-	// [Go] TODO Make this logic more reasonable.
-	//switch parser.buffer[parser.buffer_pos] {
-	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
-	//}
-	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
-		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
-		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
-		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
-		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
-		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
-		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
-		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
-		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
-		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
-		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
-		(parser.flow_level == 0 &&
-			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
-			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
-		return yaml_parser_fetch_plain_scalar(parser)
-	}
-
-	// If we don't determine the token type so far, it is an error.
-	return yaml_parser_set_scanner_error(parser,
-		"while scanning for the next token", parser.mark,
-		"found character that cannot start any token")
-}
-
-func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
-	if !simple_key.possible {
-		return false, true
-	}
-
-	// The 1.2 specification says:
-	//
-	//     "If the ? indicator is omitted, parsing needs to see past the
-	//     implicit key to recognize it as such. To limit the amount of
-	//     lookahead required, the “:” indicator must appear at most 1024
-	//     Unicode characters beyond the start of the key. In addition, the key
-	//     is restricted to a single line."
-	//
-	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
-		// Check if the potential simple key to be removed is required.
-		if simple_key.required {
-			return false, yaml_parser_set_scanner_error(parser,
-				"while scanning a simple key", simple_key.mark,
-				"could not find expected ':'")
-		}
-		simple_key.possible = false
-		return false, true
-	}
-	return true, true
-}
-
-// Check if a simple key may start at the current position and add it if
-// needed.
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
-	// A simple key is required at the current position if the scanner is in
-	// the block context and the current column coincides with the indentation
-	// level.
-
-	required := parser.flow_level == 0 && parser.indent == parser.mark.column
-
-	//
-	// If the current position may start a simple key, save it.
-	//
-	if parser.simple_key_allowed {
-		simple_key := yaml_simple_key_t{
-			possible:     true,
-			required:     required,
-			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
-			mark:         parser.mark,
-		}
-
-		if !yaml_parser_remove_simple_key(parser) {
-			return false
-		}
-		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
-		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
-	}
-	return true
-}
-
-// Remove a potential simple key at the current flow level.
-func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
-	i := len(parser.simple_keys) - 1
-	if parser.simple_keys[i].possible {
-		// If the key is required, it is an error.
-		if parser.simple_keys[i].required {
-			return yaml_parser_set_scanner_error(parser,
-				"while scanning a simple key", parser.simple_keys[i].mark,
-				"could not find expected ':'")
-		}
-		// Remove the key from the stack.
-		parser.simple_keys[i].possible = false
-		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
-	}
-	return true
-}
-
-// max_flow_level limits the flow_level
-const max_flow_level = 10000
-
-// Increase the flow level and resize the simple key list if needed.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
-	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
-		possible:     false,
-		required:     false,
-		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
-		mark:         parser.mark,
-	})
-
-	// Increase the flow level.
-	parser.flow_level++
-	if parser.flow_level > max_flow_level {
-		return yaml_parser_set_scanner_error(parser,
-			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
-			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
-	}
-	return true
-}
-
-// Decrease the flow level.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
-	if parser.flow_level > 0 {
-		parser.flow_level--
-		last := len(parser.simple_keys) - 1
-		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
-		parser.simple_keys = parser.simple_keys[:last]
-	}
-	return true
-}
-
-// max_indents limits the indents stack size
-const max_indents = 10000
-
-// Push the current indentation level to the stack and set the new level if
-// the current column is greater than the indentation level.  In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
-	// In the flow context, do nothing.
-	if parser.flow_level > 0 {
-		return true
-	}
-
-	if parser.indent < column {
-		// Push the current indentation level to the stack and set the new
-		// indentation level.
-		parser.indents = append(parser.indents, parser.indent)
-		parser.indent = column
-		if len(parser.indents) > max_indents {
-			return yaml_parser_set_scanner_error(parser,
-				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
-				fmt.Sprintf("exceeded max depth of %d", max_indents))
-		}
-
-		// Create a token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        typ,
-			start_mark: mark,
-			end_mark:   mark,
-		}
-		if number > -1 {
-			number -= parser.tokens_parsed
-		}
-		yaml_insert_token(parser, number, &token)
-	}
-	return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less than or equal to the column.  For each indentation level, append
-// the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
-	// In the flow context, do nothing.
-	if parser.flow_level > 0 {
-		return true
-	}
-
-	block_mark := scan_mark
-	block_mark.index--
-
-	// Loop through the indentation levels in the stack.
-	for parser.indent > column {
-
-		// [Go] Reposition the end token before potential following
-		//      foot comments of parent blocks. For that, search
-		//      backwards for recent comments that were at the same
-		//      indent as the block that is ending now.
-		stop_index := block_mark.index
-		for i := len(parser.comments) - 1; i >= 0; i-- {
-			comment := &parser.comments[i]
-
-			if comment.end_mark.index < stop_index {
-				// Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
-				// If requested indent column is < 0, then the document is over and everything else
-				// is a foot anyway.
-				break
-			}
-			if comment.start_mark.column == parser.indent+1 {
-				// This is a good match. But maybe there's a former comment
-				// at that same indent level, so keep searching.
-				block_mark = comment.start_mark
-			}
-
-			// While the end of the former comment matches with
-			// the start of the following one, we know there's
-			// nothing in between and scanning is still safe.
-			stop_index = comment.scan_mark.index
-		}
-
-		// Create a token and append it to the queue.
-		token := yaml_token_t{
-			typ:        yaml_BLOCK_END_TOKEN,
-			start_mark: block_mark,
-			end_mark:   block_mark,
-		}
-		yaml_insert_token(parser, -1, &token)
-
-		// Pop the indentation level.
-		parser.indent = parser.indents[len(parser.indents)-1]
-		parser.indents = parser.indents[:len(parser.indents)-1]
-	}
-	return true
-}
-
-// Initialize the scanner and produce the STREAM-START token.
-func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
-
-	// Set the initial indentation.
-	parser.indent = -1
-
-	// Initialize the simple key stack.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
-	parser.simple_keys_by_tok = make(map[int]int)
-
-	// A simple key is allowed at the beginning of the stream.
-	parser.simple_key_allowed = true
-
-	// We have started.
-	parser.stream_start_produced = true
-
-	// Create the STREAM-START token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_STREAM_START_TOKEN,
-		start_mark: parser.mark,
-		end_mark:   parser.mark,
-		encoding:   parser.encoding,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the STREAM-END token and shut down the scanner.
-func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
-
-	// Force new line.
-	if parser.mark.column != 0 {
-		parser.mark.column = 0
-		parser.mark.line++
-	}
-
-	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
-		return false
-	}
-
-	// Reset simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	parser.simple_key_allowed = false
-
-	// Create the STREAM-END token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_STREAM_END_TOKEN,
-		start_mark: parser.mark,
-		end_mark:   parser.mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
-func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
-	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
-		return false
-	}
-
-	// Reset simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	parser.simple_key_allowed = false
-
-	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
-	token := yaml_token_t{}
-	if !yaml_parser_scan_directive(parser, &token) {
-		return false
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the DOCUMENT-START or DOCUMENT-END token.
-func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
-		return false
-	}
-
-	// Reset simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	parser.simple_key_allowed = false
-
-	// Consume the token.
-	start_mark := parser.mark
-
-	skip(parser)
-	skip(parser)
-	skip(parser)
-
-	end_mark := parser.mark
-
-	// Create the DOCUMENT-START or DOCUMENT-END token.
-	token := yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-
-	// The indicators '[' and '{' may start a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// Increase the flow level.
-	if !yaml_parser_increase_flow_level(parser) {
-		return false
-	}
-
-	// A simple key may follow the indicators '[' and '{'.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-	token := yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// Reset any potential simple key on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Decrease the flow level.
-	if !yaml_parser_decrease_flow_level(parser) {
-		return false
-	}
-
-	// No simple keys after the indicators ']' and '}'.
-	parser.simple_key_allowed = false
-
-	// Consume the token.
-
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-	token := yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	// Append the token to the queue.
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the FLOW-ENTRY token.
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after ','.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the FLOW-ENTRY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_FLOW_ENTRY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the BLOCK-ENTRY token.
-func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
-	// Check if the scanner is in the block context.
-	if parser.flow_level == 0 {
-		// Check if we are allowed to start a new entry.
-		if !parser.simple_key_allowed {
-			return yaml_parser_set_scanner_error(parser, "", parser.mark,
-				"block sequence entries are not allowed in this context")
-		}
-		// Add the BLOCK-SEQUENCE-START token if needed.
-		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
-			return false
-		}
-	} else {
-		// It is an error for the '-' indicator to occur in the flow context,
-		// but we let the Parser detect and report about it because the Parser
-		// is able to point to the context.
-	}
-
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '-'.
-	parser.simple_key_allowed = true
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the BLOCK-ENTRY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_BLOCK_ENTRY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the KEY token.
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
-	// In the block context, additional checks are required.
-	if parser.flow_level == 0 {
-		// Check if we are allowed to start a new key (not necessarily simple).
-		if !parser.simple_key_allowed {
-			return yaml_parser_set_scanner_error(parser, "", parser.mark,
-				"mapping keys are not allowed in this context")
-		}
-		// Add the BLOCK-MAPPING-START token if needed.
-		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-			return false
-		}
-	}
-
-	// Reset any potential simple keys on the current flow level.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// Simple keys are allowed after '?' in the block context.
-	parser.simple_key_allowed = parser.flow_level == 0
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the KEY token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_KEY_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
-	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
-	// Have we found a simple key?
-	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
-		return false
-
-	} else if valid {
-
-		// Create the KEY token and insert it into the queue.
-		token := yaml_token_t{
-			typ:        yaml_KEY_TOKEN,
-			start_mark: simple_key.mark,
-			end_mark:   simple_key.mark,
-		}
-		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
-		// In the block context, we may need to add the BLOCK-MAPPING-START token.
-		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
-			simple_key.token_number,
-			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
-			return false
-		}
-
-		// Remove the simple key.
-		simple_key.possible = false
-		delete(parser.simple_keys_by_tok, simple_key.token_number)
-
-		// A simple key cannot follow another simple key.
-		parser.simple_key_allowed = false
-
-	} else {
-		// The ':' indicator follows a complex key.
-
-		// In the block context, extra checks are required.
-		if parser.flow_level == 0 {
-
-			// Check if we are allowed to start a complex value.
-			if !parser.simple_key_allowed {
-				return yaml_parser_set_scanner_error(parser, "", parser.mark,
-					"mapping values are not allowed in this context")
-			}
-
-			// Add the BLOCK-MAPPING-START token if needed.
-			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
-				return false
-			}
-		}
-
-		// Simple keys after ':' are allowed in the block context.
-		parser.simple_key_allowed = parser.flow_level == 0
-	}
-
-	// Consume the token.
-	start_mark := parser.mark
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create the VALUE token and append it to the queue.
-	token := yaml_token_t{
-		typ:        yaml_VALUE_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the ALIAS or ANCHOR token.
-func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
-	// An anchor or an alias could be a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// A simple key cannot follow an anchor or an alias.
-	parser.simple_key_allowed = false
-
-	// Create the ALIAS or ANCHOR token and append it to the queue.
-	var token yaml_token_t
-	if !yaml_parser_scan_anchor(parser, &token, typ) {
-		return false
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the TAG token.
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
-	// A tag could be a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// A simple key cannot follow a tag.
-	parser.simple_key_allowed = false
-
-	// Create the TAG token and append it to the queue.
-	var token yaml_token_t
-	if !yaml_parser_scan_tag(parser, &token) {
-		return false
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
-func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
-	// Remove any potential simple keys.
-	if !yaml_parser_remove_simple_key(parser) {
-		return false
-	}
-
-	// A simple key may follow a block scalar.
-	parser.simple_key_allowed = true
-
-	// Create the SCALAR token and append it to the queue.
-	var token yaml_token_t
-	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
-		return false
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
-func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
-	// A flow scalar could be a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// A simple key cannot follow a flow scalar.
-	parser.simple_key_allowed = false
-
-	// Create the SCALAR token and append it to the queue.
-	var token yaml_token_t
-	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
-		return false
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Produce the SCALAR(...,plain) token.
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
-	// A plain scalar could be a simple key.
-	if !yaml_parser_save_simple_key(parser) {
-		return false
-	}
-
-	// A simple key cannot follow a flow scalar.
-	parser.simple_key_allowed = false
-
-	// Create the SCALAR token and append it to the queue.
-	var token yaml_token_t
-	if !yaml_parser_scan_plain_scalar(parser, &token) {
-		return false
-	}
-	yaml_insert_token(parser, -1, &token)
-	return true
-}
-
-// Eat whitespaces and comments until the next token is found.
-func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
-
-	scan_mark := parser.mark
-
-	// Loop until the next token is found.
-	for {
-		// Allow the BOM mark to start a line.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-		}
-
-		// Eat whitespaces.
-		// Tabs are allowed:
-		//  - in the flow context
-		//  - in the block context, but not at the beginning of the line or
-		//  after '-', '?', or ':' (complex value).
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
-		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-
-		// Check if we just had a line comment under a sequence entry that
-		// looks more like a header to the following content. Similar to this:
-		//
-		// - # The comment
-		//   - Some data
-		//
-		// If so, transform the line comment to a head comment and reposition.
-		if len(parser.comments) > 0 && len(parser.tokens) > 1 {
-			tokenA := parser.tokens[len(parser.tokens)-2]
-			tokenB := parser.tokens[len(parser.tokens)-1]
-			comment := &parser.comments[len(parser.comments)-1]
-			if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
-				// If it was in the prior line, reposition so it becomes a
-				// header of the follow up token. Otherwise, keep it in place
-				// so it becomes a header of the former.
-				comment.head = comment.line
-				comment.line = nil
-				if comment.start_mark.line == parser.mark.line-1 {
-					comment.token_mark = parser.mark
-				}
-			}
-		}
-
-		// Eat a comment until a line break.
-		if parser.buffer[parser.buffer_pos] == '#' {
-			if !yaml_parser_scan_comments(parser, scan_mark) {
-				return false
-			}
-		}
-
-		// If it is a line break, eat it.
-		if is_break(parser.buffer, parser.buffer_pos) {
-			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-				return false
-			}
-			skip_line(parser)
-
-			// In the block context, a new line may start a simple key.
-			if parser.flow_level == 0 {
-				parser.simple_key_allowed = true
-			}
-		} else {
-			break // We have found a token.
-		}
-	}
-
-	return true
-}
-
-// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
-//
-// Scope:
-//      %YAML    1.1    # a comment \n
-//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
-	// Eat '%'.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Scan the directive name.
-	var name []byte
-	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
-		return false
-	}
-
-	// Is it a YAML directive?
-	if bytes.Equal(name, []byte("YAML")) {
-		// Scan the VERSION directive value.
-		var major, minor int8
-		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
-			return false
-		}
-		end_mark := parser.mark
-
-		// Create a VERSION-DIRECTIVE token.
-		*token = yaml_token_t{
-			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			major:      major,
-			minor:      minor,
-		}
-
-		// Is it a TAG directive?
-	} else if bytes.Equal(name, []byte("TAG")) {
-		// Scan the TAG directive value.
-		var handle, prefix []byte
-		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
-			return false
-		}
-		end_mark := parser.mark
-
-		// Create a TAG-DIRECTIVE token.
-		*token = yaml_token_t{
-			typ:        yaml_TAG_DIRECTIVE_TOKEN,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			value:      handle,
-			prefix:     prefix,
-		}
-
-		// Unknown directive.
-	} else {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unknown directive name")
-		return false
-	}
-
-	// Eat the rest of the line including any comments.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	if parser.buffer[parser.buffer_pos] == '#' {
-		// [Go] Discard this inline comment for the time being.
-		//if !yaml_parser_scan_line_comment(parser, start_mark) {
-		//	return false
-		//}
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-	}
-
-	// Check if we are at the end of the line.
-	if !is_breakz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "did not find expected comment or line break")
-		return false
-	}
-
-	// Eat a line break.
-	if is_break(parser.buffer, parser.buffer_pos) {
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		skip_line(parser)
-	}
-
-	return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//       ^^^^
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//       ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
-	// Consume the directive name.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	var s []byte
-	for is_alpha(parser.buffer, parser.buffer_pos) {
-		s = read(parser, s)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the name is empty.
-	if len(s) == 0 {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "could not find expected directive name")
-		return false
-	}
-
-	// Check for a blank character after the name.
-	if !is_blankz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unexpected non-alphabetical character")
-		return false
-	}
-	*name = s
-	return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//           ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
-	// Eat whitespaces.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Consume the major version number.
-	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
-		return false
-	}
-
-	// Eat '.'.
-	if parser.buffer[parser.buffer_pos] != '.' {
-		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-			start_mark, "did not find expected digit or '.' character")
-	}
-
-	skip(parser)
-
-	// Consume the minor version number.
-	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
-		return false
-	}
-	return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//              ^
-//      %YAML   1.1     # a comment \n
-//                ^
-func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
-
-	// Repeat while the next character is a digit.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	var value, length int8
-	for is_digit(parser.buffer, parser.buffer_pos) {
-		// Check if the number is too long.
-		length++
-		if length > max_number_length {
-			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-				start_mark, "found extremely long version number")
-		}
-		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the number was present.
-	if length == 0 {
-		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-			start_mark, "did not find expected version number")
-	}
-	*number = value
-	return true
-}
-
-// Scan the value of a TAG-DIRECTIVE token.
-//
-// Scope:
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
-	var handle_value, prefix_value []byte
-
-	// Eat whitespaces.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Scan a handle.
-	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
-		return false
-	}
-
-	// Expect a whitespace.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	if !is_blank(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
-			start_mark, "did not find expected whitespace")
-		return false
-	}
-
-	// Eat whitespaces.
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Scan a prefix.
-	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
-		return false
-	}
-
-	// Expect a whitespace or line break.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	if !is_blankz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
-			start_mark, "did not find expected whitespace or line break")
-		return false
-	}
-
-	*handle = handle_value
-	*prefix = prefix_value
-	return true
-}
-
-func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
-	var s []byte
-
-	// Eat the indicator character.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Consume the value.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	for is_alpha(parser.buffer, parser.buffer_pos) {
-		s = read(parser, s)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	end_mark := parser.mark
-
-	/*
-	 * Check if length of the anchor is greater than 0 and it is followed by
-	 * a whitespace character or one of the indicators:
-	 *
-	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
-	 */
-
-	if len(s) == 0 ||
-		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
-			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
-			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
-			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
-			parser.buffer[parser.buffer_pos] == '`') {
-		context := "while scanning an alias"
-		if typ == yaml_ANCHOR_TOKEN {
-			context = "while scanning an anchor"
-		}
-		yaml_parser_set_scanner_error(parser, context, start_mark,
-			"did not find expected alphabetic or numeric character")
-		return false
-	}
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ:        typ,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-		value:      s,
-	}
-
-	return true
-}
-
-/*
- * Scan a TAG token.
- */
-
-func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
-	var handle, suffix []byte
-
-	start_mark := parser.mark
-
-	// Check if the tag is in the canonical form.
-	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-		return false
-	}
-
-	if parser.buffer[parser.buffer_pos+1] == '<' {
-		// Keep the handle as ''
-
-		// Eat '!<'
-		skip(parser)
-		skip(parser)
-
-		// Consume the tag value.
-		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
-			return false
-		}
-
-		// Check for '>' and eat it.
-		if parser.buffer[parser.buffer_pos] != '>' {
-			yaml_parser_set_scanner_error(parser, "while scanning a tag",
-				start_mark, "did not find the expected '>'")
-			return false
-		}
-
-		skip(parser)
-	} else {
-		// The tag has either the '!suffix' or the '!handle!suffix' form.
-
-		// First, try to scan a handle.
-		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
-			return false
-		}
-
-		// Check if it is, indeed, a handle.
-		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
-			// Scan the suffix now.
-			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
-				return false
-			}
-		} else {
-			// It wasn't a handle after all.  Scan the rest of the tag.
-			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
-				return false
-			}
-
-			// Set the handle to '!'.
-			handle = []byte{'!'}
-
-			// A special case: the '!' tag.  Set the handle to '' and the
-			// suffix to '!'.
-			if len(suffix) == 0 {
-				handle, suffix = suffix, handle
-			}
-		}
-	}
-
-	// Check the character which ends the tag.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	if !is_blankz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a tag",
-			start_mark, "did not find expected whitespace or line break")
-		return false
-	}
-
-	end_mark := parser.mark
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ:        yaml_TAG_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-		value:      handle,
-		suffix:     suffix,
-	}
-	return true
-}
-
-// Scan a tag handle.
-func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
-	// Check the initial '!' character.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	if parser.buffer[parser.buffer_pos] != '!' {
-		yaml_parser_set_scanner_tag_error(parser, directive,
-			start_mark, "did not find expected '!'")
-		return false
-	}
-
-	var s []byte
-
-	// Copy the '!' character.
-	s = read(parser, s)
-
-	// Copy all subsequent alphabetical and numerical characters.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	for is_alpha(parser.buffer, parser.buffer_pos) {
-		s = read(parser, s)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the trailing character is '!' and copy it.
-	if parser.buffer[parser.buffer_pos] == '!' {
-		s = read(parser, s)
-	} else {
-		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
-		// directive, it's an error.  If it's a tag token, it must be part of the URI.
-		if directive && string(s) != "!" {
-			yaml_parser_set_scanner_tag_error(parser, directive,
-				start_mark, "did not find expected '!'")
-			return false
-		}
-	}
-
-	*handle = s
-	return true
-}
-
-// Scan a tag.
-func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
-	//size_t length = head ? strlen((char *)head) : 0
-	var s []byte
-	hasTag := len(head) > 0
-
-	// Copy the head if needed.
-	//
-	// Note that we don't copy the leading '!' character.
-	if len(head) > 1 {
-		s = append(s, head[1:]...)
-	}
-
-	// Scan the tag.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	// The set of characters that may appear in URI is as follows:
-	//
-	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
-	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
-	//      '%'.
-	// [Go] TODO Convert this into more reasonable logic.
-	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
-		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
-		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
-		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
-		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
-		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
-		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
-		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
-		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
-		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
-		parser.buffer[parser.buffer_pos] == '%' {
-		// Check if it is a URI-escape sequence.
-		if parser.buffer[parser.buffer_pos] == '%' {
-			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
-				return false
-			}
-		} else {
-			s = read(parser, s)
-		}
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		hasTag = true
-	}
-
-	if !hasTag {
-		yaml_parser_set_scanner_tag_error(parser, directive,
-			start_mark, "did not find expected tag URI")
-		return false
-	}
-	*uri = s
-	return true
-}
-
-// Decode an URI-escape sequence corresponding to a single UTF-8 character.
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
-
-	// Decode the required number of characters.
-	w := 1024
-	for w > 0 {
-		// Check for a URI-escaped octet.
-		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-			return false
-		}
-
-		if !(parser.buffer[parser.buffer_pos] == '%' &&
-			is_hex(parser.buffer, parser.buffer_pos+1) &&
-			is_hex(parser.buffer, parser.buffer_pos+2)) {
-			return yaml_parser_set_scanner_tag_error(parser, directive,
-				start_mark, "did not find URI escaped octet")
-		}
-
-		// Get the octet.
-		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
-		// If it is the leading octet, determine the length of the UTF-8 sequence.
-		if w == 1024 {
-			w = width(octet)
-			if w == 0 {
-				return yaml_parser_set_scanner_tag_error(parser, directive,
-					start_mark, "found an incorrect leading UTF-8 octet")
-			}
-		} else {
-			// Check if the trailing octet is correct.
-			if octet&0xC0 != 0x80 {
-				return yaml_parser_set_scanner_tag_error(parser, directive,
-					start_mark, "found an incorrect trailing UTF-8 octet")
-			}
-		}
-
-		// Copy the octet and move the pointers.
-		*s = append(*s, octet)
-		skip(parser)
-		skip(parser)
-		skip(parser)
-		w--
-	}
-	return true
-}
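For reference, the function above folds %XX escapes in a tag URI back into raw UTF-8 bytes, validating the leading and trailing octets as it goes. A minimal standalone sketch of the same decoding using only the standard library (decodeURIEscapes is a hypothetical helper for illustration, not part of this package):

package main

import (
	"encoding/hex"
	"fmt"
)

// decodeURIEscapes folds %XX sequences in a tag URI back into raw bytes,
// roughly mirroring what the scanner does for URI-escaped octets.
func decodeURIEscapes(s string) ([]byte, error) {
	var out []byte
	for i := 0; i < len(s); {
		if s[i] != '%' {
			out = append(out, s[i])
			i++
			continue
		}
		if i+3 > len(s) {
			return nil, fmt.Errorf("truncated %%XX escape at offset %d", i)
		}
		b, err := hex.DecodeString(s[i+1 : i+3])
		if err != nil {
			return nil, fmt.Errorf("invalid %%XX escape at offset %d: %v", i, err)
		}
		out = append(out, b[0])
		i += 3
	}
	return out, nil
}

func main() {
	b, _ := decodeURIEscapes("tag:example.com,2002:caf%C3%A9")
	fmt.Println(string(b)) // tag:example.com,2002:café
}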
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
-	// Eat the indicator '|' or '>'.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Scan the additional block scalar indicators.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	// Check for a chomping indicator.
-	var chomping, increment int
-	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
-		// Set the chomping method and eat the indicator.
-		if parser.buffer[parser.buffer_pos] == '+' {
-			chomping = +1
-		} else {
-			chomping = -1
-		}
-		skip(parser)
-
-		// Check for an indentation indicator.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		if is_digit(parser.buffer, parser.buffer_pos) {
-			// Check that the indentation is greater than 0.
-			if parser.buffer[parser.buffer_pos] == '0' {
-				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-					start_mark, "found an indentation indicator equal to 0")
-				return false
-			}
-
-			// Get the indentation level and eat the indicator.
-			increment = as_digit(parser.buffer, parser.buffer_pos)
-			skip(parser)
-		}
-
-	} else if is_digit(parser.buffer, parser.buffer_pos) {
-		// Do the same as above, but in the opposite order.
-
-		if parser.buffer[parser.buffer_pos] == '0' {
-			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found an indentation indicator equal to 0")
-			return false
-		}
-		increment = as_digit(parser.buffer, parser.buffer_pos)
-		skip(parser)
-
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
-			if parser.buffer[parser.buffer_pos] == '+' {
-				chomping = +1
-			} else {
-				chomping = -1
-			}
-			skip(parser)
-		}
-	}
-
-	// Eat whitespaces and comments to the end of the line.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-	if parser.buffer[parser.buffer_pos] == '#' {
-		if !yaml_parser_scan_line_comment(parser, start_mark) {
-			return false
-		}
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-	}
-
-	// Check if we are at the end of the line.
-	if !is_breakz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-			start_mark, "did not find expected comment or line break")
-		return false
-	}
-
-	// Eat a line break.
-	if is_break(parser.buffer, parser.buffer_pos) {
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		skip_line(parser)
-	}
-
-	end_mark := parser.mark
-
-	// Set the indentation level if it was specified.
-	var indent int
-	if increment > 0 {
-		if parser.indent >= 0 {
-			indent = parser.indent + increment
-		} else {
-			indent = increment
-		}
-	}
-
-	// Scan the leading line breaks and determine the indentation level if needed.
-	var s, leading_break, trailing_breaks []byte
-	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
-		return false
-	}
-
-	// Scan the block scalar content.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	var leading_blank, trailing_blank bool
-	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
-		// We are at the beginning of a non-empty line.
-
-		// Is it a trailing whitespace?
-		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
-		// Check if we need to fold the leading line break.
-		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
-			// Do we need to join the lines by space?
-			if len(trailing_breaks) == 0 {
-				s = append(s, ' ')
-			}
-		} else {
-			s = append(s, leading_break...)
-		}
-		leading_break = leading_break[:0]
-
-		// Append the remaining line breaks.
-		s = append(s, trailing_breaks...)
-		trailing_breaks = trailing_breaks[:0]
-
-		// Is it a leading whitespace?
-		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
-		// Consume the current line.
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			s = read(parser, s)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-
-		// Consume the line break.
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-
-		leading_break = read_line(parser, leading_break)
-
-		// Eat the following indentation spaces and line breaks.
-		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
-			return false
-		}
-	}
-
-	// Chomp the tail.
-	if chomping != -1 {
-		s = append(s, leading_break...)
-	}
-	if chomping == 1 {
-		s = append(s, trailing_breaks...)
-	}
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ:        yaml_SCALAR_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-		value:      s,
-		style:      yaml_LITERAL_SCALAR_STYLE,
-	}
-	if !literal {
-		token.style = yaml_FOLDED_SCALAR_STYLE
-	}
-	return true
-}
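The indicator handling above ('|' or '>', an optional '+'/'-' chomping indicator, and an optional explicit indentation digit) is easiest to observe through the public API. A small usage sketch, assuming the regular gopkg.in/yaml.v3 import that this vendored copy mirrors:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("literal: |\n  line one\n  line two\nfolded: >\n  line one\n  line two\n")
	var doc map[string]string
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", doc["literal"]) // "line one\nline two\n"
	fmt.Printf("%q\n", doc["folded"])  // "line one line two\n"
}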
-
-// Scan indentation spaces and line breaks for a block scalar.  Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
-	*end_mark = parser.mark
-
-	// Eat the indentation spaces and line breaks.
-	max_indent := 0
-	for {
-		// Eat the indentation spaces.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-		if parser.mark.column > max_indent {
-			max_indent = parser.mark.column
-		}
-
-		// Check for a tab character messing the indentation.
-		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
-			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found a tab character where an indentation space is expected")
-		}
-
-		// Have we found a non-empty line?
-		if !is_break(parser.buffer, parser.buffer_pos) {
-			break
-		}
-
-		// Consume the line break.
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		// [Go] Should really be returning breaks instead.
-		*breaks = read_line(parser, *breaks)
-		*end_mark = parser.mark
-	}
-
-	// Determine the indentation level if needed.
-	if *indent == 0 {
-		*indent = max_indent
-		if *indent < parser.indent+1 {
-			*indent = parser.indent + 1
-		}
-		if *indent < 1 {
-			*indent = 1
-		}
-	}
-	return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
-	// Eat the left quote.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Consume the content of the quoted scalar.
-	var s, leading_break, trailing_breaks, whitespaces []byte
-	for {
-		// Check that there are no document indicators at the beginning of the line.
-		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
-			return false
-		}
-
-		if parser.mark.column == 0 &&
-			((parser.buffer[parser.buffer_pos+0] == '-' &&
-				parser.buffer[parser.buffer_pos+1] == '-' &&
-				parser.buffer[parser.buffer_pos+2] == '-') ||
-				(parser.buffer[parser.buffer_pos+0] == '.' &&
-					parser.buffer[parser.buffer_pos+1] == '.' &&
-					parser.buffer[parser.buffer_pos+2] == '.')) &&
-			is_blankz(parser.buffer, parser.buffer_pos+3) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected document indicator")
-			return false
-		}
-
-		// Check for EOF.
-		if is_z(parser.buffer, parser.buffer_pos) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected end of stream")
-			return false
-		}
-
-		// Consume non-blank characters.
-		leading_blanks := false
-		for !is_blankz(parser.buffer, parser.buffer_pos) {
-			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
-				// It is an escaped single quote.
-				s = append(s, '\'')
-				skip(parser)
-				skip(parser)
-
-			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
-				// It is a right single quote.
-				break
-			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
-				// It is a right double quote.
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
-				// It is an escaped line break.
-				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-					return false
-				}
-				skip(parser)
-				skip_line(parser)
-				leading_blanks = true
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
-				// It is an escape sequence.
-				code_length := 0
-
-				// Check the escape character.
-				switch parser.buffer[parser.buffer_pos+1] {
-				case '0':
-					s = append(s, 0)
-				case 'a':
-					s = append(s, '\x07')
-				case 'b':
-					s = append(s, '\x08')
-				case 't', '\t':
-					s = append(s, '\x09')
-				case 'n':
-					s = append(s, '\x0A')
-				case 'v':
-					s = append(s, '\x0B')
-				case 'f':
-					s = append(s, '\x0C')
-				case 'r':
-					s = append(s, '\x0D')
-				case 'e':
-					s = append(s, '\x1B')
-				case ' ':
-					s = append(s, '\x20')
-				case '"':
-					s = append(s, '"')
-				case '\'':
-					s = append(s, '\'')
-				case '\\':
-					s = append(s, '\\')
-				case 'N': // NEL (#x85)
-					s = append(s, '\xC2')
-					s = append(s, '\x85')
-				case '_': // #xA0
-					s = append(s, '\xC2')
-					s = append(s, '\xA0')
-				case 'L': // LS (#x2028)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA8')
-				case 'P': // PS (#x2029)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA9')
-				case 'x':
-					code_length = 2
-				case 'u':
-					code_length = 4
-				case 'U':
-					code_length = 8
-				default:
-					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-						start_mark, "found unknown escape character")
-					return false
-				}
-
-				skip(parser)
-				skip(parser)
-
-				// Consume an arbitrary escape code.
-				if code_length > 0 {
-					var value int
-
-					// Scan the character value.
-					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
-						return false
-					}
-					for k := 0; k < code_length; k++ {
-						if !is_hex(parser.buffer, parser.buffer_pos+k) {
-							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-								start_mark, "did not find expected hexdecimal number")
-							return false
-						}
-						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
-					}
-
-					// Check the value and write the character.
-					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
-						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-							start_mark, "found invalid Unicode character escape code")
-						return false
-					}
-					if value <= 0x7F {
-						s = append(s, byte(value))
-					} else if value <= 0x7FF {
-						s = append(s, byte(0xC0+(value>>6)))
-						s = append(s, byte(0x80+(value&0x3F)))
-					} else if value <= 0xFFFF {
-						s = append(s, byte(0xE0+(value>>12)))
-						s = append(s, byte(0x80+((value>>6)&0x3F)))
-						s = append(s, byte(0x80+(value&0x3F)))
-					} else {
-						s = append(s, byte(0xF0+(value>>18)))
-						s = append(s, byte(0x80+((value>>12)&0x3F)))
-						s = append(s, byte(0x80+((value>>6)&0x3F)))
-						s = append(s, byte(0x80+(value&0x3F)))
-					}
-
-					// Advance the pointer.
-					for k := 0; k < code_length; k++ {
-						skip(parser)
-					}
-				}
-			} else {
-				// It is a non-escaped non-blank character.
-				s = read(parser, s)
-			}
-			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-				return false
-			}
-		}
-
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
-		// Check if we are at the end of the scalar.
-		if single {
-			if parser.buffer[parser.buffer_pos] == '\'' {
-				break
-			}
-		} else {
-			if parser.buffer[parser.buffer_pos] == '"' {
-				break
-			}
-		}
-
-		// Consume blank characters.
-		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
-			if is_blank(parser.buffer, parser.buffer_pos) {
-				// Consume a space or a tab character.
-				if !leading_blanks {
-					whitespaces = read(parser, whitespaces)
-				} else {
-					skip(parser)
-				}
-			} else {
-				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-					return false
-				}
-
-				// Check if it is a first line break.
-				if !leading_blanks {
-					whitespaces = whitespaces[:0]
-					leading_break = read_line(parser, leading_break)
-					leading_blanks = true
-				} else {
-					trailing_breaks = read_line(parser, trailing_breaks)
-				}
-			}
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-
-		// Join the whitespaces or fold line breaks.
-		if leading_blanks {
-			// Do we need to fold line breaks?
-			if len(leading_break) > 0 && leading_break[0] == '\n' {
-				if len(trailing_breaks) == 0 {
-					s = append(s, ' ')
-				} else {
-					s = append(s, trailing_breaks...)
-				}
-			} else {
-				s = append(s, leading_break...)
-				s = append(s, trailing_breaks...)
-			}
-			trailing_breaks = trailing_breaks[:0]
-			leading_break = leading_break[:0]
-		} else {
-			s = append(s, whitespaces...)
-			whitespaces = whitespaces[:0]
-		}
-	}
-
-	// Eat the right quote.
-	skip(parser)
-	end_mark := parser.mark
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ:        yaml_SCALAR_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-		value:      s,
-		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
-	}
-	if !single {
-		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-	return true
-}
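The \xXX, \uXXXX and \UXXXXXXXX branch above hand-encodes the scanned code point into 1 to 4 UTF-8 bytes. A minimal sketch of the same byte layout, cross-checked against the standard library (encodeUTF8 is a hypothetical helper, not the package's API):

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 uses the same branch structure as the scanner: 1 byte up to
// 0x7F, 2 bytes up to 0x7FF, 3 bytes up to 0xFFFF, otherwise 4 bytes.
func encodeUTF8(value int) []byte {
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	for _, cp := range []int{0x41, 0xE9, 0x20AC, 0x1F600} {
		got := encodeUTF8(cp)
		want := make([]byte, utf8.RuneLen(rune(cp)))
		utf8.EncodeRune(want, rune(cp))
		fmt.Printf("U+%04X -> % X (stdlib: % X)\n", cp, got, want)
	}
}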
-
-// Scan a plain scalar.
-func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
-
-	var s, leading_break, trailing_breaks, whitespaces []byte
-	var leading_blanks bool
-	var indent = parser.indent + 1
-
-	start_mark := parser.mark
-	end_mark := parser.mark
-
-	// Consume the content of the plain scalar.
-	for {
-		// Check for a document indicator.
-		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
-			return false
-		}
-		if parser.mark.column == 0 &&
-			((parser.buffer[parser.buffer_pos+0] == '-' &&
-				parser.buffer[parser.buffer_pos+1] == '-' &&
-				parser.buffer[parser.buffer_pos+2] == '-') ||
-				(parser.buffer[parser.buffer_pos+0] == '.' &&
-					parser.buffer[parser.buffer_pos+1] == '.' &&
-					parser.buffer[parser.buffer_pos+2] == '.')) &&
-			is_blankz(parser.buffer, parser.buffer_pos+3) {
-			break
-		}
-
-		// Check for a comment.
-		if parser.buffer[parser.buffer_pos] == '#' {
-			break
-		}
-
-		// Consume non-blank characters.
-		for !is_blankz(parser.buffer, parser.buffer_pos) {
-
-			// Check for indicators that may end a plain scalar.
-			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
-				(parser.flow_level > 0 &&
-					(parser.buffer[parser.buffer_pos] == ',' ||
-						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
-						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
-						parser.buffer[parser.buffer_pos] == '}')) {
-				break
-			}
-
-			// Check if we need to join whitespaces and breaks.
-			if leading_blanks || len(whitespaces) > 0 {
-				if leading_blanks {
-					// Do we need to fold line breaks?
-					if leading_break[0] == '\n' {
-						if len(trailing_breaks) == 0 {
-							s = append(s, ' ')
-						} else {
-							s = append(s, trailing_breaks...)
-						}
-					} else {
-						s = append(s, leading_break...)
-						s = append(s, trailing_breaks...)
-					}
-					trailing_breaks = trailing_breaks[:0]
-					leading_break = leading_break[:0]
-					leading_blanks = false
-				} else {
-					s = append(s, whitespaces...)
-					whitespaces = whitespaces[:0]
-				}
-			}
-
-			// Copy the character.
-			s = read(parser, s)
-
-			end_mark = parser.mark
-			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-				return false
-			}
-		}
-
-		// Is it the end?
-		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
-			break
-		}
-
-		// Consume blank characters.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
-		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
-			if is_blank(parser.buffer, parser.buffer_pos) {
-
-				// Check for tab characters that abuse indentation.
-				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
-					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-						start_mark, "found a tab character that violates indentation")
-					return false
-				}
-
-				// Consume a space or a tab character.
-				if !leading_blanks {
-					whitespaces = read(parser, whitespaces)
-				} else {
-					skip(parser)
-				}
-			} else {
-				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-					return false
-				}
-
-				// Check if it is a first line break.
-				if !leading_blanks {
-					whitespaces = whitespaces[:0]
-					leading_break = read_line(parser, leading_break)
-					leading_blanks = true
-				} else {
-					trailing_breaks = read_line(parser, trailing_breaks)
-				}
-			}
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-
-		// Check indentation level.
-		if parser.flow_level == 0 && parser.mark.column < indent {
-			break
-		}
-	}
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ:        yaml_SCALAR_TOKEN,
-		start_mark: start_mark,
-		end_mark:   end_mark,
-		value:      s,
-		style:      yaml_PLAIN_SCALAR_STYLE,
-	}
-
-	// Note that we change the 'simple_key_allowed' flag.
-	if leading_blanks {
-		parser.simple_key_allowed = true
-	}
-	return true
-}
-
-func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
-	if parser.newlines > 0 {
-		return true
-	}
-
-	var start_mark yaml_mark_t
-	var text []byte
-
-	for peek := 0; peek < 512; peek++ {
-		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
-			break
-		}
-		if is_blank(parser.buffer, parser.buffer_pos+peek) {
-			continue
-		}
-		if parser.buffer[parser.buffer_pos+peek] == '#' {
-			seen := parser.mark.index+peek
-			for {
-				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-					return false
-				}
-				if is_breakz(parser.buffer, parser.buffer_pos) {
-					if parser.mark.index >= seen {
-						break
-					}
-					if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-						return false
-					}
-					skip_line(parser)
-				} else if parser.mark.index >= seen {
-					if len(text) == 0 {
-						start_mark = parser.mark
-					}
-					text = read(parser, text)
-				} else {
-					skip(parser)
-				}
-			}
-		}
-		break
-	}
-	if len(text) > 0 {
-		parser.comments = append(parser.comments, yaml_comment_t{
-			token_mark: token_mark,
-			start_mark: start_mark,
-			line: text,
-		})
-	}
-	return true
-}
-
-func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
-	token := parser.tokens[len(parser.tokens)-1]
-
-	if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
-		token = parser.tokens[len(parser.tokens)-2]
-	}
-
-	var token_mark = token.start_mark
-	var start_mark yaml_mark_t
-	var next_indent = parser.indent
-	if next_indent < 0 {
-		next_indent = 0
-	}
-
-	var recent_empty = false
-	var first_empty = parser.newlines <= 1
-
-	var line = parser.mark.line
-	var column = parser.mark.column
-
-	var text []byte
-
-	// The foot line is the place where a comment must start to
-	// still be considered as a foot of the prior content.
-	// If there's some content in the currently parsed line, then
-	// the foot is the line below it.
-	var foot_line = -1
-	if scan_mark.line > 0 {
-		foot_line = parser.mark.line-parser.newlines+1
-		if parser.newlines == 0 && parser.mark.column > 1 {
-			foot_line++
-		}
-	}
-
-	var peek = 0
-	for ; peek < 512; peek++ {
-		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
-			break
-		}
-		column++
-		if is_blank(parser.buffer, parser.buffer_pos+peek) {
-			continue
-		}
-		c := parser.buffer[parser.buffer_pos+peek]
-		var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
-		if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
-			// Got line break or terminator.
-			if close_flow || !recent_empty {
-				if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
-					// This is the first empty line and there were no empty lines before,
-					// so this initial part of the comment is a foot of the prior token
-					// instead of being a head for the following one. Split it up.
-					// Alternatively, this might also be the last comment inside a flow
-					// scope, so it must be a footer.
-					if len(text) > 0 {
-						if start_mark.column-1 < next_indent {
-							// If dedented it's unrelated to the prior token.
-							token_mark = start_mark
-						}
-						parser.comments = append(parser.comments, yaml_comment_t{
-							scan_mark:  scan_mark,
-							token_mark: token_mark,
-							start_mark: start_mark,
-							end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
-							foot:       text,
-						})
-						scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
-						token_mark = scan_mark
-						text = nil
-					}
-				} else {
-					if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
-						text = append(text, '\n')
-					}
-				}
-			}
-			if !is_break(parser.buffer, parser.buffer_pos+peek) {
-				break
-			}
-			first_empty = false
-			recent_empty = true
-			column = 0
-			line++
-			continue
-		}
-
-		if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
-			// The comment at the different indentation is a foot of the
-			// preceding data rather than a head of the upcoming one.
-			parser.comments = append(parser.comments, yaml_comment_t{
-				scan_mark:  scan_mark,
-				token_mark: token_mark,
-				start_mark: start_mark,
-				end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
-				foot:       text,
-			})
-			scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
-			token_mark = scan_mark
-			text = nil
-		}
-
-		if parser.buffer[parser.buffer_pos+peek] != '#' {
-			break
-		}
-
-		if len(text) == 0 {
-			start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
-		} else {
-			text = append(text, '\n')
-		}
-
-		recent_empty = false
-
-		// Consume until after the consumed comment line.
-		seen := parser.mark.index+peek
-		for {
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-			if is_breakz(parser.buffer, parser.buffer_pos) {
-				if parser.mark.index >= seen {
-					break
-				}
-				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-					return false
-				}
-				skip_line(parser)
-			} else if parser.mark.index >= seen {
-				text = read(parser, text)
-			} else {
-				skip(parser)
-			}
-		}
-
-		peek = 0
-		column = 0
-		line = parser.mark.line
-		next_indent = parser.indent
-		if next_indent < 0 {
-			next_indent = 0
-		}
-	}
-
-	if len(text) > 0 {
-		parser.comments = append(parser.comments, yaml_comment_t{
-			scan_mark:  scan_mark,
-			token_mark: start_mark,
-			start_mark: start_mark,
-			end_mark:   yaml_mark_t{parser.mark.index + peek - 1, line, column},
-			head:       text,
-		})
-	}
-	return true
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/sorter.go b/application/source/vendor/gopkg.in/yaml.v3/sorter.go
deleted file mode 100644
index 9210ece7e97232891625ed08c549b92c0e9bb169..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/sorter.go
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
-	"reflect"
-	"unicode"
-)
-
-type keyList []reflect.Value
-
-func (l keyList) Len() int      { return len(l) }
-func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l keyList) Less(i, j int) bool {
-	a := l[i]
-	b := l[j]
-	ak := a.Kind()
-	bk := b.Kind()
-	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
-		a = a.Elem()
-		ak = a.Kind()
-	}
-	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
-		b = b.Elem()
-		bk = b.Kind()
-	}
-	af, aok := keyFloat(a)
-	bf, bok := keyFloat(b)
-	if aok && bok {
-		if af != bf {
-			return af < bf
-		}
-		if ak != bk {
-			return ak < bk
-		}
-		return numLess(a, b)
-	}
-	if ak != reflect.String || bk != reflect.String {
-		return ak < bk
-	}
-	ar, br := []rune(a.String()), []rune(b.String())
-	digits := false
-	for i := 0; i < len(ar) && i < len(br); i++ {
-		if ar[i] == br[i] {
-			digits = unicode.IsDigit(ar[i])
-			continue
-		}
-		al := unicode.IsLetter(ar[i])
-		bl := unicode.IsLetter(br[i])
-		if al && bl {
-			return ar[i] < br[i]
-		}
-		if al || bl {
-			if digits {
-				return al
-			} else {
-				return bl
-			}
-		}
-		var ai, bi int
-		var an, bn int64
-		if ar[i] == '0' || br[i] == '0' {
-			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
-				if ar[j] != '0' {
-					an = 1
-					bn = 1
-					break
-				}
-			}
-		}
-		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
-			an = an*10 + int64(ar[ai]-'0')
-		}
-		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
-			bn = bn*10 + int64(br[bi]-'0')
-		}
-		if an != bn {
-			return an < bn
-		}
-		if ai != bi {
-			return ai < bi
-		}
-		return ar[i] < br[i]
-	}
-	return len(ar) < len(br)
-}
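The Less method above gives map keys a natural, number-aware ordering when a Go map is marshalled, so "item2" sorts before "item10". A small usage sketch against the public API:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	m := map[string]int{"item10": 10, "item2": 2, "item1": 1}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected key order is natural rather than lexicographic:
	// item1: 1
	// item2: 2
	// item10: 10
}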
-
-// keyFloat returns a float value for v if it is a number or bool,
-// and reports whether it is one.
-func keyFloat(v reflect.Value) (f float64, ok bool) {
-	switch v.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return float64(v.Int()), true
-	case reflect.Float32, reflect.Float64:
-		return v.Float(), true
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return float64(v.Uint()), true
-	case reflect.Bool:
-		if v.Bool() {
-			return 1, true
-		}
-		return 0, true
-	}
-	return 0, false
-}
-
-// numLess returns whether a < b.
-// a and b must have the same kind.
-func numLess(a, b reflect.Value) bool {
-	switch a.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return a.Int() < b.Int()
-	case reflect.Float32, reflect.Float64:
-		return a.Float() < b.Float()
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return a.Uint() < b.Uint()
-	case reflect.Bool:
-		return !a.Bool() && b.Bool()
-	}
-	panic("not a number")
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/writerc.go b/application/source/vendor/gopkg.in/yaml.v3/writerc.go
deleted file mode 100644
index b8a116bf9a22b9911958f44904289a8c6b482bd2..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/writerc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// 
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-// 
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-// 
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-// 
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-// Set the writer error and return false.
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
-	emitter.error = yaml_WRITER_ERROR
-	emitter.problem = problem
-	return false
-}
-
-// Flush the output buffer.
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
-	if emitter.write_handler == nil {
-		panic("write handler not set")
-	}
-
-	// Check if the buffer is empty.
-	if emitter.buffer_pos == 0 {
-		return true
-	}
-
-	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
-		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
-	}
-	emitter.buffer_pos = 0
-	return true
-}
diff --git a/application/source/vendor/gopkg.in/yaml.v3/yaml.go b/application/source/vendor/gopkg.in/yaml.v3/yaml.go
deleted file mode 100644
index 8cec6da48d3ec4d8858ca622383c75e359faee1f..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/yaml.go
+++ /dev/null
@@ -1,698 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-//   https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"strings"
-	"sync"
-	"unicode/utf8"
-)
-
-// The Unmarshaler interface may be implemented by types to customize their
-// behavior when being unmarshaled from a YAML document.
-type Unmarshaler interface {
-	UnmarshalYAML(value *Node) error
-}
-
-type obsoleteUnmarshaler interface {
-	UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
-	MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to type
-// mismatches, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     var t T
-//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
-	return unmarshal(in, out, false)
-}
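A fully runnable version of the example in the doc comment above:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type T struct {
	F int `yaml:"a,omitempty"`
	B int
}

func main() {
	var t T
	if err := yaml.Unmarshal([]byte("a: 1\nb: 2"), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.F, t.B) // 1 2
}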
-
-// A Decoder reads and decodes YAML values from an input stream.
-type Decoder struct {
-	parser      *parser
-	knownFields bool
-}
-
-// NewDecoder returns a new decoder that reads from r.
-//
-// The decoder introduces its own buffering and may read
-// data from r beyond the YAML values requested.
-func NewDecoder(r io.Reader) *Decoder {
-	return &Decoder{
-		parser: newParserFromReader(r),
-	}
-}
-
-// KnownFields ensures that the keys in decoded mappings
-// exist as fields in the struct being decoded into.
-func (dec *Decoder) KnownFields(enable bool) {
-	dec.knownFields = enable
-}
-
-// Decode reads the next YAML-encoded value from its input
-// and stores it in the value pointed to by v.
-//
-// See the documentation for Unmarshal for details about the
-// conversion of YAML into a Go value.
-func (dec *Decoder) Decode(v interface{}) (err error) {
-	d := newDecoder()
-	d.knownFields = dec.knownFields
-	defer handleErr(&err)
-	node := dec.parser.parse()
-	if node == nil {
-		return io.EOF
-	}
-	out := reflect.ValueOf(v)
-	if out.Kind() == reflect.Ptr && !out.IsNil() {
-		out = out.Elem()
-	}
-	d.unmarshal(node, out)
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
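A short sketch of streaming several YAML documents through a Decoder, stopping on io.EOF as described above:

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(doc["a"]) // prints 1, then 2
	}
}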
-
-// Decode decodes the node and stores its data into the value pointed to by v.
-//
-// See the documentation for Unmarshal for details about the
-// conversion of YAML into a Go value.
-func (n *Node) Decode(v interface{}) (err error) {
-	d := newDecoder()
-	defer handleErr(&err)
-	out := reflect.ValueOf(v)
-	if out.Kind() == reflect.Ptr && !out.IsNil() {
-		out = out.Elem()
-	}
-	d.unmarshal(n, out)
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
-
-func unmarshal(in []byte, out interface{}, strict bool) (err error) {
-	defer handleErr(&err)
-	d := newDecoder()
-	p := newParser(in)
-	defer p.destroy()
-	node := p.parse()
-	if node != nil {
-		v := reflect.ValueOf(out)
-		if v.Kind() == reflect.Ptr && !v.IsNil() {
-			v = v.Elem()
-		}
-		d.unmarshal(node, v)
-	}
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-//     omitempty    Only include the field if it's not set to the zero
-//                  value for the type or to empty slices or maps.
-//                  Zero valued structs will be omitted if all their public
-//                  fields are zero, unless they implement an IsZero
-//                  method (see the IsZeroer interface type), in which
-//                  case the field will be excluded if IsZero returns true.
-//
-//     flow         Marshal using a flow style (useful for structs,
-//                  sequences and maps).
-//
-//     inline       Inline the field, which must be a struct or a map,
-//                  causing all of its fields or keys to be processed as if
-//                  they were part of the outer struct. For maps, keys must
-//                  not conflict with the yaml keys of other struct fields.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
-	defer handleErr(&err)
-	e := newEncoder()
-	defer e.destroy()
-	e.marshalDoc("", reflect.ValueOf(in))
-	e.finish()
-	out = e.out
-	return
-}
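A runnable sketch of the omitempty and flow flags described above (Doc and Point are illustrative types, not from the package):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Point struct {
	X int `yaml:"x"`
	Y int `yaml:"y"`
}

type Doc struct {
	Name   string `yaml:"name,omitempty"` // omitted when empty
	Origin Point  `yaml:"origin,flow"`    // emitted in flow style
}

func main() {
	out, err := yaml.Marshal(Doc{Origin: Point{X: 1, Y: 2}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected output:
	// origin: {x: 1, y: 2}
}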
-
-// An Encoder writes YAML values to an output stream.
-type Encoder struct {
-	encoder *encoder
-}
-
-// NewEncoder returns a new encoder that writes to w.
-// The Encoder should be closed after use to flush all data
-// to w.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
-		encoder: newEncoderWithWriter(w),
-	}
-}
-
-// Encode writes the YAML encoding of v to the stream.
-// If multiple items are encoded to the stream, the
-// second and subsequent documents will be preceded
-// with a "---" document separator, but the first will not.
-//
-// See the documentation for Marshal for details about the conversion of Go
-// values to YAML.
-func (e *Encoder) Encode(v interface{}) (err error) {
-	defer handleErr(&err)
-	e.encoder.marshalDoc("", reflect.ValueOf(v))
-	return nil
-}
-
-// Encode encodes value v and stores its representation in n.
-//
-// See the documentation for Marshal for details about the
-// conversion of Go values into YAML.
-func (n *Node) Encode(v interface{}) (err error) {
-	defer handleErr(&err)
-	e := newEncoder()
-	defer e.destroy()
-	e.marshalDoc("", reflect.ValueOf(v))
-	e.finish()
-	p := newParser(e.out)
-	p.textless = true
-	defer p.destroy()
-	doc := p.parse()
-	*n = *doc.Content[0]
-	return nil
-}
-
-// SetIndent changes the indentation used when encoding.
-func (e *Encoder) SetIndent(spaces int) {
-	if spaces < 0 {
-		panic("yaml: cannot indent to a negative number of spaces")
-	}
-	e.encoder.indent = spaces
-}
-
-// Close closes the encoder by writing any remaining data.
-// It does not write a stream terminating string "...".
-func (e *Encoder) Close() (err error) {
-	defer handleErr(&err)
-	e.encoder.finish()
-	return nil
-}
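A short sketch of the Encoder workflow described above (SetIndent, multiple documents separated by "---", Close to flush):

package main

import (
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(2)
	defer enc.Close() // flushes any buffered output

	_ = enc.Encode(map[string]int{"first": 1})
	_ = enc.Encode(map[string]int{"second": 2}) // preceded by a "---" separator
}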
-
-func handleErr(err *error) {
-	if v := recover(); v != nil {
-		if e, ok := v.(yamlError); ok {
-			*err = e.err
-		} else {
-			panic(v)
-		}
-	}
-}
-
-type yamlError struct {
-	err error
-}
-
-func fail(err error) {
-	panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
-	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
-	Errors []string
-}
-
-func (e *TypeError) Error() string {
-	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
-}
-
-type Kind uint32
-
-const (
-	DocumentNode Kind = 1 << iota
-	SequenceNode
-	MappingNode
-	ScalarNode
-	AliasNode
-)
-
-type Style uint32
-
-const (
-	TaggedStyle Style = 1 << iota
-	DoubleQuotedStyle
-	SingleQuotedStyle
-	LiteralStyle
-	FoldedStyle
-	FlowStyle
-)
-
-// Node represents an element in the YAML document hierarchy. While documents
-// are typically encoded and decoded into higher level types, such as structs
-// and maps, Node is an intermediate representation that allows detailed
-// control over the content being decoded or encoded.
-//
-// It's worth noting that although Node offers access into details such as
-// line numbers, columns, and comments, the content when re-encoded will not
-// have its original textual representation preserved. An effort is made to
-// render the data pleasantly, and to preserve comments near the data they
-// describe, though.
-//
-// Values that make use of the Node type interact with the yaml package in the
-// same way any other type would do, by encoding and decoding yaml data
-// directly or indirectly into them.
-//
-// For example:
-//
-//     var person struct {
-//             Name    string
-//             Address yaml.Node
-//     }
-//     err := yaml.Unmarshal(data, &person)
-// 
-// Or by itself:
-//
-//     var person Node
-//     err := yaml.Unmarshal(data, &person)
-//
-type Node struct {
-	// Kind defines whether the node is a document, a mapping, a sequence,
-	// a scalar value, or an alias to another node. The specific data type of
-	// scalar nodes may be obtained via the ShortTag and LongTag methods.
-	Kind  Kind
-
-	// Style allows customizing the appearance of the node in the tree.
-	Style Style
-
-	// Tag holds the YAML tag defining the data type for the value.
-	// When decoding, this field will always be set to the resolved tag,
-	// even when it wasn't explicitly provided in the YAML content.
-	// When encoding, if this field is unset the value type will be
-	// implied from the node properties, and if it is set, it will only
-	// be serialized into the representation if TaggedStyle is used or
-	// the implicit tag diverges from the provided one.
-	Tag string
-
-	// Value holds the unescaped and unquoted representation of the value.
-	Value string
-
-	// Anchor holds the anchor name for this node, which allows aliases to point to it.
-	Anchor string
-
-	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
-	Alias *Node
-
-	// Content holds contained nodes for documents, mappings, and sequences.
-	Content []*Node
-
-	// HeadComment holds any comments in the lines preceding the node and
-	// not separated by an empty line.
-	HeadComment string
-
-	// LineComment holds any comments at the end of the line where the node is in.
-	LineComment string
-
-	// FootComment holds any comments following the node and before empty lines.
-	FootComment string
-
-	// Line and Column hold the node position in the decoded YAML text.
-	// These fields are not respected when encoding the node.
-	Line   int
-	Column int
-}
-
-// IsZero returns whether the node has all of its fields unset.
-func (n *Node) IsZero() bool {
-	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
-		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
-}
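A brief sketch of walking the Node representation directly, as the doc comment above describes:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var n yaml.Node
	if err := yaml.Unmarshal([]byte("name: demo"), &n); err != nil {
		panic(err)
	}
	m := n.Content[0]                      // the mapping under the document node
	key, val := m.Content[0], m.Content[1] // mapping content alternates key, value
	fmt.Println(n.Kind == yaml.DocumentNode, m.Kind == yaml.MappingNode) // true true
	fmt.Println(key.Value, val.Value)                                    // name demo
}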
-
-
-// LongTag returns the long form of the tag that indicates the data type for
-// the node. If the Tag field isn't explicitly defined, one will be computed
-// based on the node properties.
-func (n *Node) LongTag() string {
-	return longTag(n.ShortTag())
-}
-
-// ShortTag returns the short form of the YAML tag that indicates data type for
-// the node. If the Tag field isn't explicitly defined, one will be computed
-// based on the node properties.
-func (n *Node) ShortTag() string {
-	if n.indicatedString() {
-		return strTag
-	}
-	if n.Tag == "" || n.Tag == "!" {
-		switch n.Kind {
-		case MappingNode:
-			return mapTag
-		case SequenceNode:
-			return seqTag
-		case AliasNode:
-			if n.Alias != nil {
-				return n.Alias.ShortTag()
-			}
-		case ScalarNode:
-			tag, _ := resolve("", n.Value)
-			return tag
-		case 0:
-			// Special case to make the zero value convenient.
-			if n.IsZero() {
-				return nullTag
-			}
-		}
-		return ""
-	}
-	return shortTag(n.Tag)
-}
-
-func (n *Node) indicatedString() bool {
-	return n.Kind == ScalarNode &&
-		(shortTag(n.Tag) == strTag ||
-			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
-}
-
-// SetString is a convenience function that sets the node to a string value
-// and defines its style in a pleasant way depending on its content.
-func (n *Node) SetString(s string) {
-	n.Kind = ScalarNode
-	if utf8.ValidString(s) {
-		n.Value = s
-		n.Tag = strTag
-	} else {
-		n.Value = encodeBase64(s)
-		n.Tag = binaryTag
-	}
-	if strings.Contains(n.Value, "\n") {
-		n.Style = LiteralStyle
-	}
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
-	FieldsMap  map[string]fieldInfo
-	FieldsList []fieldInfo
-
-	// InlineMap is the number of the field in the struct that
-	// contains an ,inline map, or -1 if there's none.
-	InlineMap int
-
-	// InlineUnmarshalers holds indexes to inlined fields that
-	// contain unmarshaler values.
-	InlineUnmarshalers [][]int
-}
-
-type fieldInfo struct {
-	Key       string
-	Num       int
-	OmitEmpty bool
-	Flow      bool
-	// Id holds the unique field identifier, so we can cheaply
-	// check for field duplicates without maintaining an extra map.
-	Id int
-
-	// Inline holds the field index if the field is part of an inlined struct.
-	Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-var unmarshalerType reflect.Type
-
-func init() {
-	var v Unmarshaler
-	unmarshalerType = reflect.ValueOf(&v).Elem().Type()
-}
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
-	fieldMapMutex.RLock()
-	sinfo, found := structMap[st]
-	fieldMapMutex.RUnlock()
-	if found {
-		return sinfo, nil
-	}
-
-	n := st.NumField()
-	fieldsMap := make(map[string]fieldInfo)
-	fieldsList := make([]fieldInfo, 0, n)
-	inlineMap := -1
-	inlineUnmarshalers := [][]int(nil)
-	for i := 0; i != n; i++ {
-		field := st.Field(i)
-		if field.PkgPath != "" && !field.Anonymous {
-			continue // Private field
-		}
-
-		info := fieldInfo{Num: i}
-
-		tag := field.Tag.Get("yaml")
-		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
-			tag = string(field.Tag)
-		}
-		if tag == "-" {
-			continue
-		}
-
-		inline := false
-		fields := strings.Split(tag, ",")
-		if len(fields) > 1 {
-			for _, flag := range fields[1:] {
-				switch flag {
-				case "omitempty":
-					info.OmitEmpty = true
-				case "flow":
-					info.Flow = true
-				case "inline":
-					inline = true
-				default:
-					return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
-				}
-			}
-			tag = fields[0]
-		}
-
-		if inline {
-			switch field.Type.Kind() {
-			case reflect.Map:
-				if inlineMap >= 0 {
-					return nil, errors.New("multiple ,inline maps in struct " + st.String())
-				}
-				if field.Type.Key() != reflect.TypeOf("") {
-					return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
-				}
-				inlineMap = info.Num
-			case reflect.Struct, reflect.Ptr:
-				ftype := field.Type
-				for ftype.Kind() == reflect.Ptr {
-					ftype = ftype.Elem()
-				}
-				if ftype.Kind() != reflect.Struct {
-					return nil, errors.New("option ,inline may only be used on a struct or map field")
-				}
-				if reflect.PtrTo(ftype).Implements(unmarshalerType) {
-					inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
-				} else {
-					sinfo, err := getStructInfo(ftype)
-					if err != nil {
-						return nil, err
-					}
-					for _, index := range sinfo.InlineUnmarshalers {
-						inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
-					}
-					for _, finfo := range sinfo.FieldsList {
-						if _, found := fieldsMap[finfo.Key]; found {
-							msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
-							return nil, errors.New(msg)
-						}
-						if finfo.Inline == nil {
-							finfo.Inline = []int{i, finfo.Num}
-						} else {
-							finfo.Inline = append([]int{i}, finfo.Inline...)
-						}
-						finfo.Id = len(fieldsList)
-						fieldsMap[finfo.Key] = finfo
-						fieldsList = append(fieldsList, finfo)
-					}
-				}
-			default:
-				return nil, errors.New("option ,inline may only be used on a struct or map field")
-			}
-			continue
-		}
-
-		if tag != "" {
-			info.Key = tag
-		} else {
-			info.Key = strings.ToLower(field.Name)
-		}
-
-		if _, found = fieldsMap[info.Key]; found {
-			msg := "duplicated key '" + info.Key + "' in struct " + st.String()
-			return nil, errors.New(msg)
-		}
-
-		info.Id = len(fieldsList)
-		fieldsList = append(fieldsList, info)
-		fieldsMap[info.Key] = info
-	}
-
-	sinfo = &structInfo{
-		FieldsMap:          fieldsMap,
-		FieldsList:         fieldsList,
-		InlineMap:          inlineMap,
-		InlineUnmarshalers: inlineUnmarshalers,
-	}
-
-	fieldMapMutex.Lock()
-	structMap[st] = sinfo
-	fieldMapMutex.Unlock()
-	return sinfo, nil
-}
-
-// IsZeroer is used to check whether an object is zero to
-// determine whether it should be omitted when marshaling
-// with the omitempty flag. One notable implementation
-// is time.Time.
-type IsZeroer interface {
-	IsZero() bool
-}
-
-func isZero(v reflect.Value) bool {
-	kind := v.Kind()
-	if z, ok := v.Interface().(IsZeroer); ok {
-		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
-			return true
-		}
-		return z.IsZero()
-	}
-	switch kind {
-	case reflect.String:
-		return len(v.String()) == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	case reflect.Slice:
-		return v.Len() == 0
-	case reflect.Map:
-		return v.Len() == 0
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Struct:
-		vt := v.Type()
-		for i := v.NumField() - 1; i >= 0; i-- {
-			if vt.Field(i).PkgPath != "" {
-				continue // Private field
-			}
-			if !isZero(v.Field(i)) {
-				return false
-			}
-		}
-		return true
-	}
-	return false
-}
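
For reference, the removed getStructInfo above is the code that parses the ",omitempty", ",flow" and ",inline" struct-tag flags. Below is a minimal sketch (not from this repository) of those flags as seen through the public gopkg.in/yaml.v3 API; the type and field names are invented for illustration, and the expected output is an assumption rather than a verified result of this exact vendored copy.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Meta is merged into Page below via the ",inline" flag.
type Meta struct {
	Author string `yaml:"author"`
	Draft  bool   `yaml:"draft,omitempty"` // dropped from the output while false
}

type Page struct {
	Title string   `yaml:"title"`
	Tags  []string `yaml:"tags,flow"` // emitted in flow style: [go, yaml]
	Meta  Meta     `yaml:",inline"`   // Meta's keys appear at this level
}

func main() {
	out, err := yaml.Marshal(Page{
		Title: "hello",
		Tags:  []string{"go", "yaml"},
		Meta:  Meta{Author: "someone"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected shape (assumed, not verified against this vendored copy):
	//   title: hello
	//   tags: [go, yaml]
	//   author: someone
}
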
diff --git a/application/source/vendor/gopkg.in/yaml.v3/yamlh.go b/application/source/vendor/gopkg.in/yaml.v3/yamlh.go
deleted file mode 100644
index 7c6d0077061933c97979f6c84cb659b17391e1a3..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/yamlh.go
+++ /dev/null
@@ -1,807 +0,0 @@
-//
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-import (
-	"fmt"
-	"io"
-)
-
-// The version directive data.
-type yaml_version_directive_t struct {
-	major int8 // The major version number.
-	minor int8 // The minor version number.
-}
-
-// The tag directive data.
-type yaml_tag_directive_t struct {
-	handle []byte // The tag handle.
-	prefix []byte // The tag prefix.
-}
-
-type yaml_encoding_t int
-
-// The stream encoding.
-const (
-	// Let the parser choose the encoding.
-	yaml_ANY_ENCODING yaml_encoding_t = iota
-
-	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
-	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
-	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
-	// Let the parser choose the break type.
-	yaml_ANY_BREAK yaml_break_t = iota
-
-	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
-	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
-	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
-	// No error is produced.
-	yaml_NO_ERROR yaml_error_type_t = iota
-
-	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
-	yaml_READER_ERROR   // Cannot read or decode the input stream.
-	yaml_SCANNER_ERROR  // Cannot scan the input stream.
-	yaml_PARSER_ERROR   // Cannot parse the input stream.
-	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
-	yaml_WRITER_ERROR   // Cannot write to the output stream.
-	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
-	index  int // The position index.
-	line   int // The position line.
-	column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
-
-	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
-	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
-	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
-	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
-	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
-	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
-	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
-	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
-	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
-	// An empty token.
-	yaml_NO_TOKEN yaml_token_type_t = iota
-
-	yaml_STREAM_START_TOKEN // A STREAM-START token.
-	yaml_STREAM_END_TOKEN   // A STREAM-END token.
-
-	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
-	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
-	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
-	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
-
-	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
-	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
-	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
-
-	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
-	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
-	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
-	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
-
-	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
-	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
-	yaml_KEY_TOKEN         // A KEY token.
-	yaml_VALUE_TOKEN       // A VALUE token.
-
-	yaml_ALIAS_TOKEN  // An ALIAS token.
-	yaml_ANCHOR_TOKEN // An ANCHOR token.
-	yaml_TAG_TOKEN    // A TAG token.
-	yaml_SCALAR_TOKEN // A SCALAR token.
-)
-
-func (tt yaml_token_type_t) String() string {
-	switch tt {
-	case yaml_NO_TOKEN:
-		return "yaml_NO_TOKEN"
-	case yaml_STREAM_START_TOKEN:
-		return "yaml_STREAM_START_TOKEN"
-	case yaml_STREAM_END_TOKEN:
-		return "yaml_STREAM_END_TOKEN"
-	case yaml_VERSION_DIRECTIVE_TOKEN:
-		return "yaml_VERSION_DIRECTIVE_TOKEN"
-	case yaml_TAG_DIRECTIVE_TOKEN:
-		return "yaml_TAG_DIRECTIVE_TOKEN"
-	case yaml_DOCUMENT_START_TOKEN:
-		return "yaml_DOCUMENT_START_TOKEN"
-	case yaml_DOCUMENT_END_TOKEN:
-		return "yaml_DOCUMENT_END_TOKEN"
-	case yaml_BLOCK_SEQUENCE_START_TOKEN:
-		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
-	case yaml_BLOCK_MAPPING_START_TOKEN:
-		return "yaml_BLOCK_MAPPING_START_TOKEN"
-	case yaml_BLOCK_END_TOKEN:
-		return "yaml_BLOCK_END_TOKEN"
-	case yaml_FLOW_SEQUENCE_START_TOKEN:
-		return "yaml_FLOW_SEQUENCE_START_TOKEN"
-	case yaml_FLOW_SEQUENCE_END_TOKEN:
-		return "yaml_FLOW_SEQUENCE_END_TOKEN"
-	case yaml_FLOW_MAPPING_START_TOKEN:
-		return "yaml_FLOW_MAPPING_START_TOKEN"
-	case yaml_FLOW_MAPPING_END_TOKEN:
-		return "yaml_FLOW_MAPPING_END_TOKEN"
-	case yaml_BLOCK_ENTRY_TOKEN:
-		return "yaml_BLOCK_ENTRY_TOKEN"
-	case yaml_FLOW_ENTRY_TOKEN:
-		return "yaml_FLOW_ENTRY_TOKEN"
-	case yaml_KEY_TOKEN:
-		return "yaml_KEY_TOKEN"
-	case yaml_VALUE_TOKEN:
-		return "yaml_VALUE_TOKEN"
-	case yaml_ALIAS_TOKEN:
-		return "yaml_ALIAS_TOKEN"
-	case yaml_ANCHOR_TOKEN:
-		return "yaml_ANCHOR_TOKEN"
-	case yaml_TAG_TOKEN:
-		return "yaml_TAG_TOKEN"
-	case yaml_SCALAR_TOKEN:
-		return "yaml_SCALAR_TOKEN"
-	}
-	return "<unknown token>"
-}
-
-// The token structure.
-type yaml_token_t struct {
-	// The token type.
-	typ yaml_token_type_t
-
-	// The start/end of the token.
-	start_mark, end_mark yaml_mark_t
-
-	// The stream encoding (for yaml_STREAM_START_TOKEN).
-	encoding yaml_encoding_t
-
-	// The alias/anchor/scalar value or tag/tag directive handle
-	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
-	value []byte
-
-	// The tag suffix (for yaml_TAG_TOKEN).
-	suffix []byte
-
-	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
-	prefix []byte
-
-	// The scalar style (for yaml_SCALAR_TOKEN).
-	style yaml_scalar_style_t
-
-	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
-	major, minor int8
-}
-
-// Events
-
-type yaml_event_type_t int8
-
-// Event types.
-const (
-	// An empty event.
-	yaml_NO_EVENT yaml_event_type_t = iota
-
-	yaml_STREAM_START_EVENT   // A STREAM-START event.
-	yaml_STREAM_END_EVENT     // A STREAM-END event.
-	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
-	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
-	yaml_ALIAS_EVENT          // An ALIAS event.
-	yaml_SCALAR_EVENT         // A SCALAR event.
-	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
-	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
-	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
-	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
-	yaml_TAIL_COMMENT_EVENT
-)
-
-var eventStrings = []string{
-	yaml_NO_EVENT:             "none",
-	yaml_STREAM_START_EVENT:   "stream start",
-	yaml_STREAM_END_EVENT:     "stream end",
-	yaml_DOCUMENT_START_EVENT: "document start",
-	yaml_DOCUMENT_END_EVENT:   "document end",
-	yaml_ALIAS_EVENT:          "alias",
-	yaml_SCALAR_EVENT:         "scalar",
-	yaml_SEQUENCE_START_EVENT: "sequence start",
-	yaml_SEQUENCE_END_EVENT:   "sequence end",
-	yaml_MAPPING_START_EVENT:  "mapping start",
-	yaml_MAPPING_END_EVENT:    "mapping end",
-	yaml_TAIL_COMMENT_EVENT:   "tail comment",
-}
-
-func (e yaml_event_type_t) String() string {
-	if e < 0 || int(e) >= len(eventStrings) {
-		return fmt.Sprintf("unknown event %d", e)
-	}
-	return eventStrings[e]
-}
-
-// The event structure.
-type yaml_event_t struct {
-
-	// The event type.
-	typ yaml_event_type_t
-
-	// The start and end of the event.
-	start_mark, end_mark yaml_mark_t
-
-	// The document encoding (for yaml_STREAM_START_EVENT).
-	encoding yaml_encoding_t
-
-	// The version directive (for yaml_DOCUMENT_START_EVENT).
-	version_directive *yaml_version_directive_t
-
-	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
-	tag_directives []yaml_tag_directive_t
-
-	// The comments
-	head_comment []byte
-	line_comment []byte
-	foot_comment []byte
-	tail_comment []byte
-
-	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
-	anchor []byte
-
-	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
-	tag []byte
-
-	// The scalar value (for yaml_SCALAR_EVENT).
-	value []byte
-
-	// Is the document start/end indicator implicit, or the tag optional?
-	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
-	implicit bool
-
-	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
-	quoted_implicit bool
-
-	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
-	style yaml_style_t
-}
-
-func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
-func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
-func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
-
-// Nodes
-
-const (
-	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
-	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
-	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
-	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
-	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
-	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
-
-	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
-	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
-
-	// Not in original libyaml.
-	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
-	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
-
-	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
-	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
-	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
-	// An empty node.
-	yaml_NO_NODE yaml_node_type_t = iota
-
-	yaml_SCALAR_NODE   // A scalar node.
-	yaml_SEQUENCE_NODE // A sequence node.
-	yaml_MAPPING_NODE  // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
-	key   int // The key of the element.
-	value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
-	typ yaml_node_type_t // The node type.
-	tag []byte           // The node tag.
-
-	// The node data.
-
-	// The scalar parameters (for yaml_SCALAR_NODE).
-	scalar struct {
-		value  []byte              // The scalar value.
-		length int                 // The length of the scalar value.
-		style  yaml_scalar_style_t // The scalar style.
-	}
-
-	// The sequence parameters (for YAML_SEQUENCE_NODE).
-	sequence struct {
-		items_data []yaml_node_item_t    // The stack of sequence items.
-		style      yaml_sequence_style_t // The sequence style.
-	}
-
-	// The mapping parameters (for yaml_MAPPING_NODE).
-	mapping struct {
-		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
-		pairs_start *yaml_node_pair_t    // The beginning of the stack.
-		pairs_end   *yaml_node_pair_t    // The end of the stack.
-		pairs_top   *yaml_node_pair_t    // The top of the stack.
-		style       yaml_mapping_style_t // The mapping style.
-	}
-
-	start_mark yaml_mark_t // The beginning of the node.
-	end_mark   yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
-	// The document nodes.
-	nodes []yaml_node_t
-
-	// The version directive.
-	version_directive *yaml_version_directive_t
-
-	// The list of tag directives.
-	tag_directives_data  []yaml_tag_directive_t
-	tag_directives_start int // The beginning of the tag directives list.
-	tag_directives_end   int // The end of the tag directives list.
-
-	start_implicit int // Is the document start indicator implicit?
-	end_implicit   int // Is the document end indicator implicit?
-
-	// The start/end of the document.
-	start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write not more than size bytes to the buffer.
-// The number of written bytes should be set to the size_read variable.
-//
-// [in,out]   data        A pointer to an application data specified by
-//                        yaml_parser_set_input().
-// [out]      buffer      The buffer to write the data from the source.
-// [in]       size        The size of the buffer.
-// [out]      size_read   The actual number of bytes read from the source.
-//
-// On success, the handler should return 1.  If the handler failed,
-// the returned value should be 0. On EOF, the handler should set the
-// size_read to 0 and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
-	possible     bool        // Is a simple key possible?
-	required     bool        // Is a simple key required?
-	token_number int         // The number of the token.
-	mark         yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
-	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
-	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
-	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
-	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
-	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
-	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
-	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
-	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
-	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
-	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
-	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
-	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
-	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
-	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
-	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
-	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
-	yaml_PARSE_END_STATE                               // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
-	switch ps {
-	case yaml_PARSE_STREAM_START_STATE:
-		return "yaml_PARSE_STREAM_START_STATE"
-	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
-		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
-	case yaml_PARSE_DOCUMENT_START_STATE:
-		return "yaml_PARSE_DOCUMENT_START_STATE"
-	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
-		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
-	case yaml_PARSE_DOCUMENT_END_STATE:
-		return "yaml_PARSE_DOCUMENT_END_STATE"
-	case yaml_PARSE_BLOCK_NODE_STATE:
-		return "yaml_PARSE_BLOCK_NODE_STATE"
-	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
-		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
-	case yaml_PARSE_FLOW_NODE_STATE:
-		return "yaml_PARSE_FLOW_NODE_STATE"
-	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
-		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
-	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
-	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
-	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
-	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
-	case yaml_PARSE_END_STATE:
-		return "yaml_PARSE_END_STATE"
-	}
-	return "<unknown parser state>"
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
-	anchor []byte      // The anchor.
-	index  int         // The node id.
-	mark   yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
-	// Error handling
-
-	error yaml_error_type_t // Error type.
-
-	problem string // Error description.
-
-	// The byte about which the problem occurred.
-	problem_offset int
-	problem_value  int
-	problem_mark   yaml_mark_t
-
-	// The error context.
-	context      string
-	context_mark yaml_mark_t
-
-	// Reader stuff
-
-	read_handler yaml_read_handler_t // Read handler.
-
-	input_reader io.Reader // File input data.
-	input        []byte    // String input data.
-	input_pos    int
-
-	eof bool // EOF flag
-
-	buffer     []byte // The working buffer.
-	buffer_pos int    // The current position of the buffer.
-
-	unread int // The number of unread characters in the buffer.
-
-	newlines int // The number of line breaks since last non-break/non-blank character
-
-	raw_buffer     []byte // The raw buffer.
-	raw_buffer_pos int    // The current position of the buffer.
-
-	encoding yaml_encoding_t // The input encoding.
-
-	offset int         // The offset of the current position (in bytes).
-	mark   yaml_mark_t // The mark of the current position.
-
-	// Comments
-
-	head_comment []byte // The current head comments
-	line_comment []byte // The current line comments
-	foot_comment []byte // The current foot comments
-	tail_comment []byte // Foot comment that happens at the end of a block.
-	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
-
-	comments      []yaml_comment_t // The folded comments for all parsed tokens
-	comments_head int
-
-	// Scanner stuff
-
-	stream_start_produced bool // Have we started to scan the input stream?
-	stream_end_produced   bool // Have we reached the end of the input stream?
-
-	flow_level int // The number of unclosed '[' and '{' indicators.
-
-	tokens          []yaml_token_t // The tokens queue.
-	tokens_head     int            // The head of the tokens queue.
-	tokens_parsed   int            // The number of tokens fetched from the queue.
-	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
-
-	indent  int   // The current indentation level.
-	indents []int // The indentation levels stack.
-
-	simple_key_allowed bool                // May a simple key occur at the current position?
-	simple_keys        []yaml_simple_key_t // The stack of simple keys.
-	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
-
-	// Parser stuff
-
-	state          yaml_parser_state_t    // The current parser state.
-	states         []yaml_parser_state_t  // The parser states stack.
-	marks          []yaml_mark_t          // The stack of marks.
-	tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
-	// Dumper stuff
-
-	aliases []yaml_alias_data_t // The alias data.
-
-	document *yaml_document_t // The currently parsed document.
-}
-
-type yaml_comment_t struct {
-
-	scan_mark  yaml_mark_t // Position where scanning for comments started
-	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
-	start_mark yaml_mark_t // Position of '#' comment mark
-	end_mark   yaml_mark_t // Position where comment terminated
-
-	head []byte
-	line []byte
-	foot []byte
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output.  The handler should write @a size bytes of the
-// @a buffer to the output.
-//
-// @param[in,out]   data        A pointer to an application data specified by
-//                              yaml_emitter_set_output().
-// @param[in]       buffer      The buffer with bytes to be written.
-// @param[in]       size        The size of the buffer.
-//
-// @returns On success, the handler should return @c 1.  If the handler failed,
-// the returned value should be @c 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
-	// Expect STREAM-START.
-	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
-	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
-	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
-	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
-	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
-	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
-	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
-	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
-	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
-	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
-	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
-	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
-	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
-	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
-	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
-	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
-	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
-	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
-	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
-	yaml_EMIT_END_STATE                        // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal.  Manage the structure using the @c yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
-	// Error handling
-
-	error   yaml_error_type_t // Error type.
-	problem string            // Error description.
-
-	// Writer stuff
-
-	write_handler yaml_write_handler_t // Write handler.
-
-	output_buffer *[]byte   // String output data.
-	output_writer io.Writer // File output data.
-
-	buffer     []byte // The working buffer.
-	buffer_pos int    // The current position of the buffer.
-
-	raw_buffer     []byte // The raw buffer.
-	raw_buffer_pos int    // The current position of the buffer.
-
-	encoding yaml_encoding_t // The stream encoding.
-
-	// Emitter stuff
-
-	canonical   bool         // If the output is in the canonical style?
-	best_indent int          // The number of indentation spaces.
-	best_width  int          // The preferred width of the output lines.
-	unicode     bool         // Allow unescaped non-ASCII characters?
-	line_break  yaml_break_t // The preferred line break.
-
-	state  yaml_emitter_state_t   // The current emitter state.
-	states []yaml_emitter_state_t // The stack of states.
-
-	events      []yaml_event_t // The event queue.
-	events_head int            // The head of the event queue.
-
-	indents []int // The stack of indentation levels.
-
-	tag_directives []yaml_tag_directive_t // The list of tag directives.
-
-	indent int // The current indentation level.
-
-	flow_level int // The current flow level.
-
-	root_context       bool // Is it the document root context?
-	sequence_context   bool // Is it a sequence context?
-	mapping_context    bool // Is it a mapping context?
-	simple_key_context bool // Is it a simple mapping key context?
-
-	line       int  // The current line.
-	column     int  // The current column.
-	whitespace bool // If the last character was a whitespace?
-	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
-	open_ended bool // If an explicit document end is required?
-
-	space_above bool // Is there an empty line above?
-	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
-
-	// Anchor analysis.
-	anchor_data struct {
-		anchor []byte // The anchor value.
-		alias  bool   // Is it an alias?
-	}
-
-	// Tag analysis.
-	tag_data struct {
-		handle []byte // The tag handle.
-		suffix []byte // The tag suffix.
-	}
-
-	// Scalar analysis.
-	scalar_data struct {
-		value                 []byte              // The scalar value.
-		multiline             bool                // Does the scalar contain line breaks?
-		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
-		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
-		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
-		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
-		style                 yaml_scalar_style_t // The output style.
-	}
-
-	// Comments
-	head_comment []byte
-	line_comment []byte
-	foot_comment []byte
-	tail_comment []byte
-
-	key_line_comment []byte
-
-	// Dumper stuff
-
-	opened bool // If the stream was already opened?
-	closed bool // If the stream was already closed?
-
-	// The information associated with the document nodes.
-	anchors *struct {
-		references int  // The number of references.
-		anchor     int  // The anchor id.
-		serialized bool // If the node has been emitted?
-	}
-
-	last_anchor_id int // The last assigned anchor id.
-
-	document *yaml_document_t // The currently emitted document.
-}
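
The token and event types removed above describe YAML input as a stream of documents (STREAM-START, DOCUMENT-START, ..., STREAM-END). A minimal sketch (not from this repository) of how that stream model surfaces through the public gopkg.in/yaml.v3 Decoder, which is assumed to return io.EOF once the stream is exhausted:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	// Two documents in one stream, separated by the document marker.
	stream := "a: 1\n---\na: 2\n"
	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc map[string]int
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break // end of stream: no more documents
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc["a"]) // prints 1, then 2
	}
}
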
diff --git a/application/source/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/application/source/vendor/gopkg.in/yaml.v3/yamlprivateh.go
deleted file mode 100644
index e88f9c54aecb54ed42665b2a08b66a4f03d999bc..0000000000000000000000000000000000000000
--- a/application/source/vendor/gopkg.in/yaml.v3/yamlprivateh.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// 
-// Copyright (c) 2011-2019 Canonical Ltd
-// Copyright (c) 2006-2010 Kirill Simonov
-// 
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-// 
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-// 
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package yaml
-
-const (
-	// The size of the input raw buffer.
-	input_raw_buffer_size = 512
-
-	// The size of the input buffer.
-	// It should be possible to decode the whole raw buffer.
-	input_buffer_size = input_raw_buffer_size * 3
-
-	// The size of the output buffer.
-	output_buffer_size = 128
-
-	// The size of the output raw buffer.
-	// It should be possible to encode the whole output buffer.
-	output_raw_buffer_size = (output_buffer_size*2 + 2)
-
-	// The size of other stacks and queues.
-	initial_stack_size  = 16
-	initial_queue_size  = 16
-	initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
-	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
-	return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
-	return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
-	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
-	bi := b[i]
-	if bi >= 'A' && bi <= 'F' {
-		return int(bi) - 'A' + 10
-	}
-	if bi >= 'a' && bi <= 'f' {
-		return int(bi) - 'a' + 10
-	}
-	return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
-	return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
-	return ((b[i] == 0x0A) || // . == #x0A
-		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
-		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
-		(b[i] > 0xC2 && b[i] < 0xED) ||
-		(b[i] == 0xED && b[i+1] < 0xA0) ||
-		(b[i] == 0xEE) ||
-		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
-			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
-			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
-	return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool {
-	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is space.
-func is_space(b []byte, i int) bool {
-	return b[i] == ' '
-}
-
-// Check if the character at the specified position is tab.
-func is_tab(b []byte, i int) bool {
-	return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
-	//return is_space(b, i) || is_tab(b, i)
-	return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
-	return (b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
-	return b[i] == '\r' && b[i+1] == '\n'
-}
-
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
-	//return is_break(b, i) || is_z(b, i)
-	return (
-		// is_break:
-		b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
-		// is_z:
-		b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
-	//return is_space(b, i) || is_breakz(b, i)
-	return (
-		// is_space:
-		b[i] == ' ' ||
-		// is_breakz:
-		b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
-		b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
-	//return is_blank(b, i) || is_breakz(b, i)
-	return (
-		// is_blank:
-		b[i] == ' ' || b[i] == '\t' ||
-		// is_breakz:
-		b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
-		b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
-	// Don't replace these by a switch without first
-	// confirming that it is being inlined.
-	if b&0x80 == 0x00 {
-		return 1
-	}
-	if b&0xE0 == 0xC0 {
-		return 2
-	}
-	if b&0xF0 == 0xE0 {
-		return 3
-	}
-	if b&0xF8 == 0xF0 {
-		return 4
-	}
-	return 0
-
-}
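
The width helper removed above reads the byte length of a UTF-8 sequence from its leading byte. A small standalone check (not from this repository) of the same bit masks against the standard library:

package main

import (
	"fmt"
	"unicode/utf8"
)

// width mirrors the removed helper: the high bits of a UTF-8 leading byte
// encode the length of the whole sequence.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0 // continuation byte or invalid leading byte
}

func main() {
	for _, r := range []rune{'a', 'é', '世', '🙂'} {
		var buf [4]byte
		utf8.EncodeRune(buf[:], r)
		fmt.Printf("%q: width=%d, utf8.RuneLen=%d\n", r, width(buf[0]), utf8.RuneLen(r))
	}
}
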
diff --git a/application/source/vendor/modules.txt b/application/source/vendor/modules.txt
deleted file mode 100644
index cce681ca1caa3887a59a7638486fb1f4e783c6e1..0000000000000000000000000000000000000000
--- a/application/source/vendor/modules.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# github.com/andybalholm/cascadia v1.3.2
-## explicit; go 1.16
-github.com/andybalholm/cascadia
-# github.com/volker-schukai/tokenizer v1.0.0
-## explicit; go 1.13
-github.com/volker-schukai/tokenizer
-# gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.0
-## explicit; go 1.19
-gitlab.schukai.com/oss/libraries/go/application/xflags
-# gitlab.schukai.com/oss/libraries/go/markup/html v0.4.0
-## explicit; go 1.20
-gitlab.schukai.com/oss/libraries/go/markup/html/engine
-# gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.0
-## explicit; go 1.19
-gitlab.schukai.com/oss/libraries/go/utilities/data.git
-# gitlab.schukai.com/oss/libraries/go/utilities/pathfinder v0.5.2
-## explicit; go 1.19
-gitlab.schukai.com/oss/libraries/go/utilities/pathfinder
-# golang.org/x/crypto v0.11.0
-## explicit; go 1.17
-golang.org/x/crypto/bcrypt
-golang.org/x/crypto/blowfish
-# golang.org/x/net v0.12.0
-## explicit; go 1.17
-golang.org/x/net/html
-golang.org/x/net/html/atom
-# gopkg.in/yaml.v3 v3.0.1
-## explicit
-gopkg.in/yaml.v3
diff --git a/flake.lock b/flake.lock
index a398c99623e29ce1fc2c3c89c22f410ca1df99de..50ec8644049e4877f098ee369af247868cbc9102 100644
--- a/flake.lock
+++ b/flake.lock
@@ -10,15 +10,48 @@
         "type": "github"
       },
       "original": {
-        "owner": "NixOS",
+        "id": "nixpkgs",
         "ref": "nixos-unstable",
-        "repo": "nixpkgs",
-        "type": "github"
+        "type": "indirect"
       }
     },
     "root": {
       "inputs": {
-        "nixpkgs": "nixpkgs"
+        "nixpkgs": "nixpkgs",
+        "utils": "utils"
+      }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    },
+    "utils": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1687709756,
+        "narHash": "sha256-Y5wKlQSkgEK2weWdOu4J3riRd+kV/VCgHsqLNTTWQ/0=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "dbabf0ca0c0c4bce6ea5eaf65af5cb694d2082c7",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
       }
     }
   },
diff --git a/flake.nix b/flake.nix
index 54366cacd7b15564bc534d703ba815d2a6a1a148..297c2bf350eb4309393118d96da8c2ffa615e33d 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,46 +1,233 @@
 {
   description = "Bob: The HTML and HTML fragment builder";
-  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+  
+  inputs = {
+      nixpkgs.url = "nixpkgs/nixos-unstable";
+      utils.url = "github:numtide/flake-utils";
+  };  
 
-  outputs = { self, nixpkgs, ... }: 
+  outputs = { self, nixpkgs, ... }@inputs:
     let
-      rootPath = ./.;
-      releaseJson = builtins.fromJSON (builtins.readFile "${rootPath}/release.json");
-      currentVersion = releaseJson.version;
-    
-      systems = [ "x86_64-linux" "aarch64-linux" ];
-      forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
-      makePackage = system: 
-        let
-          pkgs = import nixpkgs { inherit system; overlays = [ self.overlay ]; };
-        in
-          pkgs.bob;
+      # to work with older versions of flakes
+      lastModifiedDate = self.lastModifiedDate or self.lastModified or "19700101";
+      # System types to support.
+      supportedSystems = [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];
+
+      # Helper function to generate an attrset '{ x86_64-linux = f "x86_64-linux"; ... }'.
+      forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
+
+      # Nixpkgs instantiated for supported system types.
+      nixpkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
     in
     {
-      overlay = final: prev: {
-        bob =  final.buildGoModule rec {
-          name = "bob";
-          src = ././application/source; 
-          doCheck = true;
-          vendorHash = null;
-          nativeBuildInputs = [ final.jq ];
-          ldflags=''-X main.version=${currentVersion} -X main.build=${self.lastModifiedDate}'';
-          installPhase = ''
-               mkdir -p $out/bin
-               
-          '';
-
-        };
-      };
-
-      defaultPackage = forAllSystems makePackage;
-
-      packages = forAllSystems (system: {
-        bob = makePackage system;
-      });
-
-      checks = {
-        test = forAllSystems makePackage;
-      };
+
+      # Provide some binary packages for selected system types.
+      packages = forAllSystems (system:
+        let
+            pkgs = nixpkgs.legacyPackages.${system};
+        in
+        {
+          go-hello = pkgs.buildGoModule {
+            pname = "bob";
+            version = "0.5.3";
+
+            # In 'nix develop', we don't need a copy of the source tree
+            # in the Nix store.
+            src = ././application/source;
+
+            # This hash locks the dependencies of this package. It is
+            # necessary because Go requires network access to resolve
+            # VCS dependencies. See https://www.tweag.io/blog/2021-03-04-gomod2nix/ for
+            # details. Normally one can build with a fake sha256 and rely on native Go
+            # mechanisms to report what the hash should be, or determine it
+            # "out-of-band" with other tooling (e.g. gomod2nix).
+            # To begin with it is recommended to set this, but remember to
+            # bump this hash when your dependencies change.
+            #vendorSha256 = pkgs.lib.fakeSha256;
+            vendorSha256 = "sha256-XdB+u94Rqsb29jVs4miyOq1NEYaVJHWFXg6QebFJzNQ=";
+
+            excludedPackages = [ "example/server-db" "test/e2e" "tui-example" ];
+
+            doCheck = false;
+          };
+        });
+
+      # The default package for 'nix build'. This makes sense if the
+      # flake provides only one package or there is a clear "main"
+      # package.
+      defaultPackage = forAllSystems (system: self.packages.${system}.go-hello);
     };
 }
+
+
+#
+#{
+#  description = "Bob: The HTML and HTML fragment builder";
+#  
+#    inputs = {
+#      nixpkgs.url = "nixpkgs/nixos-unstable";
+#      utils.url = "github:numtide/flake-utils";
+#    };
+#  
+#    outputs = { self, lib, nixpkgs, ... }@inputs: 
+#    
+#    let 
+#    
+#      lastModifiedDate = self.lastModifiedDate or self.lastModified or "19700101";
+#      
+#      supportedSystems = [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];
+#
+#      forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
+#      
+#      nixpkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
+#
+#    
+#    in {
+#    
+#    bob = buildGoModule rec {
+#      pname = "bob";
+#      version = "0.3.4";
+#    
+#      src = ././application/source;
+#    
+#      vendorHash = lib.fakeSha256;
+#    
+#      meta = with lib; {
+#        description = "Bob: The HTML and HTML fragment builder";
+#        homepage = "https://gitlab.schukai.com/oss/bob";
+#        license = licenses.mit;
+#        maintainers = with maintainers; [ "schukai GmbH" ];
+#      };
+#      
+#      CGO_ENABLED = 0;
+#      
+#    };
+
+    
+    
+#          # Notice the reference to nixpkgs here.
+#          with import nixpkgs { system = "x86_64-linux"; };
+#          stdenv.mkDerivation {
+#            name = "bob";
+#            src = self;
+#            buildPhase = "env GOFLAGS= GOWORK=off GO111MODULE=on GOOS=$GOOS GOARCH=$GOARCH go build -o bob";
+#            installPhase = "mkdir -p $out/bin; install -t $out/bin bob";
+#          };
+    
+#    
+#    };
+#}
+  
+#  outputs = { self, nixpkgs, flake-utils }:
+#    flake-utils.lib.eachDefaultSystem (system:
+#      let
+#        
+#      rootPath = ./.;
+#      releaseJson = builtins.fromJSON (builtins.readFile "${rootPath}/release.json");
+#      currentVersion = releaseJson.version;        
+#        
+#         overlay = (final: prev: {
+#        
+# bob =  final.buildGoModule rec {
+#          name = "bob";
+#          src = bobSource; 
+#          vendorHash = null;
+#          ldflags=''-X main.version=${currentVersion} -X main.build=${self.lastModifiedDate}'';
+#        };        
+#        });        
+#
+#        pkgs = import nixpkgs {
+#          inherit system;
+#          overlays = [ overlay ];
+#        };
+#
+#      in {
+#        packages = { bob = pkgs.bob; };
+#      });
+#}
+#  
+   
+
+
+
+#    let
+#      rootPath = ./.;
+#      releaseJson = builtins.fromJSON (builtins.readFile "${rootPath}/release.json");
+#      currentVersion = releaseJson.version;
+#    
+#    gomod2nix = {
+#      url = "github:tweag/gomod2nix";
+#      inputs.nixpkgs.follows = "nixpkgs";
+#      inputs.utils.follows = "utils";
+#    };    
+#    
+#      systems = [ "x86_64-linux" "aarch64-linux" ];
+#      forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
+##      makePackage = system: 
+##        let
+##          pkgs = import nixpkgs { inherit system; overlays = [ self.overlay ]; };
+##        in
+##          pkgs.bob;
+#          
+#        everything = nixpkgs.buildGoApplication {
+#          pname = "xe-x-composite";
+#          version = "1.0.0";
+#          src = ./.;
+#          modules = ./gomod2nix.toml;
+#
+#          buildInputs = with nixpkgs; [ pkg-config libaom libavif ];
+#        };
+#
+#        copyFile = { pname, path ? pname }:
+#          nixpkgs.stdenv.mkDerivation {
+#            inherit pname;
+#            inherit (everything) version;
+#            src = everything;
+#
+#            installPhase = ''
+#              mkdir -p $out/bin
+#              cp $src/bin/$pname $out/bin/$path
+#            '';
+#          };          
+#          
+#    in
+#    {
+#    
+#    devShells.default = nixpkgs.mkShell {
+#      buildInputs = with nixpkgs; [
+#        go
+#        gopls
+#        gotools
+#        go-tools
+#        gomod2nix.packages.${system}.default
+#        sqlite-interactive
+#      ];
+#    };
+#    
+#      overlay = final: prev: {
+#        bob =  final.buildGoModule rec {
+#          name = "bob";
+#          src = ././application/source; 
+#          doCheck = true;
+#          vendorHash = null;
+#          nativeBuildInputs = [ final.jq ];
+#          ldflags=''-X main.version=${currentVersion} -X main.build=${self.lastModifiedDate}'';
+#          installPhase = ''
+#               mkdir -p $out/bin
+#               
+#          '';
+#
+#        };
+#      };
+#
+#      defaultPackage = forAllSystems makePackage;
+#
+#      packages = forAllSystems (system: {
+#        bob = makePackage system;
+#      });
+#
+#      checks = {
+#        test = forAllSystems makePackage;
+#      };
+#    };
+#}
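
The earlier overlay (kept above as a comment) injected the release version and build date through Go linker flags. A minimal sketch (not from this repository; the variable defaults are assumptions) of the Go side that such `-X main.version=... -X main.build=...` flags override:

package main

import "fmt"

// Overridden at link time, e.g.
//   go build -ldflags "-X main.version=0.5.3 -X main.build=20230701"
var (
	version = "dev"
	build   = "unknown"
)

func main() {
	fmt.Printf("bob %s (build %s)\n", version, build)
}
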
diff --git a/result b/result
index 2f3865c3b00edb4a32fc7d91d1a68f41adb7e931..d77674f6c51587591057be306c36066e2d147865 120000
--- a/result
+++ b/result
@@ -1 +1 @@
-/nix/store/249v9i92xn42lyv68j4pjjbq0gjll62r-bob
\ No newline at end of file
+/nix/store/q21v9ipxpaaix7ygr29vxgh6gv2v5d59-bob-0.5.3
\ No newline at end of file