package config
import (
"bytes"
"errors"
"regexp"
format "github.com/go-git/go-git/v5/plumbing/format/config"
)
var (
ErrModuleEmptyURL = errors.New("module config: empty URL")
ErrModuleEmptyPath = errors.New("module config: empty path")
ErrModuleBadPath = errors.New("submodule has an invalid path")
)
var (
// Matches module paths with dotdot ".." components.
dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`)
)
// Modules defines the submodules properties, represents a .gitmodules file
// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html
type Modules struct {
// Submodules is a map of submodules, keyed by the submodule name.
Submodules map[string]*Submodule
raw *format.Config
}
// NewModules returns a new empty Modules
func NewModules() *Modules {
return &Modules{
Submodules: make(map[string]*Submodule),
raw: format.New(),
}
}
const (
pathKey = "path"
branchKey = "branch"
)
// Unmarshal parses a git-config file and stores it.
func (m *Modules) Unmarshal(b []byte) error {
r := bytes.NewBuffer(b)
d := format.NewDecoder(r)
m.raw = format.New()
if err := d.Decode(m.raw); err != nil {
return err
}
unmarshalSubmodules(m.raw, m.Submodules)
return nil
}
// Marshal returns Modules encoded as a git-config file.
func (m *Modules) Marshal() ([]byte, error) {
s := m.raw.Section(submoduleSection)
s.Subsections = make(format.Subsections, len(m.Submodules))
var i int
for _, r := range m.Submodules {
s.Subsections[i] = r.marshal()
i++
}
buf := bytes.NewBuffer(nil)
if err := format.NewEncoder(buf).Encode(m.raw); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// Submodule defines a submodule.
type Submodule struct {
// Name is the module name.
Name string
// Path defines the path, relative to the top-level directory of the Git
// working tree.
Path string
// URL defines a URL from which the submodule repository can be cloned.
URL string
// Branch is a remote branch name for tracking updates in the upstream
// submodule. Optional value.
Branch string
// raw representation of the subsection, filled when marshal or unmarshal
// are called.
raw *format.Subsection
}
// Validate validates the fields and sets the default values.
func (m *Submodule) Validate() error {
if m.Path == "" {
return ErrModuleEmptyPath
}
if m.URL == "" {
return ErrModuleEmptyURL
}
if dotdotPath.MatchString(m.Path) {
return ErrModuleBadPath
}
return nil
}
func (m *Submodule) unmarshal(s *format.Subsection) {
m.raw = s
m.Name = m.raw.Name
m.Path = m.raw.Option(pathKey)
m.URL = m.raw.Option(urlKey)
m.Branch = m.raw.Option(branchKey)
}
func (m *Submodule) marshal() *format.Subsection {
if m.raw == nil {
m.raw = &format.Subsection{}
}
m.raw.Name = m.Name
if m.raw.Name == "" {
m.raw.Name = m.Path
}
m.raw.SetOption(pathKey, m.Path)
m.raw.SetOption(urlKey, m.URL)
if m.Branch != "" {
m.raw.SetOption(branchKey, m.Branch)
}
return m.raw
}
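// Hedged usage sketch (illustrative, not part of the original file): parsing
// and re-encoding a minimal .gitmodules document; the URL below is made up.
func exampleModulesRoundTrip() error {
	input := []byte("[submodule \"lib\"]\n\tpath = lib\n\turl = https://example.com/lib.git\n")
	m := NewModules()
	if err := m.Unmarshal(input); err != nil {
		return err
	}
	if sub, ok := m.Submodules["lib"]; ok {
		if err := sub.Validate(); err != nil {
			return err
		}
	}
	_, err := m.Marshal()
	return err
}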
package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
)
const (
refSpecWildcard = "*"
refSpecForce = "+"
refSpecSeparator = ":"
)
var (
ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards")
)
// RefSpec is a mapping from local branches to remote references.
// The format of the refspec is an optional +, followed by <src>:<dst>, where
// <src> is the pattern for references on the remote side and <dst> is where
// those references will be written locally. The + tells Git to update the
// reference even if it isn’t a fast-forward.
// e.g.: "+refs/heads/*:refs/remotes/origin/*"
//
// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec
type RefSpec string
// Validate validates the RefSpec
func (s RefSpec) Validate() error {
spec := string(s)
if strings.Count(spec, refSpecSeparator) != 1 {
return ErrRefSpecMalformedSeparator
}
sep := strings.Index(spec, refSpecSeparator)
if sep == len(spec)-1 {
return ErrRefSpecMalformedSeparator
}
ws := strings.Count(spec[0:sep], refSpecWildcard)
wd := strings.Count(spec[sep+1:], refSpecWildcard)
if ws == wd && ws < 2 && wd < 2 {
return nil
}
return ErrRefSpecMalformedWildcard
}
// IsForceUpdate returns whether non-fast-forward updates are allowed.
func (s RefSpec) IsForceUpdate() bool {
return s[0] == refSpecForce[0]
}
// IsDelete returns true if the refspec indicates a delete (empty src).
func (s RefSpec) IsDelete() bool {
return s[0] == refSpecSeparator[0]
}
// IsExactSHA1 returns true if the source is a SHA1 hash.
func (s RefSpec) IsExactSHA1() bool {
return plumbing.IsHash(s.Src())
}
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)
var start int
if s.IsForceUpdate() {
start = 1
} else {
start = 0
}
end := strings.Index(spec, refSpecSeparator)
return spec[start:end]
}
// Match matches the given plumbing.ReferenceName against the source.
func (s RefSpec) Match(n plumbing.ReferenceName) bool {
if !s.IsWildcard() {
return s.matchExact(n)
}
return s.matchGlob(n)
}
// IsWildcard returns true if the RefSpec contains a wildcard.
func (s RefSpec) IsWildcard() bool {
return strings.Contains(string(s), refSpecWildcard)
}
func (s RefSpec) matchExact(n plumbing.ReferenceName) bool {
return s.Src() == n.String()
}
func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
src := s.Src()
name := n.String()
wildcard := strings.Index(src, refSpecWildcard)
var prefix, suffix string
prefix = src[0:wildcard]
if len(src) > wildcard+1 {
suffix = src[wildcard+1:]
}
return len(name) >= len(prefix)+len(suffix) &&
strings.HasPrefix(name, prefix) &&
strings.HasSuffix(name, suffix)
}
// Dst returns the destination for the given remote reference.
func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName {
spec := string(s)
start := strings.Index(spec, refSpecSeparator) + 1
dst := spec[start:]
src := s.Src()
if !s.IsWildcard() {
return plumbing.ReferenceName(dst)
}
name := n.String()
ws := strings.Index(src, refSpecWildcard)
wd := strings.Index(dst, refSpecWildcard)
match := name[ws : len(name)-(len(src)-(ws+1))]
return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:])
}
func (s RefSpec) Reverse() RefSpec {
spec := string(s)
separator := strings.Index(spec, refSpecSeparator)
return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator])
}
func (s RefSpec) String() string {
return string(s)
}
// MatchAny returns true if any of the RefSpec match with the given ReferenceName.
func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool {
for _, r := range l {
if r.Match(n) {
return true
}
}
return false
}
package config
import (
"errors"
"strings"
format "github.com/go-git/go-git/v5/plumbing/format/config"
)
var (
errURLEmptyInsteadOf = errors.New("url config: empty insteadOf")
)
// URL defines URL rewrite rules.
type URL struct {
// Name is the new base URL.
Name string
// Any URL that starts with this value will be rewritten to start, instead, with <base>.
// When more than one insteadOf strings match a given URL, the longest match is used.
InsteadOf string
// raw representation of the subsection, filled when marshal or unmarshal
// are called.
raw *format.Subsection
}
// Validate validates the fields of the URL.
func (b *URL) Validate() error {
if b.InsteadOf == "" {
return errURLEmptyInsteadOf
}
return nil
}
const (
insteadOfKey = "insteadOf"
)
func (u *URL) unmarshal(s *format.Subsection) error {
u.raw = s
u.Name = s.Name
u.InsteadOf = u.raw.Option(insteadOfKey)
return nil
}
func (u *URL) marshal() *format.Subsection {
if u.raw == nil {
u.raw = &format.Subsection{}
}
u.raw.Name = u.Name
u.raw.SetOption(insteadOfKey, u.InsteadOf)
return u.raw
}
func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL {
var longestMatch *URL
for _, u := range urls {
if !strings.HasPrefix(remoteURL, u.InsteadOf) {
continue
}
// according to the spec, if there is more than one match, take the longest
if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) {
longestMatch = u
}
}
return longestMatch
}
func (u *URL) ApplyInsteadOf(url string) string {
if !strings.HasPrefix(url, u.InsteadOf) {
return url
}
return u.Name + url[len(u.InsteadOf):]
}
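// Hedged usage sketch (illustrative, not part of the original file): an
// insteadOf rule rewriting HTTPS remotes to SSH; both URLs are made up.
func exampleApplyInsteadOf() string {
	u := &URL{Name: "ssh://git@example.com/", InsteadOf: "https://example.com/"}
	// Returns "ssh://git@example.com/org/repo.git".
	return u.ApplyInsteadOf("https://example.com/org/repo.git")
}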
// A highly extensible git implementation in pure Go.
//
// go-git aims to reach the completeness of libgit2 or jgit; nowadays it covers
// the majority of the plumbing read operations and some of the main write
// operations, but lacks the main porcelain operations such as merges.
//
// It is highly extensible: we have been following the open/closed principle in
// its design to facilitate extensions, mainly focusing the efforts on the
// persistence of the objects.
package git
package path_util
import (
"os"
"os/user"
"strings"
)
func ReplaceTildeWithHome(path string) (string, error) {
if strings.HasPrefix(path, "~") {
firstSlash := strings.Index(path, "/")
if firstSlash == 1 {
home, err := os.UserHomeDir()
if err != nil {
return path, err
}
return strings.Replace(path, "~", home, 1), nil
} else if firstSlash > 1 {
username := path[1:firstSlash]
userAccount, err := user.Lookup(username)
if err != nil {
return path, err
}
return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil
}
}
return path, nil
}
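// Hedged usage sketch (illustrative, not part of the original file): the
// result depends on the current user's home directory.
func exampleReplaceTilde() {
	expanded, err := ReplaceTildeWithHome("~/src/project")
	if err != nil {
		return
	}
	_ = expanded // e.g. "/home/alice/src/project"
}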
// Package revision extracts a git revision from a string.
// More information about revisions: https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
package revision
import (
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"time"
)
// ErrInvalidRevision is emitted if the string doesn't match a valid revision.
type ErrInvalidRevision struct {
s string
}
func (e *ErrInvalidRevision) Error() string {
return "Revision invalid : " + e.s
}
// Revisioner represents a revision component.
// A revision is made of multiple revision components
// obtained after parsing a revision string;
// for instance, the revision "master~" will be converted into
// two revision components, Ref and TildePath.
type Revisioner interface {
}
// Ref represents a reference name : HEAD, master, <hash>
type Ref string
// TildePath represents ~, ~{n}
type TildePath struct {
Depth int
}
// CaretPath represents ^, ^{n}
type CaretPath struct {
Depth int
}
// CaretReg represents ^{/foo bar}
type CaretReg struct {
Regexp *regexp.Regexp
Negate bool
}
// CaretType represents ^{commit}
type CaretType struct {
ObjectType string
}
// AtReflog represents @{n}
type AtReflog struct {
Depth int
}
// AtCheckout represents @{-n}
type AtCheckout struct {
Depth int
}
// AtUpstream represents @{upstream}, @{u}
type AtUpstream struct {
BranchName string
}
// AtPush represents @{push}
type AtPush struct {
BranchName string
}
// AtDate represents @{"2006-01-02T15:04:05Z"}
type AtDate struct {
Date time.Time
}
// ColonReg represents :/foo bar
type ColonReg struct {
Regexp *regexp.Regexp
Negate bool
}
// ColonPath represents :./<path> :<path>
type ColonPath struct {
Path string
}
// ColonStagePath represents :<n>:/<path>
type ColonStagePath struct {
Path string
Stage int
}
// Parser represents a parser used to tokenize a given string
// and transform it into revisioner chunks.
type Parser struct {
s *scanner
currentParsedChar struct {
tok token
lit string
}
unreadLastChar bool
}
// NewParserFromString returns a new instance of parser from a string.
func NewParserFromString(s string) *Parser {
return NewParser(bytes.NewBufferString(s))
}
// NewParser returns a new instance of parser.
func NewParser(r io.Reader) *Parser {
return &Parser{s: newScanner(r)}
}
// scan returns the next token from the underlying scanner
// or the last scanned token if an unscan was requested
func (p *Parser) scan() (token, string, error) {
if p.unreadLastChar {
p.unreadLastChar = false
return p.currentParsedChar.tok, p.currentParsedChar.lit, nil
}
tok, lit, err := p.s.scan()
p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit
return tok, lit, err
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() { p.unreadLastChar = true }
// Parse explodes a revision string into revisioner chunks.
func (p *Parser) Parse() ([]Revisioner, error) {
var rev Revisioner
var revs []Revisioner
var tok token
var err error
for {
tok, _, err = p.scan()
if err != nil {
return nil, err
}
switch tok {
case at:
rev, err = p.parseAt()
case tilde:
rev, err = p.parseTilde()
case caret:
rev, err = p.parseCaret()
case colon:
rev, err = p.parseColon()
case eof:
err = p.validateFullRevision(&revs)
if err != nil {
return []Revisioner{}, err
}
return revs, nil
default:
p.unscan()
rev, err = p.parseRef()
}
if err != nil {
return []Revisioner{}, err
}
revs = append(revs, rev)
}
}
// validateFullRevision ensures all revisioner chunks make a valid revision
func (p *Parser) validateFullRevision(chunks *[]Revisioner) error {
var hasReference bool
for i, chunk := range *chunks {
switch chunk.(type) {
case Ref:
if i == 0 {
hasReference = true
} else {
return &ErrInvalidRevision{`reference must be defined once at the beginning`}
}
case AtDate:
if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
return nil
}
return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<ISO-8601 date>}, @{<ISO-8601 date>}`}
case AtReflog:
if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
return nil
}
return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<n>}, @{<n>}`}
case AtCheckout:
if len(*chunks) == 1 {
return nil
}
return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`}
case AtUpstream:
if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
return nil
}
return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`}
case AtPush:
if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
return nil
}
return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{push}, @{push}`}
case TildePath, CaretPath, CaretReg:
if !hasReference {
return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`}
}
case ColonReg:
if len(*chunks) == 1 {
return nil
}
return &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`}
case ColonPath:
if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 {
return nil
}
return &ErrInvalidRevision{`":" statement is not valid, could be : <revision>:<path>`}
case ColonStagePath:
if len(*chunks) == 1 {
return nil
}
return &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`}
}
}
return nil
}
// parseAt extracts @ statements.
func (p *Parser) parseAt() (Revisioner, error) {
var tok, nextTok token
var lit, nextLit string
var err error
tok, _, err = p.scan()
if err != nil {
return nil, err
}
if tok != obrace {
p.unscan()
return Ref("HEAD"), nil
}
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
nextTok, nextLit, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace:
return AtUpstream{}, nil
case tok == word && lit == "push" && nextTok == cbrace:
return AtPush{}, nil
case tok == number && nextTok == cbrace:
n, _ := strconv.Atoi(lit)
return AtReflog{n}, nil
case tok == minus && nextTok == number:
n, _ := strconv.Atoi(nextLit)
t, _, err := p.scan()
if err != nil {
return nil, err
}
if t != cbrace {
return nil, &ErrInvalidRevision{s: `missing "}" in @{-n} structure`}
}
return AtCheckout{n}, nil
default:
p.unscan()
date := lit
for {
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == cbrace:
t, err := time.Parse("2006-01-02T15:04:05Z", date)
if err != nil {
return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)}
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
}
}
}
// parseTilde extracts ~ statements.
func (p *Parser) parseTilde() (Revisioner, error) {
var tok token
var lit string
var err error
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == number:
n, _ := strconv.Atoi(lit)
return TildePath{n}, nil
default:
p.unscan()
return TildePath{1}, nil
}
}
// parseCaret extracts ^ statements.
func (p *Parser) parseCaret() (Revisioner, error) {
var tok token
var lit string
var err error
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == obrace:
r, err := p.parseCaretBraces()
if err != nil {
return nil, err
}
return r, nil
case tok == number:
n, _ := strconv.Atoi(lit)
if n > 2 {
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)}
}
return CaretPath{n}, nil
default:
p.unscan()
return CaretPath{1}, nil
}
}
// parseCaretBraces extracts ^{<data>} statements.
func (p *Parser) parseCaretBraces() (Revisioner, error) {
var tok, nextTok token
var lit, _ string
start := true
var re string
var negate bool
var err error
for {
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
nextTok, _, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"):
return CaretType{lit}, nil
case re == "" && tok == cbrace:
return CaretType{"tag"}, nil
case re == "" && tok == emark && nextTok == emark:
re += lit
case re == "" && tok == emark && nextTok == minus:
negate = true
case re == "" && tok == emark:
return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`}
case re == "" && tok == slash:
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit
case tok == cbrace:
p.unscan()
reg, err := regexp.Compile(re)
if err != nil {
return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())}
}
return CaretReg{reg, negate}, nil
}
start = false
}
}
// parseColon extracts : statements.
func (p *Parser) parseColon() (Revisioner, error) {
var tok token
var err error
tok, _, err = p.scan()
if err != nil {
return nil, err
}
switch tok {
case slash:
return p.parseColonSlash()
default:
p.unscan()
return p.parseColonDefault()
}
}
// parseColonSlash extracts :/<data> statements.
func (p *Parser) parseColonSlash() (Revisioner, error) {
var tok, nextTok token
var lit string
var re string
var negate bool
var err error
for {
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
nextTok, _, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == emark && nextTok == emark:
re += lit
case re == "" && tok == emark && nextTok == minus:
negate = true
case re == "" && tok == emark:
return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`}
case tok == eof:
p.unscan()
reg, err := regexp.Compile(re)
if err != nil {
return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())}
}
return ColonReg{reg, negate}, nil
default:
p.unscan()
re += lit
}
}
}
// parseColonDefault extracts :<data> statements.
func (p *Parser) parseColonDefault() (Revisioner, error) {
var tok token
var lit string
var path string
var stage int
var err error
var n = -1
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
nextTok, _, err := p.scan()
if err != nil {
return nil, err
}
if tok == number && nextTok == colon {
n, _ = strconv.Atoi(lit)
}
switch n {
case 0, 1, 2, 3:
stage = n
default:
path += lit
p.unscan()
}
for {
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
switch {
case tok == eof && n == -1:
return ColonPath{path}, nil
case tok == eof:
return ColonStagePath{path, stage}, nil
default:
path += lit
}
}
}
// parseRef extracts a reference name.
func (p *Parser) parseRef() (Revisioner, error) {
var tok, prevTok token
var lit, buf string
var endOfRef bool
var err error
for {
tok, lit, err = p.scan()
if err != nil {
return nil, err
}
switch tok {
case eof, at, colon, tilde, caret:
endOfRef = true
}
err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef)
if err != nil {
return "", err
}
if endOfRef {
p.unscan()
return Ref(buf), nil
}
buf += lit
prevTok = tok
}
}
// checkRefFormat ensures the reference name follows the rules defined here:
// https://git-scm.com/docs/git-check-ref-format
func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error {
switch token {
case aslash, space, control, qmark, asterisk, obracket:
return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)}
}
switch {
case (token == dot || token == slash) && buffer == "":
return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)}
case previousToken == slash && endOfRef:
return &ErrInvalidRevision{`must not end with "/"`}
case previousToken == dot && endOfRef:
return &ErrInvalidRevision{`must not end with "."`}
case token == dot && previousToken == slash:
return &ErrInvalidRevision{`must not contains "/."`}
case previousToken == dot && token == dot:
return &ErrInvalidRevision{`must not contains ".."`}
case previousToken == slash && token == slash:
return &ErrInvalidRevision{`must not contains consecutively "/"`}
case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock":
return &ErrInvalidRevision{"cannot end with .lock"}
}
return nil
}
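// Hedged usage sketch (illustrative, not part of the original file): parsing
// "HEAD~2" yields a Ref chunk followed by a TildePath chunk.
func exampleParseRevision() ([]Revisioner, error) {
	p := NewParserFromString("HEAD~2")
	// Expected result: []Revisioner{Ref("HEAD"), TildePath{Depth: 2}}.
	return p.Parse()
}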
package revision
import (
"bufio"
"io"
"unicode"
)
// runeCategoryValidator takes a rune as input and
// validates that it belongs to a rune category.
type runeCategoryValidator func(r rune) bool
// tokenizeExpression aggregates a series of runes matching the check predicate
// into a single string and returns it together with the given tokenType.
func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) {
var data []rune
data = append(data, ch)
for {
c, _, err := r.ReadRune()
if c == zeroRune {
break
}
if err != nil {
return tokenError, "", err
}
if check(c) {
data = append(data, c)
} else {
err := r.UnreadRune()
if err != nil {
return tokenError, "", err
}
return tokenType, string(data), nil
}
}
return tokenType, string(data), nil
}
// maxRevisionLength holds the maximum length that will be parsed for a
// revision. Git itself doesn't enforce a max length, but rather leans on
// the OS to enforce it via its ARG_MAX.
const maxRevisionLength = 128 * 1024 // 128 KiB
var zeroRune = rune(0)
// scanner represents a lexical scanner.
type scanner struct {
r *bufio.Reader
}
// newScanner returns a new instance of scanner.
func newScanner(r io.Reader) *scanner {
return &scanner{r: bufio.NewReader(io.LimitReader(r, maxRevisionLength))}
}
// scan extracts tokens and their string counterparts
// from the reader.
func (s *scanner) scan() (token, string, error) {
ch, _, err := s.r.ReadRune()
if err != nil && err != io.EOF {
return tokenError, "", err
}
switch ch {
case zeroRune:
return eof, "", nil
case ':':
return colon, string(ch), nil
case '~':
return tilde, string(ch), nil
case '^':
return caret, string(ch), nil
case '.':
return dot, string(ch), nil
case '/':
return slash, string(ch), nil
case '{':
return obrace, string(ch), nil
case '}':
return cbrace, string(ch), nil
case '-':
return minus, string(ch), nil
case '@':
return at, string(ch), nil
case '\\':
return aslash, string(ch), nil
case '?':
return qmark, string(ch), nil
case '*':
return asterisk, string(ch), nil
case '[':
return obracket, string(ch), nil
case '!':
return emark, string(ch), nil
}
if unicode.IsSpace(ch) {
return space, string(ch), nil
}
if unicode.IsControl(ch) {
return control, string(ch), nil
}
if unicode.IsLetter(ch) {
return tokenizeExpression(ch, word, unicode.IsLetter, s.r)
}
if unicode.IsNumber(ch) {
return tokenizeExpression(ch, number, unicode.IsNumber, s.r)
}
return tokenError, string(ch), nil
}
package revision
// token represents an entity extracted during string parsing.
type token int
const (
eof token = iota
aslash
asterisk
at
caret
cbrace
colon
control
dot
emark
minus
number
obrace
obracket
qmark
slash
space
tilde
tokenError
word
)
package url
import (
"regexp"
)
var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
// Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)
)
// MatchesScheme returns true if the given string matches a URL-like
// format scheme.
func MatchesScheme(url string) bool {
return isSchemeRegExp.MatchString(url)
}
// MatchesScpLike returns true if the given string matches an SCP-like
// format scheme.
func MatchesScpLike(url string) bool {
return scpLikeUrlRegExp.MatchString(url)
}
// FindScpLikeComponents returns the user, host, port and path of the
// given SCP-like URL.
func FindScpLikeComponents(url string) (user, host, port, path string) {
m := scpLikeUrlRegExp.FindStringSubmatch(url)
return m[1], m[2], m[3], m[4]
}
// IsLocalEndpoint returns true if the given URL string specifies a
// local file endpoint. For example, on a Linux machine,
// `/home/user/src/go-git` would match as a local endpoint, but
// `https://github.com/src-d/go-git` would not.
func IsLocalEndpoint(url string) bool {
return !MatchesScheme(url) && !MatchesScpLike(url)
}
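// Hedged usage sketch (illustrative, not part of the original file):
// classifying and splitting an SCP-like URL.
func exampleScpLike() {
	u := "git@github.com:go-git/go-git.git"
	if !MatchesScheme(u) && MatchesScpLike(u) {
		user, host, port, path := FindScpLikeComponents(u)
		_, _, _, _ = user, host, port, path // "git", "github.com", "", "go-git/go-git.git"
	}
}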
package git
import (
"fmt"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/storage"
)
type objectWalker struct {
Storer storage.Storer
// seen is the set of objects seen in the repo.
// The seen map can become huge when walking over large
// repos, thus struct{} is used as the value type.
seen map[plumbing.Hash]struct{}
}
func newObjectWalker(s storage.Storer) *objectWalker {
return &objectWalker{s, map[plumbing.Hash]struct{}{}}
}
// walkAllRefs walks all (hash) references from the repo.
func (p *objectWalker) walkAllRefs() error {
// Walk over all the references in the repo.
it, err := p.Storer.IterReferences()
if err != nil {
return err
}
defer it.Close()
err = it.ForEach(func(ref *plumbing.Reference) error {
// Exit this iteration early for non-hash references.
if ref.Type() != plumbing.HashReference {
return nil
}
return p.walkObjectTree(ref.Hash())
})
return err
}
func (p *objectWalker) isSeen(hash plumbing.Hash) bool {
_, seen := p.seen[hash]
return seen
}
func (p *objectWalker) add(hash plumbing.Hash) {
p.seen[hash] = struct{}{}
}
// walkObjectTree walks over all objects and remembers references
// to them in the objectWalker. This is used instead of the revlist
// walks because memory usage is tight with huge repos.
func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
// Check if we have already seen this object, and mark it as seen.
if p.isSeen(hash) {
return nil
}
p.add(hash)
// Fetch the object.
obj, err := object.GetObject(p.Storer, hash)
if err != nil {
return fmt.Errorf("getting object %s failed: %v", hash, err)
}
// Walk all children depending on object type.
switch obj := obj.(type) {
case *object.Commit:
err = p.walkObjectTree(obj.TreeHash)
if err != nil {
return err
}
for _, h := range obj.ParentHashes {
err = p.walkObjectTree(h)
if err != nil {
return err
}
}
case *object.Tree:
for i := range obj.Entries {
// Shortcut for blob objects:
// 'or' the lower bits of a mode and check that it
// matches a filemode.Executable. The type information
// is in the higher bits, but this is the cleanest way
// to handle plain files with different modes.
// Other non-tree objects are somewhat rare, so they
// are not special-cased.
if obj.Entries[i].Mode|0755 == filemode.Executable {
p.add(obj.Entries[i].Hash)
continue
}
// Normal walk for sub-trees (and symlinks etc).
err = p.walkObjectTree(obj.Entries[i].Hash)
if err != nil {
return err
}
}
case *object.Tag:
return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
}
return nil
}
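// Hedged in-package sketch (illustrative, not part of the original file):
// collecting every object reachable from the repository's hash references.
func exampleWalkAllObjects(s storage.Storer) error {
	w := newObjectWalker(s)
	return w.walkAllRefs()
}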
package git
import (
"errors"
"fmt"
"regexp"
"strings"
"time"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
"github.com/go-git/go-git/v5/plumbing/transport"
)
// SubmoduleRescursivity defines how depth will affect any submodule recursive
// operation.
type SubmoduleRescursivity uint
const (
// DefaultRemoteName is the name of the default Remote, just like the git command.
DefaultRemoteName = "origin"
// NoRecurseSubmodules disables the recursion for a submodule operation.
NoRecurseSubmodules SubmoduleRescursivity = 0
// DefaultSubmoduleRecursionDepth allows recursion in a submodule operation.
DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10
)
var (
ErrMissingURL = errors.New("URL field is required")
)
// CloneOptions describes how a clone should be performed.
type CloneOptions struct {
// The (possibly remote) repository URL to clone from.
URL string
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Name of the remote to be added, by default `origin`.
RemoteName string
// Remote branch to clone.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
SingleBranch bool
// Mirror clones the repository as a mirror.
//
// Compared to a bare clone, mirror not only maps local branches of the
// source to local branches of the target, it maps all refs (including
// remote-tracking branches, notes etc.) and sets up a refspec configuration
// such that all these refs are overwritten by a git remote update in the
// target repository.
Mirror bool
// No checkout of HEAD after clone if true.
NoCheckout bool
// Limit fetching to the specified number of commits.
Depth int
// RecurseSubmodules after the clone is created, initialize all submodules
// within, using their default settings. This option is ignored if the
// cloned repository does not have a worktree.
RecurseSubmodules SubmoduleRescursivity
// ShallowSubmodules limits cloning submodules to a depth of 1.
// It matches the git command --shallow-submodules.
ShallowSubmodules bool
// Progress is where the human-readable information sent by the server is
// stored. If nil, nothing is stored and the no-progress capability (if
// supported) is sent to the server to avoid sending this information.
Progress sideband.Progress
// Tags describes how tags will be fetched from the remote repository;
// the default is AllTags.
Tags TagMode
// InsecureSkipTLS skips SSL verification if the protocol is HTTPS.
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool.
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// When the repository to clone is on the local machine, instead of
// using hard links, automatically set up .git/objects/info/alternates
// to share the objects with the source repository.
// The resulting repository starts out without any object of its own.
// NOTE: this is a possibly dangerous operation; do not use it unless
// you understand what it does.
//
// [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared
Shared bool
}
// MergeOptions describes how a merge should be performed.
type MergeOptions struct {
// Strategy defines the merge strategy to be used.
Strategy MergeStrategy
}
// MergeStrategy represents the different types of merge strategies.
type MergeStrategy int8
const (
// FastForwardMerge represents a Git merge strategy where the current
// branch can be simply updated to point to the HEAD of the branch being
// merged. This is only possible if the history of the branch being merged
// is a linear descendant of the current branch, with no conflicting commits.
//
// This is the default option.
FastForwardMerge MergeStrategy = iota
)
// Validate validates the fields and sets the default values.
func (o *CloneOptions) Validate() error {
if o.URL == "" {
return ErrMissingURL
}
if o.RemoteName == "" {
o.RemoteName = DefaultRemoteName
}
if o.ReferenceName == "" {
o.ReferenceName = plumbing.HEAD
}
if o.Tags == InvalidTagMode {
o.Tags = AllTags
}
return nil
}
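// Hedged usage sketch (illustrative, not part of the original file): Validate
// fills in the defaults; only URL is mandatory. The URL below is made up.
func exampleCloneOptionsDefaults() {
	o := &CloneOptions{URL: "https://example.com/repo.git"}
	if err := o.Validate(); err != nil {
		return // the only possible error here is ErrMissingURL
	}
	// After Validate: o.RemoteName == DefaultRemoteName, o.ReferenceName ==
	// plumbing.HEAD and o.Tags == AllTags.
	_ = o
}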
// PullOptions describes how a pull should be performed.
type PullOptions struct {
// Name of the remote to be pulled. If empty, uses the default.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// Remote branch to clone. If empty, uses HEAD.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
SingleBranch bool
// Limit fetching to the specified number of commits.
Depth int
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// RecurseSubmodules controls if new commits of all populated submodules
// should be fetched too.
RecurseSubmodules SubmoduleRescursivity
// Progress is where the human-readable information sent by the server is
// stored. If nil, nothing is stored and the no-progress capability (if
// supported) is sent to the server to avoid sending this information.
Progress sideband.Progress
// Force allows the pull to update a local branch even when the remote
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips SSL verification if the protocol is HTTPS.
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool.
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// Validate validates the fields and sets the default values.
func (o *PullOptions) Validate() error {
if o.RemoteName == "" {
o.RemoteName = DefaultRemoteName
}
if o.ReferenceName == "" {
o.ReferenceName = plumbing.HEAD
}
return nil
}
type TagMode int
const (
InvalidTagMode TagMode = iota
// TagFollowing: any tag that points into the histories being fetched is also
// fetched. TagFollowing requires a server with the `include-tag` capability
// in order to fetch the annotated tag objects.
TagFollowing
// AllTags fetches all tags from the remote (i.e., fetch remote tags
// refs/tags/* into local tags with the same name).
AllTags
// NoTags fetches no tags from the remote at all.
NoTags
)
// FetchOptions describes how a fetch should be performed
type FetchOptions struct {
// Name of the remote to fetch from. Defaults to origin.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
RefSpecs []config.RefSpec
// Depth limits fetching to the specified number of commits from the tip of
// each remote branch's history.
Depth int
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Progress is where the human-readable information sent by the server is
// stored. If nil, nothing is stored and the no-progress capability (if
// supported) is sent to the server to avoid sending this information.
Progress sideband.Progress
// Tags describes how tags will be fetched from the remote repository;
// the default is TagFollowing.
Tags TagMode
// Force allows the fetch to update a local branch even when the remote
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips SSL verification if the protocol is HTTPS.
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool.
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// Prune specifies that local refs that match the given RefSpecs and that do
// not exist remotely will be removed.
Prune bool
}
// Validate validates the fields and sets the default values.
func (o *FetchOptions) Validate() error {
if o.RemoteName == "" {
o.RemoteName = DefaultRemoteName
}
if o.Tags == InvalidTagMode {
o.Tags = TagFollowing
}
for _, r := range o.RefSpecs {
if err := r.Validate(); err != nil {
return err
}
}
return nil
}
// PushOptions describes how a push should be performed.
type PushOptions struct {
// RemoteName is the name of the remote to be pushed to.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// RefSpecs specify what destination ref to update with what source object.
//
// The format of a <refspec> parameter is an optional plus +, followed by
// the source object <src>, followed by a colon :, followed by the destination ref <dst>.
// The <src> is often the name of the branch you would want to push, but it can be a SHA-1.
// The <dst> tells which ref on the remote side is updated with this push.
//
// A refspec with empty src can be used to delete a reference.
RefSpecs []config.RefSpec
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Progress is where the human-readable information sent by the server is
// stored. If nil, nothing is stored.
Progress sideband.Progress
// Prune specifies that remote refs that match the given RefSpecs and that do
// not exist locally will be removed.
Prune bool
// Force allows the push to update a remote branch even when the local
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips SSL verification if the protocol is HTTPS.
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool.
CABundle []byte
// RequireRemoteRefs only allows a remote ref to be updated if its current
// value is the one specified here.
RequireRemoteRefs []config.RefSpec
// FollowTags will send any annotated tags with a commit target reachable from
// the refs already being pushed
FollowTags bool
// ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
ForceWithLease *ForceWithLease
// PushOptions sets options to be transferred to the server during push.
Options map[string]string
// Atomic sets option to be an atomic push
Atomic bool
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// ForceWithLease sets fields on the lease.
// If neither RefName nor Hash are set, ForceWithLease protects
// all refs in the refspec by ensuring the ref of the remote in the local repository
// matches the one in the ref advertisement.
type ForceWithLease struct {
// RefName, when set will protect the ref by ensuring it matches the
// hash in the ref advertisement.
RefName plumbing.ReferenceName
// Hash is the expected object id of RefName. The push will be rejected unless this
// matches the corresponding object id of RefName in the refs advertisement.
Hash plumbing.Hash
}
// Validate validates the fields and sets the default values.
func (o *PushOptions) Validate() error {
if o.RemoteName == "" {
o.RemoteName = DefaultRemoteName
}
if len(o.RefSpecs) == 0 {
o.RefSpecs = []config.RefSpec{
config.RefSpec(config.DefaultPushRefSpec),
}
}
for _, r := range o.RefSpecs {
if err := r.Validate(); err != nil {
return err
}
}
return nil
}
// SubmoduleUpdateOptions describes how a submodule update should be performed.
type SubmoduleUpdateOptions struct {
// Init, if true initializes the submodules recorded in the index.
Init bool
// NoFetch tells the update command not to fetch new objects from the
// remote site.
NoFetch bool
// RecurseSubmodules: the update is performed not only in the submodules of
// the current repository but also in any nested submodules inside those
// submodules (and so on), until the SubmoduleRescursivity is reached.
RecurseSubmodules SubmoduleRescursivity
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Depth limits fetching to the specified number of commits from the tip of
// each remote branch's history.
Depth int
}
var (
ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive")
ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used")
)
// CheckoutOptions describes how a checkout operation should be performed.
type CheckoutOptions struct {
// Hash is the hash of a commit or tag to be checked out. If used, HEAD
// will be in detached mode. If Create is not used, Branch and Hash are
// mutually exclusive.
Hash plumbing.Hash
// Branch to be checked out; if Branch and Hash are empty, it is set to `master`.
Branch plumbing.ReferenceName
// Create a new branch named Branch and start it at Hash.
Create bool
// Force, if true when switching branches, proceed even if the index or the
// working tree differs from HEAD. This is used to throw away local changes.
Force bool
// Keep, if true when switching branches, local changes (the index or the
// working tree changes) will be kept so that they can be committed to the
// target branch. Force and Keep are mutually exclusive and should not both
// be set to true.
Keep bool
// SparseCheckoutDirectories
SparseCheckoutDirectories []string
}
// Validate validates the fields and sets the default values.
func (o *CheckoutOptions) Validate() error {
if !o.Create && !o.Hash.IsZero() && o.Branch != "" {
return ErrBranchHashExclusive
}
if o.Create && o.Branch == "" {
return ErrCreateRequiresBranch
}
if o.Branch == "" {
o.Branch = plumbing.Master
}
return nil
}
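// Hedged usage sketch (illustrative, not part of the original file): with
// Create set, Branch and Hash may be combined to start a new branch at a
// given commit. The hash below is a placeholder.
func exampleCheckoutOptions() error {
	o := &CheckoutOptions{
		Create: true,
		Branch: plumbing.NewBranchReferenceName("feature"),
		Hash:   plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
	}
	return o.Validate() // nil: Create allows Branch and Hash together
}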
// ResetMode defines the mode of a reset operation.
type ResetMode int8
const (
// MixedReset resets the index but not the working tree (i.e., the changed
// files are preserved but not marked for commit) and reports what has not
// been updated. This is the default action.
MixedReset ResetMode = iota
// HardReset resets the index and working tree. Any changes to tracked files
// in the working tree are discarded.
HardReset
// MergeReset resets the index and updates the files in the working tree
// that are different between Commit and HEAD, but keeps those which are
// different between the index and working tree (i.e. which have changes
// which have not been added).
//
// If a file that is different between Commit and the index has unstaged
// changes, reset is aborted.
MergeReset
// SoftReset does not touch the index file or the working tree at all (but
// resets the head to <commit>, just like all modes do). This leaves all
// your changed files "Changes to be committed", as git status would put it.
SoftReset
)
// ResetOptions describes how a reset operation should be performed.
type ResetOptions struct {
// Commit, if present, sets the current branch head (HEAD) to it.
Commit plumbing.Hash
// Mode resets the current branch head to Commit and possibly updates
// the index (resetting it to the tree of Commit) and the working tree,
// depending on Mode. If empty, MixedReset is used.
Mode ResetMode
// Files, if not empty, constrains resetting the index to only the files
// specified in this list.
Files []string
}
// Validate validates the fields and sets the default values.
func (o *ResetOptions) Validate(r *Repository) error {
if o.Commit == plumbing.ZeroHash {
ref, err := r.Head()
if err != nil {
return err
}
o.Commit = ref.Hash()
} else {
_, err := r.CommitObject(o.Commit)
if err != nil {
return fmt.Errorf("invalid reset option: %w", err)
}
}
return nil
}
type LogOrder int8
const (
LogOrderDefault LogOrder = iota
LogOrderDFS
LogOrderDFSPost
LogOrderBSF
LogOrderCommitterTime
)
// LogOptions describes how a log action should be performed.
type LogOptions struct {
// When the From option is set the log will only contain commits
// reachable from it. If this option is not set, HEAD will be used as
// the default From.
From plumbing.Hash
// The default traversal algorithm is depth-first search.
// Set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`).
// Set Order=LogOrderBSF for breadth-first search.
Order LogOrder
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
// This field is kept for compatibility; it can be replaced with PathFilter.
FileName *string
// Filter commits based on the path of files that are updated.
// Takes a file path as argument and should return true if the file is desired.
// It can be used to implement `git log -- <path>`,
// where <path> is a file path, a directory path, or a regexp of a file/directory path.
PathFilter func(string) bool
// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
// It is equivalent to running `git log --all`.
// If set to true, the From option will be ignored.
All bool
// Show commits more recent than a specific date.
// It is equivalent to running `git log --since <date>` or `git log --after <date>`.
Since *time.Time
// Show commits older than a specific date.
// It is equivalent to running `git log --until <date>` or `git log --before <date>`.
Until *time.Time
}
var (
ErrMissingAuthor = errors.New("author field is required")
)
// AddOptions describes how an `add` operation should be performed
type AddOptions struct {
// All equivalent to `git add -A`, update the index not only where the
// working tree has a file matching `Path` but also where the index already
// has an entry. This adds, modifies, and removes index entries to match the
// working tree. If no `Path` nor `Glob` is given when `All` option is
// used, all files in the entire working tree are updated.
All bool
// Path is the exact filepath to the file or directory to be added.
Path string
// Glob adds all paths, matching pattern, to the index. If pattern matches a
// directory path, all directory contents are added to the index recursively.
Glob string
// SkipStatus adds the path with no status check. This option is relevant only
// when the `Path` option is specified and does not apply when the `All` option is used.
// Notice that when passing an ignored path it will be added anyway.
// When true it can speed up adding files to the worktree in very large repositories.
SkipStatus bool
}
// Validate validates the fields and sets the default values.
func (o *AddOptions) Validate(r *Repository) error {
if o.Path != "" && o.Glob != "" {
return fmt.Errorf("fields Path and Glob are mutual exclusive")
}
return nil
}
// CommitOptions describes how a commit operation should be performed.
type CommitOptions struct {
// All automatically stages files that have been modified and deleted, but
// new files you have not told Git about are not affected.
All bool
// AllowEmptyCommits enable empty commits to be created. An empty commit
// is when no changes to the tree were made, but a new commit message is
// provided. The default behavior is false, which results in ErrEmptyCommit.
AllowEmptyCommits bool
// Author is the author's signature of the commit. If Author is nil, the
// Name and Email are read from the config, and time.Now is used as When.
Author *object.Signature
// Committer is the committer's signature of the commit. If Committer is
// nil the Author signature is used.
Committer *object.Signature
// Parents are the parent commits for the new commit; by default, when
// len(Parents) is zero, the hash of the HEAD reference is used.
Parents []plumbing.Hash
// SignKey denotes a key to sign the commit with. A nil value here means the
// commit will not be signed. The private key must be present and already
// decrypted.
SignKey *openpgp.Entity
// Signer denotes a cryptographic signer to sign the commit with.
// A nil value here means the commit will not be signed.
// Takes precedence over SignKey.
Signer Signer
// Amend will create a new commit object and replace the commit that HEAD currently
// points to. Cannot be used with All nor Parents.
Amend bool
}
// Validate validates the fields and sets the default values.
func (o *CommitOptions) Validate(r *Repository) error {
if o.All && o.Amend {
return errors.New("all and amend cannot be used together")
}
if o.Amend && len(o.Parents) > 0 {
return errors.New("parents cannot be used with amend")
}
if o.Author == nil {
if err := o.loadConfigAuthorAndCommitter(r); err != nil {
return err
}
}
if o.Committer == nil {
o.Committer = o.Author
}
if len(o.Parents) == 0 {
head, err := r.Head()
if err != nil && err != plumbing.ErrReferenceNotFound {
return err
}
if head != nil {
o.Parents = []plumbing.Hash{head.Hash()}
}
}
return nil
}
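// Hedged usage sketch (illustrative, not part of the original file): an
// explicit Author skips the config lookup; Committer then defaults to Author
// and Parents to the current HEAD. The signature values are made up.
func exampleCommitOptions(r *Repository) error {
	o := &CommitOptions{
		Author: &object.Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()},
	}
	return o.Validate(r)
}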
func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error {
cfg, err := r.ConfigScoped(config.SystemScope)
if err != nil {
return err
}
if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
o.Author = &object.Signature{
Name: cfg.Author.Name,
Email: cfg.Author.Email,
When: time.Now(),
}
}
if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" {
o.Committer = &object.Signature{
Name: cfg.Committer.Name,
Email: cfg.Committer.Email,
When: time.Now(),
}
}
if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" {
o.Author = &object.Signature{
Name: cfg.User.Name,
Email: cfg.User.Email,
When: time.Now(),
}
}
if o.Author == nil {
return ErrMissingAuthor
}
return nil
}
var (
ErrMissingName = errors.New("name field is required")
ErrMissingTagger = errors.New("tagger field is required")
ErrMissingMessage = errors.New("message field is required")
)
// CreateTagOptions describes how a tag object should be created.
type CreateTagOptions struct {
// Tagger defines the signature of the tag creator. If Tagger is nil, the
// Name and Email are read from the config, and time.Now is used as When.
Tagger *object.Signature
// Message defines the annotation of the tag. It is canonicalized during
// validation into the format expected by git - no leading whitespace and
// ending in a newline.
Message string
// SignKey denotes a key to sign the tag with. A nil value here means the tag
// will not be signed. The private key must be present and already decrypted.
SignKey *openpgp.Entity
}
// Validate validates the fields and sets the default values.
func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
if o.Tagger == nil {
if err := o.loadConfigTagger(r); err != nil {
return err
}
}
if o.Message == "" {
return ErrMissingMessage
}
// Canonicalize the message into the expected message format.
o.Message = strings.TrimSpace(o.Message) + "\n"
return nil
}
func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
cfg, err := r.ConfigScoped(config.SystemScope)
if err != nil {
return err
}
if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
o.Tagger = &object.Signature{
Name: cfg.Author.Name,
Email: cfg.Author.Email,
When: time.Now(),
}
}
if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" {
o.Tagger = &object.Signature{
Name: cfg.User.Name,
Email: cfg.User.Email,
When: time.Now(),
}
}
if o.Tagger == nil {
return ErrMissingTagger
}
return nil
}
// ListOptions describes how a remote list should be performed.
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// InsecureSkipTLS skips SSL verification if the protocol is HTTPS.
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool.
CABundle []byte
// PeelingOption defines how peeled objects are handled during a
// remote list.
PeelingOption PeelingOption
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// Timeout specifies the timeout in seconds for list operations
Timeout int
}
// PeelingOption represents the different ways to handle peeled references.
//
// Peeled references represent the underlying object of an annotated
// (or signed) tag. Refer to upstream documentation for more info:
// https://github.com/git/git/blob/master/Documentation/technical/reftable.txt
type PeelingOption uint8
const (
// IgnorePeeled ignores all peeled reference names. This is the default behavior.
IgnorePeeled PeelingOption = 0
// OnlyPeeled returns only peeled reference names.
OnlyPeeled PeelingOption = 1
// AppendPeeled appends peeled reference names to the reference list.
AppendPeeled PeelingOption = 2
)
// CleanOptions describes how a clean should be performed.
type CleanOptions struct {
Dir bool
}
// GrepOptions describes how a grep should be performed.
type GrepOptions struct {
// Patterns are compiled Regexp objects to be matched.
Patterns []*regexp.Regexp
// InvertMatch selects non-matching lines.
InvertMatch bool
// CommitHash is the hash of the commit from which worktree should be derived.
CommitHash plumbing.Hash
// ReferenceName is the branch or tag name from which worktree should be derived.
ReferenceName plumbing.ReferenceName
// PathSpecs are compiled Regexp objects of pathspec to use in the matching.
PathSpecs []*regexp.Regexp
}
var (
ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed")
)
// Validate validates the fields and sets the default values.
//
// TODO: deprecate in favor of Validate(r *Repository) in v6.
func (o *GrepOptions) Validate(w *Worktree) error {
return o.validate(w.r)
}
func (o *GrepOptions) validate(r *Repository) error {
if !o.CommitHash.IsZero() && o.ReferenceName != "" {
return ErrHashOrReference
}
// If neither CommitHash nor ReferenceName is provided, set the commit hash of
// the repository's head.
if o.CommitHash.IsZero() && o.ReferenceName == "" {
ref, err := r.Head()
if err != nil {
return err
}
o.CommitHash = ref.Hash()
}
return nil
}
// PlainOpenOptions describes how opening a plain repository should be
// performed.
type PlainOpenOptions struct {
// DetectDotGit defines whether parent directories should be
// walked until a .git directory or file is found.
DetectDotGit bool
// Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt).
// NOTE: This option will only work with the filesystem storage.
EnableDotGitCommonDir bool
}
// Validate validates the fields and sets the default values.
func (o *PlainOpenOptions) Validate() error { return nil }
type PlainInitOptions struct {
InitOptions
// Determines if the repository will have a worktree (non-bare) or not (bare).
Bare bool
ObjectFormat formatcfg.ObjectFormat
}
// Validate validates the fields and sets the default values.
func (o *PlainInitOptions) Validate() error { return nil }
var (
ErrNoRestorePaths = errors.New("you must specify path(s) to restore")
)
// RestoreOptions describes how a restore should be performed.
type RestoreOptions struct {
// Marks to restore the content in the index
Staged bool
// Marks to restore the content of the working tree
Worktree bool
// List of file paths that will be restored
Files []string
}
// Validate validates the fields and sets the default values.
func (o *RestoreOptions) Validate() error {
if len(o.Files) == 0 {
return ErrNoRestorePaths
}
return nil
}
#!/bin/bash -eu
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
go mod download
go get github.com/AdamKorcz/go-118-fuzz-build/testing
if [ "$SANITIZER" != "coverage" ]; then
sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go
sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go
sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go
sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go
fi
compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser
compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config
compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode
compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp
compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint
package cache
import (
"container/list"
"sync"
)
// BufferLRU implements an object cache with an LRU eviction policy and a
// maximum size (measured in object size).
type BufferLRU struct {
MaxSize FileSize
actualSize FileSize
ll *list.List
cache map[int64]*list.Element
mut sync.Mutex
}
// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
// size will never be exceeded.
func NewBufferLRU(maxSize FileSize) *BufferLRU {
return &BufferLRU{MaxSize: maxSize}
}
// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
func NewBufferLRUDefault() *BufferLRU {
return &BufferLRU{MaxSize: DefaultMaxSize}
}
type buffer struct {
Key int64
Slice []byte
}
// Put puts a buffer into the cache. If the buffer is already in the cache, it
// will be marked as used. Otherwise, it will be inserted. Buffers might be
// evicted to make room for the new one.
func (c *BufferLRU) Put(key int64, slice []byte) {
c.mut.Lock()
defer c.mut.Unlock()
if c.cache == nil {
c.actualSize = 0
c.cache = make(map[int64]*list.Element, 1000)
c.ll = list.New()
}
bufSize := FileSize(len(slice))
if ee, ok := c.cache[key]; ok {
oldBuf := ee.Value.(buffer)
// in this case bufSize is a delta: new size - old size
bufSize -= FileSize(len(oldBuf.Slice))
c.ll.MoveToFront(ee)
ee.Value = buffer{key, slice}
} else {
if bufSize > c.MaxSize {
return
}
ee := c.ll.PushFront(buffer{key, slice})
c.cache[key] = ee
}
c.actualSize += bufSize
for c.actualSize > c.MaxSize {
last := c.ll.Back()
lastObj := last.Value.(buffer)
lastSize := FileSize(len(lastObj.Slice))
c.ll.Remove(last)
delete(c.cache, lastObj.Key)
c.actualSize -= lastSize
}
}
// Get returns a buffer by its key. It marks the buffer as used. If the buffer
// is not in the cache, (nil, false) will be returned.
func (c *BufferLRU) Get(key int64) ([]byte, bool) {
c.mut.Lock()
defer c.mut.Unlock()
ee, ok := c.cache[key]
if !ok {
return nil, false
}
c.ll.MoveToFront(ee)
return ee.Value.(buffer).Slice, true
}
// Clear the content of this buffer cache.
func (c *BufferLRU) Clear() {
c.mut.Lock()
defer c.mut.Unlock()
c.ll = nil
c.cache = nil
c.actualSize = 0
}
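// A minimal usage sketch for BufferLRU, assuming the go-git/v5 module path used
// elsewhere in this repository; keys are arbitrary int64 identifiers chosen by
// the caller (for example, packfile offsets).
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/cache"
)

func main() {
	// Keep at most 1 KiByte of buffer data; least recently used entries are
	// evicted first once the limit is exceeded.
	c := cache.NewBufferLRU(1 * cache.KiByte)

	c.Put(42, []byte("hello"))
	if buf, ok := c.Get(42); ok {
		fmt.Printf("key 42 -> %q\n", buf)
	}

	c.Clear()
}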
package cache
import "github.com/go-git/go-git/v5/plumbing"
const (
Byte FileSize = 1 << (iota * 10)
KiByte
MiByte
GiByte
)
type FileSize int64
const DefaultMaxSize FileSize = 96 * MiByte
// Object is an interface to an object cache.
type Object interface {
// Put puts the given object into the cache. Whether this object will
// actually be put into the cache or not is implementation specific.
Put(o plumbing.EncodedObject)
// Get gets an object from the cache given its hash. The second return value
// is true if the object was returned, and false otherwise.
Get(k plumbing.Hash) (plumbing.EncodedObject, bool)
// Clear clears every object from the cache.
Clear()
}
// Buffer is an interface to a buffer cache.
type Buffer interface {
// Put puts a buffer into the cache. If the buffer is already in the cache,
// it will be marked as used. Otherwise, it will be inserted. Buffer might
// be evicted to make room for the new one.
Put(key int64, slice []byte)
// Get returns a buffer by its key. It marks the buffer as used. If the
// buffer is not in the cache, (nil, false) will be returned.
Get(key int64) ([]byte, bool)
// Clear clears every object from the cache.
Clear()
}
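// The two interfaces above are satisfied by the LRU implementations in this
// package; the blank-identifier assertions below are the usual compile-time
// checks, shown here only as a sketch.
package cache

var (
	_ Object = (*ObjectLRU)(nil)
	_ Buffer = (*BufferLRU)(nil)
)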
package cache
import (
"container/list"
"sync"
"github.com/go-git/go-git/v5/plumbing"
)
// ObjectLRU implements an object cache with an LRU eviction policy and a
// maximum size (measured in object size).
type ObjectLRU struct {
MaxSize FileSize
actualSize FileSize
ll *list.List
cache map[interface{}]*list.Element
mut sync.Mutex
}
// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum
// size will never be exceeded.
func NewObjectLRU(maxSize FileSize) *ObjectLRU {
return &ObjectLRU{MaxSize: maxSize}
}
// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
func NewObjectLRUDefault() *ObjectLRU {
return &ObjectLRU{MaxSize: DefaultMaxSize}
}
// Put puts an object into the cache. If the object is already in the cache, it
// will be marked as used. Otherwise, it will be inserted. Objects might be
// evicted to make room for the new object.
func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
c.mut.Lock()
defer c.mut.Unlock()
if c.cache == nil {
c.actualSize = 0
c.cache = make(map[interface{}]*list.Element, 1000)
c.ll = list.New()
}
objSize := FileSize(obj.Size())
key := obj.Hash()
if ee, ok := c.cache[key]; ok {
oldObj := ee.Value.(plumbing.EncodedObject)
// in this case objSize is a delta: new size - old size
objSize -= FileSize(oldObj.Size())
c.ll.MoveToFront(ee)
ee.Value = obj
} else {
if objSize > c.MaxSize {
return
}
ee := c.ll.PushFront(obj)
c.cache[key] = ee
}
c.actualSize += objSize
for c.actualSize > c.MaxSize {
last := c.ll.Back()
if last == nil {
c.actualSize = 0
break
}
lastObj := last.Value.(plumbing.EncodedObject)
lastSize := FileSize(lastObj.Size())
c.ll.Remove(last)
delete(c.cache, lastObj.Hash())
c.actualSize -= lastSize
}
}
// Get returns an object by its hash. It marks the object as used. If the object
// is not in the cache, (nil, false) will be returned.
func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) {
c.mut.Lock()
defer c.mut.Unlock()
ee, ok := c.cache[k]
if !ok {
return nil, false
}
c.ll.MoveToFront(ee)
return ee.Value.(plumbing.EncodedObject), true
}
// Clear the content of this object cache.
func (c *ObjectLRU) Clear() {
c.mut.Lock()
defer c.mut.Unlock()
c.ll = nil
c.cache = nil
c.actualSize = 0
}
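// A minimal usage sketch for ObjectLRU, assuming the go-git/v5 module path and
// the plumbing.MemoryObject helper for building an in-memory encoded object.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/cache"
)

func main() {
	// 96 MiByte by default (DefaultMaxSize).
	c := cache.NewObjectLRUDefault()

	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	if _, err := obj.Write([]byte("hello")); err != nil {
		panic(err)
	}

	c.Put(obj)
	if cached, ok := c.Get(obj.Hash()); ok {
		fmt.Println("cached object:", cached.Type(), cached.Size())
	}
}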
package color
// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct
// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c
// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53.
const (
Normal = ""
Reset = "\033[m"
Bold = "\033[1m"
Red = "\033[31m"
Green = "\033[32m"
Yellow = "\033[33m"
Blue = "\033[34m"
Magenta = "\033[35m"
Cyan = "\033[36m"
BoldRed = "\033[1;31m"
BoldGreen = "\033[1;32m"
BoldYellow = "\033[1;33m"
BoldBlue = "\033[1;34m"
BoldMagenta = "\033[1;35m"
BoldCyan = "\033[1;36m"
FaintRed = "\033[2;31m"
FaintGreen = "\033[2;32m"
FaintYellow = "\033[2;33m"
FaintBlue = "\033[2;34m"
FaintMagenta = "\033[2;35m"
FaintCyan = "\033[2;36m"
BgRed = "\033[41m"
BgGreen = "\033[42m"
BgYellow = "\033[43m"
BgBlue = "\033[44m"
BgMagenta = "\033[45m"
BgCyan = "\033[46m"
Faint = "\033[2m"
FaintItalic = "\033[2;3m"
Reverse = "\033[7m"
)
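// A small usage sketch for the ANSI escape constants above, assuming the
// go-git/v5 module path; output is wrapped in a color and reset afterwards.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/color"
)

func main() {
	fmt.Println(color.BoldGreen + "up to date" + color.Reset)
	fmt.Println(color.Red + "conflict: README.md" + color.Reset)
}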
package plumbing
import "fmt"
// PermanentError wraps a client error that is considered permanent.
type PermanentError struct {
	Err error
}

// NewPermanentError returns a *PermanentError wrapping err, or nil if err is
// nil.
func NewPermanentError(err error) *PermanentError {
if err == nil {
return nil
}
return &PermanentError{Err: err}
}
func (e *PermanentError) Error() string {
return fmt.Sprintf("permanent client error: %s", e.Err.Error())
}
// UnexpectedError wraps a client error that was not expected.
type UnexpectedError struct {
	Err error
}

// NewUnexpectedError returns an *UnexpectedError wrapping err, or nil if err
// is nil.
func NewUnexpectedError(err error) *UnexpectedError {
if err == nil {
return nil
}
return &UnexpectedError{Err: err}
}
func (e *UnexpectedError) Error() string {
return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
}
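// A brief sketch of how these wrappers are used: an underlying error is
// classified as permanent or unexpected, and the original error remains
// reachable through the Err field. The nil-in, nil-out behaviour lets callers
// wrap unconditionally.
package main

import (
	"errors"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	cause := errors.New("repository not found")

	perr := plumbing.NewPermanentError(cause)
	fmt.Println(perr)              // permanent client error: repository not found
	fmt.Println(perr.Err == cause) // true

	fmt.Println(plumbing.NewUnexpectedError(nil) == nil) // true
}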
package filemode
import (
"encoding/binary"
"fmt"
"os"
"strconv"
)
// A FileMode represents the kind of tree entries used by git. It
// resembles regular file system modes, although FileModes are
// considerably simpler (there are far fewer of them), and some,
// like Submodule, have no file system equivalent.
type FileMode uint32
const (
// Empty is used as the FileMode of tree elements when comparing
// trees in the following situations:
//
// - the mode of tree elements before their creation.
// - the mode of tree elements after their deletion.
// - the mode of unmerged elements when checking the index.
//
// Empty has no file system equivalent. As Empty is the zero value
// of FileMode, it is also returned by New and
// NewFromOSFileMode along with an error, when they fail.
Empty FileMode = 0
// Dir represents a directory.
Dir FileMode = 0040000
// Regular represents non-executable files. Please note this is not
// the same as Go's regular files, which include executable files.
Regular FileMode = 0100644
// Deprecated represents non-executable files with the group-writable
// bit set. This mode was supported by the first versions of git,
// but it has since been deprecated. This library handles it
// internally, so old packfiles can still be read, but such entries
// are treated as Regular when interfacing with the outside world.
// This is the standard git behaviour.
Deprecated FileMode = 0100664
// Executable represents executable files.
Executable FileMode = 0100755
// Symlink represents symbolic links to files.
Symlink FileMode = 0120000
// Submodule represents git submodules. This mode has no file system
// equivalent.
Submodule FileMode = 0160000
)
// New takes the octal string representation of a FileMode and returns
// the FileMode and a nil error. If the string cannot be parsed as a
// 32-bit unsigned octal number, it returns Empty and the parsing error.
//
// Example: "40000" means Dir, "100644" means Regular.
//
// Please note this function does not check if the returned FileMode
// is valid in git or if it is malformed. For instance, "1" will
// return the malformed FileMode(1) and a nil error.
func New(s string) (FileMode, error) {
n, err := strconv.ParseUint(s, 8, 32)
if err != nil {
return Empty, err
}
return FileMode(n), nil
}
// NewFromOSFileMode returns the FileMode used by git to represent
// the provided file system modes and a nil error on success. If the
// file system mode cannot be mapped to any valid git mode (as with
// sockets or named pipes), it will return Empty and an error.
//
// Note that some git modes, like Deprecated and Submodule, cannot be
// generated from any os.FileMode; Empty is returned, along with an
// error, only when the method fails.
func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
if m.IsRegular() {
if isSetTemporary(m) {
return Empty, fmt.Errorf("no equivalent git mode for %s", m)
}
if isSetCharDevice(m) {
return Empty, fmt.Errorf("no equivalent git mode for %s", m)
}
if isSetUserExecutable(m) {
return Executable, nil
}
return Regular, nil
}
if m.IsDir() {
return Dir, nil
}
if isSetSymLink(m) {
return Symlink, nil
}
return Empty, fmt.Errorf("no equivalent git mode for %s", m)
}
func isSetCharDevice(m os.FileMode) bool {
return m&os.ModeCharDevice != 0
}
func isSetTemporary(m os.FileMode) bool {
return m&os.ModeTemporary != 0
}
func isSetUserExecutable(m os.FileMode) bool {
return m&0100 != 0
}
func isSetSymLink(m os.FileMode) bool {
return m&os.ModeSymlink != 0
}
// Bytes returns a slice of 4 bytes with the mode in little-endian
// encoding.
func (m FileMode) Bytes() []byte {
ret := make([]byte, 4)
binary.LittleEndian.PutUint32(ret, uint32(m))
return ret
}
// IsMalformed reports whether the FileMode should not appear in a git
// packfile: that is, Empty and any other mode not defined as a constant
// in this package.
func (m FileMode) IsMalformed() bool {
return m != Dir &&
m != Regular &&
m != Deprecated &&
m != Executable &&
m != Symlink &&
m != Submodule
}
// String returns the FileMode as a string in the standard git format,
// that is, an octal number padded with zeros to 7 digits. Malformed
// modes are printed in that same format, for easier debugging.
//
// Example: Regular is "0100644", Empty is "0000000".
func (m FileMode) String() string {
return fmt.Sprintf("%07o", uint32(m))
}
// IsRegular reports whether the FileMode represents a regular file,
// that is, either Regular or Deprecated. Please note that Executable
// files are not regular here, even though in the UNIX tradition they
// usually are: see the IsFile method.
func (m FileMode) IsRegular() bool {
return m == Regular ||
m == Deprecated
}
// IsFile reports whether the FileMode represents a file, that is,
// Regular, Deprecated, Executable or Symlink.
func (m FileMode) IsFile() bool {
return m == Regular ||
m == Deprecated ||
m == Executable ||
m == Symlink
}
// ToOSFileMode returns the os.FileMode to be used when creating file
// system elements with the given git mode and a nil error on success.
//
// When the provided mode cannot be mapped to a valid file system mode
// (e.g. Submodule) it returns os.FileMode(0) and an error.
//
// The returned file mode does not take into account the umask.
func (m FileMode) ToOSFileMode() (os.FileMode, error) {
switch m {
case Dir:
return os.ModePerm | os.ModeDir, nil
case Submodule:
return os.ModePerm | os.ModeDir, nil
case Regular:
return os.FileMode(0644), nil
// Deprecated is no longer allowed: treated as a Regular instead
case Deprecated:
return os.FileMode(0644), nil
case Executable:
return os.FileMode(0755), nil
case Symlink:
return os.ModePerm | os.ModeSymlink, nil
}
return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
}
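// A minimal sketch exercising the conversions above, assuming the go-git/v5
// module path: parsing a git mode string, mapping an os.FileMode to a git
// mode, and converting a git mode back to an os.FileMode.
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/filemode"
)

func main() {
	m, err := filemode.New("100755")
	if err != nil {
		panic(err)
	}
	fmt.Println(m, m == filemode.Executable) // 0100755 true

	// A directory on the file system maps to Dir.
	d, _ := filemode.NewFromOSFileMode(os.ModeDir | 0755)
	fmt.Println(d == filemode.Dir) // true

	// And back: Regular becomes a 0644 file system mode.
	osMode, err := filemode.Regular.ToOSFileMode()
	fmt.Println(osMode, err) // -rw-r--r-- <nil>
}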
package config
// New creates a new config instance.
func New() *Config {
return &Config{}
}
// Config contains all the sections, comments and includes from a config file.
type Config struct {
Comment *Comment
Sections Sections
Includes Includes
}
// Includes is a list of Includes in a config file.
type Includes []*Include
// Include is a reference to an included config file.
type Include struct {
Path string
Config *Config
}
// Comment is a comment string, stored without the leading '#' or ';' prefix.
type Comment string
const (
// NoSubsection token is passed to Config.AddOption and Config.SetOption
// as the subsection argument to represent the absence of a subsection.
NoSubsection = ""
)
// Section returns an existing section with the given name or creates a new one.
func (c *Config) Section(name string) *Section {
for i := len(c.Sections) - 1; i >= 0; i-- {
s := c.Sections[i]
if s.IsName(name) {
return s
}
}
s := &Section{Name: name}
c.Sections = append(c.Sections, s)
return s
}
// HasSection checks if the Config has a section with the specified name.
func (c *Config) HasSection(name string) bool {
for _, s := range c.Sections {
if s.IsName(name) {
return true
}
}
return false
}
// RemoveSection removes a section from a config file.
func (c *Config) RemoveSection(name string) *Config {
result := Sections{}
for _, s := range c.Sections {
if !s.IsName(name) {
result = append(result, s)
}
}
c.Sections = result
return c
}
// RemoveSubsection removes a subsection from a config file.
func (c *Config) RemoveSubsection(section string, subsection string) *Config {
for _, s := range c.Sections {
if s.IsName(section) {
result := Subsections{}
for _, ss := range s.Subsections {
if !ss.IsName(subsection) {
result = append(result, ss)
}
}
s.Subsections = result
}
}
return c
}
// AddOption adds an option to a given section and subsection. Use the
// NoSubsection constant for the subsection argument if no subsection is wanted.
func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
if subsection == "" {
c.Section(section).AddOption(key, value)
} else {
c.Section(section).Subsection(subsection).AddOption(key, value)
}
return c
}
// SetOption sets an option to a given section and subsection. Use the
// NoSubsection constant for the subsection argument if no subsection is wanted.
func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
if subsection == "" {
c.Section(section).SetOption(key, value)
} else {
c.Section(section).Subsection(subsection).SetOption(key, value)
}
return c
}
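// A short sketch of building a configuration in memory with the API above,
// assuming the go-git/v5 module path: sections and subsections are created on
// demand, and options can be appended or replaced.
package main

import (
	"fmt"

	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := format.New()

	// "core" has no subsection, so NoSubsection is passed explicitly.
	cfg.SetOption("core", format.NoSubsection, "bare", "false")

	// "remote" options live under the "origin" subsection.
	cfg.AddOption("remote", "origin", "url", "https://example.com/repo.git")

	fmt.Println(cfg.HasSection("core"), cfg.HasSection("remote")) // true true

	cfg.RemoveSubsection("remote", "origin")
	cfg.RemoveSection("core")
	fmt.Println(cfg.HasSection("core")) // false
}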
package config
import (
"io"
"github.com/go-git/gcfg"
)
// A Decoder reads and decodes config files from an input stream.
type Decoder struct {
io.Reader
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r}
}
// Decode reads the whole config from its input and stores it in the
// value pointed to by config.
func (d *Decoder) Decode(config *Config) error {
cb := func(s string, ss string, k string, v string, bv bool) error {
if ss == "" && k == "" {
config.Section(s)
return nil
}
if ss != "" && k == "" {
config.Section(s).Subsection(ss)
return nil
}
config.AddOption(s, ss, k, v)
return nil
}
return gcfg.ReadWithCallback(d, cb)
}
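// A short sketch of the decoder, assuming the go-git/v5 module path: a
// git-config style document is read from any io.Reader and materialised into
// the Config structure defined above.
package main

import (
	"fmt"
	"strings"

	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	raw := `[core]
	bare = false
[remote "origin"]
	url = https://example.com/repo.git
`
	cfg := format.New()
	if err := format.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.HasSection("core"), cfg.HasSection("remote")) // true true
}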