//go:build plan9
// +build plan9
package osfs
import (
"io"
"os"
"path/filepath"
"syscall"
)
func (f *file) Lock() error {
// Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
//
// Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
// for I/O by only one fid at a time across all clients of the server. If a
// second open is attempted, it draws an error.”
//
// There is no obvious way to implement this function using the exclusive use bit.
// See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
// for how file locking is done by the go tool on Plan 9.
return nil
}
func (f *file) Unlock() error {
return nil
}
func rename(from, to string) error {
// If from and to are in different directories, copy the file
// since Plan 9 does not support cross-directory rename.
if filepath.Dir(from) != filepath.Dir(to) {
fi, err := os.Stat(from)
if err != nil {
return &os.LinkError{"rename", from, to, err}
}
if fi.Mode().IsDir() {
return &os.LinkError{"rename", from, to, syscall.EISDIR}
}
fromFile, err := os.Open(from)
if err != nil {
return &os.LinkError{"rename", from, to, err}
}
defer fromFile.Close()
toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
if err != nil {
return &os.LinkError{"rename", from, to, err}
}
defer toFile.Close()
_, err = io.Copy(toFile, fromFile)
if err != nil {
return &os.LinkError{"rename", from, to, err}
}
// Copy mtime and mode from original file.
// We need only one syscall if we avoid os.Chmod and os.Chtimes.
dir := fi.Sys().(*syscall.Dir)
var d syscall.Dir
d.Null()
d.Mtime = dir.Mtime
d.Mode = dir.Mode
if err = dirwstat(to, &d); err != nil {
return &os.LinkError{"rename", from, to, err}
}
// Remove original file.
err = os.Remove(from)
if err != nil {
return &os.LinkError{"rename", from, to, err}
}
return nil
}
return os.Rename(from, to)
}
func dirwstat(name string, d *syscall.Dir) error {
var buf [syscall.STATFIXLEN]byte
n, err := d.Marshal(buf[:])
if err != nil {
return &os.PathError{"dirwstat", name, err}
}
if err = syscall.Wstat(name, buf[:n]); err != nil {
return &os.PathError{"dirwstat", name, err}
}
return nil
}
func umask(new int) func() {
return func() {
}
}
//go:build !plan9 && !windows && !wasm
// +build !plan9,!windows,!wasm
package osfs
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
func (f *file) Lock() error {
f.m.Lock()
defer f.m.Unlock()
return unix.Flock(int(f.File.Fd()), unix.LOCK_EX)
}
func (f *file) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
return unix.Flock(int(f.File.Fd()), unix.LOCK_UN)
}
func rename(from, to string) error {
return os.Rename(from, to)
}
// umask sets umask to a new value, and returns a func which allows the
// caller to reset it back to what it was originally.
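//
// A typical use (sketch): defer umask(0022)() applies the new mask right away
// and restores the previous one when the surrounding function returns.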
func umask(new int) func() {
old := syscall.Umask(new)
return func() {
syscall.Umask(old)
}
}
//go:build wasip1
// +build wasip1
package osfs
import (
"os"
"syscall"
)
func (f *file) Lock() error {
f.m.Lock()
defer f.m.Unlock()
return nil
}
func (f *file) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
return nil
}
func rename(from, to string) error {
return os.Rename(from, to)
}
// umask sets umask to a new value, and returns a func which allows the
// caller to reset it back to what it was originally.
func umask(new int) func() {
old := syscall.Umask(new)
return func() {
syscall.Umask(old)
}
}
//go:build windows
// +build windows
package osfs
import (
"os"
"runtime"
"unsafe"
"golang.org/x/sys/windows"
)
var (
kernel32DLL = windows.NewLazySystemDLL("kernel32.dll")
lockFileExProc = kernel32DLL.NewProc("LockFileEx")
unlockFileProc = kernel32DLL.NewProc("UnlockFile")
)
const (
lockfileExclusiveLock = 0x2
)
func (f *file) Lock() error {
f.m.Lock()
defer f.m.Unlock()
var overlapped windows.Overlapped
// err is always non-nil as per sys/windows semantics.
ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0,
uintptr(unsafe.Pointer(&overlapped)))
runtime.KeepAlive(&overlapped)
if ret == 0 {
return err
}
return nil
}
func (f *file) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
// err is always non-nil as per sys/windows semantics.
ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0)
if ret == 0 {
return err
}
return nil
}
func rename(from, to string) error {
return os.Rename(from, to)
}
func umask(new int) func() {
return func() {
}
}
package util
import (
"path/filepath"
"sort"
"strings"
"github.com/go-git/go-billy/v5"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// Function originally from https://golang.org/src/path/filepath/match_test.go
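//
// For example (a sketch with an illustrative pattern), Glob(fs, "config/*.yml")
// returns the paths of all .yml files directly under config/.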
func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) {
if !hasMeta(pattern) {
if _, err = fs.Lstat(pattern); err != nil {
return nil, nil
}
return []string{pattern}, nil
}
dir, file := filepath.Split(pattern)
// Prevent infinite recursion. See issue 15879.
if dir == pattern {
return nil, filepath.ErrBadPattern
}
var m []string
m, err = Glob(fs, cleanGlobPath(dir))
if err != nil {
return
}
for _, d := range m {
matches, err = glob(fs, d, file, matches)
if err != nil {
return
}
}
return
}
// cleanGlobPath prepares path for glob matching.
func cleanGlobPath(path string) string {
switch path {
case "":
return "."
case string(filepath.Separator):
// do nothing to the path
return path
default:
return path[0 : len(path)-1] // chop off trailing separator
}
}
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) {
m = matches
fi, err := fs.Stat(dir)
if err != nil {
return
}
if !fi.IsDir() {
return
}
names, _ := readdirnames(fs, dir)
sort.Strings(names)
for _, n := range names {
matched, err := filepath.Match(pattern, n)
if err != nil {
return m, err
}
if matched {
m = append(m, filepath.Join(dir, n))
}
}
return
}
// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
return strings.ContainsAny(path, "*?[")
}
func readdirnames(fs billy.Filesystem, dir string) ([]string, error) {
files, err := fs.ReadDir(dir)
if err != nil {
return nil, err
}
var names []string
for _, file := range files {
names = append(names, file.Name())
}
return names, nil
}
package util
import (
"errors"
"io"
"os"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/go-git/go-billy/v5"
)
// RemoveAll removes path and any children it contains. It removes everything it
// can but returns the first error it encounters. If the path does not exist,
// RemoveAll returns nil (no error).
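//
// For example (a sketch with an illustrative path), RemoveAll(fs, "build")
// deletes the build directory and everything beneath it.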
func RemoveAll(fs billy.Basic, path string) error {
fs, path = getUnderlyingAndPath(fs, path)
if r, ok := fs.(removerAll); ok {
return r.RemoveAll(path)
}
return removeAll(fs, path)
}
type removerAll interface {
RemoveAll(string) error
}
func removeAll(fs billy.Basic, path string) error {
// This implementation is adapted from os.RemoveAll.
// Simple case: if Remove works, we're done.
err := fs.Remove(path)
if err == nil || errors.Is(err, os.ErrNotExist) {
return nil
}
// Otherwise, is this a directory we need to recurse into?
dir, serr := fs.Stat(path)
if serr != nil {
if errors.Is(serr, os.ErrNotExist) {
return nil
}
return serr
}
if !dir.IsDir() {
// Not a directory; return the error from Remove.
return err
}
dirfs, ok := fs.(billy.Dir)
if !ok {
return billy.ErrNotSupported
}
// Directory.
fis, err := dirfs.ReadDir(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// Race. It was deleted between the Lstat and Open.
// Return nil per RemoveAll's docs.
return nil
}
return err
}
// Remove contents & return first error.
err = nil
for _, fi := range fis {
cpath := fs.Join(path, fi.Name())
err1 := removeAll(fs, cpath)
if err == nil {
err = err1
}
}
// Remove directory.
err1 := fs.Remove(path)
if err1 == nil || errors.Is(err1, os.ErrNotExist) {
return nil
}
if err == nil {
err = err1
}
return err
}
// WriteFile writes data to a file named by filename in the given filesystem.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
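//
// For example (a sketch with illustrative arguments):
//
//	err := WriteFile(fs, "notes.txt", []byte("hello\n"), 0644)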
func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) (err error) {
f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
defer func() {
if f != nil {
err1 := f.Close()
if err == nil {
err = err1
}
}
}()
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
return err
}
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var rand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
randmu.Lock()
r := rand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
rand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFile creates a new temporary file in the directory dir with a name
// beginning with prefix, opens the file for reading and writing, and returns
// the resulting *os.File. If dir is the empty string, TempFile uses the default
// directory for temporary files (see os.TempDir). Multiple programs calling
// TempFile simultaneously will not choose the same file. The caller can use
// f.Name() to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
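//
// For example (a sketch with an illustrative prefix):
//
//	f, err := TempFile(fs, "", "example-")
//	// ... use f, then fs.Remove(f.Name()) when done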
func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) {
// This implementation is based on stdlib ioutil.TempFile.
if dir == "" {
dir = getTempDir(fs)
}
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextSuffix())
f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if errors.Is(err, os.ErrExist) {
if nconflict++; nconflict > 10 {
randmu.Lock()
rand = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}
// TempDir creates a new temporary directory in the directory dir
// with a name beginning with prefix and returns the path of the
// new directory. If dir is the empty string, TempDir uses the
// default directory for temporary files (see os.TempDir).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) {
// This implementation is based on stdlib ioutil.TempDir
if dir == "" {
dir = getTempDir(fs.(billy.Basic))
}
nconflict := 0
for i := 0; i < 10000; i++ {
try := filepath.Join(dir, prefix+nextSuffix())
err = fs.MkdirAll(try, 0700)
if errors.Is(err, os.ErrExist) {
if nconflict++; nconflict > 10 {
randmu.Lock()
rand = reseed()
randmu.Unlock()
}
continue
}
if errors.Is(err, os.ErrNotExist) {
if _, err := os.Stat(dir); errors.Is(err, os.ErrNotExist) {
return "", err
}
}
if err == nil {
name = try
}
break
}
return
}
func getTempDir(fs billy.Basic) string {
ch, ok := fs.(billy.Chroot)
if !ok || ch.Root() == "" || ch.Root() == "/" || ch.Root() == string(filepath.Separator) {
return os.TempDir()
}
return ".tmp"
}
type underlying interface {
Underlying() billy.Basic
}
func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) {
u, ok := fs.(underlying)
if !ok {
return fs, path
}
if ch, ok := fs.(billy.Chroot); ok {
path = fs.Join(ch.Root(), path)
}
return u.Underlying(), path
}
// ReadFile reads the named file and returns the contents from the given filesystem.
// A successful call returns err == nil, not err == EOF.
// Because ReadFile reads the whole file, it does not treat an EOF from Read
// as an error to be reported.
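//
// For example (a sketch with an illustrative path):
//
//	data, err := ReadFile(fs, "config/app.toml")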
func ReadFile(fs billy.Basic, name string) ([]byte, error) {
f, err := fs.Open(name)
if err != nil {
return nil, err
}
defer f.Close()
var size int
if info, err := fs.Stat(name); err == nil {
size64 := info.Size()
if int64(int(size64)) == size64 {
size = int(size64)
}
}
size++ // one byte for final read at EOF
// If a file claims a small size, read at least 512 bytes.
// In particular, files in Linux's /proc claim size 0 but
// then do not work right if read in small pieces,
// so an initial read of 1 byte would not work correctly.
if size < 512 {
size = 512
}
data := make([]byte, 0, size)
for {
if len(data) >= cap(data) {
d := append(data[:cap(data)], 0)
data = d[:len(data)]
}
n, err := f.Read(data[len(data):cap(data)])
data = data[:len(data)+n]
if err != nil {
if errors.Is(err, io.EOF) {
err = nil
}
return data, err
}
}
}
package util
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
if !info.IsDir() {
return walkFn(path, info, nil)
}
names, err := readdirnames(fs, path)
err1 := walkFn(path, info, err)
// If err != nil, walk can't walk into this directory.
// err1 != nil means walkFn wants walk to skip this directory or stop walking.
// Therefore, if one of err and err1 isn't nil, walk will return.
if err != nil || err1 != nil {
// The caller's behavior is controlled by the return value, which is decided
// by walkFn. walkFn may ignore err and return nil.
// If walkFn returns SkipDir, it will be handled by the caller.
// So walk should return whatever walkFn returns.
return err1
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := fs.Lstat(filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// Walk walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by fn: see the WalkFunc documentation for
// details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory. Walk does not follow symbolic links.
//
// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
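//
// For example (a sketch), printing every path in the tree rooted at "/":
//
//	err := Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
//		if err != nil {
//			return err
//		}
//		fmt.Println(path)
//		return nil
//	})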
func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
info, err := fs.Lstat(root)
if err != nil {
err = walkFn(root, nil, err)
} else {
err = walk(fs, root, info, walkFn)
}
if err == filepath.SkipDir {
return nil
}
return err
}
coverage.out
*~
coverage.txt
profile.out
.tmp/
.git-dist/
.vscode
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at conduct@sourced.tech. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
# Supported Features
Here is a non-comprehensive table of git commands and features and their
compatibility status with go-git.
## Getting and creating repositories
| Feature | Sub-feature | Status | Notes | Examples |
| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | | ✅ | | |
| `init` | `--bare` | ✅ | | |
| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh (private_key)](_examples/clone/auth/ssh/private_key/main.go) <br/> - [clone ssh (ssh_agent)](_examples/clone/auth/ssh/ssh_agent/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
## Basic snapshotting
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | |
| `status` | | ✅ | | |
| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
| `reset` | | ✅ | | |
| `rm` | | ✅ | | |
| `mv` | | ✅ | | |
## Branching and merging
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
| `merge` | | ⚠️ (partial) | Fast-forward only | |
| `mergetool` | | ❌ | | |
| `stash` | | ❌ | | |
| `sparse-checkout` | | ✅ | | - [sparse-checkout](_examples/sparse-checkout/main.go) |
| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
## Sharing and updating projects
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
| `fetch` | | ✅ | | |
| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
| `push` | | ✅ | | - [push](_examples/push/main.go) |
| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
| `submodule` | deinit | ❌ | | |
## Inspection and comparison
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | --------- | ----- | ------------------------------ |
| `show` | | ✅ | | |
| `log` | | ✅ | | - [log](_examples/log/main.go) |
| `shortlog` | | (see log) | | |
| `describe` | | ❌ | | |
## Patching
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
| `apply` | | ❌ | | |
| `cherry-pick` | | ❌ | | |
| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
| `rebase` | | ❌ | | |
| `revert` | | ❌ | | |
## Debugging
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ----- | ---------------------------------- |
| `bisect` | | ❌ | | |
| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
| `grep` | | ✅ | | |
## Email
| Feature | Sub-feature | Status | Notes | Examples |
| -------------- | ----------- | ------ | ----- | -------- |
| `am` | | ❌ | | |
| `apply` | | ❌ | | |
| `format-patch` | | ❌ | | |
| `send-email` | | ❌ | | |
| `request-pull` | | ❌ | | |
## External systems
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ----- | -------- |
| `svn` | | ❌ | | |
| `fast-import` | | ❌ | | |
| `lfs` | | ❌ | | |
## Administration
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ----------- | ------ | ----- | -------- |
| `clean` | | ✅ | | |
| `gc` | | ❌ | | |
| `fsck` | | ❌ | | |
| `reflog` | | ❌ | | |
| `filter-branch` | | ❌ | | |
| `instaweb` | | ❌ | | |
| `archive` | | ❌ | | |
| `bundle` | | ❌ | | |
| `prune` | | ❌ | | |
| `repack` | | ❌ | | |
## Server admin
| Feature | Sub-feature | Status | Notes | Examples |
| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
| `daemon` | | ❌ | | |
| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
## Advanced
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | ----------- | ----- | -------- |
| `notes` | | ❌ | | |
| `replace` | | ❌ | | |
| `worktree` | | ❌ | | |
| `annotate` | | (see blame) | | |
## GPG
| Feature | Sub-feature | Status | Notes | Examples |
| ------------------- | ----------- | ------ | ----- | -------- |
| `git-verify-commit` | | ✅ | | |
| `git-verify-tag` | | ✅ | | |
## Plumbing commands
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
| `cat-file` | | ✅ | | |
| `check-ignore` | | ❌ | | |
| `commit-tree` | | ❌ | | |
| `count-objects` | | ❌ | | |
| `diff-index` | | ❌ | | |
| `for-each-ref` | | ✅ | | |
| `hash-object` | | ✅ | | |
| `ls-files` | | ✅ | | |
| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
| `read-tree` | | ❌ | | |
| `rev-list` | | ✅ | | |
| `rev-parse` | | ❌ | | |
| `show-ref` | | ✅ | | |
| `symbolic-ref` | | ✅ | | |
| `update-index` | | ❌ | | |
| `update-ref` | | ❌ | | |
| `verify-pack` | | ❌ | | |
| `write-tree` | | ❌ | | |
## Indexes and Git Protocols
| Feature | Version | Status | Notes |
| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| cruft packs | | ❌ | |
## Capabilities
| Feature | Status | Notes |
| ------------------------------ | ------------ | ----- |
| `multi_ack` | ❌ | |
| `multi_ack_detailed` | ❌ | |
| `no-done` | ❌ | |
| `thin-pack` | ❌ | |
| `side-band` | ⚠️ (partial) | |
| `side-band-64k` | ⚠️ (partial) | |
| `ofs-delta` | ✅ | |
| `agent` | ✅ | |
| `object-format` | ❌ | |
| `symref` | ✅ | |
| `shallow` | ✅ | |
| `deepen-since` | ✅ | |
| `deepen-not` | ❌ | |
| `deepen-relative` | ❌ | |
| `no-progress` | ✅ | |
| `include-tag` | ✅ | |
| `report-status` | ✅ | |
| `report-status-v2` | ❌ | |
| `delete-refs` | ✅ | |
| `quiet` | ❌ | |
| `atomic` | ✅ | |
| `push-options` | ✅ | |
| `allow-tip-sha1-in-want` | ✅ | |
| `allow-reachable-sha1-in-want` | ❌ | |
| `push-cert=<nonce>` | ❌ | |
| `filter` | ❌ | |
| `session-id=<session id>` | ❌ | |
## Transport Schemes
| Scheme | Status | Notes | Examples |
| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
| `http(s)://` (dumb) | ❌ | | |
| `http(s)://` (smart) | ✅ | | |
| `git://` | ✅ | | |
| `ssh://` | ✅ | | |
| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
## SHA256
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
| `pull` | | ❌ | | |
| `fetch` | | ❌ | | |
| `push` | | ❌ | | |
## Other features
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
| `gitignore` | | ✅ | | |
| `gitattributes` | | ✅ | | |
| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |
# Contributing Guidelines
source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts
contributions via GitHub pull requests. This document outlines some of the
conventions on development workflow, commit message formatting, contact points,
and other resources to make it easier to get your contribution accepted.
## Support Channels
The official support channels, for both users and contributors, are:
- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions.
- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests.
*Before opening a new issue or submitting a new pull request, it's helpful to
search the project - it's likely that another user has already reported the
issue you're facing, or it's a known issue that we're already aware of.
## How to Contribute
Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project.
In order for a PR to be accepted it needs to pass a list of requirements:
- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation.
- The expected behavior must match the [official git implementation](https://github.com/git/git).
- The actual behavior must be correctly explained in natural language and accompanied by a minimal working example in Go that reproduces it.
- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
- They should in general include tests, and those shall pass.
- If the PR is a bug fix, it has to include unit tests covering the fixed behaviour.
- If the PR is a new feature, it has to come with a suite of unit tests that exercise the new functionality.
- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
### Branches
The `master` branch is currently used for maintaining the `v5` major release only. The accepted changes would
be dependency bumps, bug fixes and small changes that aren't needed for `v6`. New development should target the
`v6-exp` branch, and if agreed with at least one go-git maintainer, it can be backported to `v5` by creating
a new PR that targets `master`.
### Format of the commit message
Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
```
plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
```
The format can be described more formally as follows:
```
<package>: <subpackage>, <what changed>. [Fixes #<issue-number>]
```
# Extending go-git
`go-git` was built in a highly extensible manner, which allows some of its functionality to be changed or extended without modifying its codebase. Here are the key extensibility features:
## Dot Git Storers
Dot git storers are the components responsible for storing the Git internal files, including objects and references.
The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer keeps all the data in memory, and its use looks like this:
```go
r, err := git.Init(memory.NewStorage(), nil)
```
The `filesystem` storer stores the data in the OS filesystem, and can be used as follows:
```go
r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo")), nil)
```
New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
## Filesystem
Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows:
```go
fs := memfs.New()
r, err := git.Init(memory.NewStorage(), fs)
```
The same operation can be done against the OS filesystem:
```go
fs := osfs.New("/tmp/foo")
r, err := git.Init(memory.NewStorage(), fs)
```
New filesystems (e.g. cloud based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
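As a minimal sketch (the `readOnly` wrapper below is illustrative and not part of `go-billy`), a new filesystem can embed an existing implementation and override only the methods whose behaviour needs to change:

```go
// readOnly rejects writes while delegating everything else to the embedded
// filesystem. A complete implementation would also guard OpenFile, Remove,
// Rename, and other mutating methods.
type readOnly struct {
	billy.Filesystem
}

func (fs readOnly) Create(filename string) (billy.File, error) {
	return nil, billy.ErrNotSupported
}

fs := readOnly{Filesystem: memfs.New()}
```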
## Transport Schemes
Git supports various transport schemes, including `http`, `https`, `ssh`, `git`, `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
The built-in implementations can be replaced by calling `client.InstallProtocol`.
An example of changing the built-in `https` implementation to skip TLS could look like this:
```go
customClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
client.InstallProtocol("https", githttp.NewClient(customClient))
```
Some internal implementations enable code reuse amongst the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
## Cache
Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the `cache.Object` interface.
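As a rough sketch (assuming the v5 `plumbing/cache` constructors and the v5 signature of `filesystem.NewStorage`, which accepts a cache; the 96 MiB limit is purely illustrative), a custom-sized LRU cache can be wired into a filesystem storer like this:

```go
fs := osfs.New("/tmp/foo")
objCache := cache.NewObjectLRU(96 * cache.MiByte) // illustrative size limit
r, err := git.Init(filesystem.NewStorage(fs, objCache), nil)
```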
## Hash
`go-git` uses Go's standard `crypto.Hash` values to identify hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/sha256`.
The default hash functions can be changed by calling `hash.RegisterHash`.
```go
func init() {
hash.RegisterHash(crypto.SHA1, sha1.New)
}
```
New `SHA1` or `SHA256` hash functions that implement the standard `hash.Hash` interface can be registered by calling `hash.RegisterHash`.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Sourced Technologies, S.L.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# General
WORKDIR = $(PWD)
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
# Git config
GIT_VERSION ?=
GIT_DIST_PATH ?= $(PWD)/.git-dist
GIT_REPOSITORY = http://github.com/git/git.git
# Coverage
COVERAGE_REPORT = coverage.out
COVERAGE_MODE = count
build-git:
@if [ -f $(GIT_DIST_PATH)/git ]; then \
echo "nothing to do, using cache $(GIT_DIST_PATH)"; \
else \
git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \
cd $(GIT_DIST_PATH); \
make configure; \
./configure; \
make all; \
fi
test:
@echo "running against `git version`"; \
$(GOTEST) -race ./...
$(GOTEST) -v _examples/common_test.go _examples/common.go --examples
TEMP_REPO := $(shell mktemp)
test-sha256:
$(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
cd $(TEMP_REPO) && git fsck
rm -rf $(TEMP_REPO)
test-coverage:
@echo "running against `git version`"; \
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
fuzz:
@go test -fuzz=FuzzParser $(PWD)/internal/revision
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
@go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
@go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
@go test -fuzz=FuzzDecode $(PWD)/plumbing/object
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
@go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport
![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
*go-git* is a highly extensible git implementation library written in **pure Go**.
It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
It has been actively developed since 2015 and is used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/), and [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), as well as by many other libraries and tools.
Project Status
--------------
After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of update for four months and the requirement to make a hard fork, the project is **now back to normality**.
The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale.
Comparison with git
-------------------
*go-git* aims to be fully compatible with [git](https://github.com/git/git), all the *porcelain* operations are implemented to work exactly as *git* does.
*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
Installation
------------
The recommended way to install *go-git* is:
```go
import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/go-git/go-git" // with go modules disabled
```
Examples
--------
> Please note that the `CheckIfError` and `Info` functions used below come from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19) and exist purely to keep the examples concise.
### Basic example
A basic example that mimics the standard `git clone` command
```go
// Clone the given repository to the given directory
Info("git clone https://github.com/go-git/go-git")
_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{
URL: "https://github.com/go-git/go-git",
Progress: os.Stdout,
})
CheckIfError(err)
```
Outputs:
```
Counting objects: 4924, done.
Compressing objects: 100% (1333/1333), done.
Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533
```
### In-memory example
Cloning a repository into memory and printing the history of HEAD, just like `git log` does
```go
// Clones the given repository in memory, creating the remote, the local
// branches and fetching the objects, exactly as:
Info("git clone https://github.com/go-git/go-billy")
r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
URL: "https://github.com/go-git/go-billy",
})
CheckIfError(err)
// Gets the commit history from HEAD, just like this command:
Info("git log")
// ... retrieves the branch pointed by HEAD
ref, err := r.Head()
CheckIfError(err)
// ... retrieves the commit history
cIter, err := r.Log(&git.LogOptions{From: ref.Hash()})
CheckIfError(err)
// ... just iterates over the commits, printing each one
err = cIter.ForEach(func(c *object.Commit) error {
fmt.Println(c)
return nil
})
CheckIfError(err)
```
Outputs:
```
commit ded8054fd0c3994453e9c8aacaf48d118d42991e
Author: Santiago M. Mola <santi@mola.io>
Date: Sat Nov 12 21:18:41 2016 +0100
index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9)
commit df707095626f384ce2dc1a83b30f9a21d69b9dfc
Author: Santiago M. Mola <santi@mola.io>
Date: Fri Nov 11 13:23:22 2016 +0100
readwriter: fix bug when writing index. (#10)
When using ReadWriter on an existing siva file, absolute offset for
index entries was not being calculated correctly.
...
```
You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
Contribute
----------
[Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome, if you are interested please take a look to
our [Contributing Guidelines](CONTRIBUTING.md).
License
-------
Apache License Version 2.0, see [LICENSE](LICENSE)
# go-git Security Policy
The purpose of this security policy is to outline `go-git`'s process
for reporting, handling and disclosing security sensitive information.
## Supported Versions
The project follows a version support policy where only the latest minor
release is actively supported. Therefore, only issues that impact the latest
minor release will be fixed. Users are encouraged to upgrade to the latest
minor/patch release to benefit from the most up-to-date features, bug fixes,
and security enhancements.
The supported versions policy applies to both the `go-git` library and its
associated repositories within the `go-git` org.
## Reporting Security Issues
Please report any security vulnerabilities or potential weaknesses in `go-git`
privately via go-git-security@googlegroups.com. Do not publicly disclose the
details of the vulnerability until a fix has been implemented and released.
During the process the project maintainers will investigate the report, so please
provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
The project maintainers will acknowledge the receipt of the report and work with
the reporter to validate and address the issue.
Please note that `go-git` does not have any bounty programs and therefore does not provide financial compensation for disclosures.
## Security Disclosure Process
The project maintainers will make every effort to promptly address security issues.
Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.
package git
import (
"bytes"
"container/heap"
"errors"
"fmt"
"io"
"strconv"
"time"
"unicode/utf8"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/utils/diff"
"github.com/sergi/go-diff/diffmatchpatch"
)
// BlameResult represents the result of a Blame operation.
type BlameResult struct {
// Path is the path of the File that we're blaming.
Path string
// Rev (Revision) is the hash of the specified Commit used to generate this result.
Rev plumbing.Hash
// Lines contains every line with its authorship.
Lines []*Line
}
// Blame returns a BlameResult with the information about the last author of
// each line from file `path` at commit `c`.
func Blame(c *object.Commit, path string) (*BlameResult, error) {
// The file to blame is identified by the input arguments:
// commit and path. commit is a Commit object obtained from a Repository. Path
// represents a path to a specific file contained in the repository.
//
// Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
//
// When a diff is found it cannot immediately be attributed to that commit, as the change may have come from one of
// its parents. The algorithm first tries to resolve those diffs against the parents, and only if the change cannot
// be found in any parent is it assigned to the commit itself.
//
// When encountering 2 parents that have made the same change to a file it will choose the parent that was merged
// into the current branch first (this is determined by the order of the parents inside the commit).
//
// This currently works on a line by line basis, if performance becomes an issue it could be changed to work with
// hunks rather than lines. Then when encountering diff hunks it would need to split them where necessary.
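//
// A typical call (sketch, assuming a *git.Repository named repo and a resolved HEAD reference ref) looks like:
//
//	commit, err := repo.CommitObject(ref.Hash())
//	// handle err
//	result, err := Blame(commit, "README.md")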
b := new(blame)
b.fRev = c
b.path = path
b.q = new(priorityQueue)
file, err := b.fRev.File(path)
if err != nil {
return nil, err
}
finalLines, err := file.Lines()
if err != nil {
return nil, err
}
finalLength := len(finalLines)
needsMap := make([]lineMap, finalLength)
for i := range needsMap {
needsMap[i] = lineMap{i, i, nil, -1}
}
contents, err := file.Contents()
if err != nil {
return nil, err
}
b.q.Push(&queueItem{
nil,
nil,
c,
path,
contents,
needsMap,
0,
false,
0,
})
items := make([]*queueItem, 0)
for {
items = items[:0]
for {
if b.q.Len() == 0 {
return nil, errors.New("invalid state: no items left on the blame queue")
}
item := b.q.Pop()
items = append(items, item)
next := b.q.Peek()
if next == nil || next.Hash != item.Commit.Hash {
break
}
}
finished, err := b.addBlames(items)
if err != nil {
return nil, err
}
if finished {
break
}
}
b.lineToCommit = make([]*object.Commit, finalLength)
for i := range needsMap {
b.lineToCommit[i] = needsMap[i].Commit
}
lines, err := newLines(finalLines, b.lineToCommit)
if err != nil {
return nil, err
}
return &BlameResult{
Path: path,
Rev: c.Hash,
Lines: lines,
}, nil
}
// Line values represent the contents and author of a line in BlamedResult values.
type Line struct {
// Author is the email address of the last author that modified the line.
Author string
// AuthorName is the name of the last author that modified the line.
AuthorName string
// Text is the original text of the line.
Text string
// Date is when the original text of the line was introduced
Date time.Time
// Hash is the commit hash that introduced the original line
Hash plumbing.Hash
}
func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line {
return &Line{
Author: author,
AuthorName: authorName,
Text: text,
Hash: hash,
Date: date,
}
}
func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
result := make([]*Line, 0, len(contents))
for i := range contents {
result = append(result, newLine(
commits[i].Author.Email, commits[i].Author.Name, contents[i],
commits[i].Author.When, commits[i].Hash,
))
}
return result, nil
}
// this struct is internally used by the blame function to hold its
// inputs, outputs and state.
type blame struct {
// the path of the file to blame
path string
// the commit of the final revision of the file to blame
fRev *object.Commit
// resolved lines
lineToCommit []*object.Commit
// queue of commits that need resolving
q *priorityQueue
}
type lineMap struct {
Orig, Cur int
Commit *object.Commit
FromParentNo int
}
func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
curItem := curItems[0]
	// Simple optimisation to merge paths; there is potential to go a bit further here and check for any
	// duplicates, not only when they are all the same.
if len(curItems) == 1 {
curItems = nil
} else if curItem.IdenticalToChild {
allSame := true
lenCurItems := len(curItems)
lowestParentNo := curItem.ParentNo
for i := 1; i < lenCurItems; i++ {
if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
allSame = false
break
}
lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
}
if allSame {
curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
curItems = nil // free the memory
curItem.ParentNo = lowestParentNo
// Now check if we can remove the parent completely
for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
oldChild := curItem.Child
curItem.Child = oldChild.Child
curItem.ParentNo = oldChild.ParentNo
}
}
}
// if we have more than 1 item for this commit, create a single needsMap
if len(curItems) > 1 {
curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
for i, c := range curItems {
curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
}
newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
for i := 1; i < len(curItems); i++ {
cur := curItems[i].NeedsMap
n := 0 // position in newNeedsMap
c := 0 // position in current list
for c < len(cur) {
if n == len(newNeedsMap) {
newNeedsMap = append(newNeedsMap, cur[c:]...)
break
} else if newNeedsMap[n].Cur == cur[c].Cur {
n++
c++
} else if newNeedsMap[n].Cur < cur[c].Cur {
n++
} else {
newNeedsMap = append(newNeedsMap, cur[c])
newPos := len(newNeedsMap) - 1
for newPos > n {
newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
newPos--
}
}
}
}
curItem.NeedsMap = newNeedsMap
curItem.IdenticalToChild = false
curItem.Child = nil
curItems = nil // free the memory
}
parents, err := parentsContainingPath(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
anyPushed := false
	for parentNo, prev := range parents {
currentHash, err := blobHash(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
prevHash, err := blobHash(prev.Path, prev.Commit)
if err != nil {
return false, err
}
if currentHash == prevHash {
if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
// commit that has 1 parent and 1 child and is the same as both, bypass it completely
b.q.Push(&queueItem{
Child: curItem.Child,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
IdenticalToChild: true,
ParentNo: curItem.ParentNo,
})
} else {
b.q.Push(&queueItem{
Child: curItem,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
IdenticalToChild: true,
					ParentNo:         parentNo,
})
curItem.numParentsNeedResolving++
}
anyPushed = true
continue
}
// get the contents of the file
file, err := prev.Commit.File(prev.Path)
if err != nil {
return false, err
}
prevContents, err := file.Contents()
if err != nil {
return false, err
}
hunks := diff.Do(prevContents, curItem.Contents)
prevl := -1
curl := -1
need := 0
getFromParent := make([]lineMap, 0)
out:
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch hunks[h].Type {
case diffmatchpatch.DiffEqual:
prevl++
curl++
if curl == curItem.NeedsMap[need].Cur {
// add to needs
getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
// move to next need
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case diffmatchpatch.DiffInsert:
curl++
if curl == curItem.NeedsMap[need].Cur {
						// the line we want was added; it may have been added here (or by another parent), so skip it for now
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case diffmatchpatch.DiffDelete:
prevl += hLines
continue out
default:
return false, errors.New("invalid state: invalid hunk Type")
}
}
}
if len(getFromParent) > 0 {
b.q.Push(&queueItem{
curItem,
nil,
prev.Commit,
prev.Path,
prevContents,
getFromParent,
0,
false,
				parentNo,
})
curItem.numParentsNeedResolving++
anyPushed = true
}
}
curItem.Contents = "" // no longer need, free the memory
if !anyPushed {
return finishNeeds(curItem)
}
return false, nil
}
func finishNeeds(curItem *queueItem) (bool, error) {
// any needs left in the needsMap must have come from this revision
for i := range curItem.NeedsMap {
if curItem.NeedsMap[i].Commit == nil {
curItem.NeedsMap[i].Commit = curItem.Commit
curItem.NeedsMap[i].FromParentNo = -1
}
}
if curItem.Child == nil && curItem.MergedChildren == nil {
return true, nil
}
if curItem.MergedChildren == nil {
return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
}
for _, ctn := range curItem.MergedChildren {
m := 0 // position in merged needs map
p := 0 // position in parent needs map
for p < len(ctn.NeedsMap) {
if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
m++
p++
} else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
p++
} else {
m++
}
}
finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
if identicalToChild {
for i := range child.NeedsMap {
l := &child.NeedsMap[i]
if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
return false, errors.New("needsMap isn't the same? Why not??")
}
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
} else {
i := 0
out:
for j := range child.NeedsMap {
l := &child.NeedsMap[j]
for needsMap[i].Orig < l.Cur {
i++
if i == len(needsMap) {
break out
}
}
if l.Cur == needsMap[i].Orig {
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
}
}
child.numParentsNeedResolving--
if child.numParentsNeedResolving == 0 {
finished, err := finishNeeds(child)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
// String returns the results of a Blame formatted in git-blame's output style.
func (b BlameResult) String() string {
var buf bytes.Buffer
// max line number length
mlnl := len(strconv.Itoa(len(b.Lines)))
// max author length
mal := b.maxAuthorLength()
format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
for ln := range b.Lines {
_, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
}
return buf.String()
}
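// For illustration only (the hash, author and date below are hypothetical
// values, not taken from any real repository), a line produced by String
// looks roughly like:
//
//	6ecf0ef2 (John Doe 2023-04-01 10:20:30 +0000  1) package main
//
// i.e. the abbreviated commit hash, the author name padded to the longest
// author name in the file, the author date, the right-aligned 1-based line
// number and the line text.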
// utility function to calculate the number of runes needed
// to print the longest author name in the blame of a file.
func (b BlameResult) maxAuthorLength() int {
m := 0
for ln := range b.Lines {
m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
}
return m
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
type childToNeedsMap struct {
Child *queueItem
NeedsMap []lineMap
IdenticalToChild bool
ParentNo int
}
type queueItem struct {
Child *queueItem
MergedChildren []childToNeedsMap
Commit *object.Commit
path string
Contents string
NeedsMap []lineMap
numParentsNeedResolving int
IdenticalToChild bool
ParentNo int
}
type priorityQueueImp []*queueItem
func (pq *priorityQueueImp) Len() int { return len(*pq) }
func (pq *priorityQueueImp) Less(i, j int) bool {
return !(*pq)[i].Commit.Less((*pq)[j].Commit)
}
func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) }
func (pq *priorityQueueImp) Pop() any {
n := len(*pq)
ret := (*pq)[n-1]
	(*pq)[n-1] = nil // avoid memory leak
*pq = (*pq)[0 : n-1]
return ret
}
func (pq *priorityQueueImp) Peek() *object.Commit {
if len(*pq) == 0 {
return nil
}
return (*pq)[0].Commit
}
type priorityQueue priorityQueueImp
func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) }
func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
func (pq *priorityQueue) Push(c *queueItem) {
heap.Push((*priorityQueueImp)(pq), c)
}
func (pq *priorityQueue) Pop() *queueItem {
return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
}
func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
type parentCommit struct {
Commit *object.Commit
Path string
}
func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
// TODO: benchmark this method making git.object.Commit.parent public instead of using
// an iterator
var result []parentCommit
iter := c.Parents()
for {
parent, err := iter.Next()
if err == io.EOF {
return result, nil
}
if err != nil {
return nil, err
}
if _, err := parent.File(path); err == nil {
result = append(result, parentCommit{parent, path})
} else {
// look for renames
patch, err := parent.Patch(c)
if err != nil {
return nil, err
} else if patch != nil {
for _, fp := range patch.FilePatches() {
from, to := fp.Files()
if from != nil && to != nil && to.Path() == path {
result = append(result, parentCommit{parent, from.Path()})
break
}
}
}
}
}
}
func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
file, err := commit.File(path)
if err != nil {
return plumbing.ZeroHash, err
}
return file.Hash, nil
}
package git
import "strings"
// countLines returns the number of lines in a string à la git. The newline
// character is assumed to be '\n'. The empty string contains 0 lines. If the
// last line of the string doesn't end with a newline, it is still considered
// a line.
func countLines(s string) int {
if s == "" {
return 0
}
nEOL := strings.Count(s, "\n")
if strings.HasSuffix(s, "\n") {
return nEOL
}
return nEOL + 1
}
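// countLinesSketch is a minimal illustrative sketch (not part of the package
// API) recording the expected results of countLines for a few representative
// inputs.
func countLinesSketch() {
	_ = countLines("")       // 0: the empty string contains no lines
	_ = countLines("a\nb\n") // 2: the trailing newline does not add a line
	_ = countLines("a\nb")   // 2: an unterminated last line still counts
}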
package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
)
var (
errBranchEmptyName = errors.New("branch config: empty name")
errBranchInvalidMerge = errors.New("branch config: invalid merge")
errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
)
// Branch contains information on a local
// branch and which remote it tracks
type Branch struct {
// Name of branch
Name string
// Remote name of remote to track
Remote string
// Merge is the local refspec for the branch
Merge plumbing.ReferenceName
// Rebase instead of merge when pulling. Valid values are
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
// Validate validates fields of branch
func (b *Branch) Validate() error {
if b.Name == "" {
return errBranchEmptyName
}
if b.Merge != "" && !b.Merge.IsBranch() {
return errBranchInvalidMerge
}
if b.Rebase != "" &&
b.Rebase != "true" &&
b.Rebase != "interactive" &&
b.Rebase != "false" {
return errBranchInvalidRebase
}
return plumbing.NewBranchReferenceName(b.Name).Validate()
}
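// branchConfigSketch is a minimal illustrative sketch (not part of the
// package API) of a Branch value that passes Validate; the branch name "main"
// and remote name "origin" are assumptions made purely for illustration.
func branchConfigSketch() error {
	b := &Branch{
		Name:   "main",
		Remote: "origin",
		Merge:  plumbing.NewBranchReferenceName("main"),
		Rebase: "true",
	}
	return b.Validate()
}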
func (b *Branch) marshal() *format.Subsection {
if b.raw == nil {
b.raw = &format.Subsection{}
}
b.raw.Name = b.Name
if b.Remote == "" {
b.raw.RemoveOption(remoteSection)
} else {
b.raw.SetOption(remoteSection, b.Remote)
}
if b.Merge == "" {
b.raw.RemoveOption(mergeKey)
} else {
b.raw.SetOption(mergeKey, string(b.Merge))
}
if b.Rebase == "" {
b.raw.RemoveOption(rebaseKey)
} else {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
// quoteDescription is a hack to trigger conditional quoting in
// plumbing/format/config/Encoder.encodeOptions.
//
// The current Encoder implementation uses Go's %q format when a value contains a backslash character,
// which is not consistent with the reference git implementation: git simply replaces newline characters
// with \n, while the Encoder prints them directly. Until that quoting behaviour is fixed, the description
// value is escaped here by replacing newline characters with \n.
func quoteDescription(desc string) string {
return strings.ReplaceAll(desc, "\n", `\n`)
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
b.Name = b.raw.Name
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return b.Validate()
}
// unquoteDescription reverses the escaping performed by quoteDescription for
// the plumbing/format/config Encoder; see quoteDescription for details.
func unquoteDescription(desc string) string {
return strings.ReplaceAll(desc, `\n`, "\n")
}
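// descriptionEscapingSketch is a minimal illustrative sketch (not part of the
// package API): it escapes a multi-line branch description with
// quoteDescription and restores it with unquoteDescription; the description
// text is an assumption made purely for illustration.
func descriptionEscapingSketch() bool {
	original := "first line\nsecond line"
	escaped := quoteDescription(original) // the newline becomes the two characters '\' and 'n'
	return unquoteDescription(escaped) == original
}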
// Package config contains the abstraction of multiple config files
package config
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
)
const (
// DefaultFetchRefSpec is the default refspec used for fetch.
DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*"
// DefaultPushRefSpec is the default refspec used for push.
DefaultPushRefSpec = "refs/heads/*:refs/heads/*"
)
// ConfigStorer is a generic storage for Config objects.
type ConfigStorer interface {
Config() (*Config, error)
SetConfig(*Config) error
}
var (
ErrInvalid = errors.New("config invalid key in remote or branch")
ErrRemoteConfigNotFound = errors.New("remote config not found")
ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL")
ErrRemoteConfigEmptyName = errors.New("remote config: empty name")
)
// Scope defines the scope of a config file, such as local, global or system.
type Scope int
// Available Scope values
const (
LocalScope Scope = iota
GlobalScope
SystemScope
)
// Config contains the repository configuration
// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
type Config struct {
Core struct {
		// IsBare, if true, indicates that this repository is assumed to be bare
		// and has no working directory associated with it.
IsBare bool
// Worktree is the path to the root of the working tree.
Worktree string
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
// RepositoryFormatVersion identifies the repository format and layout version.
RepositoryFormatVersion format.RepositoryFormatVersion
}
User struct {
// Name is the personal name of the author and the committer of a commit.
Name string
// Email is the email of the author and the committer of a commit.
Email string
}
Author struct {
// Name is the personal name of the author of a commit.
Name string
// Email is the email of the author of a commit.
Email string
}
Committer struct {
// Name is the personal name of the committer of a commit.
Name string
// Email is the email of the committer of a commit.
Email string
}
Pack struct {
// Window controls the size of the sliding window for delta
// compression. The default is 10. A value of 0 turns off
// delta compression entirely.
Window uint
}
Init struct {
		// DefaultBranch allows overriding the default branch name,
// e.g. when initializing a new repository or when cloning
// an empty repository.
DefaultBranch string
}
Extensions struct {
// ObjectFormat specifies the hash algorithm to use. The
// acceptable values are sha1 and sha256. If not specified,
// sha1 is assumed. It is an error to specify this key unless
// core.repositoryFormatVersion is 1.
//
// This setting must not be changed after repository initialization
// (e.g. clone or init).
ObjectFormat format.ObjectFormat
}
	// Remotes is the list of repository remotes; the key of the map is the
	// name of the remote and should equal RemoteConfig.Name.
	Remotes map[string]*RemoteConfig
	// Submodules is the list of repository submodules; the key of the map is
	// the name of the submodule and should equal Submodule.Name.
	Submodules map[string]*Submodule
	// Branches is the list of branches; the key is the branch name and should
	// equal Branch.Name.
	Branches map[string]*Branch
	// URLs is the list of URL rewrite rules; if a repository URL starts with a
	// URL.InsteadOf value, it is replaced with the corresponding map key.
	URLs map[string]*URL
	// Raw contains the raw information of a config file. The main goal is to
	// preserve the parsed information from the original format, to avoid
	// dropping unsupported fields.
Raw *format.Config
}
// NewConfig returns a new empty Config.
func NewConfig() *Config {
config := &Config{
Remotes: make(map[string]*RemoteConfig),
Submodules: make(map[string]*Submodule),
Branches: make(map[string]*Branch),
URLs: make(map[string]*URL),
Raw: format.New(),
}
config.Pack.Window = DefaultPackWindow
return config
}
// ReadConfig reads a config file from a io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
cfg := NewConfig()
if err = cfg.Unmarshal(b); err != nil {
return nil, err
}
return cfg, nil
}
// LoadConfig loads a config file from a given scope. The returned Config
// contains exclusively information from that scope. If no config file is
// found for the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
		return nil, fmt.Errorf("LocalScope should be read from a ConfigStorer")
}
files, err := Paths(scope)
if err != nil {
return nil, err
}
for _, file := range files {
f, err := osfs.Default.Open(file)
if err != nil {
if os.IsNotExist(err) {
continue
}
return nil, err
}
defer f.Close()
return ReadConfig(f)
}
return NewConfig(), nil
}
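// loadGlobalConfigSketch is a minimal illustrative sketch (not part of the
// package API): it loads the user's global configuration; when no config file
// exists for the scope, LoadConfig returns an empty Config rather than an
// error.
func loadGlobalConfigSketch() (string, error) {
	cfg, err := LoadConfig(GlobalScope)
	if err != nil {
		return "", err
	}
	return cfg.User.Email, nil
}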
// Paths returns the config file locations for a given scope.
func Paths(scope Scope) ([]string, error) {
var files []string
switch scope {
case GlobalScope:
xdg := os.Getenv("XDG_CONFIG_HOME")
if xdg != "" {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
files = append(files,
filepath.Join(home, ".gitconfig"),
filepath.Join(home, ".config/git/config"),
)
case SystemScope:
files = append(files, "/etc/gitconfig")
}
return files, nil
}
// Validate validates the fields and sets the default values.
func (c *Config) Validate() error {
for name, r := range c.Remotes {
if r.Name != name {
return ErrInvalid
}
if err := r.Validate(); err != nil {
return err
}
}
for name, b := range c.Branches {
if b.Name != name {
return ErrInvalid
}
if err := b.Validate(); err != nil {
return err
}
}
return nil
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
pushurlKey = "pushurl"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
repositoryFormatVersionKey = "repositoryformatversion"
objectFormat = "objectformat"
mirrorKey = "mirror"
	// DefaultPackWindow holds the number of previous objects used to
	// generate deltas. The value 10 is the same one used by the git command.
DefaultPackWindow = uint(10)
)
// Unmarshal parses a git-config file and stores it.
func (c *Config) Unmarshal(b []byte) error {
r := bytes.NewBuffer(b)
d := format.NewDecoder(r)
c.Raw = format.New()
if err := d.Decode(c.Raw); err != nil {
return err
}
c.unmarshalCore()
c.unmarshalUser()
c.unmarshalInit()
if err := c.unmarshalPack(); err != nil {
return err
}
unmarshalSubmodules(c.Raw, c.Submodules)
if err := c.unmarshalBranches(); err != nil {
return err
}
if err := c.unmarshalURLs(); err != nil {
return err
}
return c.unmarshalRemotes()
}
func (c *Config) unmarshalCore() {
s := c.Raw.Section(coreSection)
if s.Options.Get(bareKey) == "true" {
c.Core.IsBare = true
}
c.Core.Worktree = s.Options.Get(worktreeKey)
c.Core.CommentChar = s.Options.Get(commentCharKey)
}
func (c *Config) unmarshalUser() {
s := c.Raw.Section(userSection)
c.User.Name = s.Options.Get(nameKey)
c.User.Email = s.Options.Get(emailKey)
s = c.Raw.Section(authorSection)
c.Author.Name = s.Options.Get(nameKey)
c.Author.Email = s.Options.Get(emailKey)
s = c.Raw.Section(committerSection)
c.Committer.Name = s.Options.Get(nameKey)
c.Committer.Email = s.Options.Get(emailKey)
}
func (c *Config) unmarshalPack() error {
s := c.Raw.Section(packSection)
window := s.Options.Get(windowKey)
if window == "" {
c.Pack.Window = DefaultPackWindow
} else {
winUint, err := strconv.ParseUint(window, 10, 32)
if err != nil {
return err
}
c.Pack.Window = uint(winUint)
}
return nil
}
func (c *Config) unmarshalRemotes() error {
s := c.Raw.Section(remoteSection)
for _, sub := range s.Subsections {
r := &RemoteConfig{}
if err := r.unmarshal(sub); err != nil {
return err
}
c.Remotes[r.Name] = r
}
// Apply insteadOf url rules
for _, r := range c.Remotes {
r.applyURLRules(c.URLs)
}
return nil
}
func (c *Config) unmarshalURLs() error {
s := c.Raw.Section(urlSection)
for _, sub := range s.Subsections {
r := &URL{}
if err := r.unmarshal(sub); err != nil {
return err
}
c.URLs[r.Name] = r
}
return nil
}
func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) {
s := fc.Section(submoduleSection)
for _, sub := range s.Subsections {
m := &Submodule{}
m.unmarshal(sub)
if m.Validate() == ErrModuleBadPath {
continue
}
submodules[m.Name] = m
}
}
func (c *Config) unmarshalBranches() error {
bs := c.Raw.Section(branchSection)
for _, sub := range bs.Subsections {
b := &Branch{}
if err := b.unmarshal(sub); err != nil {
return err
}
c.Branches[b.Name] = b
}
return nil
}
func (c *Config) unmarshalInit() {
s := c.Raw.Section(initSection)
c.Init.DefaultBranch = s.Options.Get(defaultBranchKey)
}
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalExtensions()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
c.marshalSubmodules()
c.marshalBranches()
c.marshalURLs()
c.marshalInit()
buf := bytes.NewBuffer(nil)
if err := format.NewEncoder(buf).Encode(c.Raw); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
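// marshalRoundTripSketch is a minimal illustrative sketch (not part of the
// package API): it builds a Config in memory, encodes it to git-config text
// with Marshal and decodes it back with Unmarshal; the user name and email
// are assumptions made purely for illustration.
func marshalRoundTripSketch() error {
	cfg := NewConfig()
	cfg.User.Name = "Jane Doe"
	cfg.User.Email = "jane@example.com"
	out, err := cfg.Marshal()
	if err != nil {
		return err
	}
	decoded := NewConfig()
	return decoded.Unmarshal(out)
}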
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if string(c.Core.RepositoryFormatVersion) != "" {
s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
}
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalExtensions() {
	// Extensions are only supported for repository format version 1 and are
	// ignored otherwise.
if c.Core.RepositoryFormatVersion == format.Version_1 {
s := c.Raw.Section(extensionsSection)
s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
s.SetOption(nameKey, c.User.Name)
}
if c.User.Email != "" {
s.SetOption(emailKey, c.User.Email)
}
s = c.Raw.Section(authorSection)
if c.Author.Name != "" {
s.SetOption(nameKey, c.Author.Name)
}
if c.Author.Email != "" {
s.SetOption(emailKey, c.Author.Email)
}
s = c.Raw.Section(committerSection)
if c.Committer.Name != "" {
s.SetOption(nameKey, c.Committer.Name)
}
if c.Committer.Email != "" {
s.SetOption(emailKey, c.Committer.Email)
}
}
func (c *Config) marshalPack() {
s := c.Raw.Section(packSection)
if c.Pack.Window != DefaultPackWindow {
s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window))
}
}
func (c *Config) marshalRemotes() {
s := c.Raw.Section(remoteSection)
newSubsections := make(format.Subsections, 0, len(c.Remotes))
added := make(map[string]bool)
for _, subsection := range s.Subsections {
if remote, ok := c.Remotes[subsection.Name]; ok {
newSubsections = append(newSubsections, remote.marshal())
added[subsection.Name] = true
}
}
remoteNames := make([]string, 0, len(c.Remotes))
for name := range c.Remotes {
remoteNames = append(remoteNames, name)
}
sort.Strings(remoteNames)
for _, name := range remoteNames {
if !added[name] {
newSubsections = append(newSubsections, c.Remotes[name].marshal())
}
}
s.Subsections = newSubsections
}
func (c *Config) marshalSubmodules() {
s := c.Raw.Section(submoduleSection)
s.Subsections = make(format.Subsections, len(c.Submodules))
var i int
for _, r := range c.Submodules {
section := r.marshal()
		// the submodule section in the config is a subset of the .gitmodules file;
		// options that are not valid for the config file are removed here.
section.RemoveOption(pathKey)
s.Subsections[i] = section
i++
}
}
func (c *Config) marshalBranches() {
s := c.Raw.Section(branchSection)
newSubsections := make(format.Subsections, 0, len(c.Branches))
added := make(map[string]bool)
for _, subsection := range s.Subsections {
if branch, ok := c.Branches[subsection.Name]; ok {
newSubsections = append(newSubsections, branch.marshal())
added[subsection.Name] = true
}
}
branchNames := make([]string, 0, len(c.Branches))
for name := range c.Branches {
branchNames = append(branchNames, name)
}
sort.Strings(branchNames)
for _, name := range branchNames {
if !added[name] {
newSubsections = append(newSubsections, c.Branches[name].marshal())
}
}
s.Subsections = newSubsections
}
func (c *Config) marshalURLs() {
s := c.Raw.Section(urlSection)
s.Subsections = make(format.Subsections, len(c.URLs))
var i int
for _, r := range c.URLs {
section := r.marshal()
s.Subsections[i] = section
i++
}
}
func (c *Config) marshalInit() {
s := c.Raw.Section(initSection)
if c.Init.DefaultBranch != "" {
s.SetOption(defaultBranchKey, c.Init.DefaultBranch)
}
}
// RemoteConfig contains the configuration for a given remote repository.
type RemoteConfig struct {
// Name of the remote
Name string
	// URLs are the URLs of a remote repository. The list must be non-empty.
	// Fetch will always use the first URL, while push will use all of them.
URLs []string
// Mirror indicates that the repository is a mirror of remote.
Mirror bool
// insteadOfRulesApplied have urls been modified
insteadOfRulesApplied bool
// originalURLs are the urls before applying insteadOf rules
originalURLs []string
// Fetch the default set of "refspec" for fetch operation
Fetch []RefSpec
	// raw representation of the subsection, filled when marshal or unmarshal
	// is called
raw *format.Subsection
}
// Validate validates the fields and sets the default values.
func (c *RemoteConfig) Validate() error {
if c.Name == "" {
return ErrRemoteConfigEmptyName
}
if len(c.URLs) == 0 {
return ErrRemoteConfigEmptyURL
}
for _, r := range c.Fetch {
if err := r.Validate(); err != nil {
return err
}
}
if len(c.Fetch) == 0 {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
}
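// remoteValidateSketch is a minimal illustrative sketch (not part of the
// package API): it shows that Validate fills in the default fetch refspec
// ("+refs/heads/*:refs/remotes/origin/*") when none is configured; the remote
// name and URL are assumptions made purely for illustration.
func remoteValidateSketch() ([]RefSpec, error) {
	r := &RemoteConfig{
		Name: "origin",
		URLs: []string{"https://example.com/repo.git"},
	}
	if err := r.Validate(); err != nil {
		return nil, err
	}
	return r.Fetch, nil // now holds the default fetch refspec
}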
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.raw = s
fetch := []RefSpec{}
for _, f := range c.raw.Options.GetAll(fetchKey) {
rs := RefSpec(f)
if err := rs.Validate(); err != nil {
return err
}
fetch = append(fetch, rs)
}
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.URLs = append(c.URLs, c.raw.Options.GetAll(pushurlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
return nil
}
func (c *RemoteConfig) marshal() *format.Subsection {
if c.raw == nil {
c.raw = &format.Subsection{}
}
c.raw.Name = c.Name
if len(c.URLs) == 0 {
c.raw.RemoveOption(urlKey)
} else {
urls := c.URLs
if c.insteadOfRulesApplied {
urls = c.originalURLs
}
c.raw.SetOption(urlKey, urls...)
}
if len(c.Fetch) == 0 {
c.raw.RemoveOption(fetchKey)
} else {
var values []string
for _, rs := range c.Fetch {
values = append(values, rs.String())
}
c.raw.SetOption(fetchKey, values...)
}
if c.Mirror {
c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
}
return c.raw
}
func (c *RemoteConfig) IsFirstURLLocal() bool {
return url.IsLocalEndpoint(c.URLs[0])
}
func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) {
// save original urls
originalURLs := make([]string, len(c.URLs))
copy(originalURLs, c.URLs)
for i, url := range c.URLs {
if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil {
c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i])
c.insteadOfRulesApplied = true
}
}
if c.insteadOfRulesApplied {
c.originalURLs = originalURLs
}
}
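// insteadOfSketch is a minimal illustrative sketch (not part of the package
// API): it applies a url.<base>.insteadOf rewrite rule to a remote, turning
// an HTTPS URL into an SSH one; both URLs are assumptions made purely for
// illustration.
func insteadOfSketch() []string {
	remote := &RemoteConfig{
		Name: "origin",
		URLs: []string{"https://github.com/go-git/go-git"},
	}
	rule := &URL{
		Name:      "ssh://git@github.com/",
		InsteadOf: "https://github.com/",
	}
	remote.applyURLRules(map[string]*URL{rule.Name: rule})
	return remote.URLs // now "ssh://git@github.com/go-git/go-git"
}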