mirror of https://github.com/go-gitea/gitea.git
Change use of Walk to WalkDir to improve disk performance (#22462)
As suggested by the Go developers, use `filepath.WalkDir` instead of `filepath.Walk`, because ["Walk is less efficient than WalkDir, introduced in Go 1.16, which avoids calling `os.Lstat` on every file or directory visited"](https://pkg.go.dev/path/filepath#Walk). This change addresses that, in a similar way to https://github.com/go-gitea/gitea/pull/22392.

Co-authored-by: zeripath <art27@cantab.net>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
parent da274380a7
commit 04c97aa364
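For context, here is a minimal, self-contained sketch (not code from the Gitea tree) of the migration pattern each hunk below applies: the `WalkDir` callback receives an `fs.DirEntry` instead of an `os.FileInfo`, so the walk itself no longer stats every entry, and `d.Info()` is called only where size or modification time is actually needed.

```go
// Minimal sketch of the Walk -> WalkDir migration pattern (illustrative only).
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err // errors reading a directory are passed to the callback
		}
		if d.IsDir() {
			return nil // no os.Lstat needed just to skip directories
		}
		info, err := d.Info() // stat only the files whose metadata we need
		if err != nil {
			return err
		}
		fmt.Printf("%s\t%d bytes\t%s\n", path, info.Size(), info.ModTime().Format("2006-01-02"))
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
```

Each hunk below is this same transformation applied to a different `filepath.Walk` call site.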
@@ -32,11 +32,15 @@ func needsUpdate(dir, filename string) (bool, []byte) {
 
 	hasher := sha1.New()
 
-	err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+	err = filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
-		_, _ = hasher.Write([]byte(info.Name()))
+		info, err := d.Info()
+		if err != nil {
+			return err
+		}
+		_, _ = hasher.Write([]byte(d.Name()))
 		_, _ = hasher.Write([]byte(info.ModTime().String()))
 		_, _ = hasher.Write([]byte(strconv.FormatInt(info.Size(), 16)))
 		return nil
@@ -225,14 +225,24 @@ func compressOldLogFile(fname string, compressionLevel int) error {
 
 func (log *FileLogger) deleteOldLog() {
 	dir := filepath.Dir(log.Filename)
-	_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
+	_ = filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) (returnErr error) {
 		defer func() {
 			if r := recover(); r != nil {
 				returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r)
 			}
 		}()
 
-		if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*log.Maxdays) {
+		if err != nil {
+			return err
+		}
+		if d.IsDir() {
+			return nil
+		}
+		info, err := d.Info()
+		if err != nil {
+			return err
+		}
+		if info.ModTime().Unix() < (time.Now().Unix() - 60*60*24*log.Maxdays) {
 			if strings.HasPrefix(filepath.Base(path), filepath.Base(log.Filename)) {
 				if err := util.Remove(path); err != nil {
 					returnErr = fmt.Errorf("Failed to remove %s: %w", path, err)
@@ -173,12 +173,12 @@ func generateRepoCommit(ctx context.Context, repo, templateRepo, generateRepo *r
 	// Avoid walking tree if there are no globs
 	if len(gt.Globs()) > 0 {
 		tmpDirSlash := strings.TrimSuffix(filepath.ToSlash(tmpDir), "/") + "/"
-		if err := filepath.Walk(tmpDirSlash, func(path string, info os.FileInfo, walkErr error) error {
+		if err := filepath.WalkDir(tmpDirSlash, func(path string, d os.DirEntry, walkErr error) error {
 			if walkErr != nil {
 				return walkErr
 			}
 
-			if info.IsDir() {
+			if d.IsDir() {
 				return nil
 			}
 
@@ -129,7 +129,7 @@ func (l *LocalStorage) URL(path, name string) (*url.URL, error) {
 
 // IterateObjects iterates across the objects in the local storage
 func (l *LocalStorage) IterateObjects(fn func(path string, obj Object) error) error {
-	return filepath.Walk(l.dir, func(path string, info os.FileInfo, err error) error {
+	return filepath.WalkDir(l.dir, func(path string, d os.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
@@ -141,7 +141,7 @@ func (l *LocalStorage) IterateObjects(fn func(path string, obj Object) error) er
 		if path == l.dir {
 			return nil
 		}
-		if info.IsDir() {
+		if d.IsDir() {
 			return nil
 		}
 		relPath, err := filepath.Rel(l.dir, path)
@@ -280,17 +280,17 @@ func Repos(ctx *context.Context) {
 	repos := map[string]*repo_model.Repository{}
 	// We're going to iterate by pagesize.
 	root := user_model.UserPath(ctxUser.Name)
-	if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+	if err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error {
 		if err != nil {
 			if os.IsNotExist(err) {
 				return nil
 			}
 			return err
 		}
-		if !info.IsDir() || path == root {
+		if !d.IsDir() || path == root {
 			return nil
 		}
-		name := info.Name()
+		name := d.Name()
 		if !strings.HasSuffix(name, ".git") {
 			return filepath.SkipDir
 		}
@@ -304,7 +304,7 @@ func Repos(ctx *context.Context) {
 		count++
 		return filepath.SkipDir
 	}); err != nil {
-		ctx.ServerError("filepath.Walk", err)
+		ctx.ServerError("filepath.WalkDir", err)
 		return
 	}
 
@@ -303,14 +303,16 @@ func ListUnadoptedRepositories(query string, opts *db.ListOptions) ([]string, in
 
 	// We're going to iterate by pagesize.
 	root := filepath.Clean(setting.RepoRootPath)
-	if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+	if err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
-		if !info.IsDir() || path == root {
+		if !d.IsDir() || path == root {
 			return nil
 		}
 
+		name := d.Name()
+
 		if !strings.ContainsRune(path[len(root)+1:], filepath.Separator) {
 			// Got a new user
 			if err = checkUnadoptedRepositories(userName, repoNamesToCheck, unadopted); err != nil {
@@ -318,16 +320,14 @@ func ListUnadoptedRepositories(query string, opts *db.ListOptions) ([]string, in
 			}
 			repoNamesToCheck = repoNamesToCheck[:0]
 
-			if !globUser.Match(info.Name()) {
+			if !globUser.Match(name) {
 				return filepath.SkipDir
 			}
 
-			userName = info.Name()
+			userName = name
 			return nil
 		}
 
-		name := info.Name()
-
 		if !strings.HasSuffix(name, ".git") {
 			return filepath.SkipDir
 		}