mirror of https://github.com/go-gitea/gitea.git
migrations: add test for importing pull requests in gitea uploader (#18752)
* logs: add the buffer logger to inspect logs during testing

  Signed-off-by: Loïc Dachary <loic@dachary.org>

* migrations: add test for importing pull requests in gitea uploader

  Signed-off-by: Loïc Dachary <loic@dachary.org>

* for each git.OpenRepositoryCtx, call Close
* Content is expected to return the content of the log
* test for errors before defer

Co-authored-by: Loïc Dachary <loic@dachary.org>
Co-authored-by: zeripath <art27@cantab.net>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
This commit is contained in:
parent e4ef61ee0f
commit 49cab2b01f
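The pattern the new tests rely on, condensed into one sketch. This is not part of the commit; the test name and marker below are made up, but every call it uses (NamedLoggers.Load, SetLogger, DelLogger, GetLoggerProviderContent) appears in the diff that follows.

package migrations

import (
	"strings"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/log"

	"github.com/stretchr/testify/assert"
)

// TestCaptureLogsSketch is a hypothetical condensation of the pattern used by
// the tests in this commit: attach the new "buffer" provider to the default
// logger, run the code under test, then poll the buffer for the expected output.
func TestCaptureLogsSketch(t *testing.T) {
	logger, ok := log.NamedLoggers.Load(log.DEFAULT)
	assert.True(t, ok)
	logger.SetLogger("buffer", "buffer", "{}")
	defer logger.DelLogger("buffer")

	marker := "A UNIQUE MESSAGE"
	log.Error(marker) // whatever the code under test logs ends up in the buffer

	// Events travel through channels, so the content is polled rather than read once.
	found := false
	for i := 0; i < 1000; i++ {
		content, err := logger.GetLoggerProviderContent("buffer")
		assert.NoError(t, err)
		if strings.Contains(content, marker) {
			found = true
			break
		}
		time.Sleep(time.Millisecond)
	}
	assert.True(t, found)
}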
@@ -181,6 +181,11 @@ func (log *TestLogger) Init(config string) error {
 	return nil
 }
 
+// Content returns the content accumulated in the content provider
+func (log *TestLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *TestLogger) Flush() {
 }
@@ -166,6 +166,11 @@ func (log *TestLogger) Init(config string) error {
 	return nil
 }
 
+// Content returns the content accumulated in the content provider
+func (log *TestLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *TestLogger) Flush() {
 }
@@ -0,0 +1,72 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"bytes"
+	"sync"
+)
+
+type bufferWriteCloser struct {
+	mu     sync.Mutex
+	buffer bytes.Buffer
+}
+
+func (b *bufferWriteCloser) Write(p []byte) (int, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.buffer.Write(p)
+}
+
+func (b *bufferWriteCloser) Close() error {
+	return nil
+}
+
+func (b *bufferWriteCloser) String() string {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.buffer.String()
+}
+
+// BufferLogger implements LoggerProvider and writes messages in a buffer.
+type BufferLogger struct {
+	WriterLogger
+}
+
+// NewBufferLogger creates a BufferLogger returned as a LoggerProvider.
+func NewBufferLogger() LoggerProvider {
+	log := &BufferLogger{}
+	log.NewWriterLogger(&bufferWriteCloser{})
+	return log
+}
+
+// Init inits the buffer writer
+func (log *BufferLogger) Init(string) error {
+	log.NewWriterLogger(log.out)
+	return nil
+}
+
+// Content returns the content accumulated in the content provider
+func (log *BufferLogger) Content() (string, error) {
+	return log.out.(*bufferWriteCloser).String(), nil
+}
+
+// Flush when log should be flushed
+func (log *BufferLogger) Flush() {
+}
+
+// ReleaseReopen does nothing
+func (log *BufferLogger) ReleaseReopen() error {
+	return nil
+}
+
+// GetName returns the default name for this implementation
+func (log *BufferLogger) GetName() string {
+	return "buffer"
+}
+
+func init() {
+	Register("buffer", NewBufferLogger)
+}
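Taken on its own, the provider round-trips synchronously: Write goes straight into the mutex-guarded bytes.Buffer and Content reads it back. A minimal sketch of that path, assuming it lives inside package log (the Event fields are unexported); the helper name is made up, and the test file added in the next hunk exercises the same calls for real.

package log

import "time"

// bufferLoggerRoundTrip is a hypothetical helper, not part of the commit.
func bufferLoggerRoundTrip() (string, error) {
	provider := NewBufferLogger() // LoggerProvider backed by an in-memory buffer
	if err := provider.Init(""); err != nil {
		return "", err
	}
	provider.LogEvent(&Event{ // write one event straight to the provider
		level: INFO,
		msg:   "TEST MSG",
		time:  time.Now(),
	})
	// No channel dispatch is involved at this level, so the content can be
	// read back immediately.
	return provider.(*BufferLogger).Content()
}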
@@ -0,0 +1,64 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBufferLogger(t *testing.T) {
+	logger := NewBufferLogger()
+	bufferLogger := logger.(*BufferLogger)
+	assert.NotNil(t, bufferLogger)
+
+	err := logger.Init("")
+	assert.NoError(t, err)
+
+	location, _ := time.LoadLocation("EST")
+	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+	msg := "TEST MSG"
+	event := Event{
+		level:    INFO,
+		msg:      msg,
+		caller:   "CALLER",
+		filename: "FULL/FILENAME",
+		line:     1,
+		time:     date,
+	}
+	logger.LogEvent(&event)
+	content, err := bufferLogger.Content()
+	assert.NoError(t, err)
+	assert.Contains(t, content, msg)
+	logger.Close()
+}
+
+func TestBufferLoggerContent(t *testing.T) {
+	level := INFO
+	logger := NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+
+	logger.SetLogger("buffer", "buffer", "{}")
+	defer logger.DelLogger("buffer")
+
+	msg := "A UNIQUE MESSAGE"
+	Error(msg)
+
+	found := false
+	for i := 0; i < 30000; i++ {
+		content, err := logger.GetLoggerProviderContent("buffer")
+		assert.NoError(t, err)
+		if strings.Contains(content, msg) {
+			found = true
+			break
+		}
+		time.Sleep(1 * time.Millisecond)
+	}
+	assert.True(t, found)
+}
@@ -119,6 +119,11 @@ func (log *ConnLogger) Init(jsonconfig string) error {
 	return nil
 }
 
+// Content returns the content accumulated in the content provider
+func (log *ConnLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush does nothing for this implementation
 func (log *ConnLogger) Flush() {
 }
@@ -66,6 +66,11 @@ func (log *ConsoleLogger) Init(config string) error {
 	return nil
 }
 
+// Content returns the content accumulated in the content provider
+func (log *ConsoleLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *ConsoleLogger) Flush() {
 }
@@ -216,6 +216,12 @@ func (m *MultiChannelledLog) GetEventLogger(name string) EventLogger {
 	return m.loggers[name]
 }
 
+// GetLoggerProviderContent returns a sub logger provider's content from this MultiChannelledLog
+func (m *MultiChannelledLog) GetLoggerProviderContent(name string) (string, error) {
+	channelledLogger := m.GetEventLogger(name).(*ChannelledLog)
+	return channelledLogger.loggerProvider.Content()
+}
+
 // GetEventLoggerNames returns a list of names
 func (m *MultiChannelledLog) GetEventLoggerNames() []string {
 	m.rwmutex.RLock()
@@ -243,6 +243,15 @@ func (log *FileLogger) deleteOldLog() {
 	})
 }
 
+// Content returns the content accumulated in the content provider
+func (log *FileLogger) Content() (string, error) {
+	b, err := os.ReadFile(log.Filename)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
 // Flush flush file logger.
 // there are no buffering messages in file logger in memory.
 // flush file means sync file from disk.
@@ -7,6 +7,7 @@ package log
 // LoggerProvider represents behaviors of a logger provider.
 type LoggerProvider interface {
 	Init(config string) error
+	Content() (string, error)
 	EventLogger
 }
 
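With Content now part of LoggerProvider, every provider in the package has to implement it; most return a "not supported" error, and only the buffer and file providers expose real content. A hypothetical compile-time check, not in the commit, that makes the new requirement explicit:

package log

// These blank assignments fail to compile if any provider stops satisfying
// the extended LoggerProvider interface.
var (
	_ LoggerProvider = (*BufferLogger)(nil)
	_ LoggerProvider = (*FileLogger)(nil)
	_ LoggerProvider = (*ConnLogger)(nil)
	_ LoggerProvider = (*ConsoleLogger)(nil)
	_ LoggerProvider = (*SMTPLogger)(nil)
)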
@@ -95,6 +95,11 @@ func (log *SMTPLogger) sendMail(p []byte) (int, error) {
 	)
 }
 
+// Content returns the content accumulated in the content provider
+func (log *SMTPLogger) Content() (string, error) {
+	return "", fmt.Errorf("not supported")
+}
+
 // Flush when log should be flushed
 func (log *SMTPLogger) Flush() {
 }
@@ -484,19 +484,9 @@ func (g *GiteaLocalUploader) CreatePullRequests(prs ...*base.PullRequest) error
 	return nil
 }
 
-func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullRequest, error) {
-	var labels []*models.Label
-	for _, label := range pr.Labels {
-		lb, ok := g.labels[label.Name]
-		if ok {
-			labels = append(labels, lb)
-		}
-	}
-
-	milestoneID := g.milestones[pr.Milestone]
-
+func (g *GiteaLocalUploader) updateGitForPullRequest(pr *base.PullRequest) (head string, err error) {
 	// download patch file
-	err := func() error {
+	err = func() error {
 		if pr.PatchURL == "" {
 			return nil
 		}
@@ -519,25 +509,25 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullR
 		return err
 	}()
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	// set head information
 	pullHead := filepath.Join(g.repo.RepoPath(), "refs", "pull", fmt.Sprintf("%d", pr.Number))
 	if err := os.MkdirAll(pullHead, os.ModePerm); err != nil {
-		return nil, err
+		return "", err
 	}
 	p, err := os.Create(filepath.Join(pullHead, "head"))
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 	_, err = p.WriteString(pr.Head.SHA)
 	p.Close()
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
-	head := "unknown repository"
+	head = "unknown repository"
 	if pr.IsForkPullRequest() && pr.State != "closed" {
 		if pr.Head.OwnerName != "" {
 			remote := pr.Head.OwnerName
@@ -560,16 +550,16 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullR
 			} else {
 				headBranch := filepath.Join(g.repo.RepoPath(), "refs", "heads", pr.Head.OwnerName, pr.Head.Ref)
 				if err := os.MkdirAll(filepath.Dir(headBranch), os.ModePerm); err != nil {
-					return nil, err
+					return "", err
 				}
 				b, err := os.Create(headBranch)
 				if err != nil {
-					return nil, err
+					return "", err
 				}
 				_, err = b.WriteString(pr.Head.SHA)
 				b.Close()
 				if err != nil {
-					return nil, err
+					return "", err
 				}
 				head = pr.Head.OwnerName + "/" + pr.Head.Ref
 			}
@@ -595,6 +585,25 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullR
 		}
 	}
 
+	return head, nil
+}
+
+func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*models.PullRequest, error) {
+	var labels []*models.Label
+	for _, label := range pr.Labels {
+		lb, ok := g.labels[label.Name]
+		if ok {
+			labels = append(labels, lb)
+		}
+	}
+
+	milestoneID := g.milestones[pr.Milestone]
+
+	head, err := g.updateGitForPullRequest(pr)
+	if err != nil {
+		return nil, fmt.Errorf("updateGitForPullRequest: %w", err)
+	}
+
 	if pr.Created.IsZero() {
 		if pr.Closed != nil {
 			pr.Created = *pr.Closed
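The point of this extraction is testability: updateGitForPullRequest can now be driven with a hand-built base.PullRequest and its result checked directly, instead of going through newPullRequest and a full models.PullRequest. A rough sketch of the call shape, with placeholder values; the table-driven version in the test file below is the real thing.

// Inside a test that has already set up fromRepo, uploader, baseSHA and headSHA
// as the test below does.
pr := base.PullRequest{
	Number: 1,
	State:  "open",
	Base: base.PullRequestBranch{
		CloneURL: fromRepo.RepoPath(), Ref: "master", SHA: baseSHA,
		RepoName: fromRepo.Name, OwnerName: fromRepo.OwnerName,
	},
	Head: base.PullRequestBranch{
		CloneURL: fromRepo.RepoPath(), Ref: "branch1", SHA: headSHA,
		RepoName: fromRepo.Name, OwnerName: fromRepo.OwnerName,
	},
}
head, err := uploader.updateGitForPullRequest(&pr)
// For a same-repository pull request head is the branch name ("branch1"); for a
// fork it becomes "<owner>/<branch>", falling back to "unknown repository" when
// the fork's refs cannot be fetched.
_, _ = head, err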
@@ -7,7 +7,11 @@ package migrations
 
 import (
 	"context"
+	"fmt"
+	"os"
+	"path/filepath"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
@@ -16,7 +20,9 @@ import (
 	repo_model "code.gitea.io/gitea/models/repo"
 	"code.gitea.io/gitea/models/unittest"
 	user_model "code.gitea.io/gitea/models/user"
+	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/log"
 	base "code.gitea.io/gitea/modules/migration"
 	"code.gitea.io/gitea/modules/structs"
 	"code.gitea.io/gitea/modules/util"
@@ -217,3 +223,303 @@ func TestGiteaUploadRemapExternalUser(t *testing.T) {
 	assert.NoError(t, err)
 	assert.EqualValues(t, linkedUser.ID, target.GetUserID())
 }
+
+func TestGiteaUploadUpdateGitForPullRequest(t *testing.T) {
+	unittest.PrepareTestEnv(t)
+
+	//
+	// fromRepo master
+	//
+	fromRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
+	baseRef := "master"
+	assert.NoError(t, git.InitRepository(git.DefaultContext, fromRepo.RepoPath(), false))
+	_, err := git.NewCommand(git.DefaultContext, "symbolic-ref", "HEAD", git.BranchPrefix+baseRef).RunInDir(fromRepo.RepoPath())
+	assert.NoError(t, err)
+	assert.NoError(t, os.WriteFile(filepath.Join(fromRepo.RepoPath(), "README.md"), []byte(fmt.Sprintf("# Testing Repository\n\nOriginally created in: %s", fromRepo.RepoPath())), 0o644))
+	assert.NoError(t, git.AddChanges(fromRepo.RepoPath(), true))
+	signature := git.Signature{
+		Email: "test@example.com",
+		Name:  "test",
+		When:  time.Now(),
+	}
+	assert.NoError(t, git.CommitChanges(fromRepo.RepoPath(), git.CommitChangesOptions{
+		Committer: &signature,
+		Author:    &signature,
+		Message:   "Initial Commit",
+	}))
+	fromGitRepo, err := git.OpenRepositoryCtx(git.DefaultContext, fromRepo.RepoPath())
+	assert.NoError(t, err)
+	defer fromGitRepo.Close()
+	baseSHA, err := fromGitRepo.GetBranchCommitID(baseRef)
+	assert.NoError(t, err)
+
+	//
+	// fromRepo branch1
+	//
+	headRef := "branch1"
+	_, err = git.NewCommand(git.DefaultContext, "checkout", "-b", headRef).RunInDir(fromRepo.RepoPath())
+	assert.NoError(t, err)
+	assert.NoError(t, os.WriteFile(filepath.Join(fromRepo.RepoPath(), "README.md"), []byte("SOMETHING"), 0o644))
+	assert.NoError(t, git.AddChanges(fromRepo.RepoPath(), true))
+	signature.When = time.Now()
+	assert.NoError(t, git.CommitChanges(fromRepo.RepoPath(), git.CommitChangesOptions{
+		Committer: &signature,
+		Author:    &signature,
+		Message:   "Pull request",
+	}))
+	assert.NoError(t, err)
+	headSHA, err := fromGitRepo.GetBranchCommitID(headRef)
+	assert.NoError(t, err)
+
+	fromRepoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: fromRepo.OwnerID}).(*user_model.User)
+
+	//
+	// forkRepo branch2
+	//
+	forkHeadRef := "branch2"
+	forkRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 8}).(*repo_model.Repository)
+	assert.NoError(t, git.CloneWithArgs(git.DefaultContext, fromRepo.RepoPath(), forkRepo.RepoPath(), []string{}, git.CloneRepoOptions{
+		Branch: headRef,
+	}))
+	_, err = git.NewCommand(git.DefaultContext, "checkout", "-b", forkHeadRef).RunInDir(forkRepo.RepoPath())
+	assert.NoError(t, err)
+	assert.NoError(t, os.WriteFile(filepath.Join(forkRepo.RepoPath(), "README.md"), []byte(fmt.Sprintf("# branch2 %s", forkRepo.RepoPath())), 0o644))
+	assert.NoError(t, git.AddChanges(forkRepo.RepoPath(), true))
+	assert.NoError(t, git.CommitChanges(forkRepo.RepoPath(), git.CommitChangesOptions{
+		Committer: &signature,
+		Author:    &signature,
+		Message:   "branch2 commit",
+	}))
+	forkGitRepo, err := git.OpenRepositoryCtx(git.DefaultContext, forkRepo.RepoPath())
+	assert.NoError(t, err)
+	defer forkGitRepo.Close()
+	forkHeadSHA, err := forkGitRepo.GetBranchCommitID(forkHeadRef)
+	assert.NoError(t, err)
+
+	toRepoName := "migrated"
+	uploader := NewGiteaLocalUploader(context.Background(), fromRepoOwner, fromRepoOwner.Name, toRepoName)
+	uploader.gitServiceType = structs.GiteaService
+	assert.NoError(t, uploader.CreateRepo(&base.Repository{
+		Description: "description",
+		OriginalURL: fromRepo.RepoPath(),
+		CloneURL:    fromRepo.RepoPath(),
+		IsPrivate:   false,
+		IsMirror:    true,
+	}, base.MigrateOptions{
+		GitServiceType: structs.GiteaService,
+		Private:        false,
+		Mirror:         true,
+	}))
+
+	for _, testCase := range []struct {
+		name          string
+		head          string
+		assertContent func(t *testing.T, content string)
+		pr            base.PullRequest
+	}{
+		{
+			name: "fork, good Head.SHA",
+			head: fmt.Sprintf("%s/%s", forkRepo.OwnerName, forkHeadRef),
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  forkRepo.RepoPath(),
+					Ref:       forkHeadRef,
+					SHA:       forkHeadSHA,
+					RepoName:  forkRepo.Name,
+					OwnerName: forkRepo.OwnerName,
+				},
+			},
+		},
+		{
+			name: "fork, invalid Head.Ref",
+			head: "unknown repository",
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  forkRepo.RepoPath(),
+					Ref:       "INVALID",
+					SHA:       forkHeadSHA,
+					RepoName:  forkRepo.Name,
+					OwnerName: forkRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Fetch branch from")
+			},
+		},
+		{
+			name: "invalid fork CloneURL",
+			head: "unknown repository",
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  "UNLIKELY",
+					Ref:       forkHeadRef,
+					SHA:       forkHeadSHA,
+					RepoName:  forkRepo.Name,
+					OwnerName: "WRONG",
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "AddRemote failed")
+			},
+		},
+		{
+			name: "no fork, good Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       headSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+		},
+		{
+			name: "no fork, empty Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       "",
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Empty reference, removing")
+				assert.NotContains(t, content, "Cannot remove local head")
+			},
+		},
+		{
+			name: "no fork, invalid Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       "brokenSHA",
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Deprecated local head")
+				assert.Contains(t, content, "Cannot remove local head")
+			},
+		},
+		{
+			name: "no fork, not found Head.SHA",
+			head: headRef,
+			pr: base.PullRequest{
+				PatchURL: "",
+				Number:   1,
+				State:    "open",
+				Base: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       baseRef,
+					SHA:       baseSHA,
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+				Head: base.PullRequestBranch{
+					CloneURL:  fromRepo.RepoPath(),
+					Ref:       headRef,
+					SHA:       "2697b352310fcd01cbd1f3dbd43b894080027f68",
+					RepoName:  fromRepo.Name,
+					OwnerName: fromRepo.OwnerName,
+				},
+			},
+			assertContent: func(t *testing.T, content string) {
+				assert.Contains(t, content, "Deprecated local head")
+				assert.NotContains(t, content, "Cannot remove local head")
+			},
+		},
+	} {
+		t.Run(testCase.name, func(t *testing.T) {
+			logger, ok := log.NamedLoggers.Load(log.DEFAULT)
+			assert.True(t, ok)
+			logger.SetLogger("buffer", "buffer", "{}")
+			defer logger.DelLogger("buffer")
+
+			head, err := uploader.updateGitForPullRequest(&testCase.pr)
+			assert.NoError(t, err)
+			assert.EqualValues(t, testCase.head, head)
+			if testCase.assertContent != nil {
+				fence := fmt.Sprintf(">>>>>>>>>>>>>FENCE %s<<<<<<<<<<<<<<<", testCase.name)
+				log.Error(fence)
+				var content string
+				for i := 0; i < 5000; i++ {
+					content, err = logger.GetLoggerProviderContent("buffer")
+					assert.NoError(t, err)
+					if strings.Contains(content, fence) {
+						break
+					}
+					time.Sleep(1 * time.Millisecond)
+				}
+				testCase.assertContent(t, content)
+			}
+		})
+	}
+}