// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

//go:build !windows

package graceful

import (
	"context"
	"errors"
	"os"
	"os/signal"
	"runtime/pprof"
	"strconv"
	"sync"
	"syscall"
	"time"

	"code.gitea.io/gitea/modules/graceful/releasereopen"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/process"
	"code.gitea.io/gitea/modules/setting"
)

// Manager manages the graceful shutdown process
type Manager struct {
	isChild                bool
	forked                 bool
	lock                   *sync.RWMutex
	state                  state
	shutdownCtx            context.Context
	hammerCtx              context.Context
	terminateCtx           context.Context
	managerCtx             context.Context
	shutdownCtxCancel      context.CancelFunc
	hammerCtxCancel        context.CancelFunc
	terminateCtxCancel     context.CancelFunc
	managerCtxCancel       context.CancelFunc
	runningServerWaitGroup sync.WaitGroup
	createServerWaitGroup  sync.WaitGroup
	terminateWaitGroup     sync.WaitGroup

	toRunAtShutdown  []func()
	toRunAtTerminate []func()
}
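
// newGracefulManager creates the graceful Manager, marking it as a child process
// when listen FDs were inherited from a parent process, and starts signal handling.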
func newGracefulManager(ctx context.Context) *Manager {
	manager := &Manager{
		isChild: len(os.Getenv(listenFDsEnv)) > 0 && os.Getppid() > 1,
		lock:    &sync.RWMutex{},
	}
	manager.createServerWaitGroup.Add(numberOfServersToCreate)
	manager.start(ctx)
	return manager
}
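
// systemdNotifyMsg is a message sent to systemd over the notify socket,
// using the sd_notify text protocol (e.g. "READY=1")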
type systemdNotifyMsg string

const (
	readyMsg     systemdNotifyMsg = "READY=1"
	stoppingMsg  systemdNotifyMsg = "STOPPING=1"
	reloadingMsg systemdNotifyMsg = "RELOADING=1"
	watchdogMsg  systemdNotifyMsg = "WATCHDOG=1"
)
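
// statusMsg wraps a free-form status string into a STATUS= notify message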
func statusMsg(msg string) systemdNotifyMsg {
	return systemdNotifyMsg("STATUS=" + msg)
}
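
// pidMsg reports the current process ID as a MAINPID= notify message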
func pidMsg() systemdNotifyMsg {
	return systemdNotifyMsg("MAINPID=" + strconv.Itoa(os.Getpid()))
}

// notify reports status to systemd via the sd_notify protocol, when a notify socket is configured
func (g *Manager) notify(msg systemdNotifyMsg) {
	conn, err := getNotifySocket()
	if err != nil {
		// the err is logged in getNotifySocket
		return
	}
	if conn == nil {
		return
	}
	defer conn.Close()

	if _, err = conn.Write([]byte(msg)); err != nil {
		log.Warn("Failed to notify NOTIFY_SOCKET: %v", err)
		return
	}
}
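
// start creates the lifecycle contexts, labels them for pprof, marks the manager
// as running and spawns the signal handling and startup goroutines.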
func (g *Manager) start(ctx context.Context) {
	// Make contexts
	g.terminateCtx, g.terminateCtxCancel = context.WithCancel(ctx)
	g.shutdownCtx, g.shutdownCtxCancel = context.WithCancel(ctx)
	g.hammerCtx, g.hammerCtxCancel = context.WithCancel(ctx)
	g.managerCtx, g.managerCtxCancel = context.WithCancel(ctx)

	// Next add pprof labels to these contexts
	g.terminateCtx = pprof.WithLabels(g.terminateCtx, pprof.Labels("graceful-lifecycle", "with-terminate"))
	g.shutdownCtx = pprof.WithLabels(g.shutdownCtx, pprof.Labels("graceful-lifecycle", "with-shutdown"))
	g.hammerCtx = pprof.WithLabels(g.hammerCtx, pprof.Labels("graceful-lifecycle", "with-hammer"))
	g.managerCtx = pprof.WithLabels(g.managerCtx, pprof.Labels("graceful-lifecycle", "with-manager"))

	// Now label this and all goroutines created by this goroutine with the graceful-lifecycle manager
	pprof.SetGoroutineLabels(g.managerCtx)
	defer pprof.SetGoroutineLabels(ctx)

	// Set the running state & handle signals
	g.setState(stateRunning)
	g.notify(statusMsg("Starting Gitea"))
	g.notify(pidMsg())
	go g.handleSignals(g.managerCtx)

	// Handle clean up of unused provided listeners and delayed start-up
	startupDone := make(chan struct{})
	go func() {
		defer close(startupDone)
		// Wait till we're done getting all of the listeners and then close
		// the unused ones
		func() {
			// FIXME: there is a fundamental design problem of the "manager" and the "wait group".
			// If nothing has started, the "Wait" just panics: sync: WaitGroup is reused before previous Wait has returned
			// There is no clear solution besides a complete rewriting of the "manager"
			defer func() {
				_ = recover()
			}()
			g.createServerWaitGroup.Wait()
		}()
		// Ignore the error here there's not much we can do with it
		// They're logged in the CloseProvidedListeners function
		_ = CloseProvidedListeners()
		g.notify(readyMsg)
	}()
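	// If a startup timeout is configured, give up and shut down when the servers are not ready in time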
	if setting.StartupTimeout > 0 {
		go func() {
			select {
			case <-startupDone:
				return
			case <-g.IsShutdown():
				func() {
					// When waitgroup counter goes negative it will panic - we don't care about this so we can just ignore it.
					defer func() {
						_ = recover()
					}()
					// Ensure that the createServerWaitGroup stops waiting
					for {
						g.createServerWaitGroup.Done()
					}
				}()
				return
			case <-time.After(setting.StartupTimeout):
				log.Error("Startup took too long! Shutting down")
				g.notify(statusMsg("Startup took too long! Shutting down"))
				g.notify(stoppingMsg)
				g.doShutdown()
			}
		}()
	}
}
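
// handleSignals listens for process signals and the systemd watchdog ticker and
// triggers the matching graceful restart, shutdown or hammer actions.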
func (g *Manager) handleSignals(ctx context.Context) {
	ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Graceful: HandleSignals", process.SystemProcessType, true)
	defer finished()

	signalChannel := make(chan os.Signal, 1)

	signal.Notify(
		signalChannel,
		syscall.SIGHUP,
		syscall.SIGUSR1,
		syscall.SIGUSR2,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGTSTP,
	)
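
	// When systemd supplies a watchdog timeout, notify it now and keep pinging it at half that interval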
	watchdogTimeout := getWatchdogTimeout()
	t := &time.Ticker{}
	if watchdogTimeout != 0 {
		g.notify(watchdogMsg)
		t = time.NewTicker(watchdogTimeout / 2)
	}

	pid := syscall.Getpid()
	for {
		select {
		case sig := <-signalChannel:
			switch sig {
			case syscall.SIGHUP:
				log.Info("PID: %d. Received SIGHUP. Attempting GracefulRestart...", pid)
				g.DoGracefulRestart()
			case syscall.SIGUSR1:
				log.Warn("PID %d. Received SIGUSR1. Releasing and reopening logs", pid)
				g.notify(statusMsg("Releasing and reopening logs"))
				if err := releasereopen.GetManager().ReleaseReopen(); err != nil {
					log.Error("Error whilst releasing and reopening logs: %v", err)
				}
			case syscall.SIGUSR2:
				log.Warn("PID %d. Received SIGUSR2. Hammering...", pid)
				g.DoImmediateHammer()
			case syscall.SIGINT:
				log.Warn("PID %d. Received SIGINT. Shutting down...", pid)
				g.DoGracefulShutdown()
			case syscall.SIGTERM:
				log.Warn("PID %d. Received SIGTERM. Shutting down...", pid)
				g.DoGracefulShutdown()
			case syscall.SIGTSTP:
				log.Info("PID %d. Received SIGTSTP.", pid)
			default:
				log.Info("PID %d. Received %v.", pid, sig)
			}
		case <-t.C:
			g.notify(watchdogMsg)
		case <-ctx.Done():
			log.Warn("PID: %d. Background context for manager closed - %v - Shutting down...", pid, ctx.Err())
			g.DoGracefulShutdown()
			return
		}
	}
}
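
// doFork restarts the process by forking a new Gitea child via RestartProcess;
// only one fork may be in flight at a time.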
func (g *Manager) doFork() error {
	g.lock.Lock()
	if g.forked {
		g.lock.Unlock()
		return errors.New("another process already forked. Ignoring this one")
	}
	g.forked = true
	g.lock.Unlock()

	g.notify(reloadingMsg)

	// We need to move the file logs to append pids
	setting.RestartLogsWithPIDSuffix()

	_, err := RestartProcess()

	return err
}

// DoGracefulRestart causes a graceful restart
func (g *Manager) DoGracefulRestart() {
	if setting.GracefulRestartable {
		log.Info("PID: %d. Forking...", os.Getpid())
		err := g.doFork()
		if err != nil {
			if err.Error() == "another process already forked. Ignoring this one" {
				g.DoImmediateHammer()
			} else {
				log.Error("Error whilst forking from PID: %d : %v", os.Getpid(), err)
			}
		}
		// doFork calls RestartProcess which starts a new Gitea process, so this parent process needs to exit
		// Otherwise some resources (eg: leveldb lock) will be held by this parent process and the new process will fail to start
		log.Info("PID: %d. Shutting down after forking ...", os.Getpid())
		g.doShutdown()
	} else {
		log.Info("PID: %d. Not set restartable. Shutting down...", os.Getpid())
		g.notify(stoppingMsg)
		g.doShutdown()
	}
}

// DoImmediateHammer causes an immediate hammer
func (g *Manager) DoImmediateHammer() {
	g.notify(statusMsg("Sending immediate hammer"))
	g.doHammerTime(0 * time.Second)
}

// DoGracefulShutdown causes a graceful shutdown
func (g *Manager) DoGracefulShutdown() {
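	// Only notify systemd that we are stopping if this process has not forked a replacement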
	g.lock.Lock()
	if !g.forked {
		g.lock.Unlock()
		g.notify(stoppingMsg)
	} else {
		g.lock.Unlock()
		g.notify(statusMsg("Shutting down after fork"))
	}
	g.doShutdown()
}

// RegisterServer registers the running of a listening server, in the case of unix this means that the parent process can now die.
// Any call to RegisterServer must be matched by a call to ServerDone
func (g *Manager) RegisterServer() {
	KillParent()
	g.runningServerWaitGroup.Add(1)
}