Enable golangci linter contextcheck (#3170)

Split out from https://github.com/woodpecker-ci/woodpecker/pull/2960
Robert Kaussow 2024-01-11 22:15:15 +01:00 committed by GitHub
parent d0380e31b5
commit f813badcf9
10 changed files with 32 additions and 35 deletions
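
For background: the contextcheck linter reports functions that receive a context.Context but then call downstream code with a non-inherited context (for example context.Background() or a context stored on a struct), which silently drops the caller's cancellation and deadlines. Below is a minimal illustrative sketch of the pattern it flags; the function and names are hypothetical and not taken from this repository.

package example

import (
    "context"
    "net/http"
)

// fetchURL receives ctx but builds the request with context.Background(),
// so cancellation and deadlines from the caller never reach the HTTP call.
// contextcheck reports the NewRequestWithContext line below; the fix is to
// pass the incoming ctx instead.
func fetchURL(ctx context.Context, url string) error {
    req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
    if err != nil {
        return err
    }
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    return resp.Body.Close()
}

Most hunks below apply exactly that fix by threading the caller's context through. Where a detached context is intentional, as in Runner.Run below where the pipeline is built with workflowCtx rather than runnerCtx, the warning is suppressed with a //nolint:contextcheck comment.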


@@ -163,6 +163,7 @@ linters:
     - gocritic
     - nolintlint
     - stylecheck
+    - contextcheck
 run:
   timeout: 15m


@@ -133,12 +133,13 @@ func (r *Runner) Run(runnerCtx context.Context) error {
     state := rpc.State{}
     state.Started = time.Now().Unix()
-    err = r.client.Init(ctxmeta, work.ID, state)
+    err = r.client.Init(runnerCtx, work.ID, state)
     if err != nil {
         logger.Error().Err(err).Msg("pipeline initialization failed")
     }
     var uploads sync.WaitGroup
+    //nolint:contextcheck
     err = pipeline.New(work.Config,
         pipeline.WithContext(workflowCtx),
         pipeline.WithTaskUUID(fmt.Sprint(work.ID)),
@@ -188,7 +189,7 @@ func (r *Runner) Run(runnerCtx context.Context) error {
         Int("exit_code", state.ExitCode).
         Msg("updating pipeline status")
-    if err := r.client.Done(ctxmeta, work.ID, state); err != nil {
+    if err := r.client.Done(runnerCtx, work.ID, state); err != nil {
         logger.Error().Err(err).Msg("updating pipeline status failed")
     } else {
         logger.Debug().Msg("updating pipeline status complete")


@@ -213,7 +213,7 @@ func execWithAxis(c *cli.Context, file, repoPath string, axis matrix.Axis) error
     }
     backendCtx := context.WithValue(c.Context, backendTypes.CliContext, c)
-    backend.Init(backendCtx)
+    backend.Init()
     backendEngine, err := backend.FindBackend(backendCtx, c.String("backend-engine"))
     if err != nil {


@@ -259,7 +259,7 @@ func getBackendEngine(backendCtx context.Context, backendName string, addons []s
         return addonBackend.Value, nil
     }
-    backend.Init(backendCtx)
+    backend.Init()
     engine, err := backend.FindBackend(backendCtx, backendName)
     if err != nil {
         log.Error().Err(err).Msgf("cannot find backend engine '%s'", backendName)


@@ -29,11 +29,11 @@ var (
     backends []types.Backend
 )
-func Init(ctx context.Context) {
+func Init() {
     backends = []types.Backend{
         docker.New(),
         local.New(),
-        kubernetes.New(ctx),
+        kubernetes.New(),
     }
     backendsByName = make(map[string]types.Backend)


@@ -147,11 +147,11 @@ func (e *docker) Load(ctx context.Context) (*backend.BackendInfo, error) {
     }, nil
 }
-func (e *docker) SetupWorkflow(_ context.Context, conf *backend.Config, taskUUID string) error {
+func (e *docker) SetupWorkflow(ctx context.Context, conf *backend.Config, taskUUID string) error {
     log.Trace().Str("taskUUID", taskUUID).Msg("create workflow environment")
     for _, vol := range conf.Volumes {
-        _, err := e.client.VolumeCreate(noContext, volume.CreateOptions{
+        _, err := e.client.VolumeCreate(ctx, volume.CreateOptions{
             Name: vol.Name,
             Driver: volumeDriver,
         })
@@ -165,7 +165,7 @@ func (e *docker) SetupWorkflow(_ context.Context, conf *backend.Config, taskUUID
         networkDriver = networkDriverNAT
     }
     for _, n := range conf.Networks {
-        _, err := e.client.NetworkCreate(noContext, n.Name, types.NetworkCreate{
+        _, err := e.client.NetworkCreate(ctx, n.Name, types.NetworkCreate{
             Driver: networkDriver,
             EnableIPv6: e.enableIPv6,
         })
@@ -311,27 +311,27 @@ func (e *docker) DestroyStep(ctx context.Context, step *backend.Step, taskUUID s
     return nil
 }
-func (e *docker) DestroyWorkflow(_ context.Context, conf *backend.Config, taskUUID string) error {
+func (e *docker) DestroyWorkflow(ctx context.Context, conf *backend.Config, taskUUID string) error {
     log.Trace().Str("taskUUID", taskUUID).Msgf("delete workflow environment")
     for _, stage := range conf.Stages {
         for _, step := range stage.Steps {
             containerName := toContainerName(step)
-            if err := e.client.ContainerKill(noContext, containerName, "9"); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
+            if err := e.client.ContainerKill(ctx, containerName, "9"); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
                 log.Error().Err(err).Msgf("could not kill container '%s'", step.Name)
             }
-            if err := e.client.ContainerRemove(noContext, containerName, removeOpts); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
+            if err := e.client.ContainerRemove(ctx, containerName, removeOpts); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
                 log.Error().Err(err).Msgf("could not remove container '%s'", step.Name)
             }
         }
     }
     for _, v := range conf.Volumes {
-        if err := e.client.VolumeRemove(noContext, v.Name, true); err != nil {
+        if err := e.client.VolumeRemove(ctx, v.Name, true); err != nil {
             log.Error().Err(err).Msgf("could not remove volume '%s'", v.Name)
         }
     }
     for _, n := range conf.Networks {
-        if err := e.client.NetworkRemove(noContext, n.Name); err != nil {
+        if err := e.client.NetworkRemove(ctx, n.Name); err != nil {
             log.Error().Err(err).Msgf("could not remove network '%s'", n.Name)
         }
     }
@@ -339,8 +339,6 @@ func (e *docker) DestroyWorkflow(_ context.Context, conf *backend.Config, taskUU
 }
 var (
-    noContext = context.Background()
     startOpts = types.ContainerStartOptions{}
     removeOpts = types.ContainerRemoveOptions{


@@ -47,7 +47,6 @@ const (
 var defaultDeleteOptions = newDefaultDeleteOptions()
 type kube struct {
-    ctx    context.Context
     client kubernetes.Interface
     config *config
     goos   string
@@ -117,10 +116,8 @@ func configFromCliContext(ctx context.Context) (*config, error) {
 }
 // New returns a new Kubernetes Backend.
-func New(ctx context.Context) types.Backend {
-    return &kube{
-        ctx: ctx,
-    }
+func New() types.Backend {
+    return &kube{}
 }
 func (e *kube) Name() string {
@@ -132,8 +129,8 @@ func (e *kube) IsAvailable(context.Context) bool {
     return len(host) > 0
 }
-func (e *kube) Load(context.Context) (*types.BackendInfo, error) {
-    config, err := configFromCliContext(e.ctx)
+func (e *kube) Load(ctx context.Context) (*types.BackendInfo, error) {
+    config, err := configFromCliContext(ctx)
     if err != nil {
         return nil, err
     }
@@ -334,31 +331,31 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
     return rc, nil
 }
-func (e *kube) DestroyStep(_ context.Context, step *types.Step, taskUUID string) error {
+func (e *kube) DestroyStep(ctx context.Context, step *types.Step, taskUUID string) error {
     if step.Type == types.StepTypeService {
         // a service should be stopped by DestroyWorkflow so we can ignore it
-        log.Trace().Msgf("DestroyStep got service '%s', ignoring it.", step.Name)
+        log.Trace().Msgf("destroyStep got service '%s', ignoring it.", step.Name)
         return nil
     }
-    log.Trace().Str("taskUUID", taskUUID).Msgf("stopping step: %s", step.Name)
-    err := stopPod(e.ctx, e, step, defaultDeleteOptions)
+    log.Trace().Str("taskUUID", taskUUID).Msgf("Stopping step: %s", step.Name)
+    err := stopPod(ctx, e, step, defaultDeleteOptions)
     return err
 }
 // Destroy the pipeline environment.
-func (e *kube) DestroyWorkflow(_ context.Context, conf *types.Config, taskUUID string) error {
+func (e *kube) DestroyWorkflow(ctx context.Context, conf *types.Config, taskUUID string) error {
     log.Trace().Str("taskUUID", taskUUID).Msg("deleting Kubernetes primitives")
-    // Use noContext because the ctx sent to this function will be canceled/done in case of error or canceled by user.
     for _, stage := range conf.Stages {
         for _, step := range stage.Steps {
-            err := stopPod(e.ctx, e, step, defaultDeleteOptions)
+            err := stopPod(ctx, e, step, defaultDeleteOptions)
             if err != nil {
                 return err
             }
             if step.Type == types.StepTypeService {
-                err := stopService(e.ctx, e, step, defaultDeleteOptions)
+                err := stopService(ctx, e, step, defaultDeleteOptions)
                 if err != nil {
                     return err
                 }
@@ -367,7 +364,7 @@ func (e *kube) DestroyWorkflow(_ context.Context, conf *types.Config, taskUUID s
     }
     for _, vol := range conf.Volumes {
-        err := stopVolume(e.ctx, e, vol.Name, defaultDeleteOptions)
+        err := stopVolume(ctx, e, vol.Name, defaultDeleteOptions)
         if err != nil {
             return err
         }


@@ -112,7 +112,7 @@ func (r *Runtime) Run(runnerCtx context.Context) error {
     }()
     r.started = time.Now().Unix()
-    if err := r.engine.SetupWorkflow(r.ctx, r.spec, r.taskUUID); err != nil {
+    if err := r.engine.SetupWorkflow(runnerCtx, r.spec, r.taskUUID); err != nil {
         return err
     }


@@ -25,7 +25,7 @@ import (
     "go.woodpecker-ci.org/woodpecker/v2/server/pipeline/stepbuilder"
 )
-func queuePipeline(repo *model.Repo, pipelineItems []*stepbuilder.Item) error {
+func queuePipeline(ctx context.Context, repo *model.Repo, pipelineItems []*stepbuilder.Item) error {
     var tasks []*model.Task
     for _, item := range pipelineItems {
         if item.Workflow.State == model.StatusSkipped {
@@ -54,7 +54,7 @@ func queuePipeline(repo *model.Repo, pipelineItems []*stepbuilder.Item) error {
         tasks = append(tasks, task)
     }
-    return server.Config.Services.Queue.PushAtOnce(context.Background(), tasks)
+    return server.Config.Services.Queue.PushAtOnce(ctx, tasks)
 }
 func taskIds(dependsOn []string, pipelineItems []*stepbuilder.Item) (taskIds []string) {


@@ -39,7 +39,7 @@ func start(ctx context.Context, store store.Store, activePipeline *model.Pipelin
     publishPipeline(ctx, activePipeline, repo, user)
-    if err := queuePipeline(repo, pipelineItems); err != nil {
+    if err := queuePipeline(ctx, repo, pipelineItems); err != nil {
         log.Error().Err(err).Msg("queuePipeline")
         return nil, err
     }