Add depends_on support for steps (#2771)

Co-authored-by: 6543 <6543@obermui.de>
Authored by Anbraten on 2023-12-24 12:14:30 +01:00; committed by GitHub
parent 9d9bcbf363
commit 2b1e5f35de
Signed with GPG key ID 4AEE18F83AFDEB23
12 changed files with 494 additions and 145 deletions


@@ -101,7 +101,7 @@ func lintFile(_ *cli.Context, file string) error {
 	// TODO: lint multiple files at once to allow checks for sth like "depends_on" to work
 	err = linter.New(linter.WithTrusted(true)).Lint([]*linter.WorkflowConfig{config})
 	if err != nil {
-		fmt.Printf("🔥 %s has warning / errors:\n", output.String(config.File).Underline())
+		fmt.Printf("🔥 %s has warnings / errors:\n", output.String(config.File).Underline())
 		hasErrors := false
 		for _, err := range pipeline_errors.GetPipelineErrors(err) {


@@ -443,33 +443,28 @@ when:
       - evaluate: 'SKIP != "true"'
 ```
 
-### `group` - Parallel execution
-
-Woodpecker supports parallel step execution for same-machine fan-in and fan-out. Parallel steps are configured using the `group` attribute. This instructs the agent to execute the named group in parallel.
-
-Example parallel configuration:
-
-```diff
- steps:
-   backend:
-+    group: build
-     image: golang
-     commands:
-       - go build
-       - go test
-   frontend:
-+    group: build
-     image: node
-     commands:
-       - npm install
-       - npm run test
-       - npm run build
-   publish:
-     image: plugins/docker
-     repo: octocat/hello-world
-```
-
-In the above example, the `frontend` and `backend` steps are executed in parallel. The agent will not execute the `publish` step until the group completes.
+### `depends_on`
+
+Normally steps of a workflow are executed serially in the order in which they are defined. As soon as you set `depends_on` for a step a [directed acyclic graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph) will be used and all steps of the workflow will be executed in parallel besides the steps that have a dependency set to another step using `depends_on`:
+
+```diff
+ steps:
+   build: # build will be executed immediately
+     image: golang
+     commands:
+       - go build
+
+   deploy:
+     image: plugins/docker
+     settings:
+       repo: foo/bar
++    depends_on: [build, test] # deploy will be executed after build and test finished
+
+   test: # test will be executed immediately as no dependencies are set
+     image: golang
+     commands:
+       - go test
+```
 
 ### `volumes`


@@ -4,6 +4,7 @@ Some versions need some changes to the server configuration or the pipeline conf
 ## `next`
 
+- Deprecated `steps.[name].group` in favor of `steps.[name].depends_on` (see [workflow syntax](./20-usage/20-workflow-syntax.md#depends_on) to learn how to set dependencies)
 - Removed `WOODPECKER_ROOT_PATH` and `WOODPECKER_ROOT_URL` config variables. Use `WOODPECKER_HOST` with a path instead
 - Pipelines without a config file will now be skipped instead of failing


@@ -233,10 +233,9 @@ func (c *Compiler) Compile(conf *yaml_types.Workflow) (*backend_types.Config, er
 		config.Stages = append(config.Stages, stage)
 	}
 
-	// add pipeline steps. 1 pipeline step per stage, at the moment
-	var stage *backend_types.Stage
-	var group string
-	for i, container := range conf.Steps.ContainerList {
+	// add pipeline steps
+	steps := make([]*dagCompilerStep, 0, len(conf.Steps.ContainerList))
+	for pos, container := range conf.Steps.ContainerList {
 		// Skip if local and should not run local
 		if c.local && !container.When.IsLocal() {
 			continue
@@ -248,16 +247,7 @@ func (c *Compiler) Compile(conf *yaml_types.Workflow) (*backend_types.Config, er
 			return nil, err
 		}
 
-		if stage == nil || group != container.Group || container.Group == "" {
-			group = container.Group
-			stage = new(backend_types.Stage)
-			stage.Name = fmt.Sprintf("%s_stage_%v", c.prefix, i)
-			stage.Alias = container.Name
-			config.Stages = append(config.Stages, stage)
-		}
-
-		name := fmt.Sprintf("%s_step_%d", c.prefix, i)
+		name := fmt.Sprintf("%s_step_%d", c.prefix, pos)
 
 		stepType := backend_types.StepTypeCommands
 		if container.IsPlugin() {
 			stepType = backend_types.StepTypePlugin
@@ -274,9 +264,23 @@ func (c *Compiler) Compile(conf *yaml_types.Workflow) (*backend_types.Config, er
 			}
 		}
 
-		stage.Steps = append(stage.Steps, step)
+		steps = append(steps, &dagCompilerStep{
+			step:      step,
+			position:  pos,
+			name:      container.Name,
+			group:     container.Group,
+			dependsOn: container.DependsOn,
+		})
 	}
 
+	// generate stages out of steps
+	stepStages, err := newDAGCompiler(steps, c.prefix).compile()
+	if err != nil {
+		return nil, err
+	}
+	config.Stages = append(config.Stages, stepStages...)
+
 	err = c.setupCacheRebuild(conf, config)
 	if err != nil {
 		return nil, err


@@ -99,120 +99,199 @@ func TestCompilerCompile(t *testing.T) {
 		fronConf    *yaml_types.Workflow
 		backConf    *backend_types.Config
 		expectedErr string
-	}{{
-		name:     "empty workflow, no clone",
-		fronConf: &yaml_types.Workflow{SkipClone: true},
-		backConf: &backend_types.Config{
-			Networks: defaultNetworks,
-			Volumes:  defaultVolumes,
-		},
-	}, {
-		name:     "empty workflow, default clone",
-		fronConf: &yaml_types.Workflow{},
-		backConf: &backend_types.Config{
-			Networks: defaultNetworks,
-			Volumes:  defaultVolumes,
-			Stages:   []*backend_types.Stage{defaultCloneStage},
-		},
-	}, {
-		name: "workflow with one dummy step",
-		fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
-			Name:  "dummy",
-			Image: "dummy_img",
-		}}}},
-		backConf: &backend_types.Config{
-			Networks: defaultNetworks,
-			Volumes:  defaultVolumes,
-			Stages: []*backend_types.Stage{defaultCloneStage, {
-				Name:  "test_stage_0",
-				Alias: "dummy",
-				Steps: []*backend_types.Step{{
-					Name:       "test_step_0",
-					Alias:      "dummy",
-					Type:       backend_types.StepTypePlugin,
-					Image:      "dummy_img",
-					OnSuccess:  true,
-					Failure:    "fail",
-					Volumes:    []string{defaultVolumes[0].Name + ":"},
-					Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"dummy"}}},
-					ExtraHosts: []backend_types.HostAlias{},
-				}},
-			}},
-		},
-	}, {
-		name: "workflow with three steps and one group",
-		fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
-			Name:     "echo env",
-			Image:    "bash",
-			Commands: []string{"env"},
-		}, {
-			Name:     "parallel echo 1",
-			Group:    "parallel",
-			Image:    "bash",
-			Commands: []string{"echo 1"},
-		}, {
-			Name:     "parallel echo 2",
-			Group:    "parallel",
-			Image:    "bash",
-			Commands: []string{"echo 2"},
-		}}}},
-		backConf: &backend_types.Config{
-			Networks: defaultNetworks,
-			Volumes:  defaultVolumes,
-			Stages: []*backend_types.Stage{defaultCloneStage, {
-				Name:  "test_stage_0",
-				Alias: "echo env",
-				Steps: []*backend_types.Step{{
-					Name:       "test_step_0",
-					Alias:      "echo env",
-					Type:       backend_types.StepTypeCommands,
-					Image:      "bash",
-					Commands:   []string{"env"},
-					OnSuccess:  true,
-					Failure:    "fail",
-					Volumes:    []string{defaultVolumes[0].Name + ":"},
-					Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo env"}}},
-					ExtraHosts: []backend_types.HostAlias{},
-				}},
-			}, {
-				Name:  "test_stage_1",
-				Alias: "parallel echo 1",
-				Steps: []*backend_types.Step{{
-					Name:       "test_step_1",
-					Alias:      "parallel echo 1",
-					Type:       backend_types.StepTypeCommands,
-					Image:      "bash",
-					Commands:   []string{"echo 1"},
-					OnSuccess:  true,
-					Failure:    "fail",
-					Volumes:    []string{defaultVolumes[0].Name + ":"},
-					Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"parallel echo 1"}}},
-					ExtraHosts: []backend_types.HostAlias{},
-				}, {
-					Name:       "test_step_2",
-					Alias:      "parallel echo 2",
-					Type:       backend_types.StepTypeCommands,
-					Image:      "bash",
-					Commands:   []string{"echo 2"},
-					OnSuccess:  true,
-					Failure:    "fail",
-					Volumes:    []string{defaultVolumes[0].Name + ":"},
-					Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"parallel echo 2"}}},
-					ExtraHosts: []backend_types.HostAlias{},
-				}},
-			}},
-		},
-	}, {
-		name: "workflow with missing secret",
-		fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
-			Name:     "step",
-			Image:    "bash",
-			Commands: []string{"env"},
-			Secrets:  yaml_types.Secrets{Secrets: []*yaml_types.Secret{{Source: "missing", Target: "missing"}}},
-		}}}},
-		backConf:    nil,
-		expectedErr: "secret \"missing\" not found or not allowed to be used",
-	}}
+	}{
+		{
+			name:     "empty workflow, no clone",
+			fronConf: &yaml_types.Workflow{SkipClone: true},
+			backConf: &backend_types.Config{
+				Networks: defaultNetworks,
+				Volumes:  defaultVolumes,
+			},
+		},
+		{
+			name:     "empty workflow, default clone",
+			fronConf: &yaml_types.Workflow{},
+			backConf: &backend_types.Config{
+				Networks: defaultNetworks,
+				Volumes:  defaultVolumes,
+				Stages:   []*backend_types.Stage{defaultCloneStage},
+			},
+		},
+		{
+			name: "workflow with one dummy step",
+			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
+				Name:  "dummy",
+				Image: "dummy_img",
+			}}}},
+			backConf: &backend_types.Config{
+				Networks: defaultNetworks,
+				Volumes:  defaultVolumes,
+				Stages: []*backend_types.Stage{defaultCloneStage, {
+					Name:  "test_stage_0",
+					Alias: "dummy",
+					Steps: []*backend_types.Step{{
+						Name:       "test_step_0",
+						Alias:      "dummy",
+						Type:       backend_types.StepTypePlugin,
+						Image:      "dummy_img",
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"dummy"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}},
+				}},
+			},
+		},
+		{
+			name: "workflow with three steps and one group",
+			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
+				Name:     "echo env",
+				Image:    "bash",
+				Commands: []string{"env"},
+			}, {
+				Name:     "parallel echo 1",
+				Group:    "parallel",
+				Image:    "bash",
+				Commands: []string{"echo 1"},
+			}, {
+				Name:     "parallel echo 2",
+				Group:    "parallel",
+				Image:    "bash",
+				Commands: []string{"echo 2"},
+			}}}},
+			backConf: &backend_types.Config{
+				Networks: defaultNetworks,
+				Volumes:  defaultVolumes,
+				Stages: []*backend_types.Stage{defaultCloneStage, {
+					Name:  "test_stage_0",
+					Alias: "echo env",
+					Steps: []*backend_types.Step{{
+						Name:       "test_step_0",
+						Alias:      "echo env",
+						Type:       backend_types.StepTypeCommands,
+						Image:      "bash",
+						Commands:   []string{"env"},
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo env"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}},
+				}, {
+					Name:  "test_stage_1",
+					Alias: "parallel echo 1",
+					Steps: []*backend_types.Step{{
+						Name:       "test_step_1",
+						Alias:      "parallel echo 1",
+						Type:       backend_types.StepTypeCommands,
+						Image:      "bash",
+						Commands:   []string{"echo 1"},
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"parallel echo 1"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}, {
+						Name:       "test_step_2",
+						Alias:      "parallel echo 2",
+						Type:       backend_types.StepTypeCommands,
+						Image:      "bash",
+						Commands:   []string{"echo 2"},
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"parallel echo 2"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}},
+				}},
+			},
+		},
+		{
+			name: "workflow with three steps and depends_on",
+			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
+				Name:     "echo env",
+				Image:    "bash",
+				Commands: []string{"env"},
+			}, {
+				Name:      "echo 1",
+				Image:     "bash",
+				Commands:  []string{"echo 1"},
+				DependsOn: []string{"echo env", "echo 2"},
+			}, {
+				Name:     "echo 2",
+				Image:    "bash",
+				Commands: []string{"echo 2"},
+			}}}},
+			backConf: &backend_types.Config{
+				Networks: defaultNetworks,
+				Volumes:  defaultVolumes,
+				Stages: []*backend_types.Stage{defaultCloneStage, {
+					Name:  "test_stage_0",
+					Alias: "test_stage_0",
+					Steps: []*backend_types.Step{{
+						Name:       "test_step_0",
+						Alias:      "echo env",
+						Type:       backend_types.StepTypeCommands,
+						Image:      "bash",
+						Commands:   []string{"env"},
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo env"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}, {
+						Name:       "test_step_2",
+						Alias:      "echo 2",
+						Type:       backend_types.StepTypeCommands,
+						Image:      "bash",
+						Commands:   []string{"echo 2"},
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo 2"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}},
+				}, {
+					Name:  "test_stage_1",
+					Alias: "test_stage_1",
+					Steps: []*backend_types.Step{{
+						Name:       "test_step_1",
+						Alias:      "echo 1",
+						Type:       backend_types.StepTypeCommands,
+						Image:      "bash",
+						Commands:   []string{"echo 1"},
+						OnSuccess:  true,
+						Failure:    "fail",
+						Volumes:    []string{defaultVolumes[0].Name + ":"},
+						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo 1"}}},
+						ExtraHosts: []backend_types.HostAlias{},
+					}},
+				}},
+			},
+		},
+		{
+			name: "workflow with missing secret",
+			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
+				Name:     "step",
+				Image:    "bash",
+				Commands: []string{"env"},
+				Secrets:  yaml_types.Secrets{Secrets: []*yaml_types.Secret{{Source: "missing", Target: "missing"}}},
+			}}}},
+			backConf:    nil,
+			expectedErr: "secret \"missing\" not found or not allowed to be used",
+		},
+		{
+			name: "workflow with broken step dependency",
+			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
+				Name:      "dummy",
+				Image:     "dummy_img",
+				DependsOn: []string{"not exist"},
+			}}}},
+			backConf:    nil,
+			expectedErr: "step 'dummy' depends on unknown step 'not exist'",
+		},
+	}
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {


@@ -0,0 +1,159 @@
// Copyright 2023 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import (
	"fmt"

	backend_types "go.woodpecker-ci.org/woodpecker/v2/pipeline/backend/types"
)

type dagCompilerStep struct {
	step      *backend_types.Step
	position  int
	name      string
	group     string
	dependsOn []string
}

type dagCompiler struct {
	steps  []*dagCompilerStep
	prefix string
}

func newDAGCompiler(steps []*dagCompilerStep, prefix string) dagCompiler {
	return dagCompiler{
		steps:  steps,
		prefix: prefix,
	}
}

func (c dagCompiler) isDAG() bool {
	for _, v := range c.steps {
		if len(v.dependsOn) != 0 {
			return true
		}
	}

	return false
}

func (c dagCompiler) compile() ([]*backend_types.Stage, error) {
	if c.isDAG() {
		return c.compileByDependsOn()
	}
	return c.compileByGroup()
}

func (c dagCompiler) compileByGroup() ([]*backend_types.Stage, error) {
	stages := make([]*backend_types.Stage, 0, len(c.steps))
	var currentStage *backend_types.Stage
	var currentGroup string

	for _, s := range c.steps {
		// create a new stage if current step is in a new group compared to last one
		if currentStage == nil || currentGroup != s.group || s.group == "" {
			currentGroup = s.group

			currentStage = new(backend_types.Stage)
			currentStage.Name = fmt.Sprintf("%s_stage_%v", c.prefix, s.position)
			currentStage.Alias = s.name
			stages = append(stages, currentStage)
		}

		// add step to current stage
		currentStage.Steps = append(currentStage.Steps, s.step)
	}

	return stages, nil
}

func (c dagCompiler) compileByDependsOn() ([]*backend_types.Stage, error) {
	stepMap := make(map[string]*dagCompilerStep, len(c.steps))
	for _, s := range c.steps {
		stepMap[s.name] = s
	}
	return convertDAGToStages(stepMap, c.prefix)
}

func dfsVisit(steps map[string]*dagCompilerStep, name string, visited map[string]struct{}, path []string) error {
	if _, ok := visited[name]; ok {
		return &ErrStepDependencyCycle{path: path}
	}

	visited[name] = struct{}{}
	path = append(path, name)

	for _, dep := range steps[name].dependsOn {
		if err := dfsVisit(steps, dep, visited, path); err != nil {
			return err
		}
	}

	return nil
}

func convertDAGToStages(steps map[string]*dagCompilerStep, prefix string) ([]*backend_types.Stage, error) {
	addedSteps := make(map[string]struct{})
	stages := make([]*backend_types.Stage, 0)

	for name, step := range steps {
		// check if all depends_on are valid
		for _, dep := range step.dependsOn {
			if _, ok := steps[dep]; !ok {
				return nil, &ErrStepMissingDependency{name: name, dep: dep}
			}
		}

		// check if there are cycles
		visited := make(map[string]struct{})
		if err := dfsVisit(steps, name, visited, []string{}); err != nil {
			return nil, err
		}
	}

	for len(steps) > 0 {
		addedNodesThisLevel := make(map[string]struct{})
		stage := &backend_types.Stage{
			Name:  fmt.Sprintf("%s_stage_%d", prefix, len(stages)),
			Alias: fmt.Sprintf("%s_stage_%d", prefix, len(stages)),
		}

		for name, step := range steps {
			if allDependenciesSatisfied(step, addedSteps) {
				stage.Steps = append(stage.Steps, step.step)
				addedNodesThisLevel[name] = struct{}{}
				delete(steps, name)
			}
		}

		for name := range addedNodesThisLevel {
			addedSteps[name] = struct{}{}
		}

		stages = append(stages, stage)
	}

	return stages, nil
}

func allDependenciesSatisfied(step *dagCompilerStep, addedSteps map[string]struct{}) bool {
	for _, childName := range step.dependsOn {
		_, ok := addedSteps[childName]
		if !ok {
			return false
		}
	}
	return true
}
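A minimal in-package sketch (not part of this commit) of how the DAG compiler above turns `depends_on` relations into sequential stages; it is written in the style of the existing tests, and the step names and the `example` prefix are illustrative only.

```go
package compiler

import (
	"testing"

	"github.com/stretchr/testify/assert"

	backend_types "go.woodpecker-ci.org/woodpecker/v2/pipeline/backend/types"
)

func TestDAGCompilerFanIn(t *testing.T) {
	steps := []*dagCompilerStep{
		{step: &backend_types.Step{Name: "build"}, position: 0, name: "build"},
		{step: &backend_types.Step{Name: "test"}, position: 1, name: "test"},
		{step: &backend_types.Step{Name: "deploy"}, position: 2, name: "deploy", dependsOn: []string{"build", "test"}},
	}

	stages, err := newDAGCompiler(steps, "example").compile()
	assert.NoError(t, err)

	// build and test have no dependencies, so they land in the first stage;
	// deploy waits for both of them and therefore forms a second stage.
	assert.Len(t, stages, 2)
	assert.Len(t, stages[0].Steps, 2)
	assert.Len(t, stages[1].Steps, 1)
	assert.Equal(t, "deploy", stages[1].Steps[0].Name)
}
```

Because `convertDAGToStages` emits one stage per "level" of the graph, a step in a later stage only starts once the steps of the earlier stages have finished.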


@@ -0,0 +1,62 @@
// Copyright 2023 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import (
	"testing"

	"github.com/stretchr/testify/assert"

	backend_types "go.woodpecker-ci.org/woodpecker/v2/pipeline/backend/types"
)

func TestConvertDAGToStages(t *testing.T) {
	steps := map[string]*dagCompilerStep{
		"step1": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step3"},
		},
		"step2": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step1"},
		},
		"step3": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step2"},
		},
	}
	_, err := convertDAGToStages(steps, "")
	assert.ErrorIs(t, err, &ErrStepDependencyCycle{})

	steps = map[string]*dagCompilerStep{
		"step1": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step2"},
		},
		"step2": {
			step: &backend_types.Step{},
		},
	}
	_, err = convertDAGToStages(steps, "")
	assert.NoError(t, err)

	steps = map[string]*dagCompilerStep{
		"step1": {
			step:      &backend_types.Step{},
			dependsOn: []string{"not-existing-step"},
		},
	}
	_, err = convertDAGToStages(steps, "")
	assert.ErrorIs(t, err, &ErrStepMissingDependency{})
}


@@ -28,3 +28,30 @@ func (*ErrExtraHostFormat) Is(target error) bool {
 	_, ok := target.(*ErrExtraHostFormat) //nolint:errorlint
 	return ok
 }
+
+type ErrStepMissingDependency struct {
+	name,
+	dep string
+}
+
+func (err *ErrStepMissingDependency) Error() string {
+	return fmt.Sprintf("step '%s' depends on unknown step '%s'", err.name, err.dep)
+}
+
+func (*ErrStepMissingDependency) Is(target error) bool {
+	_, ok := target.(*ErrStepMissingDependency) //nolint:errorlint
+	return ok
+}
+
+type ErrStepDependencyCycle struct {
+	path []string
+}
+
+func (err *ErrStepDependencyCycle) Error() string {
+	return fmt.Sprintf("cycle detected: %v", err.path)
+}
+
+func (*ErrStepDependencyCycle) Is(target error) bool {
+	_, ok := target.(*ErrStepDependencyCycle) //nolint:errorlint
+	return ok
+}
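Both error types implement `Is`, so callers can classify a compile failure by type with the standard `errors` package even though the `name`/`dep` and `path` fields are unexported. A minimal sketch of that usage, not part of this commit; the compiler package path is inferred from the module path used elsewhere in the diff, and the wrapped error is constructed directly only to illustrate the matching.

```go
package main

import (
	"errors"
	"fmt"

	// Package path assumed from the go.woodpecker-ci.org/woodpecker/v2 module layout.
	"go.woodpecker-ci.org/woodpecker/v2/pipeline/frontend/yaml/compiler"
)

func main() {
	// In practice err would come back from the compiler's Compile call.
	var err error = fmt.Errorf("compile failed: %w", &compiler.ErrStepMissingDependency{})

	switch {
	case errors.Is(err, &compiler.ErrStepMissingDependency{}):
		fmt.Println("a step depends on a step that does not exist")
	case errors.Is(err, &compiler.ErrStepDependencyCycle{}):
		fmt.Println("the depends_on relations form a cycle")
	case err != nil:
		fmt.Println("other error:", err)
	}
}
```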


@@ -254,6 +254,21 @@ func (l *Linter) lintDeprecations(config *WorkflowConfig) (err error) {
 		})
 	}
 
+	for _, step := range parsed.Steps.ContainerList {
+		if step.Group != "" {
+			err = multierr.Append(err, &errors.PipelineError{
+				Type:    errors.PipelineErrorTypeDeprecation,
+				Message: "Please use depends_on instead of deprecated 'group' setting",
+				Data: errors.DeprecationErrorData{
+					File:  config.File,
+					Field: "steps." + step.Name + ".group",
+					Docs:  "https://woodpecker-ci.org/docs/next/usage/workflow-syntax#depends_on",
+				},
+				IsWarning: true,
+			})
+		}
+	}
+
 	return err
 }


@@ -344,6 +344,12 @@
         "description": "Execute multiple steps with the same group key in parallel. Read more: https://woodpecker-ci.org/docs/usage/pipeline-syntax#step-group---parallel-execution",
         "type": "string"
       },
+      "depends_on": {
+        "description": "Execute a step after another step has finished.",
+        "type": "array",
+        "items": { "type": "string" },
+        "minLength": 1
+      },
       "detach": {
         "description": "Detach a step to run in background until pipeline finishes. Read more: https://woodpecker-ci.org/docs/usage/services#detachment",
         "type": "boolean"


@@ -48,6 +48,7 @@ type (
 		Volumes     Volumes              `yaml:"volumes,omitempty"`
 		When        constraint.When      `yaml:"when,omitempty"`
 		Ports       []base.StringOrInt   `yaml:"ports,omitempty"`
+		DependsOn   base.StringOrSlice   `yaml:"depends_on,omitempty"`
 
 		// Docker Specific
 		Privileged bool `yaml:"privileged,omitempty"`
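Because the new field is a `base.StringOrSlice`, `depends_on` can be given either as a single step name or as a list of names in the YAML. A minimal sketch of that behavior, not part of this commit; the import path of the `base` package, the use of `gopkg.in/yaml.v3`, and `StringOrSlice` being backed by `[]string` are assumptions based on the surrounding code.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"

	// Path assumed from the `base` usages in the struct above.
	"go.woodpecker-ci.org/woodpecker/v2/pipeline/frontend/yaml/types/base"
)

func main() {
	var single, list base.StringOrSlice

	// Both forms are expected to decode into the same slice of step names.
	_ = yaml.Unmarshal([]byte(`build`), &single)
	_ = yaml.Unmarshal([]byte(`[build, test]`), &list)

	fmt.Println(single) // [build]
	fmt.Println(list)   // [build test]
}
```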


@@ -354,7 +354,7 @@ func (s *RPC) RegisterAgent(ctx context.Context, platform, backend, version stri
 func (s *RPC) UnregisterAgent(ctx context.Context) error {
 	agent, err := s.getAgentFromContext(ctx)
-	if agent.OwnerID > 0 {
+	if !agent.IsSystemAgent() {
 		// registered with individual agent token -> do not unregister
 		return nil
 	}