Renamed procs/jobs to steps in code (#1331)

Renamed `procs` to `steps` in code for issue #1288

Co-authored-by: Harikesh Prajapati <harikesh.prajapati@druva.com>
Co-authored-by: qwerty287 <ndev@web.de>
Co-authored-by: qwerty287 <80460567+qwerty287@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
Authored by Harikesh00 on 2022-10-28 21:08:53 +05:30; committed by GitHub
parent b44e895017
commit 36e42914fa
109 changed files with 1474 additions and 1364 deletions
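
The rename keeps the deprecated names available: the agent still honours `WOODPECKER_MAX_PROCS` as a fallback for `WOODPECKER_MAX_WORKFLOWS`, and pipelines still receive the `CI_JOB_*` variables next to the new `CI_STEP_*` ones. As a hypothetical illustration (not part of this commit), a Go plugin that wants to run against both old and new servers could prefer the new variables and fall back to the deprecated ones:

```go
package main

import (
	"fmt"
	"os"
)

// envOr returns the value of the first environment variable in names that is set,
// mirroring the new-name-first, deprecated-name-second fallback used by this commit.
func envOr(names ...string) string {
	for _, n := range names {
		if v, ok := os.LookupEnv(n); ok {
			return v
		}
	}
	return ""
}

func main() {
	number := envOr("CI_STEP_NUMBER", "CI_JOB_NUMBER") // CI_JOB_NUMBER is deprecated but still exported
	status := envOr("CI_STEP_STATUS", "CI_JOB_STATUS")
	fmt.Printf("step %s finished with status %q\n", number, status)
}
```

Running this sketch with only the old variable set (e.g. `CI_JOB_NUMBER=3`) still yields a value, which is the point of keeping the deprecated names around.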


@ -23,7 +23,7 @@ tasks:
env: env:
WOODPECKER_SERVER: localhost:9000 WOODPECKER_SERVER: localhost:9000
WOODPECKER_SECRET: "1234" WOODPECKER_SECRET: "1234"
WOODPECKER_MAX_PROCS: 1 WOODPECKER_MAX_WORKFLOWS: 1
WOODPECKER_HEALTHCHECK: false WOODPECKER_HEALTHCHECK: false
command: | command: |
gp sync-await woodpecker-server gp sync-await woodpecker-server


@ -67,7 +67,7 @@ func (r *Runner) Run(ctx context.Context) error {
meta, _ := metadata.FromOutgoingContext(ctx) meta, _ := metadata.FromOutgoingContext(ctx)
ctxmeta := metadata.NewOutgoingContext(context.Background(), meta) ctxmeta := metadata.NewOutgoingContext(context.Background(), meta)
// get the next job from the queue // get the next workflow from the queue
work, err := r.client.Next(ctx, r.filter) work, err := r.client.Next(ctx, r.filter)
if err != nil { if err != nil {
return err return err
@ -150,10 +150,10 @@ func (r *Runner) Run(ctx context.Context) error {
} }
var uploads sync.WaitGroup var uploads sync.WaitGroup
defaultLogger := pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error { defaultLogger := pipeline.LogFunc(func(step *backend.Step, rc multipart.Reader) error {
loglogger := logger.With(). loglogger := logger.With().
Str("image", proc.Image). Str("image", step.Image).
Str("stage", proc.Alias). Str("stage", step.Alias).
Logger() Logger()
part, rerr := rc.NextPart() part, rerr := rc.NextPart()
@ -172,7 +172,7 @@ func (r *Runner) Run(ctx context.Context) error {
loglogger.Debug().Msg("log stream opened") loglogger.Debug().Msg("log stream opened")
limitedPart := io.LimitReader(part, maxLogsUpload) limitedPart := io.LimitReader(part, maxLogsUpload)
logStream := rpc.NewLineWriter(r.client, work.ID, proc.Alias, secrets...) logStream := rpc.NewLineWriter(r.client, work.ID, step.Alias, secrets...)
if _, err := io.Copy(logStream, limitedPart); err != nil { if _, err := io.Copy(logStream, limitedPart); err != nil {
log.Error().Err(err).Msg("copy limited logStream part") log.Error().Err(err).Msg("copy limited logStream part")
} }
@ -186,7 +186,7 @@ func (r *Runner) Run(ctx context.Context) error {
file := &rpc.File{ file := &rpc.File{
Mime: "application/json+logs", Mime: "application/json+logs",
Proc: proc.Alias, Step: step.Alias,
Name: "logs.json", Name: "logs.json",
Data: data, Data: data,
Size: len(data), Size: len(data),
@ -218,7 +218,7 @@ func (r *Runner) Run(ctx context.Context) error {
file = &rpc.File{ file = &rpc.File{
Mime: part.Header().Get("Content-Type"), Mime: part.Header().Get("Content-Type"),
Proc: proc.Alias, Step: step.Alias,
Name: part.FileName(), Name: part.FileName(),
Data: data, Data: data,
Size: len(data), Size: len(data),
@ -250,7 +250,7 @@ func (r *Runner) Run(ctx context.Context) error {
}) })
defaultTracer := pipeline.TraceFunc(func(state *pipeline.State) error { defaultTracer := pipeline.TraceFunc(func(state *pipeline.State) error {
proclogger := logger.With(). steplogger := logger.With().
Str("image", state.Pipeline.Step.Image). Str("image", state.Pipeline.Step.Image).
Str("stage", state.Pipeline.Step.Alias). Str("stage", state.Pipeline.Step.Alias).
Err(state.Process.Error). Err(state.Process.Error).
@ -258,27 +258,27 @@ func (r *Runner) Run(ctx context.Context) error {
Bool("exited", state.Process.Exited). Bool("exited", state.Process.Exited).
Logger() Logger()
procState := rpc.State{ stepState := rpc.State{
Proc: state.Pipeline.Step.Alias, Step: state.Pipeline.Step.Alias,
Exited: state.Process.Exited, Exited: state.Process.Exited,
ExitCode: state.Process.ExitCode, ExitCode: state.Process.ExitCode,
Started: time.Now().Unix(), // TODO do not do this Started: time.Now().Unix(), // TODO do not do this
Finished: time.Now().Unix(), Finished: time.Now().Unix(),
} }
if state.Process.Error != nil { if state.Process.Error != nil {
procState.Error = state.Process.Error.Error() stepState.Error = state.Process.Error.Error()
} }
defer func() { defer func() {
proclogger.Debug().Msg("update step status") steplogger.Debug().Msg("update step status")
if uerr := r.client.Update(ctxmeta, work.ID, procState); uerr != nil { if uerr := r.client.Update(ctxmeta, work.ID, stepState); uerr != nil {
proclogger.Debug(). steplogger.Debug().
Err(uerr). Err(uerr).
Msg("update step status error") Msg("update step status error")
} }
proclogger.Debug().Msg("update step status complete") steplogger.Debug().Msg("update step status complete")
}() }()
if state.Process.Exited { if state.Process.Exited {
return nil return nil
@ -289,22 +289,29 @@ func (r *Runner) Run(ctx context.Context) error {
// TODO: find better way to update this state and move it to pipeline to have the same env in cli-exec // TODO: find better way to update this state and move it to pipeline to have the same env in cli-exec
state.Pipeline.Step.Environment["CI_MACHINE"] = r.hostname state.Pipeline.Step.Environment["CI_MACHINE"] = r.hostname
state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "success" state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_PIPELINE_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10) state.Pipeline.Step.Environment["CI_PIPELINE_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_PIPELINE_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10) state.Pipeline.Step.Environment["CI_PIPELINE_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
state.Pipeline.Step.Environment["CI_STEP_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_STEP_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_STEP_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
state.Pipeline.Step.Environment["CI_SYSTEM_ARCH"] = runtime.GOOS + "/" + runtime.GOARCH
// DEPRECATED // DEPRECATED
state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "success" state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10) state.Pipeline.Step.Environment["CI_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10) state.Pipeline.Step.Environment["CI_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "success" state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10) state.Pipeline.Step.Environment["CI_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10) state.Pipeline.Step.Environment["CI_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
state.Pipeline.Step.Environment["CI_SYSTEM_ARCH"] = runtime.GOOS + "/" + runtime.GOARCH
if state.Pipeline.Error != nil { if state.Pipeline.Error != nil {
state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "failure" state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "failure"
state.Pipeline.Step.Environment["CI_STEP_STATUS"] = "failure"
// DEPRECATED // DEPRECATED
state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "failure" state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "failure"
state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "failure" state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "failure"


@ -53,7 +53,7 @@ var Command = &cli.Command{
&cli.StringSliceFlag{ &cli.StringSliceFlag{
Name: "param", Name: "param",
Aliases: []string{"p"}, Aliases: []string{"p"},
Usage: "custom parameters to be injected into the job environment. Format: KEY=value", Usage: "custom parameters to be injected into the step environment. Format: KEY=value",
}, },
), ),
} }


@ -112,7 +112,7 @@ func execWithAxis(c *cli.Context, file, repoPath string, axis matrix.Axis) error
metadata := metadataFromContext(c, axis) metadata := metadataFromContext(c, axis)
environ := metadata.Environ() environ := metadata.Environ()
var secrets []compiler.Secret var secrets []compiler.Secret
for key, val := range metadata.Job.Matrix { for key, val := range metadata.Step.Matrix {
environ[key] = val environ[key] = val
secrets = append(secrets, compiler.Secret{ secrets = append(secrets, compiler.Secret{
Name: key, Name: key,
@ -289,8 +289,8 @@ func metadataFromContext(c *cli.Context, axis matrix.Axis) frontend.Metadata {
}, },
}, },
}, },
Job: frontend.Job{ Step: frontend.Step{
Number: c.Int("job-number"), Number: c.Int("step-number"),
Matrix: axis, Matrix: axis,
}, },
Sys: frontend.System{ Sys: frontend.System{
@ -312,13 +312,13 @@ func convertPathForWindows(path string) string {
return filepath.ToSlash(path) return filepath.ToSlash(path)
} }
var defaultLogger = pipeline.LogFunc(func(proc *backendTypes.Step, rc multipart.Reader) error { var defaultLogger = pipeline.LogFunc(func(step *backendTypes.Step, rc multipart.Reader) error {
part, err := rc.NextPart() part, err := rc.NextPart()
if err != nil { if err != nil {
return err return err
} }
logStream := NewLineWriter(proc.Alias) logStream := NewLineWriter(step.Alias)
_, err = io.Copy(logStream, part) _, err = io.Copy(logStream, part)
return err return err
}) })


@ -260,8 +260,8 @@ var flags = []cli.Flag{
Name: "prev-commit-author-email", Name: "prev-commit-author-email",
}, },
&cli.IntFlag{ &cli.IntFlag{
EnvVars: []string{"CI_JOB_NUMBER"}, EnvVars: []string{"CI_STEP_NUMBER", "CI_JOB_NUMBER"},
Name: "job-number", Name: "step-number",
}, },
&cli.StringSliceFlag{ &cli.StringSliceFlag{
EnvVars: []string{"CI_ENV"}, EnvVars: []string{"CI_ENV"},


@ -32,7 +32,7 @@ const (
// Line is a line of console output. // Line is a line of console output.
type Line struct { type Line struct {
Proc string `json:"proc,omitempty"` Step string `json:"step,omitempty"`
Time int64 `json:"time,omitempty"` Time int64 `json:"time,omitempty"`
Type int `json:"type,omitempty"` Type int `json:"type,omitempty"`
Pos int `json:"pos,omitempty"` Pos int `json:"pos,omitempty"`
@ -66,7 +66,7 @@ func (w *LineWriter) Write(p []byte) (n int, err error) {
line := &Line{ line := &Line{
Out: out, Out: out,
Proc: w.name, Step: w.name,
Pos: w.num, Pos: w.num,
Time: int64(time.Since(w.now).Seconds()), Time: int64(time.Since(w.now).Seconds()),
Type: LineStdout, Type: LineStdout,


@ -27,7 +27,7 @@ import (
var pipelineLogsCmd = &cli.Command{ var pipelineLogsCmd = &cli.Command{
Name: "logs", Name: "logs",
Usage: "show pipeline logs", Usage: "show pipeline logs",
ArgsUsage: "<repo/name> [pipeline] [job]", ArgsUsage: "<repo/name> [pipeline] [step]",
Action: pipelineLogs, Action: pipelineLogs,
Flags: common.GlobalFlags, Flags: common.GlobalFlags,
} }
@ -44,7 +44,7 @@ func pipelineLogs(c *cli.Context) error {
return err return err
} }
job, err := strconv.Atoi(c.Args().Get(2)) step, err := strconv.Atoi(c.Args().Get(2))
if err != nil { if err != nil {
return err return err
} }
@ -54,7 +54,7 @@ func pipelineLogs(c *cli.Context) error {
return err return err
} }
logs, err := client.PipelineLogs(owner, name, number, job) logs, err := client.PipelineLogs(owner, name, number, step)
if err != nil { if err != nil {
return err return err
} }


@ -76,8 +76,8 @@ func pipelinePs(c *cli.Context) error {
return err return err
} }
for _, proc := range pipeline.Procs { for _, step := range pipeline.Steps {
for _, child := range proc.Children { for _, child := range step.Children {
if err := tmpl.Execute(os.Stdout, child); err != nil { if err := tmpl.Execute(os.Stdout, child); err != nil {
return err return err
} }
@ -88,7 +88,7 @@ func pipelinePs(c *cli.Context) error {
} }
// template for pipeline ps information // template for pipeline ps information
var tmplPipelinePs = "\x1b[33mProc #{{ .PID }} \x1b[0m" + ` var tmplPipelinePs = "\x1b[33mStep #{{ .PID }} \x1b[0m" + `
Step: {{ .Name }} Step: {{ .Name }}
State: {{ .State }} State: {{ .State }}
` `


@ -34,7 +34,7 @@ var pipelineStartCmd = &cli.Command{
&cli.StringSliceFlag{ &cli.StringSliceFlag{
Name: "param", Name: "param",
Aliases: []string{"p"}, Aliases: []string{"p"},
Usage: "custom parameters to be injected into the job environment. Format: KEY=value", Usage: "custom parameters to be injected into the step environment. Format: KEY=value",
}, },
), ),
} }
@ -62,7 +62,7 @@ func pipelineStart(c *cli.Context) (err error) {
number = pipeline.Number number = pipeline.Number
} else { } else {
if len(pipelineArg) == 0 { if len(pipelineArg) == 0 {
return errors.New("missing job number") return errors.New("missing step number")
} }
number, err = strconv.Atoi(pipelineArg) number, err = strconv.Atoi(pipelineArg)
if err != nil { if err != nil {


@ -27,7 +27,7 @@ import (
var pipelineStopCmd = &cli.Command{ var pipelineStopCmd = &cli.Command{
Name: "stop", Name: "stop",
Usage: "stop a pipeline", Usage: "stop a pipeline",
ArgsUsage: "<repo/name> [pipeline] [job]", ArgsUsage: "<repo/name> [pipeline] [step]",
Flags: common.GlobalFlags, Flags: common.GlobalFlags,
Action: pipelineStop, Action: pipelineStop,
} }
@ -42,9 +42,9 @@ func pipelineStop(c *cli.Context) (err error) {
if err != nil { if err != nil {
return err return err
} }
job, _ := strconv.Atoi(c.Args().Get(2)) step, _ := strconv.Atoi(c.Args().Get(2))
if job == 0 { if step == 0 {
job = 1 step = 1
} }
client, err := internal.NewClient(c) client, err := internal.NewClient(c)
@ -52,11 +52,11 @@ func pipelineStop(c *cli.Context) (err error) {
return err return err
} }
err = client.PipelineStop(owner, name, number, job) err = client.PipelineStop(owner, name, number, step)
if err != nil { if err != nil {
return err return err
} }
fmt.Printf("Stopping pipeline %s/%s#%d.%d\n", owner, name, number, job) fmt.Printf("Stopping pipeline %s/%s#%d.%d\n", owner, name, number, step)
return nil return nil
} }


@ -85,7 +85,7 @@ func loop(c *cli.Context) error {
log.Logger = log.With().Caller().Logger() log.Logger = log.With().Caller().Logger()
} }
counter.Polling = c.Int("max-procs") counter.Polling = c.Int("max-workflows")
counter.Running = 0 counter.Running = 0
if c.Bool("healthcheck") { if c.Bool("healthcheck") {
@ -139,7 +139,7 @@ func loop(c *cli.Context) error {
backend.Init(context.WithValue(ctx, types.CliContext, c)) backend.Init(context.WithValue(ctx, types.CliContext, c))
var wg sync.WaitGroup var wg sync.WaitGroup
parallel := c.Int("max-procs") parallel := c.Int("max-workflows")
wg.Add(parallel) wg.Add(parallel)
// new engine // new engine
@ -169,7 +169,7 @@ func loop(c *cli.Context) error {
return return
} }
log.Debug().Msg("polling new jobs") log.Debug().Msg("polling new steps")
if err := r.Run(ctx); err != nil { if err := r.Run(ctx); err != nil {
log.Error().Err(err).Msg("pipeline done with error") log.Error().Err(err).Msg("pipeline done with error")
return return


@ -79,8 +79,8 @@ var flags = []cli.Flag{
Usage: "List of labels to filter tasks on. An agent must be assigned every tag listed in a task to be selected.", Usage: "List of labels to filter tasks on. An agent must be assigned every tag listed in a task to be selected.",
}, },
&cli.IntFlag{ &cli.IntFlag{
EnvVars: []string{"WOODPECKER_MAX_PROCS"}, EnvVars: []string{"WOODPECKER_MAX_WORKFLOWS", "WOODPECKER_MAX_PROCS"},
Name: "max-procs", Name: "max-workflows",
Usage: "agent parallel workflows", Usage: "agent parallel workflows",
Value: 1, Value: 1,
}, },
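
Deployments that still export `WOODPECKER_MAX_PROCS` keep working because the flag lists both variables; with urfave/cli v2 (the library already used here), the first entry in `EnvVars` that is set wins. A reduced, hypothetical sketch of that fallback behaviour, not the actual agent wiring:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.IntFlag{
				Name:    "max-workflows",
				EnvVars: []string{"WOODPECKER_MAX_WORKFLOWS", "WOODPECKER_MAX_PROCS"}, // new name first, deprecated fallback
				Value:   1,
				Usage:   "agent parallel workflows",
			},
		},
		Action: func(c *cli.Context) error {
			fmt.Println("parallel workflows:", c.Int("max-workflows"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```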


@ -304,20 +304,20 @@ func setupCoding(c *cli.Context) (remote.Remote, error) {
} }
func setupMetrics(g *errgroup.Group, _store store.Store) { func setupMetrics(g *errgroup.Group, _store store.Store) {
pendingJobs := promauto.NewGauge(prometheus.GaugeOpts{ pendingSteps := promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "woodpecker", Namespace: "woodpecker",
Name: "pending_jobs", Name: "pending_steps",
Help: "Total number of pending pipeline processes.", Help: "Total number of pending pipeline steps.",
}) })
waitingJobs := promauto.NewGauge(prometheus.GaugeOpts{ waitingSteps := promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "woodpecker", Namespace: "woodpecker",
Name: "waiting_jobs", Name: "waiting_steps",
Help: "Total number of pipeline waiting on deps.", Help: "Total number of pipeline waiting on deps.",
}) })
runningJobs := promauto.NewGauge(prometheus.GaugeOpts{ runningSteps := promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "woodpecker", Namespace: "woodpecker",
Name: "running_jobs", Name: "running_steps",
Help: "Total number of running pipeline processes.", Help: "Total number of running pipeline steps.",
}) })
workers := promauto.NewGauge(prometheus.GaugeOpts{ workers := promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "woodpecker", Namespace: "woodpecker",
@ -343,9 +343,9 @@ func setupMetrics(g *errgroup.Group, _store store.Store) {
g.Go(func() error { g.Go(func() error {
for { for {
stats := server.Config.Services.Queue.Info(context.TODO()) stats := server.Config.Services.Queue.Info(context.TODO())
pendingJobs.Set(float64(stats.Stats.Pending)) pendingSteps.Set(float64(stats.Stats.Pending))
waitingJobs.Set(float64(stats.Stats.WaitingOnDeps)) waitingSteps.Set(float64(stats.Stats.WaitingOnDeps))
runningJobs.Set(float64(stats.Stats.Running)) runningSteps.Set(float64(stats.Stats.Running))
workers.Set(float64(stats.Stats.Workers)) workers.Set(float64(stats.Stats.Workers))
time.Sleep(500 * time.Millisecond) time.Sleep(500 * time.Millisecond)
} }


@ -28,4 +28,4 @@ services:
environment: environment:
- WOODPECKER_SERVER=woodpecker-server:9000 - WOODPECKER_SERVER=woodpecker-server:9000
- WOODPECKER_AGENT_SECRET=${WOODPECKER_AGENT_SECRET} - WOODPECKER_AGENT_SECRET=${WOODPECKER_AGENT_SECRET}
- WOODPECKER_MAX_PROCS=2 - WOODPECKER_MAX_WORKFLOWS=2


@ -75,7 +75,7 @@ This is the reference list of all environment variables available to your pipeli
| `CI_COMMIT_AUTHOR_AVATAR` | commit author avatar | | `CI_COMMIT_AUTHOR_AVATAR` | commit author avatar |
| | **Current pipeline** | | | **Current pipeline** |
| `CI_PIPELINE_NUMBER` | pipeline number | | `CI_PIPELINE_NUMBER` | pipeline number |
| `CI_PIPELINE_PARENT` | number of parent pipeline | | `CI_PIPELINE_PARENT` | number of parent pipeline |
| `CI_PIPELINE_EVENT` | pipeline event (push, pull_request, tag, deployment) | | `CI_PIPELINE_EVENT` | pipeline event (push, pull_request, tag, deployment) |
| `CI_PIPELINE_LINK` | pipeline link in CI | | `CI_PIPELINE_LINK` | pipeline link in CI |
| `CI_PIPELINE_DEPLOY_TARGET` | pipeline deploy target for `deployment` events (ie production) | | `CI_PIPELINE_DEPLOY_TARGET` | pipeline deploy target for `deployment` events (ie production) |
@ -83,11 +83,11 @@ This is the reference list of all environment variables available to your pipeli
| `CI_PIPELINE_CREATED` | pipeline created UNIX timestamp | | `CI_PIPELINE_CREATED` | pipeline created UNIX timestamp |
| `CI_PIPELINE_STARTED` | pipeline started UNIX timestamp | | `CI_PIPELINE_STARTED` | pipeline started UNIX timestamp |
| `CI_PIPELINE_FINISHED` | pipeline finished UNIX timestamp | | `CI_PIPELINE_FINISHED` | pipeline finished UNIX timestamp |
| | **Current job** | | | **Current step** |
| `CI_JOB_NUMBER` | job number | | `CI_STEP_NUMBER` | step number |
| `CI_JOB_STATUS` | job status (success, failure) | | `CI_STEP_STATUS` | step status (success, failure) |
| `CI_JOB_STARTED` | job started UNIX timestamp | | `CI_STEP_STARTED` | step started UNIX timestamp |
| `CI_JOB_FINISHED` | job finished UNIX timestamp | | `CI_STEP_FINISHED` | step finished UNIX timestamp |
| | **Previous commit** | | | **Previous commit** |
| `CI_PREV_COMMIT_SHA` | previous commit SHA | | `CI_PREV_COMMIT_SHA` | previous commit SHA |
| `CI_PREV_COMMIT_REF` | previous commit ref | | `CI_PREV_COMMIT_REF` | previous commit ref |


@ -9,7 +9,7 @@ A Woodpecker deployment consists of two parts:
> >
> If you have 4 agents installed and connected to the Woodpecker server, your system will process 4 builds in parallel. > If you have 4 agents installed and connected to the Woodpecker server, your system will process 4 builds in parallel.
> >
> You can add more agents to increase the number of parallel builds or set the agent's `WOODPECKER_MAX_PROCS=1` environment variable to increase the number of parallel builds for that agent. > You can add more agents to increase the number of parallel steps or set the agent's `WOODPECKER_MAX_WORKFLOWS=1` environment variable to increase the number of parallel workflows for that agent.
## Installation ## Installation


@ -17,11 +17,11 @@ services:
The following are automatically set and can be overridden: The following are automatically set and can be overridden:
- WOODPECKER_HOSTNAME if not set, becomes the OS' hostname - WOODPECKER_HOSTNAME if not set, becomes the OS' hostname
- WOODPECKER_MAX_PROCS if not set, defaults to 1 - WOODPECKER_MAX_WORKFLOWS if not set, defaults to 1
## Processes per agent ## Processes per agent
By default the maximum processes that are run per agent is 1. If required you can add `WOODPECKER_MAX_PROCS` to increase your parallel processing on a per-agent basis. By default the maximum processes that are run per agent is 1. If required you can add `WOODPECKER_MAX_WORKFLOWS` to increase your parallel processing on a per-agent basis.
```yaml ```yaml
# docker-compose.yml # docker-compose.yml
@ -33,7 +33,7 @@ services:
environment: environment:
- WOODPECKER_SERVER=localhost:9000 - WOODPECKER_SERVER=localhost:9000
- WOODPECKER_AGENT_SECRET="your-shared-secret-goes-here" - WOODPECKER_AGENT_SECRET="your-shared-secret-goes-here"
+ - WOODPECKER_MAX_PROCS=4 + - WOODPECKER_MAX_WORKFLOWS=4
``` ```
## All agent configuration options ## All agent configuration options
@ -80,10 +80,10 @@ Disable colored debug output.
Configures the agent hostname. Configures the agent hostname.
### `WOODPECKER_MAX_PROCS` ### `WOODPECKER_MAX_WORKFLOWS`
> Default: `1` > Default: `1`
Configures the number of parallel builds. Configures the number of parallel workflows.
### `WOODPECKER_FILTER_LABELS` ### `WOODPECKER_FILTER_LABELS`
> Default: empty > Default: empty


@ -35,7 +35,7 @@ scrape_configs:
List of Prometheus metrics specific to Woodpecker: List of Prometheus metrics specific to Woodpecker:
``` ```
# HELP woodpecker_pipeline_count Build count. # HELP woodpecker_pipeline_count Pipeline count.
# TYPE woodpecker_pipeline_count counter # TYPE woodpecker_pipeline_count counter
woodpecker_build_count{branch="master",pipeline="total",repo="woodpecker-ci/woodpecker",status="success"} 3 woodpecker_build_count{branch="master",pipeline="total",repo="woodpecker-ci/woodpecker",status="success"} 3
woodpecker_build_count{branch="mkdocs",pipeline="total",repo="woodpecker-ci/woodpecker",status="success"} 3 woodpecker_build_count{branch="mkdocs",pipeline="total",repo="woodpecker-ci/woodpecker",status="success"} 3
@ -46,21 +46,21 @@ woodpecker_build_time{branch="mkdocs",pipeline="total",repo="woodpecker-ci/woodp
# HELP woodpecker_pipeline_total_count Total number of builds. # HELP woodpecker_pipeline_total_count Total number of builds.
# TYPE woodpecker_pipeline_total_count gauge # TYPE woodpecker_pipeline_total_count gauge
woodpecker_build_total_count 1025 woodpecker_build_total_count 1025
# HELP woodpecker_pending_jobs Total number of pending build processes. # HELP woodpecker_pending_steps Total number of pending pipeline steps.
# TYPE woodpecker_pending_jobs gauge # TYPE woodpecker_pending_steps gauge
woodpecker_pending_jobs 0 woodpecker_pending_steps 0
# HELP woodpecker_repo_count Total number of repos. # HELP woodpecker_repo_count Total number of repos.
# TYPE woodpecker_repo_count gauge # TYPE woodpecker_repo_count gauge
woodpecker_repo_count 9 woodpecker_repo_count 9
# HELP woodpecker_running_jobs Total number of running build processes. # HELP woodpecker_running_steps Total number of running pipeline steps.
# TYPE woodpecker_running_jobs gauge # TYPE woodpecker_running_steps gauge
woodpecker_running_jobs 0 woodpecker_running_steps 0
# HELP woodpecker_user_count Total number of users. # HELP woodpecker_user_count Total number of users.
# TYPE woodpecker_user_count gauge # TYPE woodpecker_user_count gauge
woodpecker_user_count 1 woodpecker_user_count 1
# HELP woodpecker_waiting_jobs Total number of builds waiting on deps. # HELP woodpecker_waiting_steps Total number of pipeline waiting on deps.
# TYPE woodpecker_waiting_jobs gauge # TYPE woodpecker_waiting_steps gauge
woodpecker_waiting_jobs 0 woodpecker_waiting_steps 0
# HELP woodpecker_worker_count Total number of workers. # HELP woodpecker_worker_count Total number of workers.
# TYPE woodpecker_worker_count gauge # TYPE woodpecker_worker_count gauge
woodpecker_worker_count 4 woodpecker_worker_count 4


@ -8,8 +8,11 @@ Some versions need some changes to the server configuration or the pipeline conf
- Refactored support of old agent filter labels and expression. Learn how to use the new [filter](./20-usage/20-pipeline-syntax.md#labels) - Refactored support of old agent filter labels and expression. Learn how to use the new [filter](./20-usage/20-pipeline-syntax.md#labels)
- Renamed step environment variable `CI_SYSTEM_ARCH` to `CI_SYSTEM_PLATFORM`. Same applies for the cli exec variable. - Renamed step environment variable `CI_SYSTEM_ARCH` to `CI_SYSTEM_PLATFORM`. Same applies for the cli exec variable.
- Renamed environment variables `CI_BUILD_*` and `CI_PREV_BUILD_*` to `CI_PIPELINE_*` and `CI_PREV_PIPELINE_*`, old ones are still available but deprecated - Renamed environment variables `CI_BUILD_*` and `CI_PREV_BUILD_*` to `CI_PIPELINE_*` and `CI_PREV_PIPELINE_*`, old ones are still available but deprecated
- Renamed environment variables `CI_JOB_*` to `CI_STEP_*`, old ones are still available but deprecated
- Renamed API endpoints for pipelines (`<owner>/<repo>/builds/<buildId>` -> `<owner>/<repo>/pipelines/<pipelineId>`), old ones are still available but deprecated - Renamed API endpoints for pipelines (`<owner>/<repo>/builds/<buildId>` -> `<owner>/<repo>/pipelines/<pipelineId>`), old ones are still available but deprecated
- Updated Prometheus gauge `build_*` to `pipeline_*` - Updated Prometheus gauge `build_*` to `pipeline_*`
- Updated Prometheus gauge `*_job_*` to `*_step_*`
- Renamed config env `WOODPECKER_MAX_PROCS` to `WOODPECKER_MAX_WORKFLOWS` (still available as fallback)
## 0.15.0 ## 0.15.0


@ -22,7 +22,7 @@ If you have some missing resources, please feel free to [open a pull-request](ht
- [Convert Drone CI pipelines to Woodpecker CI](https://codeberg.org/lafriks/woodpecker-pipeline-transform) - [Convert Drone CI pipelines to Woodpecker CI](https://codeberg.org/lafriks/woodpecker-pipeline-transform)
- [Ansible NAS](https://github.com/davestephens/ansible-nas/) - a homelab Ansible playbook that can set up Woodpecker-CI and Gitea - [Ansible NAS](https://github.com/davestephens/ansible-nas/) - a homelab Ansible playbook that can set up Woodpecker-CI and Gitea
- [picus](https://github.com/windsource/picus) - Picus connects to a Woodpecker CI server and creates an agent in the cloud when there are pending jobs. - [picus](https://github.com/windsource/picus) - Picus connects to a Woodpecker CI server and creates an agent in the cloud when there are pending workflows.
## Templates ## Templates


@ -58,7 +58,7 @@ WOODPECKER_GITHUB_SECRET=<redacted>
# agent # agent
WOODPECKER_SERVER=localhost:9000 WOODPECKER_SERVER=localhost:9000
WOODPECKER_SECRET=a-long-and-secure-password-used-for-the-local-development-system WOODPECKER_SECRET=a-long-and-secure-password-used-for-the-local-development-system
WOODPECKER_MAX_PROCS=1 WOODPECKER_MAX_WORKFLOWS=1
# enable if you want to develop the UI # enable if you want to develop the UI
# WOODPECKER_DEV_WWW_PROXY=http://localhost:8010 # WOODPECKER_DEV_WWW_PROXY=http://localhost:8010


@ -12,73 +12,73 @@ import (
) )
// returns a container configuration. // returns a container configuration.
func toConfig(proc *types.Step) *container.Config { func toConfig(step *types.Step) *container.Config {
config := &container.Config{ config := &container.Config{
Image: proc.Image, Image: step.Image,
Labels: proc.Labels, Labels: step.Labels,
WorkingDir: proc.WorkingDir, WorkingDir: step.WorkingDir,
AttachStdout: true, AttachStdout: true,
AttachStderr: true, AttachStderr: true,
} }
if len(proc.Environment) != 0 { if len(step.Environment) != 0 {
config.Env = toEnv(proc.Environment) config.Env = toEnv(step.Environment)
} }
if len(proc.Command) != 0 { if len(step.Command) != 0 {
config.Cmd = proc.Command config.Cmd = step.Command
} }
if len(proc.Entrypoint) != 0 { if len(step.Entrypoint) != 0 {
config.Entrypoint = proc.Entrypoint config.Entrypoint = step.Entrypoint
} }
if len(proc.Volumes) != 0 { if len(step.Volumes) != 0 {
config.Volumes = toVol(proc.Volumes) config.Volumes = toVol(step.Volumes)
} }
return config return config
} }
// returns a container host configuration. // returns a container host configuration.
func toHostConfig(proc *types.Step) *container.HostConfig { func toHostConfig(step *types.Step) *container.HostConfig {
config := &container.HostConfig{ config := &container.HostConfig{
Resources: container.Resources{ Resources: container.Resources{
CPUQuota: proc.CPUQuota, CPUQuota: step.CPUQuota,
CPUShares: proc.CPUShares, CPUShares: step.CPUShares,
CpusetCpus: proc.CPUSet, CpusetCpus: step.CPUSet,
Memory: proc.MemLimit, Memory: step.MemLimit,
MemorySwap: proc.MemSwapLimit, MemorySwap: step.MemSwapLimit,
}, },
LogConfig: container.LogConfig{ LogConfig: container.LogConfig{
Type: "json-file", Type: "json-file",
}, },
Privileged: proc.Privileged, Privileged: step.Privileged,
ShmSize: proc.ShmSize, ShmSize: step.ShmSize,
Sysctls: proc.Sysctls, Sysctls: step.Sysctls,
} }
// if len(proc.VolumesFrom) != 0 { // if len(step.VolumesFrom) != 0 {
// config.VolumesFrom = proc.VolumesFrom // config.VolumesFrom = step.VolumesFrom
// } // }
if len(proc.NetworkMode) != 0 { if len(step.NetworkMode) != 0 {
config.NetworkMode = container.NetworkMode(proc.NetworkMode) config.NetworkMode = container.NetworkMode(step.NetworkMode)
} }
if len(proc.IpcMode) != 0 { if len(step.IpcMode) != 0 {
config.IpcMode = container.IpcMode(proc.IpcMode) config.IpcMode = container.IpcMode(step.IpcMode)
} }
if len(proc.DNS) != 0 { if len(step.DNS) != 0 {
config.DNS = proc.DNS config.DNS = step.DNS
} }
if len(proc.DNSSearch) != 0 { if len(step.DNSSearch) != 0 {
config.DNSSearch = proc.DNSSearch config.DNSSearch = step.DNSSearch
} }
if len(proc.ExtraHosts) != 0 { if len(step.ExtraHosts) != 0 {
config.ExtraHosts = proc.ExtraHosts config.ExtraHosts = step.ExtraHosts
} }
if len(proc.Devices) != 0 { if len(step.Devices) != 0 {
config.Devices = toDev(proc.Devices) config.Devices = toDev(step.Devices)
} }
if len(proc.Volumes) != 0 { if len(step.Volumes) != 0 {
config.Binds = proc.Volumes config.Binds = step.Volumes
} }
config.Tmpfs = map[string]string{} config.Tmpfs = map[string]string{}
for _, path := range proc.Tmpfs { for _, path := range step.Tmpfs {
if !strings.Contains(path, ":") { if !strings.Contains(path, ":") {
config.Tmpfs[path] = "" config.Tmpfs[path] = ""
continue continue


@ -104,19 +104,19 @@ func (e *docker) Setup(_ context.Context, conf *backend.Config) error {
return nil return nil
} }
func (e *docker) Exec(ctx context.Context, proc *backend.Step) error { func (e *docker) Exec(ctx context.Context, step *backend.Step) error {
config := toConfig(proc) config := toConfig(step)
hostConfig := toHostConfig(proc) hostConfig := toHostConfig(step)
// create pull options with encoded authorization credentials. // create pull options with encoded authorization credentials.
pullopts := types.ImagePullOptions{} pullopts := types.ImagePullOptions{}
if proc.AuthConfig.Username != "" && proc.AuthConfig.Password != "" { if step.AuthConfig.Username != "" && step.AuthConfig.Password != "" {
pullopts.RegistryAuth, _ = encodeAuthToBase64(proc.AuthConfig) pullopts.RegistryAuth, _ = encodeAuthToBase64(step.AuthConfig)
} }
// automatically pull the latest version of the image if requested // automatically pull the latest version of the image if requested
// by the process configuration. // by the process configuration.
if proc.Pull { if step.Pull {
responseBody, perr := e.client.ImagePull(ctx, config.Image, pullopts) responseBody, perr := e.client.ImagePull(ctx, config.Image, pullopts)
if perr == nil { if perr == nil {
defer responseBody.Close() defer responseBody.Close()
@ -128,7 +128,7 @@ func (e *docker) Exec(ctx context.Context, proc *backend.Step) error {
} }
// Fix "Show warning when fail to auth to docker registry" // Fix "Show warning when fail to auth to docker registry"
// (https://web.archive.org/web/20201023145804/https://github.com/drone/drone/issues/1917) // (https://web.archive.org/web/20201023145804/https://github.com/drone/drone/issues/1917)
if perr != nil && proc.AuthConfig.Password != "" { if perr != nil && step.AuthConfig.Password != "" {
return perr return perr
} }
} }
@ -136,7 +136,7 @@ func (e *docker) Exec(ctx context.Context, proc *backend.Step) error {
// add default volumes to the host configuration // add default volumes to the host configuration
hostConfig.Binds = append(hostConfig.Binds, e.volumes...) hostConfig.Binds = append(hostConfig.Binds, e.volumes...)
_, err := e.client.ContainerCreate(ctx, config, hostConfig, nil, nil, proc.Name) _, err := e.client.ContainerCreate(ctx, config, hostConfig, nil, nil, step.Name)
if client.IsErrNotFound(err) { if client.IsErrNotFound(err) {
// automatically pull and try to re-create the image if the // automatically pull and try to re-create the image if the
// failure is caused because the image does not exist. // failure is caused because the image does not exist.
@ -150,15 +150,15 @@ func (e *docker) Exec(ctx context.Context, proc *backend.Step) error {
log.Error().Err(err).Msg("DisplayJSONMessagesStream") log.Error().Err(err).Msg("DisplayJSONMessagesStream")
} }
_, err = e.client.ContainerCreate(ctx, config, hostConfig, nil, nil, proc.Name) _, err = e.client.ContainerCreate(ctx, config, hostConfig, nil, nil, step.Name)
} }
if err != nil { if err != nil {
return err return err
} }
if len(proc.NetworkMode) == 0 { if len(step.NetworkMode) == 0 {
for _, net := range proc.Networks { for _, net := range step.Networks {
err = e.client.NetworkConnect(ctx, net.Name, proc.Name, &network.EndpointSettings{ err = e.client.NetworkConnect(ctx, net.Name, step.Name, &network.EndpointSettings{
Aliases: net.Aliases, Aliases: net.Aliases,
}) })
if err != nil { if err != nil {
@ -168,24 +168,24 @@ func (e *docker) Exec(ctx context.Context, proc *backend.Step) error {
// join the container to an existing network // join the container to an existing network
if e.network != "" { if e.network != "" {
err = e.client.NetworkConnect(ctx, e.network, proc.Name, &network.EndpointSettings{}) err = e.client.NetworkConnect(ctx, e.network, step.Name, &network.EndpointSettings{})
if err != nil { if err != nil {
return err return err
} }
} }
} }
return e.client.ContainerStart(ctx, proc.Name, startOpts) return e.client.ContainerStart(ctx, step.Name, startOpts)
} }
func (e *docker) Wait(ctx context.Context, proc *backend.Step) (*backend.State, error) { func (e *docker) Wait(ctx context.Context, step *backend.Step) (*backend.State, error) {
wait, errc := e.client.ContainerWait(ctx, proc.Name, "") wait, errc := e.client.ContainerWait(ctx, step.Name, "")
select { select {
case <-wait: case <-wait:
case <-errc: case <-errc:
} }
info, err := e.client.ContainerInspect(ctx, proc.Name) info, err := e.client.ContainerInspect(ctx, step.Name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -200,8 +200,8 @@ func (e *docker) Wait(ctx context.Context, proc *backend.Step) (*backend.State,
}, nil }, nil
} }
func (e *docker) Tail(ctx context.Context, proc *backend.Step) (io.ReadCloser, error) { func (e *docker) Tail(ctx context.Context, step *backend.Step) (io.ReadCloser, error) {
logs, err := e.client.ContainerLogs(ctx, proc.Name, logsOpts) logs, err := e.client.ContainerLogs(ctx, step.Name, logsOpts)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -55,33 +55,33 @@ func (e *local) Load() error {
} }
// Setup the pipeline environment. // Setup the pipeline environment.
func (e *local) Setup(ctx context.Context, proc *types.Config) error { func (e *local) Setup(ctx context.Context, config *types.Config) error {
return nil return nil
} }
// Exec the pipeline step. // Exec the pipeline step.
func (e *local) Exec(ctx context.Context, proc *types.Step) error { func (e *local) Exec(ctx context.Context, step *types.Step) error {
// Get environment variables // Get environment variables
Env := os.Environ() Env := os.Environ()
for a, b := range proc.Environment { for a, b := range step.Environment {
if a != "HOME" && a != "SHELL" { // Don't override $HOME and $SHELL if a != "HOME" && a != "SHELL" { // Don't override $HOME and $SHELL
Env = append(Env, a+"="+b) Env = append(Env, a+"="+b)
} }
} }
Command := []string{} Command := []string{}
if proc.Image == constant.DefaultCloneImage { if step.Image == constant.DefaultCloneImage {
// Default clone step // Default clone step
Env = append(Env, "CI_WORKSPACE="+e.workingdir+"/"+proc.Environment["CI_REPO"]) Env = append(Env, "CI_WORKSPACE="+e.workingdir+"/"+step.Environment["CI_REPO"])
Command = append(Command, "plugin-git") Command = append(Command, "plugin-git")
} else { } else {
// Use "image name" as run command // Use "image name" as run command
Command = append(Command, proc.Image) Command = append(Command, step.Image)
Command = append(Command, "-c") Command = append(Command, "-c")
// Decode script and delete initial lines // Decode script and delete initial lines
// Deleting the initial lines removes netrc support but adds compatibility for more shells like fish // Deleting the initial lines removes netrc support but adds compatibility for more shells like fish
Script, _ := base64.RawStdEncoding.DecodeString(proc.Environment["CI_SCRIPT"]) Script, _ := base64.RawStdEncoding.DecodeString(step.Environment["CI_SCRIPT"])
Command = append(Command, string(Script)[strings.Index(string(Script), "\n\n")+2:]) Command = append(Command, string(Script)[strings.Index(string(Script), "\n\n")+2:])
} }
@ -90,10 +90,10 @@ func (e *local) Exec(ctx context.Context, proc *types.Step) error {
e.cmd.Env = Env e.cmd.Env = Env
// Prepare working directory // Prepare working directory
if proc.Image == constant.DefaultCloneImage { if step.Image == constant.DefaultCloneImage {
e.cmd.Dir = e.workingdir + "/" + proc.Environment["CI_REPO_OWNER"] e.cmd.Dir = e.workingdir + "/" + step.Environment["CI_REPO_OWNER"]
} else { } else {
e.cmd.Dir = e.workingdir + "/" + proc.Environment["CI_REPO"] e.cmd.Dir = e.workingdir + "/" + step.Environment["CI_REPO"]
} }
err := os.MkdirAll(e.cmd.Dir, 0o700) err := os.MkdirAll(e.cmd.Dir, 0o700)
if err != nil { if err != nil {


@ -84,33 +84,33 @@ func (e *ssh) Load() error {
} }
// Setup the pipeline environment. // Setup the pipeline environment.
func (e *ssh) Setup(ctx context.Context, proc *types.Config) error { func (e *ssh) Setup(ctx context.Context, config *types.Config) error {
return nil return nil
} }
// Exec the pipeline step. // Exec the pipeline step.
func (e *ssh) Exec(ctx context.Context, proc *types.Step) error { func (e *ssh) Exec(ctx context.Context, step *types.Step) error {
// Get environment variables // Get environment variables
Command := []string{} Command := []string{}
for a, b := range proc.Environment { for a, b := range step.Environment {
if a != "HOME" && a != "SHELL" { // Don't override $HOME and $SHELL if a != "HOME" && a != "SHELL" { // Don't override $HOME and $SHELL
Command = append(Command, a+"="+b) Command = append(Command, a+"="+b)
} }
} }
if proc.Image == constant.DefaultCloneImage { if step.Image == constant.DefaultCloneImage {
// Default clone step // Default clone step
Command = append(Command, "CI_WORKSPACE="+e.workingdir+"/"+proc.Environment["CI_REPO"]) Command = append(Command, "CI_WORKSPACE="+e.workingdir+"/"+step.Environment["CI_REPO"])
Command = append(Command, "plugin-git") Command = append(Command, "plugin-git")
} else { } else {
// Use "image name" as run command // Use "image name" as run command
Command = append(Command, proc.Image) Command = append(Command, step.Image)
Command = append(Command, "-c") Command = append(Command, "-c")
// Decode script and delete initial lines // Decode script and delete initial lines
// Deleting the initial lines removes netrc support but adds compatibility for more shells like fish // Deleting the initial lines removes netrc support but adds compatibility for more shells like fish
Script, _ := base64.RawStdEncoding.DecodeString(proc.Environment["CI_SCRIPT"]) Script, _ := base64.RawStdEncoding.DecodeString(step.Environment["CI_SCRIPT"])
Command = append(Command, "cd "+e.workingdir+"/"+proc.Environment["CI_REPO"]+" && "+string(Script)[strings.Index(string(Script), "\n\n")+2:]) Command = append(Command, "cd "+e.workingdir+"/"+step.Environment["CI_REPO"]+" && "+string(Script)[strings.Index(string(Script), "\n\n")+2:])
} }
// Prepare command // Prepare command


@ -55,5 +55,5 @@ func (m *Metadata) setDroneEnviron(env map[string]string) {
env["DRONE_GIT_HTTP_URL"] = env["CI_REPO_REMOTE"] env["DRONE_GIT_HTTP_URL"] = env["CI_REPO_REMOTE"]
// misc // misc
env["DRONE_SYSTEM_HOST"] = env["CI_SYSTEM_HOST"] env["DRONE_SYSTEM_HOST"] = env["CI_SYSTEM_HOST"]
env["DRONE_STEP_NUMBER"] = env["CI_JOB_NUMBER"] env["DRONE_STEP_NUMBER"] = env["CI_STEP_NUMBER"]
} }


@ -39,7 +39,7 @@ type (
Repo Repo `json:"repo,omitempty"` Repo Repo `json:"repo,omitempty"`
Curr Pipeline `json:"curr,omitempty"` Curr Pipeline `json:"curr,omitempty"`
Prev Pipeline `json:"prev,omitempty"` Prev Pipeline `json:"prev,omitempty"`
Job Job `json:"job,omitempty"` Step Step `json:"step,omitempty"`
Sys System `json:"sys,omitempty"` Sys System `json:"sys,omitempty"`
} }
@ -88,8 +88,8 @@ type (
Avatar string `json:"avatar,omitempty"` Avatar string `json:"avatar,omitempty"`
} }
// Job defines runtime metadata for a job. // Step defines runtime metadata for a step.
Job struct { Step struct {
Number int `json:"number,omitempty"` Number int `json:"number,omitempty"`
Matrix map[string]string `json:"matrix,omitempty"` Matrix map[string]string `json:"matrix,omitempty"`
} }
@ -171,10 +171,10 @@ func (m *Metadata) Environ() map[string]string {
"CI_PIPELINE_STARTED": strconv.FormatInt(m.Curr.Started, 10), "CI_PIPELINE_STARTED": strconv.FormatInt(m.Curr.Started, 10),
"CI_PIPELINE_FINISHED": strconv.FormatInt(m.Curr.Finished, 10), "CI_PIPELINE_FINISHED": strconv.FormatInt(m.Curr.Finished, 10),
"CI_JOB_NUMBER": strconv.Itoa(m.Job.Number), "CI_STEP_NUMBER": strconv.Itoa(m.Step.Number),
"CI_JOB_STATUS": "", // will be set by agent "CI_STEP_STATUS": "", // will be set by agent
"CI_JOB_STARTED": "", // will be set by agent "CI_STEP_STARTED": "", // will be set by agent
"CI_JOB_FINISHED": "", // will be set by agent "CI_STEP_FINISHED": "", // will be set by agent
"CI_PREV_COMMIT_SHA": m.Prev.Commit.Sha, "CI_PREV_COMMIT_SHA": m.Prev.Commit.Sha,
"CI_PREV_COMMIT_REF": m.Prev.Commit.Ref, "CI_PREV_COMMIT_REF": m.Prev.Commit.Ref,
@ -224,6 +224,11 @@ func (m *Metadata) Environ() map[string]string {
"CI_PREV_BUILD_CREATED": strconv.FormatInt(m.Prev.Created, 10), "CI_PREV_BUILD_CREATED": strconv.FormatInt(m.Prev.Created, 10),
"CI_PREV_BUILD_STARTED": strconv.FormatInt(m.Prev.Started, 10), "CI_PREV_BUILD_STARTED": strconv.FormatInt(m.Prev.Started, 10),
"CI_PREV_BUILD_FINISHED": strconv.FormatInt(m.Prev.Finished, 10), "CI_PREV_BUILD_FINISHED": strconv.FormatInt(m.Prev.Finished, 10),
// use CI_STEP_*
"CI_JOB_NUMBER": strconv.Itoa(m.Step.Number),
"CI_JOB_STATUS": "", // will be set by agent
"CI_JOB_STARTED": "", // will be set by agent
"CI_JOB_FINISHED": "", // will be set by agent
} }
if m.Curr.Event == EventTag { if m.Curr.Event == EventTag {
params["CI_COMMIT_TAG"] = strings.TrimPrefix(m.Curr.Commit.Ref, "refs/tags/") params["CI_COMMIT_TAG"] = strings.TrimPrefix(m.Curr.Commit.Ref, "refs/tags/")


@ -138,7 +138,7 @@ func (c *Constraint) Match(metadata frontend.Metadata, global bool) (bool, error
c.SetDefaultEventFilter() c.SetDefaultEventFilter()
// apply step only filters // apply step only filters
match = c.Matrix.Match(metadata.Job.Matrix) match = c.Matrix.Match(metadata.Step.Matrix)
} }
match = match && c.Platform.Match(metadata.Sys.Platform) && match = match && c.Platform.Match(metadata.Sys.Platform) &&


@ -21,11 +21,11 @@ func TestMarshalUlimit(t *testing.T) {
Soft: 65535, Soft: 65535,
Hard: 65535, Hard: 65535,
}, },
Name: "nproc", Name: "nstep",
}, },
}, },
}, },
expected: `nproc: 65535 expected: `nstep: 65535
`, `,
}, },
{ {
@ -61,11 +61,11 @@ func TestUnmarshalUlimits(t *testing.T) {
expected *Ulimits expected *Ulimits
}{ }{
{ {
yaml: "nproc: 65535", yaml: "nstep: 65535",
expected: &Ulimits{ expected: &Ulimits{
Elements: []Ulimit{ Elements: []Ulimit{
{ {
Name: "nproc", Name: "nstep",
ulimitValues: ulimitValues{ ulimitValues: ulimitValues{
Soft: 65535, Soft: 65535,
Hard: 65535, Hard: 65535,
@ -91,7 +91,7 @@ func TestUnmarshalUlimits(t *testing.T) {
}, },
}, },
{ {
yaml: `nproc: 65535 yaml: `nstep: 65535
nofile: nofile:
soft: 20000 soft: 20000
hard: 40000`, hard: 40000`,
@ -105,7 +105,7 @@ nofile:
}, },
}, },
{ {
Name: "nproc", Name: "nstep",
ulimitValues: ulimitValues{ ulimitValues: ulimitValues{
Soft: 65535, Soft: 65535,
Hard: 65535, Hard: 65535,


@ -14,7 +14,7 @@ type Logger interface {
// function for process logging. // function for process logging.
type LogFunc func(*backend.Step, multipart.Reader) error type LogFunc func(*backend.Step, multipart.Reader) error
// Log calls f(proc, r). // Log calls f(step, r).
func (f LogFunc) Log(step *backend.Step, r multipart.Reader) error { func (f LogFunc) Log(step *backend.Step, r multipart.Reader) error {
return f(step, r) return f(step, r)
} }


@ -122,7 +122,7 @@ func (c *client) Init(ctx context.Context, id string, state State) (err error) {
req.State.Exited = state.Exited req.State.Exited = state.Exited
req.State.Finished = state.Finished req.State.Finished = state.Finished
req.State.Started = state.Started req.State.Started = state.Started
req.State.Name = state.Proc req.State.Name = state.Step
for { for {
_, err = c.client.Init(ctx, req) _, err = c.client.Init(ctx, req)
if err == nil { if err == nil {
@ -156,7 +156,7 @@ func (c *client) Done(ctx context.Context, id string, state State) (err error) {
req.State.Exited = state.Exited req.State.Exited = state.Exited
req.State.Finished = state.Finished req.State.Finished = state.Finished
req.State.Started = state.Started req.State.Started = state.Started
req.State.Name = state.Proc req.State.Name = state.Step
for { for {
_, err = c.client.Done(ctx, req) _, err = c.client.Done(ctx, req)
if err == nil { if err == nil {
@ -217,7 +217,7 @@ func (c *client) Update(ctx context.Context, id string, state State) (err error)
req.State.Exited = state.Exited req.State.Exited = state.Exited
req.State.Finished = state.Finished req.State.Finished = state.Finished
req.State.Started = state.Started req.State.Started = state.Started
req.State.Name = state.Proc req.State.Name = state.Step
for { for {
_, err = c.client.Update(ctx, req) _, err = c.client.Update(ctx, req)
if err == nil { if err == nil {
@ -248,7 +248,7 @@ func (c *client) Upload(ctx context.Context, id string, file *File) (err error)
req.File = new(proto.File) req.File = new(proto.File)
req.File.Name = file.Name req.File.Name = file.Name
req.File.Mime = file.Mime req.File.Mime = file.Mime
req.File.Proc = file.Proc req.File.Step = file.Step
req.File.Size = int32(file.Size) req.File.Size = int32(file.Size)
req.File.Time = file.Time req.File.Time = file.Time
req.File.Data = file.Data req.File.Data = file.Data
@ -283,7 +283,7 @@ func (c *client) Log(ctx context.Context, id string, line *Line) (err error) {
req.Line = new(proto.Line) req.Line = new(proto.Line)
req.Line.Out = line.Out req.Line.Out = line.Out
req.Line.Pos = int32(line.Pos) req.Line.Pos = int32(line.Pos)
req.Line.Proc = line.Proc req.Line.Step = line.Step
req.Line.Time = line.Time req.Line.Time = line.Time
for { for {
_, err = c.client.Log(ctx, req) _, err = c.client.Log(ctx, req)


@ -22,7 +22,7 @@ const (
// Line is a line of console output. // Line is a line of console output.
type Line struct { type Line struct {
Proc string `json:"proc,omitempty"` Step string `json:"step,omitempty"`
Time int64 `json:"time,omitempty"` Time int64 `json:"time,omitempty"`
Type int `json:"type,omitempty"` Type int `json:"type,omitempty"`
Pos int `json:"pos,omitempty"` Pos int `json:"pos,omitempty"`
@ -32,9 +32,9 @@ type Line struct {
func (l *Line) String() string { func (l *Line) String() string {
switch l.Type { switch l.Type {
case LineExitCode: case LineExitCode:
return fmt.Sprintf("[%s] exit code %s", l.Proc, l.Out) return fmt.Sprintf("[%s] exit code %s", l.Step, l.Out)
default: default:
return fmt.Sprintf("[%s:L%v:%vs] %s", l.Proc, l.Pos, l.Time, l.Out) return fmt.Sprintf("[%s:L%v:%vs] %s", l.Step, l.Pos, l.Time, l.Out)
} }
} }
@ -70,7 +70,7 @@ func (w *LineWriter) Write(p []byte) (n int, err error) {
line := &Line{ line := &Line{
Out: out, Out: out,
Proc: w.name, Step: w.name,
Pos: w.num, Pos: w.num,
Time: int64(time.Since(w.now).Seconds()), Time: int64(time.Since(w.now).Seconds()),
Type: LineStdout, Type: LineStdout,
@ -83,7 +83,7 @@ func (w *LineWriter) Write(p []byte) (n int, err error) {
// for _, part := range bytes.Split(p, []byte{'\n'}) { // for _, part := range bytes.Split(p, []byte{'\n'}) {
// line := &Line{ // line := &Line{
// Out: string(part), // Out: string(part),
// Proc: w.name, // Step: w.name,
// Pos: w.num, // Pos: w.num,
// Time: int64(time.Since(w.now).Seconds()), // Time: int64(time.Since(w.now).Seconds()),
// Type: LineStdout, // Type: LineStdout,
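
Consumers that read these log lines as JSON now get a `step` key where they previously got `proc`. A minimal, hypothetical sketch of the new shape, using a local mirror of the struct rather than the real `rpc` package (the `out` tag is assumed; the other tags are visible in the diff above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// line mirrors rpc.Line's JSON tags after the rename: "step" replaces "proc".
type line struct {
	Step string `json:"step,omitempty"`
	Time int64  `json:"time,omitempty"`
	Type int    `json:"type,omitempty"`
	Pos  int    `json:"pos,omitempty"`
	Out  string `json:"out,omitempty"` // tag assumed, not shown in the diff
}

func main() {
	b, _ := json.Marshal(line{Step: "redis", Time: 60, Pos: 1, Out: "starting redis server"})
	fmt.Println(string(b)) // {"step":"redis","time":60,"pos":1,"out":"starting redis server"}
}
```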


@ -6,7 +6,7 @@ import (
func TestLine(t *testing.T) { func TestLine(t *testing.T) {
line := Line{ line := Line{
Proc: "redis", Step: "redis",
Time: 60, Time: 60,
Pos: 1, Pos: 1,
Out: "starting redis server", Out: "starting redis server",


@ -14,7 +14,7 @@ type (
// State defines the pipeline state. // State defines the pipeline state.
State struct { State struct {
Proc string `json:"proc"` Step string `json:"step"`
Exited bool `json:"exited"` Exited bool `json:"exited"`
ExitCode int `json:"exit_code"` ExitCode int `json:"exit_code"`
Started int64 `json:"started"` Started int64 `json:"started"`
@ -32,7 +32,7 @@ type (
// File defines a pipeline artifact. // File defines a pipeline artifact.
File struct { File struct {
Name string `json:"name"` Name string `json:"name"`
Proc string `json:"proc"` Step string `json:"step"`
Mime string `json:"mime"` Mime string `json:"mime"`
Time int64 `json:"time"` Time int64 `json:"time"`
Size int `json:"size"` Size int `json:"size"`


@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.28.1 // protoc-gen-go v1.28.1
// protoc v3.12.4 // protoc v3.21.7
// source: woodpecker.proto // source: woodpecker.proto
package proto package proto
@ -75,7 +75,7 @@ type File struct {
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Proc string `protobuf:"bytes,2,opt,name=proc,proto3" json:"proc,omitempty"` Step string `protobuf:"bytes,2,opt,name=step,proto3" json:"step,omitempty"`
Mime string `protobuf:"bytes,3,opt,name=mime,proto3" json:"mime,omitempty"` Mime string `protobuf:"bytes,3,opt,name=mime,proto3" json:"mime,omitempty"`
Time int64 `protobuf:"varint,4,opt,name=time,proto3" json:"time,omitempty"` Time int64 `protobuf:"varint,4,opt,name=time,proto3" json:"time,omitempty"`
Size int32 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` Size int32 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"`
@ -122,9 +122,9 @@ func (x *File) GetName() string {
return "" return ""
} }
func (x *File) GetProc() string { func (x *File) GetStep() string {
if x != nil { if x != nil {
return x.Proc return x.Step
} }
return "" return ""
} }
@ -256,7 +256,7 @@ type Line struct {
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
Proc string `protobuf:"bytes,1,opt,name=proc,proto3" json:"proc,omitempty"` Step string `protobuf:"bytes,1,opt,name=step,proto3" json:"step,omitempty"`
Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"` Time int64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"`
Pos int32 `protobuf:"varint,3,opt,name=pos,proto3" json:"pos,omitempty"` Pos int32 `protobuf:"varint,3,opt,name=pos,proto3" json:"pos,omitempty"`
Out string `protobuf:"bytes,4,opt,name=out,proto3" json:"out,omitempty"` Out string `protobuf:"bytes,4,opt,name=out,proto3" json:"out,omitempty"`
@ -294,9 +294,9 @@ func (*Line) Descriptor() ([]byte, []int) {
return file_woodpecker_proto_rawDescGZIP(), []int{2} return file_woodpecker_proto_rawDescGZIP(), []int{2}
} }
func (x *Line) GetProc() string { func (x *Line) GetStep() string {
if x != nil { if x != nil {
return x.Proc return x.Step
} }
return "" return ""
} }
@ -1033,8 +1033,8 @@ var file_woodpecker_proto_rawDesc = []byte{
0x0a, 0x10, 0x77, 0x6f, 0x6f, 0x64, 0x70, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x0a, 0x10, 0x77, 0x6f, 0x6f, 0x64, 0x70, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe2, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x74, 0x6f, 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe2, 0x01, 0x0a, 0x04, 0x46, 0x69,
0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x72, 0x6f, 0x63, 0x18, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x72, 0x6f, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x69, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x69,
0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x12, 0x12,
0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x69, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x69,
0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05,
@ -1056,8 +1056,8 @@ var file_woodpecker_proto_rawDesc = []byte{
0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x66,
0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x52, 0x0a, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x52, 0x0a,
0x04, 0x4c, 0x69, 0x6e, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x72, 0x6f, 0x63, 0x18, 0x01, 0x20, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x72, 0x6f, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x69, 0x6d,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a,
0x03, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12, 0x03, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12,
0x10, 0x0a, 0x03, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x75, 0x10, 0x0a, 0x03, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x75,

View file

@ -5,7 +5,7 @@ package proto;
message File { message File {
string name = 1; string name = 1;
string proc = 2; string step = 2;
string mime = 3; string mime = 3;
int64 time = 4; int64 time = 4;
int32 size = 5; int32 size = 5;
@ -23,7 +23,7 @@ message State {
} }
message Line { message Line {
string proc = 1; string step = 1;
int64 time = 2; int64 time = 2;
int32 pos = 3; int32 pos = 3;
string out = 4; string out = 4;
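For illustration only (not part of this change), a minimal sketch of how the renamed wire field surfaces in the regenerated Go bindings. It assumes the generated package is imported as proto (the pipeline/rpc/proto package above) plus fmt from the standard library, and uses only fields visible in this diff.

func printLineStep() {
	line := &proto.Line{
		Step: "clone", // this field was named `proc` before the rename
		Time: 1666980000,
		Pos:  0,
		Out:  "+ git fetch origin\n",
	}
	fmt.Println(line.GetStep()) // callers move from GetProc to GetStep
}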

View file

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT. // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions: // versions:
// - protoc-gen-go-grpc v1.2.0 // - protoc-gen-go-grpc v1.2.0
// - protoc v3.12.4 // - protoc v3.21.7
// source: woodpecker.proto // source: woodpecker.proto
package proto package proto

View file

@ -14,7 +14,7 @@ type Tracer interface {
// functions as a Tracer. // functions as a Tracer.
type TraceFunc func(*State) error type TraceFunc func(*State) error
// Trace calls f(proc, state). // Trace calls f(state).
func (f TraceFunc) Trace(state *State) error { func (f TraceFunc) Trace(state *State) error {
return f(state) return f(state)
} }
@ -32,20 +32,24 @@ var DefaultTracer = TraceFunc(func(state *State) error {
state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "success" state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_PIPELINE_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10) state.Pipeline.Step.Environment["CI_PIPELINE_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_PIPELINE_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10) state.Pipeline.Step.Environment["CI_PIPELINE_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
state.Pipeline.Step.Environment["CI_STEP_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_STEP_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_STEP_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
// DEPRECATED // DEPRECATED
state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "success" state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10) state.Pipeline.Step.Environment["CI_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10) state.Pipeline.Step.Environment["CI_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "success" state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "success"
state.Pipeline.Step.Environment["CI_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10) state.Pipeline.Step.Environment["CI_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
state.Pipeline.Step.Environment["CI_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10) state.Pipeline.Step.Environment["CI_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
if state.Pipeline.Error != nil { if state.Pipeline.Error != nil {
state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "failure" state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "failure"
state.Pipeline.Step.Environment["CI_STEP_STATUS"] = "failure"
// DEPRECATED // DEPRECATED
state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "failure" state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "failure"
state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "failure" state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "failure"
} }
return nil return nil
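As a usage sketch (not part of this change), a caller can supply its own tracer with the same shape as DefaultTracer; the snippet assumes the pipeline package import already used by the agent runner and only touches the new CI_STEP_STATUS variable.

// stepStatusTracer mirrors the status handling of DefaultTracer for the new
// CI_STEP_STATUS variable and leaves everything else untouched.
func stepStatusTracer() pipeline.Tracer {
	return pipeline.TraceFunc(func(state *pipeline.State) error {
		status := "success"
		if state.Pipeline.Error != nil {
			status = "failure"
		}
		state.Pipeline.Step.Environment["CI_STEP_STATUS"] = status
		return nil
	})
}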

View file

@ -71,7 +71,7 @@ func FileGet(c *gin.Context) {
return return
} }
pid, err := strconv.Atoi(c.Param("proc")) pid, err := strconv.Atoi(c.Param("step"))
if err != nil { if err != nil {
_ = c.AbortWithError(http.StatusBadRequest, err) _ = c.AbortWithError(http.StatusBadRequest, err)
return return
@ -83,13 +83,13 @@ func FileGet(c *gin.Context) {
return return
} }
proc, err := _store.ProcFind(pipeline, pid) step, err := _store.StepFind(pipeline, pid)
if err != nil { if err != nil {
_ = c.AbortWithError(http.StatusInternalServerError, err) _ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
file, err := _store.FileFind(proc, name) file, err := _store.FileFind(step, name)
if err != nil { if err != nil {
c.String(404, "Error getting file %q. %s", name, err) c.String(404, "Error getting file %q. %s", name, err)
return return
@ -100,7 +100,7 @@ func FileGet(c *gin.Context) {
return return
} }
rc, err := _store.FileRead(proc, file.Name) rc, err := _store.FileRead(step, file.Name)
if err != nil { if err != nil {
c.String(404, "Error getting file stream %q. %s", name, err) c.String(404, "Error getting file stream %q. %s", name, err)
return return

View file

@ -127,8 +127,8 @@ func GetPipeline(c *gin.Context) {
return return
} }
files, _ := _store.FileList(pl) files, _ := _store.FileList(pl)
procs, _ := _store.ProcList(pl) steps, _ := _store.StepList(pl)
if pl.Procs, err = model.Tree(procs); err != nil { if pl.Steps, err = model.Tree(steps); err != nil {
_ = c.AbortWithError(http.StatusInternalServerError, err) _ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
@ -148,12 +148,12 @@ func GetPipelineLast(c *gin.Context) {
return return
} }
procs, err := _store.ProcList(pl) steps, err := _store.StepList(pl)
if err != nil { if err != nil {
_ = c.AbortWithError(http.StatusInternalServerError, err) _ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
if pl.Procs, err = model.Tree(procs); err != nil { if pl.Steps, err = model.Tree(steps); err != nil {
_ = c.AbortWithError(http.StatusInternalServerError, err) _ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
@ -164,11 +164,11 @@ func GetPipelineLogs(c *gin.Context) {
_store := store.FromContext(c) _store := store.FromContext(c)
repo := session.Repo(c) repo := session.Repo(c)
// parse the pipeline number and job sequence number from // parse the pipeline number and step sequence number from
// the request parameter. // the request parameter.
num, _ := strconv.ParseInt(c.Params.ByName("number"), 10, 64) num, _ := strconv.ParseInt(c.Params.ByName("number"), 10, 64)
ppid, _ := strconv.Atoi(c.Params.ByName("pid")) ppid, _ := strconv.Atoi(c.Params.ByName("pid"))
name := c.Params.ByName("proc") name := c.Params.ByName("step")
pl, err := _store.GetPipelineNumber(repo, num) pl, err := _store.GetPipelineNumber(repo, num)
if err != nil { if err != nil {
@ -176,13 +176,13 @@ func GetPipelineLogs(c *gin.Context) {
return return
} }
proc, err := _store.ProcChild(pl, ppid, name) step, err := _store.StepChild(pl, ppid, name)
if err != nil { if err != nil {
_ = c.AbortWithError(404, err) _ = c.AbortWithError(404, err)
return return
} }
rc, err := _store.LogFind(proc) rc, err := _store.LogFind(step)
if err != nil { if err != nil {
_ = c.AbortWithError(404, err) _ = c.AbortWithError(404, err)
return return
@ -196,11 +196,11 @@ func GetPipelineLogs(c *gin.Context) {
} }
} }
func GetProcLogs(c *gin.Context) { func GetStepLogs(c *gin.Context) {
_store := store.FromContext(c) _store := store.FromContext(c)
repo := session.Repo(c) repo := session.Repo(c)
// parse the pipeline number and job sequence number from // parse the pipeline number and step sequence number from
// the request parameter. // the request parameter.
num, _ := strconv.ParseInt(c.Params.ByName("number"), 10, 64) num, _ := strconv.ParseInt(c.Params.ByName("number"), 10, 64)
pid, _ := strconv.Atoi(c.Params.ByName("pid")) pid, _ := strconv.Atoi(c.Params.ByName("pid"))
@ -211,13 +211,13 @@ func GetProcLogs(c *gin.Context) {
return return
} }
proc, err := _store.ProcFind(pl, pid) step, err := _store.StepFind(pl, pid)
if err != nil { if err != nil {
_ = c.AbortWithError(http.StatusNotFound, err) _ = c.AbortWithError(http.StatusNotFound, err)
return return
} }
rc, err := _store.LogFind(proc) rc, err := _store.LogFind(step)
if err != nil { if err != nil {
_ = c.AbortWithError(http.StatusNotFound, err) _ = c.AbortWithError(http.StatusNotFound, err)
return return
@ -407,7 +407,7 @@ func DeletePipelineLogs(c *gin.Context) {
return return
} }
procs, err := _store.ProcList(pl) steps, err := _store.StepList(pl)
if err != nil { if err != nil {
_ = c.AbortWithError(404, err) _ = c.AbortWithError(404, err)
return return
@ -419,10 +419,10 @@ func DeletePipelineLogs(c *gin.Context) {
return return
} }
for _, proc := range procs { for _, step := range steps {
t := time.Now().UTC() t := time.Now().UTC()
buf := bytes.NewBufferString(fmt.Sprintf(deleteStr, proc.Name, user.Login, t.Format(time.UnixDate))) buf := bytes.NewBufferString(fmt.Sprintf(deleteStr, step.Name, user.Login, t.Format(time.UnixDate)))
lerr := _store.LogSave(proc, buf) lerr := _store.LogSave(step, buf)
if lerr != nil { if lerr != nil {
err = lerr err = lerr
} }
@ -437,7 +437,7 @@ func DeletePipelineLogs(c *gin.Context) {
var deleteStr = `[ var deleteStr = `[
{ {
"proc": %q, "step": %q,
"pos": 0, "pos": 0,
"out": "logs purged by %s on %s\n" "out": "logs purged by %s on %s\n"
} }

View file

@ -141,10 +141,10 @@ func LogStreamSSE(c *gin.Context) {
repo := session.Repo(c) repo := session.Repo(c)
_store := store.FromContext(c) _store := store.FromContext(c)
// // parse the pipeline number and job sequence number from // // parse the pipeline number and step sequence number from
// // the request parameter. // // the request parameter.

pipelinen, _ := strconv.ParseInt(c.Param("pipeline"), 10, 64) pipelinen, _ := strconv.ParseInt(c.Param("pipeline"), 10, 64)
jobn, _ := strconv.Atoi(c.Param("number")) stepn, _ := strconv.Atoi(c.Param("number"))
pipeline, err := _store.GetPipelineNumber(repo, pipelinen) pipeline, err := _store.GetPipelineNumber(repo, pipelinen)
if err != nil { if err != nil {
@ -152,13 +152,13 @@ func LogStreamSSE(c *gin.Context) {
logWriteStringErr(io.WriteString(rw, "event: error\ndata: pipeline not found\n\n")) logWriteStringErr(io.WriteString(rw, "event: error\ndata: pipeline not found\n\n"))
return return
} }
proc, err := _store.ProcFind(pipeline, jobn) step, err := _store.StepFind(pipeline, stepn)
if err != nil { if err != nil {
log.Debug().Msgf("stream cannot get proc number: %v", err) log.Debug().Msgf("stream cannot get step number: %v", err)
logWriteStringErr(io.WriteString(rw, "event: error\ndata: process not found\n\n")) logWriteStringErr(io.WriteString(rw, "event: error\ndata: process not found\n\n"))
return return
} }
if proc.State != model.StatusRunning { if step.State != model.StatusRunning {
log.Debug().Msg("stream not found.") log.Debug().Msg("stream not found.")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: stream not found\n\n")) logWriteStringErr(io.WriteString(rw, "event: error\ndata: stream not found\n\n"))
return return
@ -179,7 +179,7 @@ func LogStreamSSE(c *gin.Context) {
go func() { go func() {
// TODO remove global variable // TODO remove global variable
err := server.Config.Services.Logs.Tail(ctx, fmt.Sprint(proc.ID), func(entries ...*logging.Entry) { err := server.Config.Services.Logs.Tail(ctx, fmt.Sprint(step.ID), func(entries ...*logging.Entry) {
defer func() { defer func() {
obj := recover() // fix #2480 // TODO: check if it's still needed obj := recover() // fix #2480 // TODO: check if it's still needed
log.Trace().Msgf("pubsub subscribe recover return: %v", obj) log.Trace().Msgf("pubsub subscribe recover return: %v", obj)

View file

@ -50,7 +50,7 @@ var Config = struct {
// Builds model.BuildStore // Builds model.BuildStore
// Logs model.LogStore // Logs model.LogStore
Files model.FileStore Files model.FileStore
Procs model.ProcStore Steps model.StepStore
// Registries model.RegistryStore // Registries model.RegistryStore
// Secrets model.SecretStore // Secrets model.SecretStore
} }

View file

@ -97,26 +97,26 @@ func (s *RPC) Extend(c context.Context, id string) error {
// Update implements the rpc.Update function // Update implements the rpc.Update function
func (s *RPC) Update(c context.Context, id string, state rpc.State) error { func (s *RPC) Update(c context.Context, id string, state rpc.State) error {
procID, err := strconv.ParseInt(id, 10, 64) stepID, err := strconv.ParseInt(id, 10, 64)
if err != nil { if err != nil {
return err return err
} }
pproc, err := s.store.ProcLoad(procID) pstep, err := s.store.StepLoad(stepID)
if err != nil { if err != nil {
log.Error().Msgf("error: rpc.update: cannot find pproc with id %d: %s", procID, err) log.Error().Msgf("error: rpc.update: cannot find step with id %d: %s", stepID, err)
return err return err
} }
pipeline, err := s.store.GetPipeline(pproc.PipelineID) pipeline, err := s.store.GetPipeline(pstep.PipelineID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find pipeline with id %d: %s", pproc.PipelineID, err) log.Error().Msgf("error: cannot find pipeline with id %d: %s", pstep.PipelineID, err)
return err return err
} }
proc, err := s.store.ProcChild(pipeline, pproc.PID, state.Proc) step, err := s.store.StepChild(pipeline, pstep.PID, state.Step)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find proc with name %s: %s", state.Proc, err) log.Error().Msgf("error: cannot find step with name %s: %s", state.Step, err)
return err return err
} }
@ -124,7 +124,7 @@ func (s *RPC) Update(c context.Context, id string, state rpc.State) error {
if ok { if ok {
hostname, ok := metadata["hostname"] hostname, ok := metadata["hostname"]
if ok && len(hostname) != 0 { if ok && len(hostname) != 0 {
proc.Machine = hostname[0] step.Machine = hostname[0]
} }
} }
@ -134,15 +134,15 @@ func (s *RPC) Update(c context.Context, id string, state rpc.State) error {
return err return err
} }
if _, err = shared.UpdateProcStatus(s.store, *proc, state, pipeline.Started); err != nil { if _, err = shared.UpdateStepStatus(s.store, *step, state, pipeline.Started); err != nil {
log.Error().Err(err).Msg("rpc.update: cannot update proc") log.Error().Err(err).Msg("rpc.update: cannot update step")
} }
if pipeline.Procs, err = s.store.ProcList(pipeline); err != nil { if pipeline.Steps, err = s.store.StepList(pipeline); err != nil {
log.Error().Err(err).Msg("can not get proc list from store") log.Error().Err(err).Msg("can not get step list from store")
} }
if pipeline.Procs, err = model.Tree(pipeline.Procs); err != nil { if pipeline.Steps, err = model.Tree(pipeline.Steps); err != nil {
log.Error().Err(err).Msg("can not build tree from proc list") log.Error().Err(err).Msg("can not build tree from step list")
return err return err
} }
message := pubsub.Message{ message := pubsub.Message{
@ -156,7 +156,7 @@ func (s *RPC) Update(c context.Context, id string, state rpc.State) error {
Pipeline: *pipeline, Pipeline: *pipeline,
}) })
if err := s.pubsub.Publish(c, "topic/events", message); err != nil { if err := s.pubsub.Publish(c, "topic/events", message); err != nil {
log.Error().Err(err).Msg("can not publish proc list to") log.Error().Err(err).Msg("can not publish step list to")
} }
return nil return nil
@ -164,40 +164,40 @@ func (s *RPC) Update(c context.Context, id string, state rpc.State) error {
// Upload implements the rpc.Upload function // Upload implements the rpc.Upload function
func (s *RPC) Upload(c context.Context, id string, file *rpc.File) error { func (s *RPC) Upload(c context.Context, id string, file *rpc.File) error {
procID, err := strconv.ParseInt(id, 10, 64) stepID, err := strconv.ParseInt(id, 10, 64)
if err != nil { if err != nil {
return err return err
} }
pproc, err := s.store.ProcLoad(procID) pstep, err := s.store.StepLoad(stepID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find parent proc with id %d: %s", procID, err) log.Error().Msgf("error: cannot find parent step with id %d: %s", stepID, err)
return err return err
} }
pipeline, err := s.store.GetPipeline(pproc.PipelineID) pipeline, err := s.store.GetPipeline(pstep.PipelineID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find pipeline with id %d: %s", pproc.PipelineID, err) log.Error().Msgf("error: cannot find pipeline with id %d: %s", pstep.PipelineID, err)
return err return err
} }
proc, err := s.store.ProcChild(pipeline, pproc.PID, file.Proc) step, err := s.store.StepChild(pipeline, pstep.PID, file.Step)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find child proc with name %s: %s", file.Proc, err) log.Error().Msgf("error: cannot find child step with name %s: %s", file.Step, err)
return err return err
} }
if file.Mime == "application/json+logs" { if file.Mime == "application/json+logs" {
return s.store.LogSave( return s.store.LogSave(
proc, step,
bytes.NewBuffer(file.Data), bytes.NewBuffer(file.Data),
) )
} }
report := &model.File{ report := &model.File{
PipelineID: proc.PipelineID, PipelineID: step.PipelineID,
ProcID: proc.ID, StepID: step.ID,
PID: proc.PID, PID: step.PID,
Mime: file.Mime, Mime: file.Mime,
Name: file.Name, Name: file.Name,
Size: file.Size, Size: file.Size,
@ -237,27 +237,27 @@ func (s *RPC) Upload(c context.Context, id string, file *rpc.File) error {
// Init implements the rpc.Init function // Init implements the rpc.Init function
func (s *RPC) Init(c context.Context, id string, state rpc.State) error { func (s *RPC) Init(c context.Context, id string, state rpc.State) error {
procID, err := strconv.ParseInt(id, 10, 64) stepID, err := strconv.ParseInt(id, 10, 64)
if err != nil { if err != nil {
return err return err
} }
proc, err := s.store.ProcLoad(procID) step, err := s.store.StepLoad(stepID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find proc with id %d: %s", procID, err) log.Error().Msgf("error: cannot find step with id %d: %s", stepID, err)
return err return err
} }
metadata, ok := grpcMetadata.FromIncomingContext(c) metadata, ok := grpcMetadata.FromIncomingContext(c)
if ok { if ok {
hostname, ok := metadata["hostname"] hostname, ok := metadata["hostname"]
if ok && len(hostname) != 0 { if ok && len(hostname) != 0 {
proc.Machine = hostname[0] step.Machine = hostname[0]
} }
} }
pipeline, err := s.store.GetPipeline(proc.PipelineID) pipeline, err := s.store.GetPipeline(step.PipelineID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find pipeline with id %d: %s", proc.PipelineID, err) log.Error().Msgf("error: cannot find pipeline with id %d: %s", step.PipelineID, err)
return err return err
} }
@ -274,7 +274,7 @@ func (s *RPC) Init(c context.Context, id string, state rpc.State) error {
} }
defer func() { defer func() {
pipeline.Procs, _ = s.store.ProcList(pipeline) pipeline.Steps, _ = s.store.StepList(pipeline)
message := pubsub.Message{ message := pubsub.Message{
Labels: map[string]string{ Labels: map[string]string{
"repo": repo.FullName, "repo": repo.FullName,
@ -286,30 +286,30 @@ func (s *RPC) Init(c context.Context, id string, state rpc.State) error {
Pipeline: *pipeline, Pipeline: *pipeline,
}) })
if err := s.pubsub.Publish(c, "topic/events", message); err != nil { if err := s.pubsub.Publish(c, "topic/events", message); err != nil {
log.Error().Err(err).Msg("can not publish proc list to") log.Error().Err(err).Msg("can not publish step list to")
} }
}() }()
_, err = shared.UpdateProcToStatusStarted(s.store, *proc, state) _, err = shared.UpdateStepToStatusStarted(s.store, *step, state)
return err return err
} }
// Done implements the rpc.Done function // Done implements the rpc.Done function
func (s *RPC) Done(c context.Context, id string, state rpc.State) error { func (s *RPC) Done(c context.Context, id string, state rpc.State) error {
procID, err := strconv.ParseInt(id, 10, 64) stepID, err := strconv.ParseInt(id, 10, 64)
if err != nil { if err != nil {
return err return err
} }
proc, err := s.store.ProcLoad(procID) step, err := s.store.StepLoad(stepID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find proc with id %d: %s", procID, err) log.Error().Msgf("error: cannot find step with id %d: %s", stepID, err)
return err return err
} }
pipeline, err := s.store.GetPipeline(proc.PipelineID) pipeline, err := s.store.GetPipeline(step.PipelineID)
if err != nil { if err != nil {
log.Error().Msgf("error: cannot find pipeline with id %d: %s", proc.PipelineID, err) log.Error().Msgf("error: cannot find pipeline with id %d: %s", step.PipelineID, err)
return err return err
} }
@ -322,42 +322,42 @@ func (s *RPC) Done(c context.Context, id string, state rpc.State) error {
log.Trace(). log.Trace().
Str("repo_id", fmt.Sprint(repo.ID)). Str("repo_id", fmt.Sprint(repo.ID)).
Str("build_id", fmt.Sprint(pipeline.ID)). Str("build_id", fmt.Sprint(pipeline.ID)).
Str("proc_id", id). Str("step_id", id).
Msgf("gRPC Done with state: %#v", state) Msgf("gRPC Done with state: %#v", state)
if proc, err = shared.UpdateProcStatusToDone(s.store, *proc, state); err != nil { if step, err = shared.UpdateStepStatusToDone(s.store, *step, state); err != nil {
log.Error().Msgf("error: done: cannot update proc_id %d state: %s", proc.ID, err) log.Error().Msgf("error: done: cannot update step_id %d state: %s", step.ID, err)
} }
var queueErr error var queueErr error
if proc.Failing() { if step.Failing() {
queueErr = s.queue.Error(c, id, fmt.Errorf("Proc finished with exitcode %d, %s", state.ExitCode, state.Error)) queueErr = s.queue.Error(c, id, fmt.Errorf("Step finished with exitcode %d, %s", state.ExitCode, state.Error))
} else { } else {
queueErr = s.queue.Done(c, id, proc.State) queueErr = s.queue.Done(c, id, step.State)
} }
if queueErr != nil { if queueErr != nil {
log.Error().Msgf("error: done: cannot ack proc_id %d: %s", procID, err) log.Error().Msgf("error: done: cannot ack step_id %d: %s", stepID, err)
} }
procs, err := s.store.ProcList(pipeline) steps, err := s.store.StepList(pipeline)
if err != nil { if err != nil {
return err return err
} }
s.completeChildrenIfParentCompleted(procs, proc) s.completeChildrenIfParentCompleted(steps, step)
if !model.IsThereRunningStage(procs) { if !model.IsThereRunningStage(steps) {
if pipeline, err = shared.UpdateStatusToDone(s.store, *pipeline, model.PipelineStatus(procs), proc.Stopped); err != nil { if pipeline, err = shared.UpdateStatusToDone(s.store, *pipeline, model.PipelineStatus(steps), step.Stopped); err != nil {
log.Error().Err(err).Msgf("error: done: cannot update build_id %d final state", pipeline.ID) log.Error().Err(err).Msgf("error: done: cannot update build_id %d final state", pipeline.ID)
} }
} }
s.updateRemoteStatus(c, repo, pipeline, proc) s.updateRemoteStatus(c, repo, pipeline, step)
if err := s.logger.Close(c, id); err != nil { if err := s.logger.Close(c, id); err != nil {
log.Error().Err(err).Msgf("done: cannot close build_id %d logger", proc.ID) log.Error().Err(err).Msgf("done: cannot close build_id %d logger", step.ID)
} }
if err := s.notify(c, repo, pipeline, procs); err != nil { if err := s.notify(c, repo, pipeline, steps); err != nil {
return err return err
} }
@ -365,8 +365,8 @@ func (s *RPC) Done(c context.Context, id string, state rpc.State) error {
s.pipelineCount.WithLabelValues(repo.FullName, pipeline.Branch, string(pipeline.Status), "total").Inc() s.pipelineCount.WithLabelValues(repo.FullName, pipeline.Branch, string(pipeline.Status), "total").Inc()
s.pipelineTime.WithLabelValues(repo.FullName, pipeline.Branch, string(pipeline.Status), "total").Set(float64(pipeline.Finished - pipeline.Started)) s.pipelineTime.WithLabelValues(repo.FullName, pipeline.Branch, string(pipeline.Status), "total").Set(float64(pipeline.Finished - pipeline.Started))
} }
if model.IsMultiPipeline(procs) { if model.IsMultiPipeline(steps) {
s.pipelineTime.WithLabelValues(repo.FullName, pipeline.Branch, string(proc.State), proc.Name).Set(float64(proc.Stopped - proc.Started)) s.pipelineTime.WithLabelValues(repo.FullName, pipeline.Branch, string(step.State), step.Name).Set(float64(step.Stopped - step.Started))
} }
return nil return nil
@ -382,17 +382,17 @@ func (s *RPC) Log(c context.Context, id string, line *rpc.Line) error {
return nil return nil
} }
func (s *RPC) completeChildrenIfParentCompleted(procs []*model.Proc, completedProc *model.Proc) { func (s *RPC) completeChildrenIfParentCompleted(steps []*model.Step, completedStep *model.Step) {
for _, p := range procs { for _, p := range steps {
if p.Running() && p.PPID == completedProc.PID { if p.Running() && p.PPID == completedStep.PID {
if _, err := shared.UpdateProcToStatusSkipped(s.store, *p, completedProc.Stopped); err != nil { if _, err := shared.UpdateStepToStatusSkipped(s.store, *p, completedStep.Stopped); err != nil {
log.Error().Msgf("error: done: cannot update proc_id %d child state: %s", p.ID, err) log.Error().Msgf("error: done: cannot update step_id %d child state: %s", p.ID, err)
} }
} }
} }
} }
func (s *RPC) updateRemoteStatus(ctx context.Context, repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) { func (s *RPC) updateRemoteStatus(ctx context.Context, repo *model.Repo, pipeline *model.Pipeline, step *model.Step) {
user, err := s.store.GetUser(repo.UserID) user, err := s.store.GetUser(repo.UserID)
if err != nil { if err != nil {
log.Error().Err(err).Msgf("can not get user with id '%d'", repo.UserID) log.Error().Err(err).Msgf("can not get user with id '%d'", repo.UserID)
@ -410,17 +410,17 @@ func (s *RPC) updateRemoteStatus(ctx context.Context, repo *model.Repo, pipeline
} }
} }
// only do status updates for parent procs // only do status updates for parent steps
if proc != nil && proc.IsParent() { if step != nil && step.IsParent() {
err = s.remote.Status(ctx, user, repo, pipeline, proc) err = s.remote.Status(ctx, user, repo, pipeline, step)
if err != nil { if err != nil {
log.Error().Err(err).Msgf("error setting commit status for %s/%d", repo.FullName, pipeline.Number) log.Error().Err(err).Msgf("error setting commit status for %s/%d", repo.FullName, pipeline.Number)
} }
} }
} }
func (s *RPC) notify(c context.Context, repo *model.Repo, pipeline *model.Pipeline, procs []*model.Proc) (err error) { func (s *RPC) notify(c context.Context, repo *model.Repo, pipeline *model.Pipeline, steps []*model.Step) (err error) {
if pipeline.Procs, err = model.Tree(procs); err != nil { if pipeline.Steps, err = model.Tree(steps); err != nil {
return err return err
} }
message := pubsub.Message{ message := pubsub.Message{

View file

@ -88,7 +88,7 @@ func (s *WoodpeckerServer) Init(c context.Context, req *proto.InitRequest) (*pro
ExitCode: int(req.GetState().GetExitCode()), ExitCode: int(req.GetState().GetExitCode()),
Finished: req.GetState().GetFinished(), Finished: req.GetState().GetFinished(),
Started: req.GetState().GetStarted(), Started: req.GetState().GetStarted(),
Proc: req.GetState().GetName(), Step: req.GetState().GetName(),
Exited: req.GetState().GetExited(), Exited: req.GetState().GetExited(),
} }
res := new(proto.Empty) res := new(proto.Empty)
@ -102,7 +102,7 @@ func (s *WoodpeckerServer) Update(c context.Context, req *proto.UpdateRequest) (
ExitCode: int(req.GetState().GetExitCode()), ExitCode: int(req.GetState().GetExitCode()),
Finished: req.GetState().GetFinished(), Finished: req.GetState().GetFinished(),
Started: req.GetState().GetStarted(), Started: req.GetState().GetStarted(),
Proc: req.GetState().GetName(), Step: req.GetState().GetName(),
Exited: req.GetState().GetExited(), Exited: req.GetState().GetExited(),
} }
res := new(proto.Empty) res := new(proto.Empty)
@ -115,7 +115,7 @@ func (s *WoodpeckerServer) Upload(c context.Context, req *proto.UploadRequest) (
Data: req.GetFile().GetData(), Data: req.GetFile().GetData(),
Mime: req.GetFile().GetMime(), Mime: req.GetFile().GetMime(),
Name: req.GetFile().GetName(), Name: req.GetFile().GetName(),
Proc: req.GetFile().GetProc(), Step: req.GetFile().GetStep(),
Size: int(req.GetFile().GetSize()), Size: int(req.GetFile().GetSize()),
Time: req.GetFile().GetTime(), Time: req.GetFile().GetTime(),
Meta: req.GetFile().GetMeta(), Meta: req.GetFile().GetMeta(),
@ -132,7 +132,7 @@ func (s *WoodpeckerServer) Done(c context.Context, req *proto.DoneRequest) (*pro
ExitCode: int(req.GetState().GetExitCode()), ExitCode: int(req.GetState().GetExitCode()),
Finished: req.GetState().GetFinished(), Finished: req.GetState().GetFinished(),
Started: req.GetState().GetStarted(), Started: req.GetState().GetStarted(),
Proc: req.GetState().GetName(), Step: req.GetState().GetName(),
Exited: req.GetState().GetExited(), Exited: req.GetState().GetExited(),
} }
res := new(proto.Empty) res := new(proto.Empty)
@ -157,7 +157,7 @@ func (s *WoodpeckerServer) Log(c context.Context, req *proto.LogRequest) (*proto
Out: req.GetLine().GetOut(), Out: req.GetLine().GetOut(),
Pos: int(req.GetLine().GetPos()), Pos: int(req.GetLine().GetPos()),
Time: req.GetLine().GetTime(), Time: req.GetLine().GetTime(),
Proc: req.GetLine().GetProc(), Step: req.GetLine().GetStep(),
} }
res := new(proto.Empty) res := new(proto.Empty)
err := s.peer.Log(c, req.GetId(), line) err := s.peer.Log(c, req.GetId(), line)
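For orientation, a hedged sketch of the agent-facing state struct after the rename, using only the fields mapped above; the rpc import (the pipeline/rpc package) is an assumption, and the values are illustrative.

func exampleState() rpc.State {
	return rpc.State{
		Step:     "build", // previously State.Proc
		Exited:   true,
		ExitCode: 0,
		Started:  1666980000,
		Finished: 1666980042,
	}
}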

View file

@ -20,8 +20,8 @@ import "io"
// FileStore persists pipeline artifacts to storage. // FileStore persists pipeline artifacts to storage.
type FileStore interface { type FileStore interface {
FileList(*Pipeline) ([]*File, error) FileList(*Pipeline) ([]*File, error)
FileFind(*Proc, string) (*File, error) FileFind(*Step, string) (*File, error)
FileRead(*Proc, string) (io.ReadCloser, error) FileRead(*Step, string) (io.ReadCloser, error)
FileCreate(*File, io.Reader) error FileCreate(*File, io.Reader) error
} }
@ -29,7 +29,7 @@ type FileStore interface {
type File struct { type File struct {
ID int64 `json:"id" xorm:"pk autoincr 'file_id'"` ID int64 `json:"id" xorm:"pk autoincr 'file_id'"`
PipelineID int64 `json:"-" xorm:"INDEX 'file_pipeline_id'"` PipelineID int64 `json:"-" xorm:"INDEX 'file_pipeline_id'"`
ProcID int64 `json:"proc_id" xorm:"UNIQUE(s) INDEX 'file_proc_id'"` StepID int64 `json:"step_id" xorm:"UNIQUE(s) INDEX 'file_step_id'"`
PID int `json:"pid" xorm:"file_pid"` PID int `json:"pid" xorm:"file_pid"`
Name string `json:"name" xorm:"UNIQUE(s) file_name"` Name string `json:"name" xorm:"UNIQUE(s) file_name"`
Size int `json:"size" xorm:"file_size"` Size int `json:"size" xorm:"file_size"`
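A short usage sketch (not part of this change) of the renamed column, mirroring the Upload handler elsewhere in this commit; it assumes the server/model import path shown in this change, and all values are illustrative.

func exampleFile() *model.File {
	// A record like this is persisted via FileStore.FileCreate(file, reader).
	return &model.File{
		PipelineID: 42,
		StepID:     7, // was ProcID, column file_proc_id
		PID:        3,
		Mime:       "text/plain",
		Name:       "coverage.out",
		Size:       1024,
	}
}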

View file

@ -16,7 +16,7 @@ package model
type Logs struct { type Logs struct {
ID int64 `xorm:"pk autoincr 'log_id'"` ID int64 `xorm:"pk autoincr 'log_id'"`
ProcID int64 `xorm:"UNIQUE 'log_job_id'"` StepID int64 `xorm:"UNIQUE 'log_step_id'"`
Data []byte `xorm:"LONGBLOB 'log_data'"` Data []byte `xorm:"LONGBLOB 'log_data'"`
// TODO: add create timestamp // TODO: add create timestamp
} }

View file

@ -48,7 +48,7 @@ type Pipeline struct {
Verified bool `json:"verified" xorm:"pipeline_verified"` // deprecate Verified bool `json:"verified" xorm:"pipeline_verified"` // deprecate
Reviewer string `json:"reviewed_by" xorm:"pipeline_reviewer"` Reviewer string `json:"reviewed_by" xorm:"pipeline_reviewer"`
Reviewed int64 `json:"reviewed_at" xorm:"pipeline_reviewed"` Reviewed int64 `json:"reviewed_at" xorm:"pipeline_reviewed"`
Procs []*Proc `json:"procs,omitempty" xorm:"-"` Steps []*Step `json:"steps,omitempty" xorm:"-"`
Files []*File `json:"files,omitempty" xorm:"-"` Files []*File `json:"files,omitempty" xorm:"-"`
ChangedFiles []string `json:"changed_files,omitempty" xorm:"json 'changed_files'"` ChangedFiles []string `json:"changed_files,omitempty" xorm:"json 'changed_files'"`
AdditionalVariables map[string]string `json:"variables,omitempty" xorm:"json 'additional_variables'"` AdditionalVariables map[string]string `json:"variables,omitempty" xorm:"json 'additional_variables'"`
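A quick sketch of the API-facing impact: marshalling a pipeline now yields a "steps" array instead of "procs". Field names come from the structs in this change; the encoding/json and fmt imports and the model package path are assumptions.

func printPipelineJSON() {
	p := model.Pipeline{
		Number: 7,
		Steps:  []*model.Step{{PID: 1, Name: "clone", State: model.StatusSuccess}},
	}
	out, _ := json.Marshal(p)
	fmt.Println(string(out)) // ... "steps":[{"pid":1,"name":"clone","state":"success", ...}] ...
}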

View file

@ -17,66 +17,66 @@ package model
import "fmt" import "fmt"
// ProcStore persists process information to storage. // StepStore persists process information to storage.
type ProcStore interface { type StepStore interface {
ProcLoad(int64) (*Proc, error) StepLoad(int64) (*Step, error)
ProcFind(*Pipeline, int) (*Proc, error) StepFind(*Pipeline, int) (*Step, error)
ProcChild(*Pipeline, int, string) (*Proc, error) StepChild(*Pipeline, int, string) (*Step, error)
ProcList(*Pipeline) ([]*Proc, error) StepList(*Pipeline) ([]*Step, error)
ProcCreate([]*Proc) error StepCreate([]*Step) error
ProcUpdate(*Proc) error StepUpdate(*Step) error
ProcClear(*Pipeline) error StepClear(*Pipeline) error
} }
// Proc represents a process in the pipeline. // Step represents a process in the pipeline.
// swagger:model proc // swagger:model step
type Proc struct { type Step struct {
ID int64 `json:"id" xorm:"pk autoincr 'proc_id'"` ID int64 `json:"id" xorm:"pk autoincr 'step_id'"`
PipelineID int64 `json:"pipeline_id" xorm:"UNIQUE(s) INDEX 'proc_pipeline_id'"` PipelineID int64 `json:"pipeline_id" xorm:"UNIQUE(s) INDEX 'step_pipeline_id'"`
PID int `json:"pid" xorm:"UNIQUE(s) 'proc_pid'"` PID int `json:"pid" xorm:"UNIQUE(s) 'step_pid'"`
PPID int `json:"ppid" xorm:"proc_ppid"` PPID int `json:"ppid" xorm:"step_ppid"`
PGID int `json:"pgid" xorm:"proc_pgid"` PGID int `json:"pgid" xorm:"step_pgid"`
Name string `json:"name" xorm:"proc_name"` Name string `json:"name" xorm:"step_name"`
State StatusValue `json:"state" xorm:"proc_state"` State StatusValue `json:"state" xorm:"step_state"`
Error string `json:"error,omitempty" xorm:"VARCHAR(500) proc_error"` Error string `json:"error,omitempty" xorm:"VARCHAR(500) step_error"`
ExitCode int `json:"exit_code" xorm:"proc_exit_code"` ExitCode int `json:"exit_code" xorm:"step_exit_code"`
Started int64 `json:"start_time,omitempty" xorm:"proc_started"` Started int64 `json:"start_time,omitempty" xorm:"step_started"`
Stopped int64 `json:"end_time,omitempty" xorm:"proc_stopped"` Stopped int64 `json:"end_time,omitempty" xorm:"step_stopped"`
Machine string `json:"machine,omitempty" xorm:"proc_machine"` Machine string `json:"machine,omitempty" xorm:"step_machine"`
Platform string `json:"platform,omitempty" xorm:"proc_platform"` Platform string `json:"platform,omitempty" xorm:"step_platform"`
Environ map[string]string `json:"environ,omitempty" xorm:"json 'proc_environ'"` Environ map[string]string `json:"environ,omitempty" xorm:"json 'step_environ'"`
Children []*Proc `json:"children,omitempty" xorm:"-"` Children []*Step `json:"children,omitempty" xorm:"-"`
} }
type UpdateProcStore interface { type UpdateStepStore interface {
ProcUpdate(*Proc) error StepUpdate(*Step) error
} }
// TableName return database table name for xorm // TableName return database table name for xorm
func (Proc) TableName() string { func (Step) TableName() string {
return "procs" return "steps"
} }
// Running returns true if the process state is pending or running. // Running returns true if the process state is pending or running.
func (p *Proc) Running() bool { func (p *Step) Running() bool {
return p.State == StatusPending || p.State == StatusRunning return p.State == StatusPending || p.State == StatusRunning
} }
// Failing returns true if the process state is failed, killed or error. // Failing returns true if the process state is failed, killed or error.
func (p *Proc) Failing() bool { func (p *Step) Failing() bool {
return p.State == StatusError || p.State == StatusKilled || p.State == StatusFailure return p.State == StatusError || p.State == StatusKilled || p.State == StatusFailure
} }
// IsParent returns true if the process is a parent process. // IsParent returns true if the process is a parent process.
func (p *Proc) IsParent() bool { func (p *Step) IsParent() bool {
return p.PPID == 0 return p.PPID == 0
} }
// IsMultiPipeline checks if the proc list contains more than one parent proc // IsMultiPipeline checks if the step list contains more than one parent step
func IsMultiPipeline(procs []*Proc) bool { func IsMultiPipeline(steps []*Step) bool {
c := 0 c := 0
for _, proc := range procs { for _, step := range steps {
if proc.IsParent() { if step.IsParent() {
c++ c++
} }
if c > 1 { if c > 1 {
@ -87,35 +87,35 @@ func IsMultiPipeline(procs []*Proc) bool {
} }
// Tree creates a process tree from a flat process list. // Tree creates a process tree from a flat process list.
func Tree(procs []*Proc) ([]*Proc, error) { func Tree(steps []*Step) ([]*Step, error) {
var nodes []*Proc var nodes []*Step
// init parent nodes // init parent nodes
for i := range procs { for i := range steps {
if procs[i].IsParent() { if steps[i].IsParent() {
nodes = append(nodes, procs[i]) nodes = append(nodes, steps[i])
} }
} }
// assign children to parents // assign children to parents
for i := range procs { for i := range steps {
if !procs[i].IsParent() { if !steps[i].IsParent() {
parent, err := findNode(nodes, procs[i].PPID) parent, err := findNode(nodes, steps[i].PPID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
parent.Children = append(parent.Children, procs[i]) parent.Children = append(parent.Children, steps[i])
} }
} }
return nodes, nil return nodes, nil
} }
// PipelineStatus determines the pipeline status based on the corresponding proc list // PipelineStatus determines the pipeline status based on the corresponding step list
func PipelineStatus(procs []*Proc) StatusValue { func PipelineStatus(steps []*Step) StatusValue {
status := StatusSuccess status := StatusSuccess
for _, p := range procs { for _, p := range steps {
if p.IsParent() && p.Failing() { if p.IsParent() && p.Failing() {
status = p.State status = p.State
} }
@ -124,10 +124,10 @@ func PipelineStatus(procs []*Proc) StatusValue {
return status return status
} }
// IsThereRunningStage determines if it contains procs running or pending to run // IsThereRunningStage determines if it contains steps running or pending to run
// TODO: return false based on depends_on (https://github.com/woodpecker-ci/woodpecker/pull/730#discussion_r795681697) // TODO: return false based on depends_on (https://github.com/woodpecker-ci/woodpecker/pull/730#discussion_r795681697)
func IsThereRunningStage(procs []*Proc) bool { func IsThereRunningStage(steps []*Step) bool {
for _, p := range procs { for _, p := range steps {
if p.IsParent() { if p.IsParent() {
if p.Running() { if p.Running() {
return true return true
@ -137,12 +137,12 @@ func IsThereRunningStage(procs []*Proc) bool {
return false return false
} }
func findNode(nodes []*Proc, pid int) (*Proc, error) { func findNode(nodes []*Step, pid int) (*Step, error) {
for _, node := range nodes { for _, node := range nodes {
if node.PID == pid { if node.PID == pid {
return node, nil return node, nil
} }
} }
return nil, fmt.Errorf("Corrupt proc structure") return nil, fmt.Errorf("Corrupt step structure")
} }
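To make the helper semantics concrete, a small usage sketch against the renamed API (not part of this change); the values are illustrative, the fmt import is assumed, and the model package path is the one used elsewhere in this commit.

func treeExample() {
	steps := []*model.Step{
		{PID: 1, PPID: 0, Name: "pipeline", State: model.StatusSuccess}, // parent
		{PID: 2, PPID: 1, Name: "clone", State: model.StatusSuccess},    // child of 1
		{PID: 3, PPID: 1, Name: "build", State: model.StatusFailure},    // child of 1
	}

	tree, err := model.Tree(steps)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(tree), len(tree[0].Children)) // 1 2
	fmt.Println(model.PipelineStatus(steps))      // success: only parent steps decide the status
	fmt.Println(model.IsMultiPipeline(steps))     // false: there is a single parent step
}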

View file

@ -21,7 +21,7 @@ import (
) )
func TestTree(t *testing.T) { func TestTree(t *testing.T) {
procs := []*Proc{{ steps := []*Step{{
ID: 25, ID: 25,
PID: 2, PID: 2,
PipelineID: 6, PipelineID: 6,
@ -49,12 +49,12 @@ func TestTree(t *testing.T) {
State: StatusFailure, State: StatusFailure,
Error: "1", Error: "1",
}} }}
procs, err := Tree(procs) steps, err := Tree(steps)
assert.NoError(t, err) assert.NoError(t, err)
assert.Len(t, procs, 1) assert.Len(t, steps, 1)
assert.Len(t, procs[0].Children, 2) assert.Len(t, steps[0].Children, 2)
procs = []*Proc{{ steps = []*Step{{
ID: 25, ID: 25,
PID: 2, PID: 2,
PipelineID: 6, PipelineID: 6,
@ -64,6 +64,6 @@ func TestTree(t *testing.T) {
State: StatusSuccess, State: StatusSuccess,
Error: "0", Error: "0",
}} }}
_, err = Tree(procs) _, err = Tree(steps)
assert.Error(t, err) assert.Error(t, err)
} }

View file

@ -33,53 +33,53 @@ func Cancel(ctx context.Context, store store.Store, repo *model.Repo, pipeline *
return ErrBadRequest{Msg: "Cannot cancel a non-running or non-pending or non-blocked pipeline"} return ErrBadRequest{Msg: "Cannot cancel a non-running or non-pending or non-blocked pipeline"}
} }
procs, err := store.ProcList(pipeline) steps, err := store.StepList(pipeline)
if err != nil { if err != nil {
return ErrNotFound{Msg: err.Error()} return ErrNotFound{Msg: err.Error()}
} }
// First cancel/evict procs in the queue in one go // First cancel/evict steps in the queue in one go
var ( var (
procsToCancel []string stepsToCancel []string
procsToEvict []string stepsToEvict []string
) )
for _, proc := range procs { for _, step := range steps {
if proc.PPID != 0 { if step.PPID != 0 {
continue continue
} }
if proc.State == model.StatusRunning { if step.State == model.StatusRunning {
procsToCancel = append(procsToCancel, fmt.Sprint(proc.ID)) stepsToCancel = append(stepsToCancel, fmt.Sprint(step.ID))
} }
if proc.State == model.StatusPending { if step.State == model.StatusPending {
procsToEvict = append(procsToEvict, fmt.Sprint(proc.ID)) stepsToEvict = append(stepsToEvict, fmt.Sprint(step.ID))
} }
} }
if len(procsToEvict) != 0 { if len(stepsToEvict) != 0 {
if err := server.Config.Services.Queue.EvictAtOnce(ctx, procsToEvict); err != nil { if err := server.Config.Services.Queue.EvictAtOnce(ctx, stepsToEvict); err != nil {
log.Error().Err(err).Msgf("queue: evict_at_once: %v", procsToEvict) log.Error().Err(err).Msgf("queue: evict_at_once: %v", stepsToEvict)
} }
if err := server.Config.Services.Queue.ErrorAtOnce(ctx, procsToEvict, queue.ErrCancel); err != nil { if err := server.Config.Services.Queue.ErrorAtOnce(ctx, stepsToEvict, queue.ErrCancel); err != nil {
log.Error().Err(err).Msgf("queue: evict_at_once: %v", procsToEvict) log.Error().Err(err).Msgf("queue: evict_at_once: %v", stepsToEvict)
} }
} }
if len(procsToCancel) != 0 { if len(stepsToCancel) != 0 {
if err := server.Config.Services.Queue.ErrorAtOnce(ctx, procsToCancel, queue.ErrCancel); err != nil { if err := server.Config.Services.Queue.ErrorAtOnce(ctx, stepsToCancel, queue.ErrCancel); err != nil {
log.Error().Err(err).Msgf("queue: evict_at_once: %v", procsToCancel) log.Error().Err(err).Msgf("queue: evict_at_once: %v", stepsToCancel)
} }
} }
// Then update the DB status for pending pipelines // Then update the DB status for pending pipelines
// Running ones will be set when the agents stop on the cancel signal // Running ones will be set when the agents stop on the cancel signal
for _, proc := range procs { for _, step := range steps {
if proc.State == model.StatusPending { if step.State == model.StatusPending {
if proc.PPID != 0 { if step.PPID != 0 {
if _, err = shared.UpdateProcToStatusSkipped(store, *proc, 0); err != nil { if _, err = shared.UpdateStepToStatusSkipped(store, *step, 0); err != nil {
log.Error().Msgf("error: done: cannot update proc_id %d state: %s", proc.ID, err) log.Error().Msgf("error: done: cannot update step_id %d state: %s", step.ID, err)
} }
} else { } else {
if _, err = shared.UpdateProcToStatusKilled(store, *proc); err != nil { if _, err = shared.UpdateStepToStatusKilled(store, *step); err != nil {
log.Error().Msgf("error: done: cannot update proc_id %d state: %s", proc.ID, err) log.Error().Msgf("error: done: cannot update step_id %d state: %s", step.ID, err)
} }
} }
} }
@ -91,11 +91,11 @@ func Cancel(ctx context.Context, store store.Store, repo *model.Repo, pipeline *
return err return err
} }
procs, err = store.ProcList(killedBuild) steps, err = store.StepList(killedBuild)
if err != nil { if err != nil {
return ErrNotFound{Msg: err.Error()} return ErrNotFound{Msg: err.Error()}
} }
if killedBuild.Procs, err = model.Tree(procs); err != nil { if killedBuild.Steps, err = model.Tree(steps); err != nil {
return err return err
} }
if err := publishToTopic(ctx, killedBuild, repo); err != nil { if err := publishToTopic(ctx, killedBuild, repo); err != nil {

View file

@ -101,7 +101,7 @@ func Create(ctx context.Context, _store store.Store, repo *model.Repo, pipeline
pipeline.Status = model.StatusBlocked pipeline.Status = model.StatusBlocked
} }
err = _store.CreatePipeline(pipeline, pipeline.Procs...) err = _store.CreatePipeline(pipeline, pipeline.Steps...)
if err != nil { if err != nil {
msg := fmt.Sprintf("failure to save pipeline for %s", repo.FullName) msg := fmt.Sprintf("failure to save pipeline for %s", repo.FullName)
log.Error().Err(err).Msg(msg) log.Error().Err(err).Msg(msg)

View file

@ -35,11 +35,11 @@ func Decline(ctx context.Context, store store.Store, pipeline *model.Pipeline, u
return nil, fmt.Errorf("error updating pipeline. %s", err) return nil, fmt.Errorf("error updating pipeline. %s", err)
} }
if pipeline.Procs, err = store.ProcList(pipeline); err != nil { if pipeline.Steps, err = store.StepList(pipeline); err != nil {
log.Error().Err(err).Msg("can not get proc list from store") log.Error().Err(err).Msg("can not get step list from store")
} }
if pipeline.Procs, err = model.Tree(pipeline.Procs); err != nil { if pipeline.Steps, err = model.Tree(pipeline.Steps); err != nil {
log.Error().Err(err).Msg("can not build tree from proc list") log.Error().Err(err).Msg("can not build tree from step list")
} }
if err := updatePipelineStatus(ctx, pipeline, repo, user); err != nil { if err := updatePipelineStatus(ctx, pipeline, repo, user); err != nil {

View file

@ -27,7 +27,7 @@ import (
) )
func zeroSteps(pipeline *model.Pipeline, remoteYamlConfigs []*remote.FileMeta) bool { func zeroSteps(pipeline *model.Pipeline, remoteYamlConfigs []*remote.FileMeta) bool {
b := shared.ProcBuilder{ b := shared.StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: pipeline, Curr: pipeline,
Last: &model.Pipeline{}, Last: &model.Pipeline{},

View file

@ -24,13 +24,13 @@ import (
) )
func updatePipelineStatus(ctx context.Context, pipeline *model.Pipeline, repo *model.Repo, user *model.User) error { func updatePipelineStatus(ctx context.Context, pipeline *model.Pipeline, repo *model.Repo, user *model.User) error {
for _, proc := range pipeline.Procs { for _, step := range pipeline.Steps {
// skip child procs // skip child steps
if !proc.IsParent() { if !step.IsParent() {
continue continue
} }
err := server.Config.Services.Remote.Status(ctx, user, repo, pipeline, proc) err := server.Config.Services.Remote.Status(ctx, user, repo, pipeline, step)
if err != nil { if err != nil {
log.Error().Err(err).Msgf("error setting commit status for %s/%d", repo.FullName, pipeline.Number) log.Error().Err(err).Msgf("error setting commit status for %s/%d", repo.FullName, pipeline.Number)
return err return err

View file

@ -64,7 +64,7 @@ func createPipelineItems(ctx context.Context, store store.Store, pipeline *model
envs[k] = v envs[k] = v
} }
b := shared.ProcBuilder{ b := shared.StepBuilder{
Repo: repo, Repo: repo,
Curr: pipeline, Curr: pipeline,
Last: last, Last: last,

View file

@ -29,11 +29,11 @@ import (
func queueBuild(pipeline *model.Pipeline, repo *model.Repo, pipelineItems []*shared.PipelineItem) error { func queueBuild(pipeline *model.Pipeline, repo *model.Repo, pipelineItems []*shared.PipelineItem) error {
var tasks []*queue.Task var tasks []*queue.Task
for _, item := range pipelineItems { for _, item := range pipelineItems {
if item.Proc.State == model.StatusSkipped { if item.Step.State == model.StatusSkipped {
continue continue
} }
task := new(queue.Task) task := new(queue.Task)
task.ID = fmt.Sprint(item.Proc.ID) task.ID = fmt.Sprint(item.Step.ID)
task.Labels = map[string]string{} task.Labels = map[string]string{}
for k, v := range item.Labels { for k, v := range item.Labels {
task.Labels[k] = v task.Labels[k] = v
@ -45,7 +45,7 @@ func queueBuild(pipeline *model.Pipeline, repo *model.Repo, pipelineItems []*sha
task.DepStatus = make(map[string]string) task.DepStatus = make(map[string]string)
task.Data, _ = json.Marshal(rpc.Pipeline{ task.Data, _ = json.Marshal(rpc.Pipeline{
ID: fmt.Sprint(item.Proc.ID), ID: fmt.Sprint(item.Step.ID),
Config: item.Config, Config: item.Config,
Timeout: repo.Timeout, Timeout: repo.Timeout,
}) })
@ -61,8 +61,8 @@ func queueBuild(pipeline *model.Pipeline, repo *model.Repo, pipelineItems []*sha
func taskIds(dependsOn []string, pipelineItems []*shared.PipelineItem) (taskIds []string) { func taskIds(dependsOn []string, pipelineItems []*shared.PipelineItem) (taskIds []string) {
for _, dep := range dependsOn { for _, dep := range dependsOn {
for _, pipelineItem := range pipelineItems { for _, pipelineItem := range pipelineItems {
if pipelineItem.Proc.Name == dep { if pipelineItem.Step.Name == dep {
taskIds = append(taskIds, fmt.Sprint(pipelineItem.Proc.ID)) taskIds = append(taskIds, fmt.Sprint(pipelineItem.Step.ID))
} }
} }
} }

View file

@ -32,8 +32,8 @@ func start(ctx context.Context, store store.Store, activePipeline *model.Pipelin
log.Error().Err(err).Msg("Failed to cancel previous pipelines") log.Error().Err(err).Msg("Failed to cancel previous pipelines")
} }
if err := store.ProcCreate(activePipeline.Procs); err != nil { if err := store.StepCreate(activePipeline.Steps); err != nil {
log.Error().Err(err).Str("repo", repo.FullName).Msgf("error persisting procs for %s#%d", repo.FullName, activePipeline.Number) log.Error().Err(err).Str("repo", repo.FullName).Msgf("error persisting steps for %s#%d", repo.FullName, activePipeline.Number)
return nil, err return nil, err
} }

View file

@ -33,7 +33,7 @@ func publishToTopic(c context.Context, pipeline *model.Pipeline, repo *model.Rep
}, },
} }
pipelineCopy := *pipeline pipelineCopy := *pipeline
if pipelineCopy.Procs, err = model.Tree(pipelineCopy.Procs); err != nil { if pipelineCopy.Steps, err = model.Tree(pipelineCopy.Steps); err != nil {
return err return err
} }

View file

@ -227,7 +227,7 @@ func (c *config) Dir(ctx context.Context, u *model.User, r *model.Repo, p *model
} }
// Status creates a pipeline status for the Bitbucket commit. // Status creates a pipeline status for the Bitbucket commit.
func (c *config) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) error { func (c *config) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, step *model.Step) error {
status := internal.PipelineStatus{ status := internal.PipelineStatus{
State: convertStatus(pipeline.Status), State: convertStatus(pipeline.Status),
Desc: common.GetPipelineStatusDescription(pipeline.Status), Desc: common.GetPipelineStatusDescription(pipeline.Status),

View file

@ -255,7 +255,7 @@ func Test_bitbucket(t *testing.T) {
}) })
g.It("Should update the status", func() { g.It("Should update the status", func() {
err := c.Status(ctx, fakeUser, fakeRepo, fakePipeline, fakeProc) err := c.Status(ctx, fakeUser, fakeRepo, fakePipeline, fakeStep)
g.Assert(err).IsNil() g.Assert(err).IsNil()
}) })
@ -355,7 +355,7 @@ var (
Commit: "9ecad50", Commit: "9ecad50",
} }
fakeProc = &model.Proc{ fakeStep = &model.Step{
Name: "test", Name: "test",
State: model.StatusSuccess, State: model.StatusSuccess,
} }

View file

@ -190,7 +190,7 @@ func (c *Config) Dir(ctx context.Context, u *model.User, r *model.Repo, p *model
} }
// Status is not supported by the bitbucketserver driver. // Status is not supported by the bitbucketserver driver.
func (c *Config) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) error { func (c *Config) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, step *model.Step) error {
status := internal.PipelineStatus{ status := internal.PipelineStatus{
State: convertStatus(pipeline.Status), State: convertStatus(pipeline.Status),
Desc: common.GetPipelineStatusDescription(pipeline.Status), Desc: common.GetPipelineStatusDescription(pipeline.Status),

View file

@ -249,7 +249,7 @@ func (c *Coding) Dir(ctx context.Context, u *model.User, r *model.Repo, b *model
} }
// Status sends the commit status to the remote system. // Status sends the commit status to the remote system.
func (c *Coding) Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, proc *model.Proc) error { func (c *Coding) Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, step *model.Step) error {
// EMPTY: not implemented in Coding OAuth API // EMPTY: not implemented in Coding OAuth API
return nil return nil
} }

View file

@ -23,7 +23,7 @@ import (
"github.com/woodpecker-ci/woodpecker/server/model" "github.com/woodpecker-ci/woodpecker/server/model"
) )
func GetPipelineStatusContext(repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) string { func GetPipelineStatusContext(repo *model.Repo, pipeline *model.Pipeline, step *model.Step) string {
event := string(pipeline.Event) event := string(pipeline.Event)
switch pipeline.Event { switch pipeline.Event {
case model.EventPull: case model.EventPull:
@ -38,7 +38,7 @@ func GetPipelineStatusContext(repo *model.Repo, pipeline *model.Pipeline, proc *
err = tmpl.Execute(&ctx, map[string]interface{}{ err = tmpl.Execute(&ctx, map[string]interface{}{
"context": server.Config.Server.StatusContext, "context": server.Config.Server.StatusContext,
"event": event, "event": event,
"pipeline": proc.Name, "pipeline": step.Name,
"owner": repo.Owner, "owner": repo.Owner,
"repo": repo.Name, "repo": repo.Name,
}) })
@ -72,10 +72,10 @@ func GetPipelineStatusDescription(status model.StatusValue) string {
} }
} }
func GetPipelineStatusLink(repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) string { func GetPipelineStatusLink(repo *model.Repo, pipeline *model.Pipeline, step *model.Step) string {
if proc == nil { if step == nil {
return fmt.Sprintf("%s/%s/pipeline/%d", server.Config.Server.Host, repo.FullName, pipeline.Number) return fmt.Sprintf("%s/%s/pipeline/%d", server.Config.Server.Host, repo.FullName, pipeline.Number)
} }
return fmt.Sprintf("%s/%s/pipeline/%d/%d", server.Config.Server.Host, repo.FullName, pipeline.Number, proc.PID) return fmt.Sprintf("%s/%s/pipeline/%d/%d", server.Config.Server.Host, repo.FullName, pipeline.Number, step.PID)
} }
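For reference, a hedged example (not part of this change) of the links produced for a parent step versus the whole pipeline; it assumes server.Config.Server.Host is configured (shown here as https://ci.example.com), that the common helper package is imported as the remote drivers below do, and it uses only fields referenced by the function above.

func statusLinkExample() {
	repo := &model.Repo{FullName: "octocat/hello-world"}
	pipeline := &model.Pipeline{Number: 42}
	step := &model.Step{PID: 3}

	// e.g. https://ci.example.com/octocat/hello-world/pipeline/42/3
	fmt.Println(common.GetPipelineStatusLink(repo, pipeline, step))

	// without a step, the link points at the pipeline overview:
	// e.g. https://ci.example.com/octocat/hello-world/pipeline/42
	fmt.Println(common.GetPipelineStatusLink(repo, pipeline, nil))
}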

View file

@ -33,17 +33,17 @@ func TestGetPipelineStatusContext(t *testing.T) {
repo := &model.Repo{Owner: "user1", Name: "repo1"} repo := &model.Repo{Owner: "user1", Name: "repo1"}
pipeline := &model.Pipeline{Event: model.EventPull} pipeline := &model.Pipeline{Event: model.EventPull}
proc := &model.Proc{Name: "lint"} step := &model.Step{Name: "lint"}
assert.EqualValues(t, "", GetPipelineStatusContext(repo, pipeline, proc)) assert.EqualValues(t, "", GetPipelineStatusContext(repo, pipeline, step))
server.Config.Server.StatusContext = "ci/woodpecker" server.Config.Server.StatusContext = "ci/woodpecker"
server.Config.Server.StatusContextFormat = "{{ .context }}/{{ .event }}/{{ .pipeline }}" server.Config.Server.StatusContextFormat = "{{ .context }}/{{ .event }}/{{ .pipeline }}"
assert.EqualValues(t, "ci/woodpecker/pr/lint", GetPipelineStatusContext(repo, pipeline, proc)) assert.EqualValues(t, "ci/woodpecker/pr/lint", GetPipelineStatusContext(repo, pipeline, step))
pipeline.Event = model.EventPush pipeline.Event = model.EventPush
assert.EqualValues(t, "ci/woodpecker/push/lint", GetPipelineStatusContext(repo, pipeline, proc)) assert.EqualValues(t, "ci/woodpecker/push/lint", GetPipelineStatusContext(repo, pipeline, step))
server.Config.Server.StatusContext = "ci" server.Config.Server.StatusContext = "ci"
server.Config.Server.StatusContextFormat = "{{ .context }}:{{ .owner }}/{{ .repo }}:{{ .event }}:{{ .pipeline }}" server.Config.Server.StatusContextFormat = "{{ .context }}:{{ .owner }}/{{ .repo }}:{{ .event }}:{{ .pipeline }}"
assert.EqualValues(t, "ci:user1/repo1:push:lint", GetPipelineStatusContext(repo, pipeline, proc)) assert.EqualValues(t, "ci:user1/repo1:push:lint", GetPipelineStatusContext(repo, pipeline, step))
} }

View file

@ -326,7 +326,7 @@ func (c *Gitea) Dir(ctx context.Context, u *model.User, r *model.Repo, b *model.
} }
// Status is supported by the Gitea driver. // Status is supported by the Gitea driver.
func (c *Gitea) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) error { func (c *Gitea) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, step *model.Step) error {
client, err := c.newClientToken(ctx, user.Token) client, err := c.newClientToken(ctx, user.Token)
if err != nil { if err != nil {
return err return err
@ -337,10 +337,10 @@ func (c *Gitea) Status(ctx context.Context, user *model.User, repo *model.Repo,
repo.Name, repo.Name,
pipeline.Commit, pipeline.Commit,
gitea.CreateStatusOption{ gitea.CreateStatusOption{
State: getStatus(proc.State), State: getStatus(step.State),
TargetURL: common.GetPipelineStatusLink(repo, pipeline, proc), TargetURL: common.GetPipelineStatusLink(repo, pipeline, step),
Description: common.GetPipelineStatusDescription(proc.State), Description: common.GetPipelineStatusDescription(step.State),
Context: common.GetPipelineStatusContext(repo, pipeline, proc), Context: common.GetPipelineStatusContext(repo, pipeline, step),
}, },
) )
return err return err

View file

@ -138,7 +138,7 @@ func Test_gitea(t *testing.T) {
}) })
g.It("Should return nil from send pipeline status", func() { g.It("Should return nil from send pipeline status", func() {
err := c.Status(ctx, fakeUser, fakeRepo, fakePipeline, fakeProc) err := c.Status(ctx, fakeUser, fakeRepo, fakePipeline, fakeStep)
g.Assert(err).IsNil() g.Assert(err).IsNil()
}) })
@ -186,7 +186,7 @@ var (
Commit: "9ecad50", Commit: "9ecad50",
} }
fakeProc = &model.Proc{ fakeStep = &model.Step{
Name: "test", Name: "test",
State: model.StatusSuccess, State: model.StatusSuccess,
} }

View file

@ -437,7 +437,7 @@ var reDeploy = regexp.MustCompile(`.+/deployments/(\d+)`)
// Status sends the commit status to the remote system. // Status sends the commit status to the remote system.
// An example would be the GitHub pull request status. // An example would be the GitHub pull request status.
func (c *client) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) error { func (c *client) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, step *model.Step) error {
client := c.newClientToken(ctx, user.Token) client := c.newClientToken(ctx, user.Token)
if pipeline.Event == model.EventDeploy { if pipeline.Event == model.EventDeploy {
@ -456,10 +456,10 @@ func (c *client) Status(ctx context.Context, user *model.User, repo *model.Repo,
} }
_, _, err := client.Repositories.CreateStatus(ctx, repo.Owner, repo.Name, pipeline.Commit, &github.RepoStatus{ _, _, err := client.Repositories.CreateStatus(ctx, repo.Owner, repo.Name, pipeline.Commit, &github.RepoStatus{
Context: github.String(common.GetPipelineStatusContext(repo, pipeline, proc)), Context: github.String(common.GetPipelineStatusContext(repo, pipeline, step)),
State: github.String(convertStatus(proc.State)), State: github.String(convertStatus(step.State)),
Description: github.String(common.GetPipelineStatusDescription(proc.State)), Description: github.String(common.GetPipelineStatusDescription(step.State)),
TargetURL: github.String(common.GetPipelineStatusLink(repo, pipeline, proc)), TargetURL: github.String(common.GetPipelineStatusLink(repo, pipeline, step)),
}) })
return err return err
} }

View file

@ -387,7 +387,7 @@ func (g *Gitlab) Dir(ctx context.Context, user *model.User, repo *model.Repo, pi
} }
// Status sends the commit status back to gitlab. // Status sends the commit status back to gitlab.
func (g *Gitlab) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, proc *model.Proc) error { func (g *Gitlab) Status(ctx context.Context, user *model.User, repo *model.Repo, pipeline *model.Pipeline, step *model.Step) error {
client, err := newClient(g.URL, user.Token, g.SkipVerify) client, err := newClient(g.URL, user.Token, g.SkipVerify)
if err != nil { if err != nil {
return err return err
@ -399,10 +399,10 @@ func (g *Gitlab) Status(ctx context.Context, user *model.User, repo *model.Repo,
} }
_, _, err = client.Commits.SetCommitStatus(_repo.ID, pipeline.Commit, &gitlab.SetCommitStatusOptions{ _, _, err = client.Commits.SetCommitStatus(_repo.ID, pipeline.Commit, &gitlab.SetCommitStatusOptions{
State: getStatus(proc.State), State: getStatus(step.State),
Description: gitlab.String(common.GetPipelineStatusDescription(proc.State)), Description: gitlab.String(common.GetPipelineStatusDescription(step.State)),
TargetURL: gitlab.String(common.GetPipelineStatusLink(repo, pipeline, proc)), TargetURL: gitlab.String(common.GetPipelineStatusLink(repo, pipeline, step)),
Context: gitlab.String(common.GetPipelineStatusContext(repo, pipeline, proc)), Context: gitlab.String(common.GetPipelineStatusContext(repo, pipeline, step)),
}, gitlab.WithContext(ctx)) }, gitlab.WithContext(ctx))
return err return err

View file

@ -214,7 +214,7 @@ func (c *client) Dir(ctx context.Context, u *model.User, r *model.Repo, b *model
} }
// Status is not supported by the Gogs driver. // Status is not supported by the Gogs driver.
func (c *client) Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, proc *model.Proc) error { func (c *client) Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, step *model.Step) error {
return nil return nil
} }

View file

@ -342,11 +342,11 @@ func (_m *Remote) Repos(ctx context.Context, u *model.User) ([]*model.Repo, erro
} }
// Status provides a mock function with given fields: ctx, u, r, b, p // Status provides a mock function with given fields: ctx, u, r, b, p
func (_m *Remote) Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, p *model.Proc) error { func (_m *Remote) Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, p *model.Step) error {
ret := _m.Called(ctx, u, r, b, p) ret := _m.Called(ctx, u, r, b, p)
var r0 error var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *model.User, *model.Repo, *model.Pipeline, *model.Proc) error); ok { if rf, ok := ret.Get(0).(func(context.Context, *model.User, *model.Repo, *model.Pipeline, *model.Step) error); ok {
r0 = rf(ctx, u, r, b, p) r0 = rf(ctx, u, r, b, p)
} else { } else {
r0 = ret.Error(0) r0 = ret.Error(0)

View file

@ -62,7 +62,7 @@ type Remote interface {
// Status sends the commit status to the remote system. // Status sends the commit status to the remote system.
// An example would be the GitHub pull request status. // An example would be the GitHub pull request status.
Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, p *model.Proc) error Status(ctx context.Context, u *model.User, r *model.Repo, b *model.Pipeline, p *model.Step) error
// Netrc returns a .netrc file that can be used to clone // Netrc returns a .netrc file that can be used to clone
// private repositories from a remote system. // private repositories from a remote system.
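
The doc comment above only states the intent, so here is a minimal sketch of a call site against the renamed signature; the helper name, the concrete step values and the import paths are assumptions, only the shape of Status comes from the interface above:

package example

import (
	"context"

	"github.com/woodpecker-ci/woodpecker/server/model"
	"github.com/woodpecker-ci/woodpecker/server/remote" // assumed import path for the interface above
)

// reportStep sends a commit status for a single finished step.
// Everything except the Status signature itself is illustrative.
func reportStep(ctx context.Context, r remote.Remote, u *model.User, repo *model.Repo, p *model.Pipeline) error {
	step := &model.Step{
		Name:  "build",
		State: model.StatusSuccess,
	}
	return r.Status(ctx, u, repo, p, step)
}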

View file

@ -85,16 +85,15 @@ func apiRoutes(e *gin.Engine) {
repo.DELETE("/pipelines/:number", session.MustPush, api.DeletePipeline) repo.DELETE("/pipelines/:number", session.MustPush, api.DeletePipeline)
repo.POST("/pipelines/:number/approve", session.MustPush, api.PostApproval) repo.POST("/pipelines/:number/approve", session.MustPush, api.PostApproval)
repo.POST("/pipelines/:number/decline", session.MustPush, api.PostDecline) repo.POST("/pipelines/:number/decline", session.MustPush, api.PostDecline)
repo.DELETE("/pipelines/:number/:job", session.MustPush, api.DeletePipeline)
repo.GET("/logs/:number/:pid", api.GetProcLogs) repo.GET("/logs/:number/:pid", api.GetStepLogs)
repo.GET("/logs/:number/:pid/:proc", api.GetPipelineLogs) repo.GET("/logs/:number/:pid/:step", api.GetPipelineLogs)
// requires push permissions // requires push permissions
repo.DELETE("/logs/:number", session.MustPush, api.DeletePipelineLogs) repo.DELETE("/logs/:number", session.MustPush, api.DeletePipelineLogs)
repo.GET("/files/:number", api.FileList) repo.GET("/files/:number", api.FileList)
repo.GET("/files/:number/:proc/*file", api.FileGet) repo.GET("/files/:number/:step/*file", api.FileGet)
// requires push permissions // requires push permissions
repo.GET("/secrets", session.MustPush, api.GetSecretList) repo.GET("/secrets", session.MustPush, api.GetSecretList)

View file

@ -1,87 +0,0 @@
// Copyright 2019 mhmxs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shared
import (
"time"
"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
"github.com/woodpecker-ci/woodpecker/server/model"
)
// TODO(974) move to server/pipeline/*
func UpdateProcStatus(store model.UpdateProcStore, proc model.Proc, state rpc.State, started int64) (*model.Proc, error) {
if state.Exited {
proc.Stopped = state.Finished
proc.ExitCode = state.ExitCode
proc.Error = state.Error
proc.State = model.StatusSuccess
if state.ExitCode != 0 || state.Error != "" {
proc.State = model.StatusFailure
}
if state.ExitCode == 137 {
proc.State = model.StatusKilled
}
} else {
proc.Started = state.Started
proc.State = model.StatusRunning
}
if proc.Started == 0 && proc.Stopped != 0 {
proc.Started = started
}
return &proc, store.ProcUpdate(&proc)
}
func UpdateProcToStatusStarted(store model.UpdateProcStore, proc model.Proc, state rpc.State) (*model.Proc, error) {
proc.Started = state.Started
proc.State = model.StatusRunning
return &proc, store.ProcUpdate(&proc)
}
func UpdateProcToStatusSkipped(store model.UpdateProcStore, proc model.Proc, stopped int64) (*model.Proc, error) {
proc.State = model.StatusSkipped
if proc.Started != 0 {
proc.State = model.StatusSuccess // for daemons that are killed
proc.Stopped = stopped
}
return &proc, store.ProcUpdate(&proc)
}
func UpdateProcStatusToDone(store model.UpdateProcStore, proc model.Proc, state rpc.State) (*model.Proc, error) {
proc.Stopped = state.Finished
proc.Error = state.Error
proc.ExitCode = state.ExitCode
if state.Started == 0 {
proc.State = model.StatusSkipped
} else {
proc.State = model.StatusSuccess
}
if proc.ExitCode != 0 || proc.Error != "" {
proc.State = model.StatusFailure
}
return &proc, store.ProcUpdate(&proc)
}
func UpdateProcToStatusKilled(store model.UpdateProcStore, proc model.Proc) (*model.Proc, error) {
proc.State = model.StatusKilled
proc.Stopped = time.Now().Unix()
if proc.Started == 0 {
proc.Started = proc.Stopped
}
proc.ExitCode = 137
return &proc, store.ProcUpdate(&proc)
}

View file

@ -1,286 +0,0 @@
// Copyright 2019 mhmxs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shared
import (
"testing"
"time"
"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
"github.com/woodpecker-ci/woodpecker/server/model"
)
// TODO(974) move to server/pipeline/*
type mockUpdateProcStore struct{}
func (m *mockUpdateProcStore) ProcUpdate(build *model.Proc) error {
return nil
}
func TestUpdateProcStatusNotExited(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: false,
// Dummy data
Finished: int64(1),
ExitCode: 137,
Error: "not an error",
}
proc, _ := UpdateProcStatus(&mockUpdateProcStore{}, model.Proc{}, state, int64(1))
if proc.State != model.StatusRunning {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusRunning, proc.State)
} else if proc.Started != int64(42) {
t.Errorf("Proc started not equals 42 != %d", proc.Started)
} else if proc.Stopped != int64(0) {
t.Errorf("Proc stopped not equals 0 != %d", proc.Stopped)
} else if proc.ExitCode != 0 {
t.Errorf("Proc exit code not equals 0 != %d", proc.ExitCode)
} else if proc.Error != "" {
t.Errorf("Proc error not equals '' != '%s'", proc.Error)
}
}
func TestUpdateProcStatusNotExitedButStopped(t *testing.T) {
t.Parallel()
proc := &model.Proc{Stopped: int64(64)}
state := rpc.State{
Exited: false,
// Dummy data
Finished: int64(1),
ExitCode: 137,
Error: "not an error",
}
proc, _ = UpdateProcStatus(&mockUpdateProcStore{}, *proc, state, int64(42))
if proc.State != model.StatusRunning {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusRunning, proc.State)
} else if proc.Started != int64(42) {
t.Errorf("Proc started not equals 42 != %d", proc.Started)
} else if proc.Stopped != int64(64) {
t.Errorf("Proc stopped not equals 64 != %d", proc.Stopped)
} else if proc.ExitCode != 0 {
t.Errorf("Proc exit code not equals 0 != %d", proc.ExitCode)
} else if proc.Error != "" {
t.Errorf("Proc error not equals '' != '%s'", proc.Error)
}
}
func TestUpdateProcStatusExited(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: true,
Finished: int64(34),
ExitCode: 137,
Error: "an error",
}
proc, _ := UpdateProcStatus(&mockUpdateProcStore{}, model.Proc{}, state, int64(42))
if proc.State != model.StatusKilled {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusKilled, proc.State)
} else if proc.Started != int64(42) {
t.Errorf("Proc started not equals 42 != %d", proc.Started)
} else if proc.Stopped != int64(34) {
t.Errorf("Proc stopped not equals 34 != %d", proc.Stopped)
} else if proc.ExitCode != 137 {
t.Errorf("Proc exit code not equals 137 != %d", proc.ExitCode)
} else if proc.Error != "an error" {
t.Errorf("Proc error not equals 'an error' != '%s'", proc.Error)
}
}
func TestUpdateProcStatusExitedButNot137(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: true,
Finished: int64(34),
Error: "an error",
}
proc, _ := UpdateProcStatus(&mockUpdateProcStore{}, model.Proc{}, state, int64(42))
if proc.State != model.StatusFailure {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusFailure, proc.State)
} else if proc.Started != int64(42) {
t.Errorf("Proc started not equals 42 != %d", proc.Started)
} else if proc.Stopped != int64(34) {
t.Errorf("Proc stopped not equals 34 != %d", proc.Stopped)
} else if proc.ExitCode != 0 {
t.Errorf("Proc exit code not equals 0 != %d", proc.ExitCode)
} else if proc.Error != "an error" {
t.Errorf("Proc error not equals 'an error' != '%s'", proc.Error)
}
}
func TestUpdateProcStatusExitedWithCode(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: true,
Finished: int64(34),
ExitCode: 1,
Error: "an error",
}
proc, _ := UpdateProcStatus(&mockUpdateProcStore{}, model.Proc{}, state, int64(42))
if proc.State != model.StatusFailure {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusFailure, proc.State)
} else if proc.ExitCode != 1 {
t.Errorf("Proc exit code not equals 1 != %d", proc.ExitCode)
}
}
func TestUpdateProcToStatusStarted(t *testing.T) {
t.Parallel()
state := rpc.State{Started: int64(42)}
proc, _ := UpdateProcToStatusStarted(&mockUpdateProcStore{}, model.Proc{}, state)
if proc.State != model.StatusRunning {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusRunning, proc.State)
} else if proc.Started != int64(42) {
t.Errorf("Proc started not equals 42 != %d", proc.Started)
}
}
func TestUpdateProcToStatusSkipped(t *testing.T) {
t.Parallel()
proc, _ := UpdateProcToStatusSkipped(&mockUpdateProcStore{}, model.Proc{}, int64(1))
if proc.State != model.StatusSkipped {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusSkipped, proc.State)
} else if proc.Stopped != int64(0) {
t.Errorf("Proc stopped not equals 0 != %d", proc.Stopped)
}
}
func TestUpdateProcToStatusSkippedButStarted(t *testing.T) {
t.Parallel()
proc := &model.Proc{
Started: int64(42),
}
proc, _ = UpdateProcToStatusSkipped(&mockUpdateProcStore{}, *proc, int64(1))
if proc.State != model.StatusSuccess {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusSuccess, proc.State)
} else if proc.Stopped != int64(1) {
t.Errorf("Proc stopped not equals 1 != %d", proc.Stopped)
}
}
func TestUpdateProcStatusToDoneSkipped(t *testing.T) {
t.Parallel()
state := rpc.State{
Finished: int64(34),
}
proc, _ := UpdateProcStatusToDone(&mockUpdateProcStore{}, model.Proc{}, state)
if proc.State != model.StatusSkipped {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusSkipped, proc.State)
} else if proc.Stopped != int64(34) {
t.Errorf("Proc stopped not equals 34 != %d", proc.Stopped)
} else if proc.Error != "" {
t.Errorf("Proc error not equals '' != '%s'", proc.Error)
} else if proc.ExitCode != 0 {
t.Errorf("Proc exit code not equals 0 != %d", proc.ExitCode)
}
}
func TestUpdateProcStatusToDoneSuccess(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Finished: int64(34),
}
proc, _ := UpdateProcStatusToDone(&mockUpdateProcStore{}, model.Proc{}, state)
if proc.State != model.StatusSuccess {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusSuccess, proc.State)
} else if proc.Stopped != int64(34) {
t.Errorf("Proc stopped not equals 34 != %d", proc.Stopped)
} else if proc.Error != "" {
t.Errorf("Proc error not equals '' != '%s'", proc.Error)
} else if proc.ExitCode != 0 {
t.Errorf("Proc exit code not equals 0 != %d", proc.ExitCode)
}
}
func TestUpdateProcStatusToDoneFailureWithError(t *testing.T) {
t.Parallel()
state := rpc.State{Error: "an error"}
proc, _ := UpdateProcStatusToDone(&mockUpdateProcStore{}, model.Proc{}, state)
if proc.State != model.StatusFailure {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusFailure, proc.State)
}
}
func TestUpdateProcStatusToDoneFailureWithExitCode(t *testing.T) {
t.Parallel()
state := rpc.State{ExitCode: 43}
proc, _ := UpdateProcStatusToDone(&mockUpdateProcStore{}, model.Proc{}, state)
if proc.State != model.StatusFailure {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusFailure, proc.State)
}
}
func TestUpdateProcToStatusKilledStarted(t *testing.T) {
t.Parallel()
now := time.Now().Unix()
proc, _ := UpdateProcToStatusKilled(&mockUpdateProcStore{}, model.Proc{})
if proc.State != model.StatusKilled {
t.Errorf("Proc status not equals '%s' != '%s'", model.StatusKilled, proc.State)
} else if proc.Stopped < now {
t.Errorf("Proc stopped not equals %d < %d", now, proc.Stopped)
} else if proc.Started != proc.Stopped {
t.Errorf("Proc started not equals %d != %d", proc.Stopped, proc.Started)
} else if proc.ExitCode != 137 {
t.Errorf("Proc exit code not equals 137 != %d", proc.ExitCode)
}
}
func TestUpdateProcToStatusKilledNotStarted(t *testing.T) {
t.Parallel()
proc, _ := UpdateProcToStatusKilled(&mockUpdateProcStore{}, model.Proc{Started: int64(1)})
if proc.Started != int64(1) {
t.Errorf("Proc started not equals 1 != %d", proc.Started)
}
}

View file

@ -39,8 +39,8 @@ import (
// TODO(974) move to pipeline/* // TODO(974) move to pipeline/*
// ProcBuilder Takes the hook data and the yaml and returns the internal data model // StepBuilder Takes the hook data and the yaml and returns the internal data model
type ProcBuilder struct { type StepBuilder struct {
Repo *model.Repo Repo *model.Repo
Curr *model.Pipeline Curr *model.Pipeline
Last *model.Pipeline Last *model.Pipeline
@ -53,7 +53,7 @@ type ProcBuilder struct {
} }
type PipelineItem struct { type PipelineItem struct {
Proc *model.Proc Step *model.Step
Platform string Platform string
Labels map[string]string Labels map[string]string
DependsOn []string DependsOn []string
@ -61,7 +61,7 @@ type PipelineItem struct {
Config *backend.Config Config *backend.Config
} }
func (b *ProcBuilder) Build() ([]*PipelineItem, error) { func (b *StepBuilder) Build() ([]*PipelineItem, error) {
var items []*PipelineItem var items []*PipelineItem
sort.Sort(remote.ByName(b.Yamls)) sort.Sort(remote.ByName(b.Yamls))
@ -79,7 +79,7 @@ func (b *ProcBuilder) Build() ([]*PipelineItem, error) {
} }
for _, axis := range axes { for _, axis := range axes {
proc := &model.Proc{ step := &model.Step{
PipelineID: b.Curr.ID, PipelineID: b.Curr.ID,
PID: pidSequence, PID: pidSequence,
PGID: pidSequence, PGID: pidSequence,
@ -88,7 +88,7 @@ func (b *ProcBuilder) Build() ([]*PipelineItem, error) {
Name: SanitizePath(y.Name), Name: SanitizePath(y.Name),
} }
metadata := metadataFromStruct(b.Repo, b.Curr, b.Last, proc, b.Link) metadata := metadataFromStruct(b.Repo, b.Curr, b.Last, step, b.Link)
environ := b.environmentVariables(metadata, axis) environ := b.environmentVariables(metadata, axis)
// add global environment variables for substituting // add global environment variables for substituting
@ -121,12 +121,12 @@ func (b *ProcBuilder) Build() ([]*PipelineItem, error) {
// checking if filtered. // checking if filtered.
if match, err := parsed.When.Match(metadata, true); !match && err == nil { if match, err := parsed.When.Match(metadata, true); !match && err == nil {
log.Debug().Str("pipeline", proc.Name).Msg( log.Debug().Str("pipeline", step.Name).Msg(
"Marked as skipped, dose not match metadata", "Marked as skipped, dose not match metadata",
) )
proc.State = model.StatusSkipped step.State = model.StatusSkipped
} else if err != nil { } else if err != nil {
log.Debug().Str("pipeline", proc.Name).Msg( log.Debug().Str("pipeline", step.Name).Msg(
"Pipeline config could not be parsed", "Pipeline config could not be parsed",
) )
return nil, err return nil, err
@ -134,13 +134,13 @@ func (b *ProcBuilder) Build() ([]*PipelineItem, error) {
// TODO: deprecated branches filter => remove after some time // TODO: deprecated branches filter => remove after some time
if !parsed.Branches.Match(b.Curr.Branch) && (b.Curr.Event != model.EventDeploy && b.Curr.Event != model.EventTag) { if !parsed.Branches.Match(b.Curr.Branch) && (b.Curr.Event != model.EventDeploy && b.Curr.Event != model.EventTag) {
log.Debug().Str("pipeline", proc.Name).Msg( log.Debug().Str("pipeline", step.Name).Msg(
"Marked as skipped, dose not match branch", "Marked as skipped, dose not match branch",
) )
proc.State = model.StatusSkipped step.State = model.StatusSkipped
} }
ir, err := b.toInternalRepresentation(parsed, environ, metadata, proc.ID) ir, err := b.toInternalRepresentation(parsed, environ, metadata, step.ID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -150,7 +150,7 @@ func (b *ProcBuilder) Build() ([]*PipelineItem, error) {
} }
item := &PipelineItem{ item := &PipelineItem{
Proc: proc, Step: step,
Config: ir, Config: ir,
Labels: parsed.Labels, Labels: parsed.Labels,
DependsOn: parsed.DependsOn, DependsOn: parsed.DependsOn,
@ -168,17 +168,17 @@ func (b *ProcBuilder) Build() ([]*PipelineItem, error) {
items = filterItemsWithMissingDependencies(items) items = filterItemsWithMissingDependencies(items)
// check if at least one proc can start, if list is not empty // check if at least one step can start, if list is not empty
if len(items) > 0 && !procListContainsItemsToRun(items) { if len(items) > 0 && !stepListContainsItemsToRun(items) {
return nil, fmt.Errorf("pipeline has no startpoint") return nil, fmt.Errorf("pipeline has no startpoint")
} }
return items, nil return items, nil
} }
func procListContainsItemsToRun(items []*PipelineItem) bool { func stepListContainsItemsToRun(items []*PipelineItem) bool {
for i := range items { for i := range items {
if items[i].Proc.State == model.StatusPending { if items[i].Step.State == model.StatusPending {
return true return true
} }
} }
@ -199,7 +199,7 @@ func filterItemsWithMissingDependencies(items []*PipelineItem) []*PipelineItem {
if len(itemsToRemove) > 0 { if len(itemsToRemove) > 0 {
filtered := make([]*PipelineItem, 0) filtered := make([]*PipelineItem, 0)
for _, item := range items { for _, item := range items {
if !containsItemWithName(item.Proc.Name, itemsToRemove) { if !containsItemWithName(item.Step.Name, itemsToRemove) {
filtered = append(filtered, item) filtered = append(filtered, item)
} }
} }
@ -212,14 +212,14 @@ func filterItemsWithMissingDependencies(items []*PipelineItem) []*PipelineItem {
func containsItemWithName(name string, items []*PipelineItem) bool { func containsItemWithName(name string, items []*PipelineItem) bool {
for _, item := range items { for _, item := range items {
if name == item.Proc.Name { if name == item.Step.Name {
return true return true
} }
} }
return false return false
} }
func (b *ProcBuilder) envsubst(y string, environ map[string]string) (string, error) { func (b *StepBuilder) envsubst(y string, environ map[string]string) (string, error) {
return envsubst.Eval(y, func(name string) string { return envsubst.Eval(y, func(name string) string {
env := environ[name] env := environ[name]
if strings.Contains(env, "\n") { if strings.Contains(env, "\n") {
@ -229,7 +229,7 @@ func (b *ProcBuilder) envsubst(y string, environ map[string]string) (string, err
}) })
} }
func (b *ProcBuilder) environmentVariables(metadata frontend.Metadata, axis matrix.Axis) map[string]string { func (b *StepBuilder) environmentVariables(metadata frontend.Metadata, axis matrix.Axis) map[string]string {
environ := metadata.Environ() environ := metadata.Environ()
for k, v := range axis { for k, v := range axis {
environ[k] = v environ[k] = v
@ -237,7 +237,7 @@ func (b *ProcBuilder) environmentVariables(metadata frontend.Metadata, axis matr
return environ return environ
} }
func (b *ProcBuilder) toInternalRepresentation(parsed *yaml.Config, environ map[string]string, metadata frontend.Metadata, procID int64) (*backend.Config, error) { func (b *StepBuilder) toInternalRepresentation(parsed *yaml.Config, environ map[string]string, metadata frontend.Metadata, stepID int64) (*backend.Config, error) {
var secrets []compiler.Secret var secrets []compiler.Secret
for _, sec := range b.Secs { for _, sec := range b.Secs {
if !sec.Match(b.Curr.Event) { if !sec.Match(b.Curr.Event) {
@ -283,7 +283,7 @@ func (b *ProcBuilder) toInternalRepresentation(parsed *yaml.Config, environ map[
compiler.WithPrefix( compiler.WithPrefix(
fmt.Sprintf( fmt.Sprintf(
"wp_%d_%d", "wp_%d_%d",
procID, stepID,
rand.Int(), rand.Int(),
), ),
), ),
@ -296,9 +296,9 @@ func (b *ProcBuilder) toInternalRepresentation(parsed *yaml.Config, environ map[
func SetPipelineStepsOnPipeline(pipeline *model.Pipeline, pipelineItems []*PipelineItem) *model.Pipeline { func SetPipelineStepsOnPipeline(pipeline *model.Pipeline, pipelineItems []*PipelineItem) *model.Pipeline {
var pidSequence int var pidSequence int
for _, item := range pipelineItems { for _, item := range pipelineItems {
pipeline.Procs = append(pipeline.Procs, item.Proc) pipeline.Steps = append(pipeline.Steps, item.Step)
if pidSequence < item.Proc.PID { if pidSequence < item.Step.PID {
pidSequence = item.Proc.PID pidSequence = item.Step.PID
} }
} }
@ -310,18 +310,18 @@ func SetPipelineStepsOnPipeline(pipeline *model.Pipeline, pipelineItems []*Pipel
if gid == 0 { if gid == 0 {
gid = pidSequence gid = pidSequence
} }
proc := &model.Proc{ step := &model.Step{
PipelineID: pipeline.ID, PipelineID: pipeline.ID,
Name: step.Alias, Name: step.Alias,
PID: pidSequence, PID: pidSequence,
PPID: item.Proc.PID, PPID: item.Step.PID,
PGID: gid, PGID: gid,
State: model.StatusPending, State: model.StatusPending,
} }
if item.Proc.State == model.StatusSkipped { if item.Step.State == model.StatusSkipped {
proc.State = model.StatusSkipped step.State = model.StatusSkipped
} }
pipeline.Procs = append(pipeline.Procs, proc) pipeline.Steps = append(pipeline.Steps, step)
} }
} }
} }
@ -330,7 +330,7 @@ func SetPipelineStepsOnPipeline(pipeline *model.Pipeline, pipelineItems []*Pipel
} }
// return the metadata from the cli context. // return the metadata from the cli context.
func metadataFromStruct(repo *model.Repo, pipeline, last *model.Pipeline, proc *model.Proc, link string) frontend.Metadata { func metadataFromStruct(repo *model.Repo, pipeline, last *model.Pipeline, step *model.Step, link string) frontend.Metadata {
host := link host := link
uri, err := url.Parse(link) uri, err := url.Parse(link)
if err == nil { if err == nil {
@ -346,9 +346,9 @@ func metadataFromStruct(repo *model.Repo, pipeline, last *model.Pipeline, proc *
}, },
Curr: metadataPipelineFromModelPipeline(pipeline, true), Curr: metadataPipelineFromModelPipeline(pipeline, true),
Prev: metadataPipelineFromModelPipeline(last, false), Prev: metadataPipelineFromModelPipeline(last, false),
Job: frontend.Job{ Step: frontend.Step{
Number: proc.PID, Number: step.PID,
Matrix: proc.Environ, Matrix: step.Environ,
}, },
Sys: frontend.System{ Sys: frontend.System{
Name: "woodpecker", Name: "woodpecker",

View file

@ -28,7 +28,7 @@ import (
func TestGlobalEnvsubst(t *testing.T) { func TestGlobalEnvsubst(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Envs: map[string]string{ Envs: map[string]string{
"KEY_K": "VALUE_V", "KEY_K": "VALUE_V",
"IMAGE": "scratch", "IMAGE": "scratch",
@ -62,7 +62,7 @@ pipeline:
func TestMissingGlobalEnvsubst(t *testing.T) { func TestMissingGlobalEnvsubst(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Envs: map[string]string{ Envs: map[string]string{
"KEY_K": "VALUE_V", "KEY_K": "VALUE_V",
"NO_IMAGE": "scratch", "NO_IMAGE": "scratch",
@ -96,7 +96,7 @@ pipeline:
func TestMultilineEnvsubst(t *testing.T) { func TestMultilineEnvsubst(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: &model.Pipeline{ Curr: &model.Pipeline{
Message: `aaa Message: `aaa
@ -133,7 +133,7 @@ pipeline:
func TestMultiPipeline(t *testing.T) { func TestMultiPipeline(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: &model.Pipeline{}, Curr: &model.Pipeline{},
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -167,7 +167,7 @@ pipeline:
func TestDependsOn(t *testing.T) { func TestDependsOn(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: &model.Pipeline{}, Curr: &model.Pipeline{},
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -213,7 +213,7 @@ depends_on:
func TestRunsOn(t *testing.T) { func TestRunsOn(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: &model.Pipeline{}, Curr: &model.Pipeline{},
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -249,7 +249,7 @@ runs_on:
func TestPipelineName(t *testing.T) { func TestPipelineName(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{Config: ".woodpecker"}, Repo: &model.Repo{Config: ".woodpecker"},
Curr: &model.Pipeline{}, Curr: &model.Pipeline{},
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -275,7 +275,7 @@ pipeline:
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
pipelineNames := []string{pipelineItems[0].Proc.Name, pipelineItems[1].Proc.Name} pipelineNames := []string{pipelineItems[0].Step.Name, pipelineItems[1].Step.Name}
if !containsItemWithName("lint", pipelineItems) || !containsItemWithName("test", pipelineItems) { if !containsItemWithName("lint", pipelineItems) || !containsItemWithName("test", pipelineItems) {
t.Fatalf("Pipeline name should be 'lint' and 'test' but are '%v'", pipelineNames) t.Fatalf("Pipeline name should be 'lint' and 'test' but are '%v'", pipelineNames)
} }
@ -284,7 +284,7 @@ pipeline:
func TestBranchFilter(t *testing.T) { func TestBranchFilter(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: &model.Pipeline{Branch: "dev"}, Curr: &model.Pipeline{Branch: "dev"},
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -314,15 +314,15 @@ pipeline:
if len(pipelineItems) != 2 { if len(pipelineItems) != 2 {
t.Fatal("Should have generated 2 pipeline") t.Fatal("Should have generated 2 pipeline")
} }
if pipelineItems[0].Proc.State != model.StatusSkipped { if pipelineItems[0].Step.State != model.StatusSkipped {
t.Fatal("Should not run on dev branch") t.Fatal("Should not run on dev branch")
} }
for _, child := range pipelineItems[0].Proc.Children { for _, child := range pipelineItems[0].Step.Children {
if child.State != model.StatusSkipped { if child.State != model.StatusSkipped {
t.Fatal("Children should skipped status too") t.Fatal("Children should skipped status too")
} }
} }
if pipelineItems[1].Proc.State != model.StatusPending { if pipelineItems[1].Step.State != model.StatusPending {
t.Fatal("Should run on dev branch") t.Fatal("Should run on dev branch")
} }
} }
@ -330,7 +330,7 @@ pipeline:
func TestRootWhenFilter(t *testing.T) { func TestRootWhenFilter(t *testing.T) {
t.Parallel() t.Parallel()
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: &model.Pipeline{Event: "tester"}, Curr: &model.Pipeline{Event: "tester"},
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -378,7 +378,7 @@ func TestZeroSteps(t *testing.T) {
pipeline := &model.Pipeline{Branch: "dev"} pipeline := &model.Pipeline{Branch: "dev"}
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: pipeline, Curr: pipeline,
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -412,7 +412,7 @@ func TestZeroStepsAsMultiPipelineDeps(t *testing.T) {
pipeline := &model.Pipeline{Branch: "dev"} pipeline := &model.Pipeline{Branch: "dev"}
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: pipeline, Curr: pipeline,
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -450,7 +450,7 @@ depends_on: [ zerostep ]
if len(pipelineItems) != 1 { if len(pipelineItems) != 1 {
t.Fatal("Zerostep and the step that depends on it should not generate a pipeline item") t.Fatal("Zerostep and the step that depends on it should not generate a pipeline item")
} }
if pipelineItems[0].Proc.Name != "justastep" { if pipelineItems[0].Step.Name != "justastep" {
t.Fatal("justastep should have been generated") t.Fatal("justastep should have been generated")
} }
} }
@ -460,7 +460,7 @@ func TestZeroStepsAsMultiPipelineTransitiveDeps(t *testing.T) {
pipeline := &model.Pipeline{Branch: "dev"} pipeline := &model.Pipeline{Branch: "dev"}
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: pipeline, Curr: pipeline,
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -504,7 +504,7 @@ depends_on: [ shouldbefiltered ]
if len(pipelineItems) != 1 { if len(pipelineItems) != 1 {
t.Fatal("Zerostep and the step that depends on it, and the one depending on it should not generate a pipeline item") t.Fatal("Zerostep and the step that depends on it, and the one depending on it should not generate a pipeline item")
} }
if pipelineItems[0].Proc.Name != "justastep" { if pipelineItems[0].Step.Name != "justastep" {
t.Fatal("justastep should have been generated") t.Fatal("justastep should have been generated")
} }
} }
@ -516,7 +516,7 @@ func TestTree(t *testing.T) {
Event: model.EventPush, Event: model.EventPush,
} }
b := ProcBuilder{ b := StepBuilder{
Repo: &model.Repo{}, Repo: &model.Repo{},
Curr: pipeline, Curr: pipeline,
Last: &model.Pipeline{}, Last: &model.Pipeline{},
@ -538,13 +538,13 @@ pipeline:
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if len(pipeline.Procs) != 3 { if len(pipeline.Steps) != 3 {
t.Fatal("Should generate three in total") t.Fatal("Should generate three in total")
} }
if pipeline.Procs[1].PPID != 1 { if pipeline.Steps[1].PPID != 1 {
t.Fatal("Clone step should be a children of the stage") t.Fatal("Clone step should be a children of the stage")
} }
if pipeline.Procs[2].PPID != 1 { if pipeline.Steps[2].PPID != 1 {
t.Fatal("Pipeline step should be a children of the stage") t.Fatal("Pipeline step should be a children of the stage")
} }
} }

View file

@ -0,0 +1,88 @@
// Copyright 2022 Woodpecker Authors
// Copyright 2019 mhmxs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shared
import (
"time"
"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
"github.com/woodpecker-ci/woodpecker/server/model"
)
// TODO(974) move to server/pipeline/*
func UpdateStepStatus(store model.UpdateStepStore, step model.Step, state rpc.State, started int64) (*model.Step, error) {
if state.Exited {
step.Stopped = state.Finished
step.ExitCode = state.ExitCode
step.Error = state.Error
step.State = model.StatusSuccess
if state.ExitCode != 0 || state.Error != "" {
step.State = model.StatusFailure
}
if state.ExitCode == 137 {
step.State = model.StatusKilled
}
} else {
step.Started = state.Started
step.State = model.StatusRunning
}
if step.Started == 0 && step.Stopped != 0 {
step.Started = started
}
return &step, store.StepUpdate(&step)
}
func UpdateStepToStatusStarted(store model.UpdateStepStore, step model.Step, state rpc.State) (*model.Step, error) {
step.Started = state.Started
step.State = model.StatusRunning
return &step, store.StepUpdate(&step)
}
func UpdateStepToStatusSkipped(store model.UpdateStepStore, step model.Step, stopped int64) (*model.Step, error) {
step.State = model.StatusSkipped
if step.Started != 0 {
step.State = model.StatusSuccess // for daemons that are killed
step.Stopped = stopped
}
return &step, store.StepUpdate(&step)
}
func UpdateStepStatusToDone(store model.UpdateStepStore, step model.Step, state rpc.State) (*model.Step, error) {
step.Stopped = state.Finished
step.Error = state.Error
step.ExitCode = state.ExitCode
if state.Started == 0 {
step.State = model.StatusSkipped
} else {
step.State = model.StatusSuccess
}
if step.ExitCode != 0 || step.Error != "" {
step.State = model.StatusFailure
}
return &step, store.StepUpdate(&step)
}
func UpdateStepToStatusKilled(store model.UpdateStepStore, step model.Step) (*model.Step, error) {
step.State = model.StatusKilled
step.Stopped = time.Now().Unix()
if step.Started == 0 {
step.Started = step.Stopped
}
step.ExitCode = 137
return &step, store.StepUpdate(&step)
}
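
A short usage sketch of the helpers above; the no-op store is a stand-in that satisfies model.UpdateStepStore (mirroring the mock in the tests), the import path of this package is assumed, and the state values are invented:

package main

import (
	"fmt"
	"time"

	"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
	"github.com/woodpecker-ci/woodpecker/server/model"
	"github.com/woodpecker-ci/woodpecker/server/shared" // assumed import path for the package above
)

// noopStore satisfies model.UpdateStepStore by discarding updates.
type noopStore struct{}

func (noopStore) StepUpdate(*model.Step) error { return nil }

func main() {
	// An exited container with exit code 137 is mapped to the killed status.
	state := rpc.State{Exited: true, Finished: time.Now().Unix(), ExitCode: 137}

	step, err := shared.UpdateStepStatus(noopStore{}, model.Step{Started: 100}, state, time.Now().Unix())
	if err != nil {
		panic(err)
	}
	fmt.Println(step.State == model.StatusKilled) // true
	fmt.Println(step.ExitCode)                    // 137
}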

View file

@ -0,0 +1,287 @@
// Copyright 2022 Woodpecker Authors
// Copyright 2019 mhmxs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shared
import (
"testing"
"time"
"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
"github.com/woodpecker-ci/woodpecker/server/model"
)
// TODO(974) move to server/pipeline/*
type mockUpdateStepStore struct{}
func (m *mockUpdateStepStore) StepUpdate(build *model.Step) error {
return nil
}
func TestUpdateStepStatusNotExited(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: false,
// Dummy data
Finished: int64(1),
ExitCode: 137,
Error: "not an error",
}
step, _ := UpdateStepStatus(&mockUpdateStepStore{}, model.Step{}, state, int64(1))
if step.State != model.StatusRunning {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusRunning, step.State)
} else if step.Started != int64(42) {
t.Errorf("Step started not equals 42 != %d", step.Started)
} else if step.Stopped != int64(0) {
t.Errorf("Step stopped not equals 0 != %d", step.Stopped)
} else if step.ExitCode != 0 {
t.Errorf("Step exit code not equals 0 != %d", step.ExitCode)
} else if step.Error != "" {
t.Errorf("Step error not equals '' != '%s'", step.Error)
}
}
func TestUpdateStepStatusNotExitedButStopped(t *testing.T) {
t.Parallel()
step := &model.Step{Stopped: int64(64)}
state := rpc.State{
Exited: false,
// Dummy data
Finished: int64(1),
ExitCode: 137,
Error: "not an error",
}
step, _ = UpdateStepStatus(&mockUpdateStepStore{}, *step, state, int64(42))
if step.State != model.StatusRunning {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusRunning, step.State)
} else if step.Started != int64(42) {
t.Errorf("Step started not equals 42 != %d", step.Started)
} else if step.Stopped != int64(64) {
t.Errorf("Step stopped not equals 64 != %d", step.Stopped)
} else if step.ExitCode != 0 {
t.Errorf("Step exit code not equals 0 != %d", step.ExitCode)
} else if step.Error != "" {
t.Errorf("Step error not equals '' != '%s'", step.Error)
}
}
func TestUpdateStepStatusExited(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: true,
Finished: int64(34),
ExitCode: 137,
Error: "an error",
}
step, _ := UpdateStepStatus(&mockUpdateStepStore{}, model.Step{}, state, int64(42))
if step.State != model.StatusKilled {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusKilled, step.State)
} else if step.Started != int64(42) {
t.Errorf("Step started not equals 42 != %d", step.Started)
} else if step.Stopped != int64(34) {
t.Errorf("Step stopped not equals 34 != %d", step.Stopped)
} else if step.ExitCode != 137 {
t.Errorf("Step exit code not equals 137 != %d", step.ExitCode)
} else if step.Error != "an error" {
t.Errorf("Step error not equals 'an error' != '%s'", step.Error)
}
}
func TestUpdateStepStatusExitedButNot137(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: true,
Finished: int64(34),
Error: "an error",
}
step, _ := UpdateStepStatus(&mockUpdateStepStore{}, model.Step{}, state, int64(42))
if step.State != model.StatusFailure {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusFailure, step.State)
} else if step.Started != int64(42) {
t.Errorf("Step started not equals 42 != %d", step.Started)
} else if step.Stopped != int64(34) {
t.Errorf("Step stopped not equals 34 != %d", step.Stopped)
} else if step.ExitCode != 0 {
t.Errorf("Step exit code not equals 0 != %d", step.ExitCode)
} else if step.Error != "an error" {
t.Errorf("Step error not equals 'an error' != '%s'", step.Error)
}
}
func TestUpdateStepStatusExitedWithCode(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Exited: true,
Finished: int64(34),
ExitCode: 1,
Error: "an error",
}
step, _ := UpdateStepStatus(&mockUpdateStepStore{}, model.Step{}, state, int64(42))
if step.State != model.StatusFailure {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusFailure, step.State)
} else if step.ExitCode != 1 {
t.Errorf("Step exit code not equals 1 != %d", step.ExitCode)
}
}
func TestUpdateStepToStatusStarted(t *testing.T) {
t.Parallel()
state := rpc.State{Started: int64(42)}
step, _ := UpdateStepToStatusStarted(&mockUpdateStepStore{}, model.Step{}, state)
if step.State != model.StatusRunning {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusRunning, step.State)
} else if step.Started != int64(42) {
t.Errorf("Step started not equals 42 != %d", step.Started)
}
}
func TestUpdateStepToStatusSkipped(t *testing.T) {
t.Parallel()
step, _ := UpdateStepToStatusSkipped(&mockUpdateStepStore{}, model.Step{}, int64(1))
if step.State != model.StatusSkipped {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusSkipped, step.State)
} else if step.Stopped != int64(0) {
t.Errorf("Step stopped not equals 0 != %d", step.Stopped)
}
}
func TestUpdateStepToStatusSkippedButStarted(t *testing.T) {
t.Parallel()
step := &model.Step{
Started: int64(42),
}
step, _ = UpdateStepToStatusSkipped(&mockUpdateStepStore{}, *step, int64(1))
if step.State != model.StatusSuccess {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusSuccess, step.State)
} else if step.Stopped != int64(1) {
t.Errorf("Step stopped not equals 1 != %d", step.Stopped)
}
}
func TestUpdateStepStatusToDoneSkipped(t *testing.T) {
t.Parallel()
state := rpc.State{
Finished: int64(34),
}
step, _ := UpdateStepStatusToDone(&mockUpdateStepStore{}, model.Step{}, state)
if step.State != model.StatusSkipped {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusSkipped, step.State)
} else if step.Stopped != int64(34) {
t.Errorf("Step stopped not equals 34 != %d", step.Stopped)
} else if step.Error != "" {
t.Errorf("Step error not equals '' != '%s'", step.Error)
} else if step.ExitCode != 0 {
t.Errorf("Step exit code not equals 0 != %d", step.ExitCode)
}
}
func TestUpdateStepStatusToDoneSuccess(t *testing.T) {
t.Parallel()
state := rpc.State{
Started: int64(42),
Finished: int64(34),
}
step, _ := UpdateStepStatusToDone(&mockUpdateStepStore{}, model.Step{}, state)
if step.State != model.StatusSuccess {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusSuccess, step.State)
} else if step.Stopped != int64(34) {
t.Errorf("Step stopped not equals 34 != %d", step.Stopped)
} else if step.Error != "" {
t.Errorf("Step error not equals '' != '%s'", step.Error)
} else if step.ExitCode != 0 {
t.Errorf("Step exit code not equals 0 != %d", step.ExitCode)
}
}
func TestUpdateStepStatusToDoneFailureWithError(t *testing.T) {
t.Parallel()
state := rpc.State{Error: "an error"}
step, _ := UpdateStepStatusToDone(&mockUpdateStepStore{}, model.Step{}, state)
if step.State != model.StatusFailure {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusFailure, step.State)
}
}
func TestUpdateStepStatusToDoneFailureWithExitCode(t *testing.T) {
t.Parallel()
state := rpc.State{ExitCode: 43}
step, _ := UpdateStepStatusToDone(&mockUpdateStepStore{}, model.Step{}, state)
if step.State != model.StatusFailure {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusFailure, step.State)
}
}
func TestUpdateStepToStatusKilledStarted(t *testing.T) {
t.Parallel()
now := time.Now().Unix()
step, _ := UpdateStepToStatusKilled(&mockUpdateStepStore{}, model.Step{})
if step.State != model.StatusKilled {
t.Errorf("Step status not equals '%s' != '%s'", model.StatusKilled, step.State)
} else if step.Stopped < now {
t.Errorf("Step stopped not equals %d < %d", now, step.Stopped)
} else if step.Started != step.Stopped {
t.Errorf("Step started not equals %d != %d", step.Stopped, step.Started)
} else if step.ExitCode != 137 {
t.Errorf("Step exit code not equals 137 != %d", step.ExitCode)
}
}
func TestUpdateStepToStatusKilledNotStarted(t *testing.T) {
t.Parallel()
step, _ := UpdateStepToStatusKilled(&mockUpdateStepStore{}, model.Step{Started: int64(1)})
if step.Started != int64(1) {
t.Errorf("Step started not equals 1 != %d", step.Started)
}
}

View file

@ -200,7 +200,7 @@ func TestConfigApproved(t *testing.T) {
} }
func TestConfigIndexes(t *testing.T) { func TestConfigIndexes(t *testing.T) {
store, closer := newTestStore(t, new(model.Config), new(model.Proc), new(model.Pipeline), new(model.Repo)) store, closer := newTestStore(t, new(model.Config), new(model.Step), new(model.Pipeline), new(model.Repo))
defer closer() defer closer()
var ( var (

View file

@ -26,16 +26,16 @@ func (s storage) FileList(pipeline *model.Pipeline) ([]*model.File, error) {
return files, s.engine.Where("file_pipeline_id = ?", pipeline.ID).Find(&files) return files, s.engine.Where("file_pipeline_id = ?", pipeline.ID).Find(&files)
} }
func (s storage) FileFind(proc *model.Proc, name string) (*model.File, error) { func (s storage) FileFind(step *model.Step, name string) (*model.File, error) {
file := &model.File{ file := &model.File{
ProcID: proc.ID, StepID: step.ID,
Name: name, Name: name,
} }
return file, wrapGet(s.engine.Get(file)) return file, wrapGet(s.engine.Get(file))
} }
func (s storage) FileRead(proc *model.Proc, name string) (io.ReadCloser, error) { func (s storage) FileRead(step *model.Step, name string) (io.ReadCloser, error) {
file, err := s.FileFind(proc, name) file, err := s.FileFind(step, name)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -25,13 +25,13 @@ import (
) )
func TestFileFind(t *testing.T) { func TestFileFind(t *testing.T) {
store, closer := newTestStore(t, new(model.File), new(model.Proc)) store, closer := newTestStore(t, new(model.File), new(model.Step))
defer closer() defer closer()
if err := store.FileCreate( if err := store.FileCreate(
&model.File{ &model.File{
PipelineID: 2, PipelineID: 2,
ProcID: 1, StepID: 1,
Name: "hello.txt", Name: "hello.txt",
Mime: "text/plain", Mime: "text/plain",
Size: 11, Size: 11,
@ -42,7 +42,7 @@ func TestFileFind(t *testing.T) {
return return
} }
file, err := store.FileFind(&model.Proc{ID: 1}, "hello.txt") file, err := store.FileFind(&model.Step{ID: 1}, "hello.txt")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -53,8 +53,8 @@ func TestFileFind(t *testing.T) {
if got, want := file.PipelineID, int64(2); got != want { if got, want := file.PipelineID, int64(2); got != want {
t.Errorf("Want file pipeline id %d, got %d", want, got) t.Errorf("Want file pipeline id %d, got %d", want, got)
} }
if got, want := file.ProcID, int64(1); got != want { if got, want := file.StepID, int64(1); got != want {
t.Errorf("Want file proc id %d, got %d", want, got) t.Errorf("Want file step id %d, got %d", want, got)
} }
if got, want := file.Name, "hello.txt"; got != want { if got, want := file.Name, "hello.txt"; got != want {
t.Errorf("Want file name %s, got %s", want, got) t.Errorf("Want file name %s, got %s", want, got)
@ -66,7 +66,7 @@ func TestFileFind(t *testing.T) {
t.Errorf("Want file size %d, got %d", want, got) t.Errorf("Want file size %d, got %d", want, got)
} }
rc, err := store.FileRead(&model.Proc{ID: 1}, "hello.txt") rc, err := store.FileRead(&model.Step{ID: 1}, "hello.txt")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
@ -84,7 +84,7 @@ func TestFileList(t *testing.T) {
assert.NoError(t, store.FileCreate( assert.NoError(t, store.FileCreate(
&model.File{ &model.File{
PipelineID: 1, PipelineID: 1,
ProcID: 1, StepID: 1,
Name: "hello.txt", Name: "hello.txt",
Mime: "text/plain", Mime: "text/plain",
Size: 11, Size: 11,
@ -94,7 +94,7 @@ func TestFileList(t *testing.T) {
assert.NoError(t, store.FileCreate( assert.NoError(t, store.FileCreate(
&model.File{ &model.File{
PipelineID: 1, PipelineID: 1,
ProcID: 1, StepID: 1,
Name: "hola.txt", Name: "hola.txt",
Mime: "text/plain", Mime: "text/plain",
Size: 11, Size: 11,
@ -120,7 +120,7 @@ func TestFileIndexes(t *testing.T) {
if err := store.FileCreate( if err := store.FileCreate(
&model.File{ &model.File{
PipelineID: 1, PipelineID: 1,
ProcID: 1, StepID: 1,
Name: "hello.txt", Name: "hello.txt",
Size: 11, Size: 11,
Mime: "text/plain", Mime: "text/plain",
@ -135,7 +135,7 @@ func TestFileIndexes(t *testing.T) {
if err := store.FileCreate( if err := store.FileCreate(
&model.File{ &model.File{
PipelineID: 1, PipelineID: 1,
ProcID: 1, StepID: 1,
Name: "hello.txt", Name: "hello.txt",
Mime: "text/plain", Mime: "text/plain",
Size: 11, Size: 11,
@ -147,23 +147,23 @@ func TestFileIndexes(t *testing.T) {
} }
func TestFileCascade(t *testing.T) { func TestFileCascade(t *testing.T) {
store, closer := newTestStore(t, new(model.File), new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.File), new(model.Step), new(model.Pipeline))
defer closer() defer closer()
procOne := &model.Proc{ stepOne := &model.Step{
PipelineID: 1, PipelineID: 1,
PID: 1, PID: 1,
PGID: 1, PGID: 1,
Name: "build", Name: "build",
State: "success", State: "success",
} }
err1 := store.ProcCreate([]*model.Proc{procOne}) err1 := store.StepCreate([]*model.Step{stepOne})
assert.EqualValues(t, int64(1), procOne.ID) assert.EqualValues(t, int64(1), stepOne.ID)
err2 := store.FileCreate( err2 := store.FileCreate(
&model.File{ &model.File{
PipelineID: 1, PipelineID: 1,
ProcID: 1, StepID: 1,
Name: "hello.txt", Name: "hello.txt",
Mime: "text/plain", Mime: "text/plain",
Size: 11, Size: 11,
@ -172,19 +172,19 @@ func TestFileCascade(t *testing.T) {
) )
if err1 != nil { if err1 != nil {
t.Errorf("Unexpected error: cannot insert proc: %s", err1) t.Errorf("Unexpected error: cannot insert step: %s", err1)
} else if err2 != nil { } else if err2 != nil {
t.Errorf("Unexpected error: cannot insert file: %s", err2) t.Errorf("Unexpected error: cannot insert file: %s", err2)
} }
if _, err3 := store.ProcFind(&model.Pipeline{ID: 1}, 1); err3 != nil { if _, err3 := store.StepFind(&model.Pipeline{ID: 1}, 1); err3 != nil {
t.Errorf("Unexpected error: cannot get inserted proc: %s", err3) t.Errorf("Unexpected error: cannot get inserted step: %s", err3)
} }
err := store.ProcClear(&model.Pipeline{ID: 1, Procs: []*model.Proc{procOne}}) err := store.StepClear(&model.Pipeline{ID: 1, Steps: []*model.Step{stepOne}})
assert.NoError(t, err) assert.NoError(t, err)
file, err4 := store.FileFind(&model.Proc{ID: 1}, "hello.txt") file, err4 := store.FileFind(&model.Step{ID: 1}, "hello.txt")
if err4 == nil { if err4 == nil {
t.Errorf("Expected no rows in result set error") t.Errorf("Expected no rows in result set error")
t.Log(file) t.Log(file)

View file

@ -21,9 +21,9 @@ import (
"github.com/woodpecker-ci/woodpecker/server/model" "github.com/woodpecker-ci/woodpecker/server/model"
) )
func (s storage) LogFind(proc *model.Proc) (io.ReadCloser, error) { func (s storage) LogFind(step *model.Step) (io.ReadCloser, error) {
logs := &model.Logs{ logs := &model.Logs{
ProcID: proc.ID, StepID: step.ID,
} }
if err := wrapGet(s.engine.Get(logs)); err != nil { if err := wrapGet(s.engine.Get(logs)); err != nil {
return nil, err return nil, err
@ -32,7 +32,7 @@ func (s storage) LogFind(proc *model.Proc) (io.ReadCloser, error) {
return io.NopCloser(buf), nil return io.NopCloser(buf), nil
} }
func (s storage) LogSave(proc *model.Proc, reader io.Reader) error { func (s storage) LogSave(step *model.Step, reader io.Reader) error {
data, _ := io.ReadAll(reader) data, _ := io.ReadAll(reader)
sess := s.engine.NewSession() sess := s.engine.NewSession()
@ -42,7 +42,7 @@ func (s storage) LogSave(proc *model.Proc, reader io.Reader) error {
} }
logs := new(model.Logs) logs := new(model.Logs)
exist, err := sess.Where("log_job_id = ?", proc.ID).Get(logs) exist, err := sess.Where("log_step_id = ?", step.ID).Get(logs)
if err != nil { if err != nil {
return err return err
} }
@ -53,7 +53,7 @@ func (s storage) LogSave(proc *model.Proc, reader io.Reader) error {
} }
} else { } else {
if _, err := sess.Insert(&model.Logs{ if _, err := sess.Insert(&model.Logs{
ProcID: proc.ID, StepID: step.ID,
Data: data, Data: data,
}); err != nil { }); err != nil {
return err return err

View file

@ -23,19 +23,19 @@ import (
) )
func TestLogCreateFind(t *testing.T) { func TestLogCreateFind(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Logs)) store, closer := newTestStore(t, new(model.Step), new(model.Logs))
defer closer() defer closer()
proc := model.Proc{ step := model.Step{
ID: 1, ID: 1,
} }
buf := bytes.NewBufferString("echo hi") buf := bytes.NewBufferString("echo hi")
err := store.LogSave(&proc, buf) err := store.LogSave(&step, buf)
if err != nil { if err != nil {
t.Errorf("Unexpected error: log create: %s", err) t.Errorf("Unexpected error: log create: %s", err)
} }
rc, err := store.LogFind(&proc) rc, err := store.LogFind(&step)
if err != nil { if err != nil {
t.Errorf("Unexpected error: log create: %s", err) t.Errorf("Unexpected error: log create: %s", err)
} }
@ -48,16 +48,16 @@ func TestLogCreateFind(t *testing.T) {
} }
func TestLogUpdate(t *testing.T) { func TestLogUpdate(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Logs)) store, closer := newTestStore(t, new(model.Step), new(model.Logs))
defer closer() defer closer()
proc := model.Proc{ step := model.Step{
ID: 1, ID: 1,
} }
buf1 := bytes.NewBufferString("echo hi") buf1 := bytes.NewBufferString("echo hi")
buf2 := bytes.NewBufferString("echo allo?") buf2 := bytes.NewBufferString("echo allo?")
err1 := store.LogSave(&proc, buf1) err1 := store.LogSave(&step, buf1)
err2 := store.LogSave(&proc, buf2) err2 := store.LogSave(&step, buf2)
if err1 != nil { if err1 != nil {
t.Errorf("Unexpected error: log create: %s", err1) t.Errorf("Unexpected error: log create: %s", err1)
} }
@ -65,7 +65,7 @@ func TestLogUpdate(t *testing.T) {
t.Errorf("Unexpected error: log update: %s", err2) t.Errorf("Unexpected error: log update: %s", err2)
} }
rc, err := store.LogFind(&proc) rc, err := store.LogFind(&step)
if err != nil { if err != nil {
t.Errorf("Unexpected error: log create: %s", err) t.Errorf("Unexpected error: log create: %s", err)
} }

View file

@ -20,7 +20,7 @@ import (
"xorm.io/xorm" "xorm.io/xorm"
) )
type oldBuildColumn struct { type oldTable struct {
table string table string
columns []string columns []string
} }
@ -29,9 +29,9 @@ var renameColumnsBuildsToPipeline = task{
name: "rename-columns-builds-to-pipeline", name: "rename-columns-builds-to-pipeline",
required: true, required: true,
fn: func(sess *xorm.Session) error { fn: func(sess *xorm.Session) error {
var oldColumns []*oldBuildColumn var oldColumns []*oldTable
oldColumns = append(oldColumns, &oldBuildColumn{ oldColumns = append(oldColumns, &oldTable{
table: "pipelines", table: "pipelines",
columns: []string{ columns: []string{
"build_id", "build_id",
@ -68,17 +68,17 @@ var renameColumnsBuildsToPipeline = task{
}, },
) )
oldColumns = append(oldColumns, &oldBuildColumn{ oldColumns = append(oldColumns, &oldTable{
table: "pipeline_config", table: "pipeline_config",
columns: []string{"build_id"}, columns: []string{"build_id"},
}) })
oldColumns = append(oldColumns, &oldBuildColumn{ oldColumns = append(oldColumns, &oldTable{
table: "files", table: "files",
columns: []string{"file_build_id"}, columns: []string{"file_build_id"},
}) })
oldColumns = append(oldColumns, &oldBuildColumn{ oldColumns = append(oldColumns, &oldTable{
table: "procs", table: "procs",
columns: []string{"proc_build_id"}, columns: []string{"proc_build_id"},
}) })


@ -0,0 +1,87 @@
// Copyright 2022 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migration
import (
"strings"
"xorm.io/xorm"
)
var renameTableProcsToSteps = task{
name: "rename-procs-to-steps",
required: true,
fn: func(sess *xorm.Session) error {
err := renameTable(sess, "procs", "steps")
if err != nil {
return err
}
oldProcColumns := []*oldTable{
{
table: "steps",
columns: []string{
"proc_id",
"proc_pipeline_id",
"proc_pid",
"proc_ppid",
"proc_pgid",
"proc_name",
"proc_state",
"proc_error",
"proc_exit_code",
"proc_started",
"proc_stopped",
"proc_machine",
"proc_platform",
"proc_environ",
},
},
{
table: "files",
columns: []string{"file_proc_id"},
},
}
for _, table := range oldProcColumns {
for _, column := range table.columns {
err := renameColumn(sess, table.table, column, strings.Replace(column, "proc_", "step_", 1))
if err != nil {
return err
}
}
}
oldJobColumns := []*oldTable{
{
table: "logs",
columns: []string{
"log_job_id",
},
},
}
for _, table := range oldJobColumns {
for _, column := range table.columns {
err := renameColumn(sess, table.table, column, strings.Replace(column, "job_", "step_", 1))
if err != nil {
return err
}
}
}
return nil
},
}
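
The column renames above boil down to a prefix rewrite. As a self-contained illustration (not part of the commit), this is what strings.Replace with a count of 1 does to the old column names, touching only the leading prefix:

package main

import (
	"fmt"
	"strings"
)

func main() {
	columns := []string{"proc_id", "proc_pipeline_id", "proc_exit_code"}
	for _, c := range columns {
		// n = 1 replaces only the first occurrence, i.e. the leading "proc_" prefix.
		fmt.Println(c, "->", strings.Replace(c, "proc_", "step_", 1))
	}
	// Output:
	// proc_id -> step_id
	// proc_pipeline_id -> step_pipeline_id
	// proc_exit_code -> step_exit_code
}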


@ -38,6 +38,7 @@ var migrationTasks = []*task{
&lowercaseSecretNames, &lowercaseSecretNames,
&renameBuildsToPipeline, &renameBuildsToPipeline,
&renameColumnsBuildsToPipeline, &renameColumnsBuildsToPipeline,
&renameTableProcsToSteps,
} }
var allBeans = []interface{}{ var allBeans = []interface{}{
@ -48,7 +49,7 @@ var allBeans = []interface{}{
new(model.File), new(model.File),
new(model.Logs), new(model.Logs),
new(model.Perm), new(model.Perm),
new(model.Proc), new(model.Step),
new(model.Registry), new(model.Registry),
new(model.Repo), new(model.Repo),
new(model.Secret), new(model.Secret),


@ -97,7 +97,7 @@ func (s storage) GetPipelineCount() (int64, error) {
return s.engine.Count(new(model.Pipeline)) return s.engine.Count(new(model.Pipeline))
} }
func (s storage) CreatePipeline(pipeline *model.Pipeline, procList ...*model.Proc) error { func (s storage) CreatePipeline(pipeline *model.Pipeline, stepList ...*model.Step) error {
sess := s.engine.NewSession() sess := s.engine.NewSession()
defer sess.Close() defer sess.Close()
if err := sess.Begin(); err != nil { if err := sess.Begin(); err != nil {
@ -127,10 +127,10 @@ func (s storage) CreatePipeline(pipeline *model.Pipeline, procList ...*model.Pro
return err return err
} }
for i := range procList { for i := range stepList {
procList[i].PipelineID = pipeline.ID stepList[i].PipelineID = pipeline.ID
// only Insert set auto created ID back to object // only Insert set auto created ID back to object
if _, err := sess.Insert(procList[i]); err != nil { if _, err := sess.Insert(stepList[i]); err != nil {
return err return err
} }
} }
@ -144,18 +144,18 @@ func (s storage) UpdatePipeline(pipeline *model.Pipeline) error {
} }
func deletePipeline(sess *xorm.Session, pipelineID int64) error { func deletePipeline(sess *xorm.Session, pipelineID int64) error {
// delete related procs // delete related steps
for startProcs := 0; ; startProcs += perPage { for startSteps := 0; ; startSteps += perPage {
procIDs := make([]int64, 0, perPage) stepIDs := make([]int64, 0, perPage)
if err := sess.Limit(perPage, startProcs).Table("procs").Cols("proc_id").Where("proc_pipeline_id = ?", pipelineID).Find(&procIDs); err != nil { if err := sess.Limit(perPage, startSteps).Table("steps").Cols("step_id").Where("step_pipeline_id = ?", pipelineID).Find(&stepIDs); err != nil {
return err return err
} }
if len(procIDs) == 0 { if len(stepIDs) == 0 {
break break
} }
for i := range procIDs { for i := range stepIDs {
if err := deleteProc(sess, procIDs[i]); err != nil { if err := deleteStep(sess, stepIDs[i]); err != nil {
return err return err
} }
} }


@ -33,7 +33,7 @@ func TestPipelines(t *testing.T) {
Name: "test", Name: "test",
} }
store, closer := newTestStore(t, new(model.Repo), new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.Repo), new(model.Step), new(model.Pipeline))
defer closer() defer closer()
g := goblin.Goblin(t) g := goblin.Goblin(t)
@ -53,7 +53,7 @@ func TestPipelines(t *testing.T) {
g.BeforeEach(func() { g.BeforeEach(func() {
_, err := store.engine.Exec("DELETE FROM pipelines") _, err := store.engine.Exec("DELETE FROM pipelines")
g.Assert(err).IsNil() g.Assert(err).IsNil()
_, err = store.engine.Exec("DELETE FROM procs") _, err = store.engine.Exec("DELETE FROM steps")
g.Assert(err).IsNil() g.Assert(err).IsNil()
}) })
@ -114,7 +114,7 @@ func TestPipelines(t *testing.T) {
RepoID: repo.ID, RepoID: repo.ID,
Status: model.StatusSuccess, Status: model.StatusSuccess,
} }
err := store.CreatePipeline(&pipeline, []*model.Proc{}...) err := store.CreatePipeline(&pipeline, []*model.Step{}...)
g.Assert(err).IsNil() g.Assert(err).IsNil()
GetPipeline, err := store.GetPipeline(pipeline.ID) GetPipeline, err := store.GetPipeline(pipeline.ID)
g.Assert(err).IsNil() g.Assert(err).IsNil()
@ -132,9 +132,9 @@ func TestPipelines(t *testing.T) {
RepoID: repo.ID, RepoID: repo.ID,
Status: model.StatusPending, Status: model.StatusPending,
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
GetPipeline, err3 := store.GetPipelineNumber(&model.Repo{ID: 1}, pipeline2.Number) GetPipeline, err3 := store.GetPipelineNumber(&model.Repo{ID: 1}, pipeline2.Number)
g.Assert(err3).IsNil() g.Assert(err3).IsNil()
@ -154,9 +154,9 @@ func TestPipelines(t *testing.T) {
Status: model.StatusPending, Status: model.StatusPending,
Ref: "refs/pull/6", Ref: "refs/pull/6",
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
GetPipeline, err3 := store.GetPipelineRef(&model.Repo{ID: 1}, "refs/pull/6") GetPipeline, err3 := store.GetPipelineRef(&model.Repo{ID: 1}, "refs/pull/6")
g.Assert(err3).IsNil() g.Assert(err3).IsNil()
@ -177,9 +177,9 @@ func TestPipelines(t *testing.T) {
Status: model.StatusPending, Status: model.StatusPending,
Ref: "refs/pull/6", Ref: "refs/pull/6",
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
GetPipeline, err3 := store.GetPipelineRef(&model.Repo{ID: 1}, "refs/pull/6") GetPipeline, err3 := store.GetPipelineRef(&model.Repo{ID: 1}, "refs/pull/6")
g.Assert(err3).IsNil() g.Assert(err3).IsNil()
@ -202,9 +202,9 @@ func TestPipelines(t *testing.T) {
Branch: "dev", Branch: "dev",
Commit: "85f8c029b902ed9400bc600bac301a0aadb144aa", Commit: "85f8c029b902ed9400bc600bac301a0aadb144aa",
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
GetPipeline, err3 := store.GetPipelineCommit(&model.Repo{ID: 1}, pipeline2.Commit, pipeline2.Branch) GetPipeline, err3 := store.GetPipelineCommit(&model.Repo{ID: 1}, pipeline2.Commit, pipeline2.Branch)
g.Assert(err3).IsNil() g.Assert(err3).IsNil()
@ -230,8 +230,8 @@ func TestPipelines(t *testing.T) {
Commit: "85f8c029b902ed9400bc600bac301a0aadb144aa", Commit: "85f8c029b902ed9400bc600bac301a0aadb144aa",
Event: model.EventPush, Event: model.EventPush,
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
GetPipeline, err3 := store.GetPipelineLast(&model.Repo{ID: 1}, pipeline2.Branch) GetPipeline, err3 := store.GetPipelineLast(&model.Repo{ID: 1}, pipeline2.Branch)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
@ -263,11 +263,11 @@ func TestPipelines(t *testing.T) {
Branch: "master", Branch: "master",
Commit: "85f8c029b902ed9400bc600bac301a0aadb144aa", Commit: "85f8c029b902ed9400bc600bac301a0aadb144aa",
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
err3 := store.CreatePipeline(pipeline3, []*model.Proc{}...) err3 := store.CreatePipeline(pipeline3, []*model.Step{}...)
g.Assert(err3).IsNil() g.Assert(err3).IsNil()
GetPipeline, err4 := store.GetPipelineLastBefore(&model.Repo{ID: 1}, pipeline3.Branch, pipeline3.ID) GetPipeline, err4 := store.GetPipelineLastBefore(&model.Repo{ID: 1}, pipeline3.Branch, pipeline3.ID)
g.Assert(err4).IsNil() g.Assert(err4).IsNil()
@ -288,9 +288,9 @@ func TestPipelines(t *testing.T) {
RepoID: repo.ID, RepoID: repo.ID,
Status: model.StatusSuccess, Status: model.StatusSuccess,
} }
err1 := store.CreatePipeline(pipeline1, []*model.Proc{}...) err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil() g.Assert(err1).IsNil()
err2 := store.CreatePipeline(pipeline2, []*model.Proc{}...) err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil() g.Assert(err2).IsNil()
pipelines, err3 := store.GetPipelineList(&model.Repo{ID: 1}, 1) pipelines, err3 := store.GetPipelineList(&model.Repo{ID: 1}, 1)
g.Assert(err3).IsNil() g.Assert(err3).IsNil()


@ -383,7 +383,7 @@ func TestRepoCrud(t *testing.T) {
new(model.Pipeline), new(model.Pipeline),
new(model.PipelineConfig), new(model.PipelineConfig),
new(model.Logs), new(model.Logs),
new(model.Proc), new(model.Step),
new(model.File), new(model.File),
new(model.Secret), new(model.Secret),
new(model.Registry), new(model.Registry),
@ -401,10 +401,10 @@ func TestRepoCrud(t *testing.T) {
pipeline := model.Pipeline{ pipeline := model.Pipeline{
RepoID: repo.ID, RepoID: repo.ID,
} }
proc := model.Proc{ step := model.Step{
Name: "a proc", Name: "a step",
} }
assert.NoError(t, store.CreatePipeline(&pipeline, &proc)) assert.NoError(t, store.CreatePipeline(&pipeline, &step))
// create unrelated // create unrelated
repoUnrelated := model.Repo{ repoUnrelated := model.Repo{
@ -417,10 +417,10 @@ func TestRepoCrud(t *testing.T) {
pipelineUnrelated := model.Pipeline{ pipelineUnrelated := model.Pipeline{
RepoID: repoUnrelated.ID, RepoID: repoUnrelated.ID,
} }
procUnrelated := model.Proc{ stepUnrelated := model.Step{
Name: "a unrelated proc", Name: "a unrelated step",
} }
assert.NoError(t, store.CreatePipeline(&pipelineUnrelated, &procUnrelated)) assert.NoError(t, store.CreatePipeline(&pipelineUnrelated, &stepUnrelated))
_, err := store.GetRepo(repo.ID) _, err := store.GetRepo(repo.ID)
assert.NoError(t, err) assert.NoError(t, err)
@ -428,9 +428,9 @@ func TestRepoCrud(t *testing.T) {
_, err = store.GetRepo(repo.ID) _, err = store.GetRepo(repo.ID)
assert.Error(t, err) assert.Error(t, err)
procCount, err := store.engine.Count(new(model.Proc)) stepCount, err := store.engine.Count(new(model.Step))
assert.NoError(t, err) assert.NoError(t, err)
assert.EqualValues(t, 1, procCount) assert.EqualValues(t, 1, stepCount)
pipelineCount, err := store.engine.Count(new(model.Pipeline)) pipelineCount, err := store.engine.Count(new(model.Pipeline))
assert.NoError(t, err) assert.NoError(t, err)
assert.EqualValues(t, 1, pipelineCount) assert.EqualValues(t, 1, pipelineCount)


@ -20,46 +20,46 @@ import (
"github.com/woodpecker-ci/woodpecker/server/model" "github.com/woodpecker-ci/woodpecker/server/model"
) )
func (s storage) ProcLoad(id int64) (*model.Proc, error) { func (s storage) StepLoad(id int64) (*model.Step, error) {
proc := new(model.Proc) step := new(model.Step)
return proc, wrapGet(s.engine.ID(id).Get(proc)) return step, wrapGet(s.engine.ID(id).Get(step))
} }
func (s storage) ProcFind(pipeline *model.Pipeline, pid int) (*model.Proc, error) { func (s storage) StepFind(pipeline *model.Pipeline, pid int) (*model.Step, error) {
proc := &model.Proc{ step := &model.Step{
PipelineID: pipeline.ID, PipelineID: pipeline.ID,
PID: pid, PID: pid,
} }
return proc, wrapGet(s.engine.Get(proc)) return step, wrapGet(s.engine.Get(step))
} }
func (s storage) ProcChild(pipeline *model.Pipeline, ppid int, child string) (*model.Proc, error) { func (s storage) StepChild(pipeline *model.Pipeline, ppid int, child string) (*model.Step, error) {
proc := &model.Proc{ step := &model.Step{
PipelineID: pipeline.ID, PipelineID: pipeline.ID,
PPID: ppid, PPID: ppid,
Name: child, Name: child,
} }
return proc, wrapGet(s.engine.Get(proc)) return step, wrapGet(s.engine.Get(step))
} }
func (s storage) ProcList(pipeline *model.Pipeline) ([]*model.Proc, error) { func (s storage) StepList(pipeline *model.Pipeline) ([]*model.Step, error) {
procList := make([]*model.Proc, 0, perPage) stepList := make([]*model.Step, 0, perPage)
return procList, s.engine. return stepList, s.engine.
Where("proc_pipeline_id = ?", pipeline.ID). Where("step_pipeline_id = ?", pipeline.ID).
OrderBy("proc_pid"). OrderBy("step_pid").
Find(&procList) Find(&stepList)
} }
func (s storage) ProcCreate(procs []*model.Proc) error { func (s storage) StepCreate(steps []*model.Step) error {
sess := s.engine.NewSession() sess := s.engine.NewSession()
defer sess.Close() defer sess.Close()
if err := sess.Begin(); err != nil { if err := sess.Begin(); err != nil {
return err return err
} }
for i := range procs { for i := range steps {
// only Insert on single object ref set auto created ID back to object // only Insert on single object ref set auto created ID back to object
if _, err := sess.Insert(procs[i]); err != nil { if _, err := sess.Insert(steps[i]); err != nil {
return err return err
} }
} }
@ -67,12 +67,12 @@ func (s storage) ProcCreate(procs []*model.Proc) error {
return sess.Commit() return sess.Commit()
} }
func (s storage) ProcUpdate(proc *model.Proc) error { func (s storage) StepUpdate(step *model.Step) error {
_, err := s.engine.ID(proc.ID).AllCols().Update(proc) _, err := s.engine.ID(step.ID).AllCols().Update(step)
return err return err
} }
func (s storage) ProcClear(pipeline *model.Pipeline) error { func (s storage) StepClear(pipeline *model.Pipeline) error {
sess := s.engine.NewSession() sess := s.engine.NewSession()
defer sess.Close() defer sess.Close()
if err := sess.Begin(); err != nil { if err := sess.Begin(); err != nil {
@ -83,20 +83,20 @@ func (s storage) ProcClear(pipeline *model.Pipeline) error {
return err return err
} }
if _, err := sess.Where("proc_pipeline_id = ?", pipeline.ID).Delete(new(model.Proc)); err != nil { if _, err := sess.Where("step_pipeline_id = ?", pipeline.ID).Delete(new(model.Step)); err != nil {
return err return err
} }
return sess.Commit() return sess.Commit()
} }
func deleteProc(sess *xorm.Session, procID int64) error { func deleteStep(sess *xorm.Session, stepID int64) error {
if _, err := sess.Where("log_job_id = ?", procID).Delete(new(model.Logs)); err != nil { if _, err := sess.Where("log_step_id = ?", stepID).Delete(new(model.Logs)); err != nil {
return err return err
} }
if _, err := sess.Where("file_proc_id = ?", procID).Delete(new(model.File)); err != nil { if _, err := sess.Where("file_step_id = ?", stepID).Delete(new(model.File)); err != nil {
return err return err
} }
_, err := sess.ID(procID).Delete(new(model.Proc)) _, err := sess.ID(stepID).Delete(new(model.Step))
return err return err
} }


@ -23,11 +23,11 @@ import (
"github.com/woodpecker-ci/woodpecker/server/model" "github.com/woodpecker-ci/woodpecker/server/model"
) )
func TestProcFind(t *testing.T) { func TestStepFind(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.Step), new(model.Pipeline))
defer closer() defer closer()
procs := []*model.Proc{ steps := []*model.Step{
{ {
PipelineID: 1000, PipelineID: 1000,
PID: 1, PID: 1,
@ -42,22 +42,22 @@ func TestProcFind(t *testing.T) {
Environ: map[string]string{"GOLANG": "tip"}, Environ: map[string]string{"GOLANG": "tip"},
}, },
} }
assert.NoError(t, store.ProcCreate(procs)) assert.NoError(t, store.StepCreate(steps))
assert.EqualValues(t, 1, procs[0].ID) assert.EqualValues(t, 1, steps[0].ID)
assert.Error(t, store.ProcCreate(procs)) assert.Error(t, store.StepCreate(steps))
proc, err := store.ProcFind(&model.Pipeline{ID: 1000}, 1) step, err := store.StepFind(&model.Pipeline{ID: 1000}, 1)
if !assert.NoError(t, err) { if !assert.NoError(t, err) {
return return
} }
assert.Equal(t, procs[0], proc) assert.Equal(t, steps[0], step)
} }
func TestProcChild(t *testing.T) { func TestStepChild(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.Step), new(model.Pipeline))
defer closer() defer closer()
err := store.ProcCreate([]*model.Proc{ err := store.StepCreate([]*model.Step{
{ {
PipelineID: 1, PipelineID: 1,
PID: 1, PID: 1,
@ -75,28 +75,28 @@ func TestProcChild(t *testing.T) {
}, },
}) })
if err != nil { if err != nil {
t.Errorf("Unexpected error: insert procs: %s", err) t.Errorf("Unexpected error: insert steps: %s", err)
return return
} }
proc, err := store.ProcChild(&model.Pipeline{ID: 1}, 1, "build") step, err := store.StepChild(&model.Pipeline{ID: 1}, 1, "build")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
if got, want := proc.PID, 2; got != want { if got, want := step.PID, 2; got != want {
t.Errorf("Want proc pid %d, got %d", want, got) t.Errorf("Want step pid %d, got %d", want, got)
} }
if got, want := proc.Name, "build"; got != want { if got, want := step.Name, "build"; got != want {
t.Errorf("Want proc name %s, got %s", want, got) t.Errorf("Want step name %s, got %s", want, got)
} }
} }
func TestProcList(t *testing.T) { func TestStepList(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.Step), new(model.Pipeline))
defer closer() defer closer()
err := store.ProcCreate([]*model.Proc{ err := store.StepCreate([]*model.Step{
{ {
PipelineID: 2, PipelineID: 2,
PID: 1, PID: 1,
@ -121,24 +121,24 @@ func TestProcList(t *testing.T) {
}, },
}) })
if err != nil { if err != nil {
t.Errorf("Unexpected error: insert procs: %s", err) t.Errorf("Unexpected error: insert steps: %s", err)
return return
} }
procs, err := store.ProcList(&model.Pipeline{ID: 1}) steps, err := store.StepList(&model.Pipeline{ID: 1})
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
if got, want := len(procs), 2; got != want { if got, want := len(steps), 2; got != want {
t.Errorf("Want %d procs, got %d", want, got) t.Errorf("Want %d steps, got %d", want, got)
} }
} }
func TestProcUpdate(t *testing.T) { func TestStepUpdate(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.Step), new(model.Pipeline))
defer closer() defer closer()
proc := &model.Proc{ step := &model.Step{
PipelineID: 1, PipelineID: 1,
PID: 1, PID: 1,
PPID: 2, PPID: 2,
@ -151,30 +151,30 @@ func TestProcUpdate(t *testing.T) {
Platform: "linux/amd64", Platform: "linux/amd64",
Environ: map[string]string{"GOLANG": "tip"}, Environ: map[string]string{"GOLANG": "tip"},
} }
if err := store.ProcCreate([]*model.Proc{proc}); err != nil { if err := store.StepCreate([]*model.Step{step}); err != nil {
t.Errorf("Unexpected error: insert proc: %s", err) t.Errorf("Unexpected error: insert step: %s", err)
return return
} }
proc.State = "running" step.State = "running"
if err := store.ProcUpdate(proc); err != nil { if err := store.StepUpdate(step); err != nil {
t.Errorf("Unexpected error: update proc: %s", err) t.Errorf("Unexpected error: update step: %s", err)
return return
} }
updated, err := store.ProcFind(&model.Pipeline{ID: 1}, 1) updated, err := store.StepFind(&model.Pipeline{ID: 1}, 1)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
return return
} }
if got, want := updated.State, model.StatusRunning; got != want { if got, want := updated.State, model.StatusRunning; got != want {
t.Errorf("Want proc name %s, got %s", want, got) t.Errorf("Want step name %s, got %s", want, got)
} }
} }
func TestProcIndexes(t *testing.T) { func TestStepIndexes(t *testing.T) {
store, closer := newTestStore(t, new(model.Proc), new(model.Pipeline)) store, closer := newTestStore(t, new(model.Step), new(model.Pipeline))
defer closer() defer closer()
if err := store.ProcCreate([]*model.Proc{ if err := store.StepCreate([]*model.Step{
{ {
PipelineID: 1, PipelineID: 1,
PID: 1, PID: 1,
@ -184,12 +184,12 @@ func TestProcIndexes(t *testing.T) {
Name: "build", Name: "build",
}, },
}); err != nil { }); err != nil {
t.Errorf("Unexpected error: insert procs: %s", err) t.Errorf("Unexpected error: insert steps: %s", err)
return return
} }
// fail due to duplicate pid // fail due to duplicate pid
if err := store.ProcCreate([]*model.Proc{ if err := store.StepCreate([]*model.Step{
{ {
PipelineID: 1, PipelineID: 1,
PID: 1, PID: 1,
@ -203,4 +203,4 @@ func TestProcIndexes(t *testing.T) {
} }
} }
// TODO: func TestProcCascade(t *testing.T) {} // TODO: func TestStepCascade(t *testing.T) {}


@ -24,7 +24,7 @@ import (
) )
func TestUsers(t *testing.T) { func TestUsers(t *testing.T) {
store, closer := newTestStore(t, new(model.User), new(model.Repo), new(model.Pipeline), new(model.Proc), new(model.Perm)) store, closer := newTestStore(t, new(model.User), new(model.Repo), new(model.Pipeline), new(model.Step), new(model.Perm))
defer closer() defer closer()
g := goblin.Goblin(t) g := goblin.Goblin(t)
@ -38,7 +38,7 @@ func TestUsers(t *testing.T) {
g.Assert(err).IsNil() g.Assert(err).IsNil()
_, err = store.engine.Exec("DELETE FROM pipelines") _, err = store.engine.Exec("DELETE FROM pipelines")
g.Assert(err).IsNil() g.Assert(err).IsNil()
_, err = store.engine.Exec("DELETE FROM procs") _, err = store.engine.Exec("DELETE FROM steps")
g.Assert(err).IsNil() g.Assert(err).IsNil()
}) })


@ -110,7 +110,7 @@ func (_m *Store) ConfigsForPipeline(pipelineID int64) ([]*model.Config, error) {
} }
// CreatePipeline provides a mock function with given fields: _a0, _a1 // CreatePipeline provides a mock function with given fields: _a0, _a1
func (_m *Store) CreatePipeline(_a0 *model.Pipeline, _a1 ...*model.Proc) error { func (_m *Store) CreatePipeline(_a0 *model.Pipeline, _a1 ...*model.Step) error {
_va := make([]interface{}, len(_a1)) _va := make([]interface{}, len(_a1))
for _i := range _a1 { for _i := range _a1 {
_va[_i] = _a1[_i] _va[_i] = _a1[_i]
@ -121,7 +121,7 @@ func (_m *Store) CreatePipeline(_a0 *model.Pipeline, _a1 ...*model.Proc) error {
ret := _m.Called(_ca...) ret := _m.Called(_ca...)
var r0 error var r0 error
if rf, ok := ret.Get(0).(func(*model.Pipeline, ...*model.Proc) error); ok { if rf, ok := ret.Get(0).(func(*model.Pipeline, ...*model.Step) error); ok {
r0 = rf(_a0, _a1...) r0 = rf(_a0, _a1...)
} else { } else {
r0 = ret.Error(0) r0 = ret.Error(0)
@ -347,11 +347,11 @@ func (_m *Store) FileCreate(_a0 *model.File, _a1 io.Reader) error {
} }
// FileFind provides a mock function with given fields: _a0, _a1 // FileFind provides a mock function with given fields: _a0, _a1
func (_m *Store) FileFind(_a0 *model.Proc, _a1 string) (*model.File, error) { func (_m *Store) FileFind(_a0 *model.Step, _a1 string) (*model.File, error) {
ret := _m.Called(_a0, _a1) ret := _m.Called(_a0, _a1)
var r0 *model.File var r0 *model.File
if rf, ok := ret.Get(0).(func(*model.Proc, string) *model.File); ok { if rf, ok := ret.Get(0).(func(*model.Step, string) *model.File); ok {
r0 = rf(_a0, _a1) r0 = rf(_a0, _a1)
} else { } else {
if ret.Get(0) != nil { if ret.Get(0) != nil {
@ -360,7 +360,7 @@ func (_m *Store) FileFind(_a0 *model.Proc, _a1 string) (*model.File, error) {
} }
var r1 error var r1 error
if rf, ok := ret.Get(1).(func(*model.Proc, string) error); ok { if rf, ok := ret.Get(1).(func(*model.Step, string) error); ok {
r1 = rf(_a0, _a1) r1 = rf(_a0, _a1)
} else { } else {
r1 = ret.Error(1) r1 = ret.Error(1)
@ -393,11 +393,11 @@ func (_m *Store) FileList(_a0 *model.Pipeline) ([]*model.File, error) {
} }
// FileRead provides a mock function with given fields: _a0, _a1 // FileRead provides a mock function with given fields: _a0, _a1
func (_m *Store) FileRead(_a0 *model.Proc, _a1 string) (io.ReadCloser, error) { func (_m *Store) FileRead(_a0 *model.Step, _a1 string) (io.ReadCloser, error) {
ret := _m.Called(_a0, _a1) ret := _m.Called(_a0, _a1)
var r0 io.ReadCloser var r0 io.ReadCloser
if rf, ok := ret.Get(0).(func(*model.Proc, string) io.ReadCloser); ok { if rf, ok := ret.Get(0).(func(*model.Step, string) io.ReadCloser); ok {
r0 = rf(_a0, _a1) r0 = rf(_a0, _a1)
} else { } else {
if ret.Get(0) != nil { if ret.Get(0) != nil {
@ -406,7 +406,7 @@ func (_m *Store) FileRead(_a0 *model.Proc, _a1 string) (io.ReadCloser, error) {
} }
var r1 error var r1 error
if rf, ok := ret.Get(1).(func(*model.Proc, string) error); ok { if rf, ok := ret.Get(1).(func(*model.Step, string) error); ok {
r1 = rf(_a0, _a1) r1 = rf(_a0, _a1)
} else { } else {
r1 = ret.Error(1) r1 = ret.Error(1)
@ -937,11 +937,11 @@ func (_m *Store) HasRedirectionForRepo(_a0 int64, _a1 string) (bool, error) {
} }
// LogFind provides a mock function with given fields: _a0 // LogFind provides a mock function with given fields: _a0
func (_m *Store) LogFind(_a0 *model.Proc) (io.ReadCloser, error) { func (_m *Store) LogFind(_a0 *model.Step) (io.ReadCloser, error) {
ret := _m.Called(_a0) ret := _m.Called(_a0)
var r0 io.ReadCloser var r0 io.ReadCloser
if rf, ok := ret.Get(0).(func(*model.Proc) io.ReadCloser); ok { if rf, ok := ret.Get(0).(func(*model.Step) io.ReadCloser); ok {
r0 = rf(_a0) r0 = rf(_a0)
} else { } else {
if ret.Get(0) != nil { if ret.Get(0) != nil {
@ -950,7 +950,7 @@ func (_m *Store) LogFind(_a0 *model.Proc) (io.ReadCloser, error) {
} }
var r1 error var r1 error
if rf, ok := ret.Get(1).(func(*model.Proc) error); ok { if rf, ok := ret.Get(1).(func(*model.Step) error); ok {
r1 = rf(_a0) r1 = rf(_a0)
} else { } else {
r1 = ret.Error(1) r1 = ret.Error(1)
@ -960,11 +960,11 @@ func (_m *Store) LogFind(_a0 *model.Proc) (io.ReadCloser, error) {
} }
// LogSave provides a mock function with given fields: _a0, _a1 // LogSave provides a mock function with given fields: _a0, _a1
func (_m *Store) LogSave(_a0 *model.Proc, _a1 io.Reader) error { func (_m *Store) LogSave(_a0 *model.Step, _a1 io.Reader) error {
ret := _m.Called(_a0, _a1) ret := _m.Called(_a0, _a1)
var r0 error var r0 error
if rf, ok := ret.Get(0).(func(*model.Proc, io.Reader) error); ok { if rf, ok := ret.Get(0).(func(*model.Step, io.Reader) error); ok {
r0 = rf(_a0, _a1) r0 = rf(_a0, _a1)
} else { } else {
r0 = ret.Error(0) r0 = ret.Error(0)
@ -1126,140 +1126,6 @@ func (_m *Store) PipelineConfigCreate(_a0 *model.PipelineConfig) error {
return r0 return r0
} }
// ProcChild provides a mock function with given fields: _a0, _a1, _a2
func (_m *Store) ProcChild(_a0 *model.Pipeline, _a1 int, _a2 string) (*model.Proc, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 *model.Proc
if rf, ok := ret.Get(0).(func(*model.Pipeline, int, string) *model.Proc); ok {
r0 = rf(_a0, _a1, _a2)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Proc)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Pipeline, int, string) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ProcClear provides a mock function with given fields: _a0
func (_m *Store) ProcClear(_a0 *model.Pipeline) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(*model.Pipeline) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// ProcCreate provides a mock function with given fields: _a0
func (_m *Store) ProcCreate(_a0 []*model.Proc) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func([]*model.Proc) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// ProcFind provides a mock function with given fields: _a0, _a1
func (_m *Store) ProcFind(_a0 *model.Pipeline, _a1 int) (*model.Proc, error) {
ret := _m.Called(_a0, _a1)
var r0 *model.Proc
if rf, ok := ret.Get(0).(func(*model.Pipeline, int) *model.Proc); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Proc)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Pipeline, int) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ProcList provides a mock function with given fields: _a0
func (_m *Store) ProcList(_a0 *model.Pipeline) ([]*model.Proc, error) {
ret := _m.Called(_a0)
var r0 []*model.Proc
if rf, ok := ret.Get(0).(func(*model.Pipeline) []*model.Proc); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Proc)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Pipeline) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ProcLoad provides a mock function with given fields: _a0
func (_m *Store) ProcLoad(_a0 int64) (*model.Proc, error) {
ret := _m.Called(_a0)
var r0 *model.Proc
if rf, ok := ret.Get(0).(func(int64) *model.Proc); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Proc)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(int64) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ProcUpdate provides a mock function with given fields: _a0
func (_m *Store) ProcUpdate(_a0 *model.Proc) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(*model.Proc) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// RegistryCreate provides a mock function with given fields: _a0 // RegistryCreate provides a mock function with given fields: _a0
func (_m *Store) RegistryCreate(_a0 *model.Registry) error { func (_m *Store) RegistryCreate(_a0 *model.Registry) error {
ret := _m.Called(_a0) ret := _m.Called(_a0)
@ -1531,6 +1397,140 @@ func (_m *Store) ServerConfigSet(_a0 string, _a1 string) error {
return r0 return r0
} }
// StepChild provides a mock function with given fields: _a0, _a1, _a2
func (_m *Store) StepChild(_a0 *model.Pipeline, _a1 int, _a2 string) (*model.Step, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 *model.Step
if rf, ok := ret.Get(0).(func(*model.Pipeline, int, string) *model.Step); ok {
r0 = rf(_a0, _a1, _a2)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Step)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Pipeline, int, string) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// StepClear provides a mock function with given fields: _a0
func (_m *Store) StepClear(_a0 *model.Pipeline) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(*model.Pipeline) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// StepCreate provides a mock function with given fields: _a0
func (_m *Store) StepCreate(_a0 []*model.Step) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func([]*model.Step) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// StepFind provides a mock function with given fields: _a0, _a1
func (_m *Store) StepFind(_a0 *model.Pipeline, _a1 int) (*model.Step, error) {
ret := _m.Called(_a0, _a1)
var r0 *model.Step
if rf, ok := ret.Get(0).(func(*model.Pipeline, int) *model.Step); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Step)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Pipeline, int) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// StepList provides a mock function with given fields: _a0
func (_m *Store) StepList(_a0 *model.Pipeline) ([]*model.Step, error) {
ret := _m.Called(_a0)
var r0 []*model.Step
if rf, ok := ret.Get(0).(func(*model.Pipeline) []*model.Step); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.Step)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.Pipeline) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// StepLoad provides a mock function with given fields: _a0
func (_m *Store) StepLoad(_a0 int64) (*model.Step, error) {
ret := _m.Called(_a0)
var r0 *model.Step
if rf, ok := ret.Get(0).(func(int64) *model.Step); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Step)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(int64) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// StepUpdate provides a mock function with given fields: _a0
func (_m *Store) StepUpdate(_a0 *model.Step) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(*model.Step) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// TaskDelete provides a mock function with given fields: _a0 // TaskDelete provides a mock function with given fields: _a0
func (_m *Store) TaskDelete(_a0 string) error { func (_m *Store) TaskDelete(_a0 string) error {
ret := _m.Called(_a0) ret := _m.Called(_a0)


@ -91,8 +91,8 @@ type Store interface {
GetPipelineQueue() ([]*model.Feed, error) GetPipelineQueue() ([]*model.Feed, error)
// GetPipelineCount gets a count of all pipelines in the system. // GetPipelineCount gets a count of all pipelines in the system.
GetPipelineCount() (int64, error) GetPipelineCount() (int64, error)
// CreatePipeline creates a new pipeline and jobs. // CreatePipeline creates a new pipeline and steps.
CreatePipeline(*model.Pipeline, ...*model.Proc) error CreatePipeline(*model.Pipeline, ...*model.Step) error
// UpdatePipeline updates a pipeline. // UpdatePipeline updates a pipeline.
UpdatePipeline(*model.Pipeline) error UpdatePipeline(*model.Pipeline) error
@ -137,25 +137,25 @@ type Store interface {
RegistryUpdate(*model.Registry) error RegistryUpdate(*model.Registry) error
RegistryDelete(repo *model.Repo, addr string) error RegistryDelete(repo *model.Repo, addr string) error
// Procs // Steps
ProcLoad(int64) (*model.Proc, error) StepLoad(int64) (*model.Step, error)
ProcFind(*model.Pipeline, int) (*model.Proc, error) StepFind(*model.Pipeline, int) (*model.Step, error)
ProcChild(*model.Pipeline, int, string) (*model.Proc, error) StepChild(*model.Pipeline, int, string) (*model.Step, error)
ProcList(*model.Pipeline) ([]*model.Proc, error) StepList(*model.Pipeline) ([]*model.Step, error)
ProcCreate([]*model.Proc) error StepCreate([]*model.Step) error
ProcUpdate(*model.Proc) error StepUpdate(*model.Step) error
ProcClear(*model.Pipeline) error StepClear(*model.Pipeline) error
// Logs // Logs
LogFind(*model.Proc) (io.ReadCloser, error) LogFind(*model.Step) (io.ReadCloser, error)
// TODO: since we do ReadAll in any case a ioReader is not the best idear // TODO: since we do ReadAll in any case a ioReader is not the best idear
// so either find a way to write log in chunks by xorm ... // so either find a way to write log in chunks by xorm ...
LogSave(*model.Proc, io.Reader) error LogSave(*model.Step, io.Reader) error
// Files // Files
FileList(*model.Pipeline) ([]*model.File, error) FileList(*model.Pipeline) ([]*model.File, error)
FileFind(*model.Proc, string) (*model.File, error) FileFind(*model.Step, string) (*model.File, error)
FileRead(*model.Proc, string) (io.ReadCloser, error) FileRead(*model.Step, string) (io.ReadCloser, error)
FileCreate(*model.File, io.Reader) error FileCreate(*model.File, io.Reader) error
// Tasks // Tasks
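
Taken together, the renamed Store methods read like this at a call site. A minimal sketch, assuming the server/model and server/store import paths of this repository; the repo ID and step names are placeholders and error handling is reduced to returning:

package main

import (
	"log"

	"github.com/woodpecker-ci/woodpecker/server/model"
	"github.com/woodpecker-ci/woodpecker/server/store"
)

// createAndListSteps exercises the renamed API: a pipeline is created together
// with its steps, then the steps are read back for that pipeline.
func createAndListSteps(s store.Store) error {
	pipeline := &model.Pipeline{RepoID: 1}
	steps := []*model.Step{
		{PID: 1, Name: "clone"},
		{PID: 2, Name: "build"},
	}
	// CreatePipeline inserts the pipeline first, then each step with its
	// PipelineID set (see the storage code earlier in this diff).
	if err := s.CreatePipeline(pipeline, steps...); err != nil {
		return err
	}
	list, err := s.StepList(pipeline)
	if err != nil {
		return err
	}
	for _, step := range list {
		log.Printf("step %d: %s (%v)", step.PID, step.Name, step.State)
	}
	return nil
}

// Wiring up a concrete store.Store (e.g. the xorm-backed storage) is out of
// scope for this sketch.
func main() {}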

web/components.d.ts

@ -76,10 +76,10 @@ declare module '@vue/runtime-core' {
PipelineItem: typeof import('./src/components/repo/pipeline/PipelineItem.vue')['default'] PipelineItem: typeof import('./src/components/repo/pipeline/PipelineItem.vue')['default']
PipelineList: typeof import('./src/components/repo/pipeline/PipelineList.vue')['default'] PipelineList: typeof import('./src/components/repo/pipeline/PipelineList.vue')['default']
PipelineLog: typeof import('./src/components/repo/pipeline/PipelineLog.vue')['default'] PipelineLog: typeof import('./src/components/repo/pipeline/PipelineLog.vue')['default']
PipelineProcDuration: typeof import('./src/components/repo/pipeline/PipelineProcDuration.vue')['default']
PipelineProcList: typeof import('./src/components/repo/pipeline/PipelineProcList.vue')['default']
PipelineRunningIcon: typeof import('./src/components/repo/pipeline/PipelineRunningIcon.vue')['default'] PipelineRunningIcon: typeof import('./src/components/repo/pipeline/PipelineRunningIcon.vue')['default']
PipelineStatusIcon: typeof import('./src/components/repo/pipeline/PipelineStatusIcon.vue')['default'] PipelineStatusIcon: typeof import('./src/components/repo/pipeline/PipelineStatusIcon.vue')['default']
PipelineStepDuration: typeof import('./src/components/repo/pipeline/PipelineStepDuration.vue')['default']
PipelineStepList: typeof import('./src/components/repo/pipeline/PipelineStepList.vue')['default']
Popup: typeof import('./src/components/layout/Popup.vue')['default'] Popup: typeof import('./src/components/layout/Popup.vue')['default']
RadioField: typeof import('./src/components/form/RadioField.vue')['default'] RadioField: typeof import('./src/components/form/RadioField.vue')['default']
RegistriesTab: typeof import('./src/components/repo/settings/RegistriesTab.vue')['default'] RegistriesTab: typeof import('./src/components/repo/settings/RegistriesTab.vue')['default']


@ -2,9 +2,9 @@
<div v-if="pipeline" class="flex flex-col pt-10 md:pt-0"> <div v-if="pipeline" class="flex flex-col pt-10 md:pt-0">
<div <div
class="fixed top-0 left-0 w-full md:hidden flex px-4 py-2 bg-gray-600 dark:bg-dark-gray-800 text-gray-50" class="fixed top-0 left-0 w-full md:hidden flex px-4 py-2 bg-gray-600 dark:bg-dark-gray-800 text-gray-50"
@click="$emit('update:proc-id', null)" @click="$emit('update:step-id', null)"
> >
<span>{{ proc?.name }}</span> <span>{{ step?.name }}</span>
<Icon name="close" class="ml-auto" /> <Icon name="close" class="ml-auto" />
</div> </div>
@ -15,14 +15,14 @@
> >
<div v-show="showActions" class="absolute top-0 right-0 z-40 mt-2 mr-4 hidden md:flex"> <div v-show="showActions" class="absolute top-0 right-0 z-40 mt-2 mr-4 hidden md:flex">
<Button <Button
v-if="proc?.end_time !== undefined" v-if="step?.end_time !== undefined"
:is-loading="downloadInProgress" :is-loading="downloadInProgress"
:title="$t('repo.pipeline.actions.log_download')" :title="$t('repo.pipeline.actions.log_download')"
start-icon="download" start-icon="download"
@click="download" @click="download"
/> />
<Button <Button
v-if="proc?.end_time === undefined" v-if="step?.end_time === undefined"
:title=" :title="
autoScroll ? $t('repo.pipeline.actions.log_auto_scroll_off') : $t('repo.pipeline.actions.log_auto_scroll') autoScroll ? $t('repo.pipeline.actions.log_auto_scroll_off') : $t('repo.pipeline.actions.log_auto_scroll')
" "
@ -45,20 +45,20 @@
</div> </div>
<div class="m-auto text-xl text-color"> <div class="m-auto text-xl text-color">
<span v-if="proc?.error" class="text-red-400">{{ proc.error }}</span> <span v-if="step?.error" class="text-red-400">{{ step.error }}</span>
<span v-else-if="proc?.state === 'skipped'" class="text-red-400">{{ <span v-else-if="step?.state === 'skipped'" class="text-red-400">{{
$t('repo.pipeline.actions.canceled') $t('repo.pipeline.actions.canceled')
}}</span> }}</span>
<span v-else-if="!proc?.start_time">{{ $t('repo.pipeline.step_not_started') }}</span> <span v-else-if="!step?.start_time">{{ $t('repo.pipeline.step_not_started') }}</span>
<div v-else-if="!loadedLogs">{{ $t('repo.pipeline.loading') }}</div> <div v-else-if="!loadedLogs">{{ $t('repo.pipeline.loading') }}</div>
</div> </div>
<div <div
v-if="proc?.end_time !== undefined" v-if="step?.end_time !== undefined"
:class="proc.exit_code == 0 ? 'dark:text-lime-400 text-lime-700' : 'dark:text-red-400 text-red-600'" :class="step.exit_code == 0 ? 'dark:text-lime-400 text-lime-700' : 'dark:text-red-400 text-red-600'"
class="w-full bg-gray-200 dark:bg-dark-gray-800 text-md p-4" class="w-full bg-gray-200 dark:bg-dark-gray-800 text-md p-4"
> >
{{ $t('repo.pipeline.exit_code', { exitCode: proc.exit_code }) }} {{ $t('repo.pipeline.exit_code', { exitCode: step.exit_code }) }}
</div> </div>
</div> </div>
</div> </div>
@ -78,7 +78,7 @@ import Icon from '~/components/atomic/Icon.vue';
import useApiClient from '~/compositions/useApiClient'; import useApiClient from '~/compositions/useApiClient';
import useNotifications from '~/compositions/useNotifications'; import useNotifications from '~/compositions/useNotifications';
import { Pipeline, Repo } from '~/lib/api/types'; import { Pipeline, Repo } from '~/lib/api/types';
import { findProc, isProcFinished, isProcRunning } from '~/utils/helpers'; import { findStep, isStepFinished, isStepRunning } from '~/utils/helpers';
type LogLine = { type LogLine = {
index: number; index: number;
@ -97,7 +97,7 @@ export default defineComponent({
required: true, required: true,
}, },
procId: { stepId: {
type: Number, type: Number,
required: true, required: true,
}, },
@ -105,22 +105,22 @@ export default defineComponent({
emits: { emits: {
// eslint-disable-next-line @typescript-eslint/no-unused-vars // eslint-disable-next-line @typescript-eslint/no-unused-vars
'update:proc-id': (procId: number | null) => true, 'update:step-id': (stepId: number | null) => true,
}, },
setup(props) { setup(props) {
const notifications = useNotifications(); const notifications = useNotifications();
const i18n = useI18n(); const i18n = useI18n();
const pipeline = toRef(props, 'pipeline'); const pipeline = toRef(props, 'pipeline');
const procId = toRef(props, 'procId'); const stepId = toRef(props, 'stepId');
const repo = inject<Ref<Repo>>('repo'); const repo = inject<Ref<Repo>>('repo');
const apiClient = useApiClient(); const apiClient = useApiClient();
const loadedProcSlug = ref<string>(); const loadedStepSlug = ref<string>();
const procSlug = computed( const stepSlug = computed(
() => `${repo?.value.owner} - ${repo?.value.name} - ${pipeline.value.id} - ${procId.value}`, () => `${repo?.value.owner} - ${repo?.value.name} - ${pipeline.value.id} - ${stepId.value}`,
); );
const proc = computed(() => pipeline.value && findProc(pipeline.value.procs || [], procId.value)); const step = computed(() => pipeline.value && findStep(pipeline.value.steps || [], stepId.value));
const stream = ref<EventSource>(); const stream = ref<EventSource>();
const log = ref<LogLine[]>(); const log = ref<LogLine[]>();
const consoleElement = ref<Element>(); const consoleElement = ref<Element>();
@ -128,8 +128,8 @@ export default defineComponent({
const loadedLogs = computed(() => !!log.value); const loadedLogs = computed(() => !!log.value);
const hasLogs = computed( const hasLogs = computed(
() => () =>
// we do not have logs for skipped jobs // we do not have logs for skipped steps
repo?.value && pipeline.value && proc.value && proc.value.state !== 'skipped' && proc.value.state !== 'killed', repo?.value && pipeline.value && step.value && step.value.state !== 'skipped' && step.value.state !== 'killed',
); );
const autoScroll = useStorage('log-auto-scroll', false); const autoScroll = useStorage('log-auto-scroll', false);
const showActions = ref(false); const showActions = ref(false);
@ -200,13 +200,13 @@ export default defineComponent({
}, 500); }, 500);
async function download() { async function download() {
if (!repo?.value || !pipeline.value || !proc.value) { if (!repo?.value || !pipeline.value || !step.value) {
throw new Error('The repository, pipeline or proc was undefined'); throw new Error('The repository, pipeline or step was undefined');
} }
let logs; let logs;
try { try {
downloadInProgress.value = true; downloadInProgress.value = true;
logs = await apiClient.getLogs(repo.value.owner, repo.value.name, pipeline.value.number, proc.value.pid); logs = await apiClient.getLogs(repo.value.owner, repo.value.name, pipeline.value.number, step.value.pid);
} catch (e) { } catch (e) {
notifications.notifyError(e, i18n.t('repo.pipeline.log_download_error')); notifications.notifyError(e, i18n.t('repo.pipeline.log_download_error'));
return; return;
@ -223,7 +223,7 @@ export default defineComponent({
fileLink.href = fileURL; fileLink.href = fileURL;
fileLink.setAttribute( fileLink.setAttribute(
'download', 'download',
`${repo.value.owner}-${repo.value.name}-${pipeline.value.number}-${proc.value.name}.log`, `${repo.value.owner}-${repo.value.name}-${pipeline.value.number}-${step.value.name}.log`,
); );
document.body.appendChild(fileLink); document.body.appendChild(fileLink);
@ -233,10 +233,10 @@ export default defineComponent({
} }
async function loadLogs() { async function loadLogs() {
if (loadedProcSlug.value === procSlug.value) { if (loadedStepSlug.value === stepSlug.value) {
return; return;
} }
loadedProcSlug.value = procSlug.value; loadedStepSlug.value = stepSlug.value;
log.value = undefined; log.value = undefined;
logBuffer.value = []; logBuffer.value = [];
ansiUp.value = new AnsiUp(); ansiUp.value = new AnsiUp();
@ -250,26 +250,26 @@ export default defineComponent({
stream.value.close(); stream.value.close();
} }
if (!hasLogs.value || !proc.value) { if (!hasLogs.value || !step.value) {
return; return;
} }
if (isProcFinished(proc.value)) { if (isStepFinished(step.value)) {
const logs = await apiClient.getLogs(repo.value.owner, repo.value.name, pipeline.value.number, proc.value.pid); const logs = await apiClient.getLogs(repo.value.owner, repo.value.name, pipeline.value.number, step.value.pid);
logs?.forEach((line) => writeLog({ index: line.pos, text: line.out, time: line.time })); logs?.forEach((line) => writeLog({ index: line.pos, text: line.out, time: line.time }));
flushLogs(false); flushLogs(false);
} }
if (isProcRunning(proc.value)) { if (isStepRunning(step.value)) {
// load stream of parent process (which receives all child processes logs) // load stream of parent process (which receives all child processes logs)
// TODO: change stream to only send data of single child process // TODO: change stream to only send data of single child process
stream.value = apiClient.streamLogs( stream.value = apiClient.streamLogs(
repo.value.owner, repo.value.owner,
repo.value.name, repo.value.name,
pipeline.value.number, pipeline.value.number,
proc.value.ppid, step.value.ppid,
(line) => { (line) => {
if (line?.proc !== proc.value?.name) { if (line?.step !== step.value?.name) {
return; return;
} }
writeLog({ index: line.pos, text: line.out, time: line.time }); writeLog({ index: line.pos, text: line.out, time: line.time });
@ -283,12 +283,12 @@ export default defineComponent({
loadLogs(); loadLogs();
}); });
watch(procSlug, () => { watch(stepSlug, () => {
loadLogs(); loadLogs();
}); });
watch(proc, (oldProc, newProc) => { watch(step, (oldStep, newStep) => {
if (oldProc && oldProc.name === newProc?.name && oldProc?.end_time !== newProc?.end_time) { if (oldStep && oldStep.name === newStep?.name && oldStep?.end_time !== newStep?.end_time) {
if (autoScroll.value) { if (autoScroll.value) {
scrollDown(); scrollDown();
} }
@ -297,7 +297,7 @@ export default defineComponent({
return { return {
consoleElement, consoleElement,
proc, step,
log, log,
loadedLogs, loadedLogs,
hasLogs, hasLogs,


@ -1,30 +1,30 @@
<template> <template>
<span v-if="proc.start_time !== undefined" class="ml-auto text-sm">{{ duration }}</span> <span v-if="step.start_time !== undefined" class="ml-auto text-sm">{{ duration }}</span>
</template> </template>
<script lang="ts"> <script lang="ts">
import { computed, defineComponent, PropType, toRef } from 'vue'; import { computed, defineComponent, PropType, toRef } from 'vue';
import { useElapsedTime } from '~/compositions/useElapsedTime'; import { useElapsedTime } from '~/compositions/useElapsedTime';
import { PipelineProc } from '~/lib/api/types'; import { PipelineStep } from '~/lib/api/types';
import { durationAsNumber } from '~/utils/duration'; import { durationAsNumber } from '~/utils/duration';
export default defineComponent({ export default defineComponent({
name: 'PipelineProcDuration', name: 'PipelineStepDuration',
props: { props: {
proc: { step: {
type: Object as PropType<PipelineProc>, type: Object as PropType<PipelineStep>,
required: true, required: true,
}, },
}, },
setup(props) { setup(props) {
const proc = toRef(props, 'proc'); const step = toRef(props, 'step');
const durationRaw = computed(() => { const durationRaw = computed(() => {
const start = proc.value.start_time || 0; const start = step.value.start_time || 0;
const end = proc.value.end_time || 0; const end = step.value.end_time || 0;
if (end === 0 && start === 0) { if (end === 0 && start === 0) {
return undefined; return undefined;
@ -37,7 +37,7 @@ export default defineComponent({
return (end - start) * 1000; return (end - start) * 1000;
}); });
const running = computed(() => proc.value.state === 'running'); const running = computed(() => step.value.state === 'running');
const { time: durationElapsed } = useElapsedTime(running, durationRaw); const { time: durationElapsed } = useElapsedTime(running, durationRaw);
const duration = computed(() => { const duration = computed(() => {


@ -38,19 +38,19 @@
</div> </div>
</div> </div>
<div v-if="pipeline.procs === undefined || pipeline.procs.length === 0" class="m-auto mt-4"> <div v-if="pipeline.steps === undefined || pipeline.steps.length === 0" class="m-auto mt-4">
<span>{{ $t('repo.pipeline.no_pipeline_steps') }}</span> <span>{{ $t('repo.pipeline.no_pipeline_steps') }}</span>
</div> </div>
<div class="flex flex-grow flex-col relative min-h-0 overflow-y-auto gap-2"> <div class="flex flex-grow flex-col relative min-h-0 overflow-y-auto gap-2">
<div <div
v-for="proc in pipeline.procs" v-for="step in pipeline.steps"
:key="proc.id" :key="step.id"
class="p-2 md:rounded-md bg-white shadow dark:border-b-dark-gray-600 dark:bg-dark-gray-700" class="p-2 md:rounded-md bg-white shadow dark:border-b-dark-gray-600 dark:bg-dark-gray-700"
> >
<div class="flex flex-col gap-2"> <div class="flex flex-col gap-2">
<div v-if="proc.environ" class="flex flex-wrap gap-x-1 gap-y-2 text-xs justify-end pt-1"> <div v-if="step.environ" class="flex flex-wrap gap-x-1 gap-y-2 text-xs justify-end pt-1">
<div v-for="(value, key) in proc.environ" :key="key"> <div v-for="(value, key) in step.environ" :key="key">
<span <span
class="pl-2 pr-1 py-0.5 bg-gray-800 text-gray-200 dark:bg-gray-600 border-2 border-gray-800 dark:border-gray-600 rounded-l-full" class="pl-2 pr-1 py-0.5 bg-gray-800 text-gray-200 dark:bg-gray-600 border-2 border-gray-800 dark:border-gray-600 rounded-l-full"
> >
@ -62,52 +62,54 @@
</div> </div>
</div> </div>
<button <button
v-if="pipeline.procs && pipeline.procs.length > 1" v-if="pipeline.steps && pipeline.steps.length > 1"
type="button" type="button"
:title="proc.name" :title="step.name"
class="flex items-center gap-2 py-2 px-1 hover:bg-black hover:bg-opacity-10 dark:hover:bg-white dark:hover:bg-opacity-5 rounded-md" class="flex items-center gap-2 py-2 px-1 hover:bg-black hover:bg-opacity-10 dark:hover:bg-white dark:hover:bg-opacity-5 rounded-md"
@click="procsCollapsed[proc.id] = !!!procsCollapsed[proc.id]" @click="stepsCollapsed[step.id] = !!!stepsCollapsed[step.id]"
> >
<Icon <Icon
name="chevron-right" name="chevron-right"
class="transition-transform duration-150 min-w-6 h-6" class="transition-transform duration-150 min-w-6 h-6"
:class="{ 'transform rotate-90': !procsCollapsed[proc.id] }" :class="{ 'transform rotate-90': !stepsCollapsed[step.id] }"
/> />
<span class="truncate">{{ proc.name }}</span> <span class="truncate">{{ step.name }}</span>
</button> </button>
</div> </div>
<div <div
class="transition-height duration-150 overflow-hidden" class="transition-height duration-150 overflow-hidden"
:class="{ :class="{
'max-h-screen': !procsCollapsed[proc.id], 'max-h-screen': !stepsCollapsed[step.id],
'max-h-0': procsCollapsed[proc.id], 'max-h-0': stepsCollapsed[step.id],
'ml-6': pipeline.procs && pipeline.procs.length > 1, 'ml-6': pipeline.steps && pipeline.steps.length > 1,
}" }"
> >
<button <button
v-for="job in proc.children" v-for="subStep in step.children"
:key="job.pid" :key="subStep.pid"
type="button" type="button"
:title="job.name" :title="subStep.name"
class="flex p-2 gap-2 border-2 border-transparent rounded-md items-center hover:bg-black hover:bg-opacity-10 dark:hover:bg-white dark:hover:bg-opacity-5 w-full" class="flex p-2 gap-2 border-2 border-transparent rounded-md items-center hover:bg-black hover:bg-opacity-10 dark:hover:bg-white dark:hover:bg-opacity-5 w-full"
:class="{ :class="{
'bg-black bg-opacity-10 dark:bg-white dark:bg-opacity-5': selectedProcId && selectedProcId === job.pid, 'bg-black bg-opacity-10 dark:bg-white dark:bg-opacity-5':
selectedStepId && selectedStepId === subStep.pid,
'mt-1': 'mt-1':
(pipeline.procs && pipeline.procs.length > 1) || (proc.children && job.pid !== proc.children[0].pid), (pipeline.steps && pipeline.steps.length > 1) ||
(step.children && subStep.pid !== step.children[0].pid),
}" }"
@click="$emit('update:selected-proc-id', job.pid)" @click="$emit('update:selected-step-id', subStep.pid)"
> >
<div <div
class="min-w-2 h-2 rounded-full" class="min-w-2 h-2 rounded-full"
:class="{ :class="{
'bg-lime-400': ['success'].includes(job.state), 'bg-lime-400': ['success'].includes(subStep.state),
'bg-gray-400': ['pending', 'skipped'].includes(job.state), 'bg-gray-400': ['pending', 'skipped'].includes(subStep.state),
'bg-red-400': ['killed', 'error', 'failure', 'blocked', 'declined'].includes(job.state), 'bg-red-400': ['killed', 'error', 'failure', 'blocked', 'declined'].includes(subStep.state),
'bg-blue-400': ['started', 'running'].includes(job.state), 'bg-blue-400': ['started', 'running'].includes(subStep.state),
}" }"
/> />
<span class="truncate">{{ job.name }}</span> <span class="truncate">{{ subStep.name }}</span>
<PipelineProcDuration :proc="job" /> <PipelineStepDuration :step="subStep" />
</button> </button>
</div> </div>
</div> </div>
@ -119,21 +121,21 @@
import { ref, toRef } from 'vue'; import { ref, toRef } from 'vue';
import Icon from '~/components/atomic/Icon.vue'; import Icon from '~/components/atomic/Icon.vue';
import PipelineProcDuration from '~/components/repo/pipeline/PipelineProcDuration.vue'; import PipelineStepDuration from '~/components/repo/pipeline/PipelineStepDuration.vue';
import usePipeline from '~/compositions/usePipeline'; import usePipeline from '~/compositions/usePipeline';
import { Pipeline, PipelineProc } from '~/lib/api/types'; import { Pipeline, PipelineStep } from '~/lib/api/types';
const props = defineProps<{ const props = defineProps<{
pipeline: Pipeline; pipeline: Pipeline;
selectedProcId?: number | null; selectedStepId?: number | null;
}>(); }>();
defineEmits<{ defineEmits<{
(event: 'update:selected-proc-id', selectedProcId: number): void; (event: 'update:selected-step-id', selectedStepId: number): void;
}>(); }>();
const pipeline = toRef(props, 'pipeline'); const pipeline = toRef(props, 'pipeline');
const { prettyRef } = usePipeline(pipeline); const { prettyRef } = usePipeline(pipeline);
const procsCollapsed = ref<Record<PipelineProc['id'], boolean>>({}); const stepsCollapsed = ref<Record<PipelineStep['id'], boolean>>({});
</script> </script>


@ -32,11 +32,11 @@ export default () => {
pipelineStore.setPipeline(repo.owner, repo.name, pipeline); pipelineStore.setPipeline(repo.owner, repo.name, pipeline);
pipelineStore.setPipelineFeedItem({ ...pipeline, name: repo.name, owner: repo.owner, full_name: repoSlug(repo) }); pipelineStore.setPipelineFeedItem({ ...pipeline, name: repo.name, owner: repo.owner, full_name: repoSlug(repo) });
// contains proc update // contains step update
if (!data.proc) { if (!data.step) {
return; return;
} }
const { proc } = data; const { step } = data;
pipelineStore.setProc(repo.owner, repo.name, pipeline.number, proc); pipelineStore.setStep(repo.owner, repo.name, pipeline.number, step);
}); });
}; };


@ -5,7 +5,7 @@ import {
PipelineConfig, PipelineConfig,
PipelineFeed, PipelineFeed,
PipelineLog, PipelineLog,
PipelineProc, PipelineStep,
Registry, Registry,
Repo, Repo,
RepoPermissions, RepoPermissions,
@ -102,12 +102,12 @@ export default class WoodpeckerClient extends ApiClient {
return this._post(`/api/repos/${owner}/${repo}/pipelines/${pipeline}?${query}`); return this._post(`/api/repos/${owner}/${repo}/pipelines/${pipeline}?${query}`);
} }
getLogs(owner: string, repo: string, pipeline: number, proc: number): Promise<PipelineLog[]> { getLogs(owner: string, repo: string, pipeline: number, step: number): Promise<PipelineLog[]> {
return this._get(`/api/repos/${owner}/${repo}/logs/${pipeline}/${proc}`) as Promise<PipelineLog[]>; return this._get(`/api/repos/${owner}/${repo}/logs/${pipeline}/${step}`) as Promise<PipelineLog[]>;
} }
getArtifact(owner: string, repo: string, pipeline: string, proc: string, file: string): Promise<unknown> { getArtifact(owner: string, repo: string, pipeline: string, step: string, file: string): Promise<unknown> {
return this._get(`/api/repos/${owner}/${repo}/files/${pipeline}/${proc}/${file}?raw=true`); return this._get(`/api/repos/${owner}/${repo}/files/${pipeline}/${step}/${file}?raw=true`);
} }
getArtifactList(owner: string, repo: string, pipeline: string): Promise<unknown> { getArtifactList(owner: string, repo: string, pipeline: string): Promise<unknown> {
@ -211,7 +211,7 @@ export default class WoodpeckerClient extends ApiClient {
} }
// eslint-disable-next-line promise/prefer-await-to-callbacks // eslint-disable-next-line promise/prefer-await-to-callbacks
on(callback: (data: { pipeline?: Pipeline; repo?: Repo; proc?: PipelineProc }) => void): EventSource { on(callback: (data: { pipeline?: Pipeline; repo?: Repo; step?: PipelineStep }) => void): EventSource {
return this._subscribe('/stream/events', callback, { return this._subscribe('/stream/events', callback, {
reconnect: true, reconnect: true,
}); });
@ -221,11 +221,11 @@ export default class WoodpeckerClient extends ApiClient {
owner: string, owner: string,
repo: string, repo: string,
pipeline: number, pipeline: number,
proc: number, step: number,
// eslint-disable-next-line promise/prefer-await-to-callbacks // eslint-disable-next-line promise/prefer-await-to-callbacks
callback: (data: PipelineLog) => void, callback: (data: PipelineLog) => void,
): EventSource { ): EventSource {
return this._subscribe(`/stream/logs/${owner}/${repo}/${pipeline}/${proc}`, callback, { return this._subscribe(`/stream/logs/${owner}/${repo}/${pipeline}/${step}`, callback, {
reconnect: true, reconnect: true,
}); });
} }
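
On the wire only the meaning of the last path parameter changes: the final segment of the logs endpoint now identifies a step PID. A rough sketch in Go of fetching one step's logs over that endpoint; the host, repository slug and numbers are placeholders, and authentication is omitted:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	owner, repo := "octocat", "hello-world" // placeholder repository
	pipeline, step := 42, 1                 // pipeline number and step PID

	url := fmt.Sprintf("https://ci.example.com/api/repos/%s/%s/logs/%d/%d", owner, repo, pipeline, step)
	resp, err := http.Get(url) // a real call also needs an authorization header
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// The response is a JSON array of log lines with pos, out and time fields,
	// matching what the web UI above consumes.
	fmt.Println(string(body))
}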

Some files were not shown because too many files have changed in this diff.