Merge remote-tracking branch 'origin/master' into bitbucket-fix

* origin/master: (59 commits)
  Pull in drone-go
  Readme
  UI bugfixes
  Drone tag
  Bump version
  More prometheus metrics, refactoring
  Bump version
  Test Pause/Resume
  Endpoint that blocks until there are running builds
  Pause/Resume queue
  Bump version
  Latest UI
  Persist intended URL through the OAuth flow
  Bump version
  Latest UI
  Fallback to default config. Allows incremental rollout of custom-path
  Task dependencies to survive restarts
  .drone.yml takes precedence over configured path. Allows incremental rollout
  Assign multiple pending tasks in one go
  Supporting skip_clone
  ...
Laszlo Fogas 2019-07-09 11:17:25 +02:00
commit 6b442bf57e
115 changed files with 5419 additions and 1728 deletions

View file

@ -1,27 +1,8 @@
#!/bin/sh
# only execute this script as part of the pipeline.
# [ -z "$CI" ] && echo "missing ci environment variable" && exit 2
# only execute the script when github token exists.
# [ -z "$SSH_KEY" ] && echo "missing ssh key" && exit 3
# write the ssh key.
# mkdir /root/.ssh
# echo -n "$SSH_KEY" > /root/.ssh/id_rsa
# chmod 600 /root/.ssh/id_rsa
# add github.com to our known hosts.
# touch /root/.ssh/known_hosts
# chmod 600 /root/.ssh/known_hosts
# ssh-keyscan -H github.com > /etc/ssh/ssh_known_hosts 2> /dev/null
# clone the extras project.
set -e
set -x
# git clone git@github.com:drone/drone-enterprise.git extras
# build a static binary with the build number and extra features.
go build -ldflags '-extldflags "-static" -X github.com/laszlocph/drone-oss-08/version.VersionDev=build.'${DRONE_BUILD_NUMBER} -o release/drone-server github.com/laszlocph/drone-oss-08/cmd/drone-server
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags '-X github.com/laszlocph/drone-oss-08/version.VersionDev=build.'${DRONE_BUILD_NUMBER} -o release/drone-agent github.com/laszlocph/drone-oss-08/cmd/drone-agent
GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -ldflags '-X github.com/laszlocph/drone-oss-08/version.VersionDev=build.'${DRONE_BUILD_NUMBER} -o release/linux/arm64/drone-agent github.com/laszlocph/drone-oss-08/cmd/drone-agent

View file

@ -1,3 +1,7 @@
clone:
git:
image: plugins/git:next
workspace:
base: /go
path: src/github.com/laszlocph/drone-oss-08
@ -109,7 +113,7 @@ pipeline:
repo: laszlocloud/drone-oss-08-server
dockerfile: Dockerfile.alpine
secrets: [ docker_username, docker_password ]
tag: [ 0.8.95-bitbucket-alpine ]
tag: "${DRONE_TAG}-alpine"
when:
event: tag
@ -118,7 +122,7 @@ pipeline:
repo: laszlocloud/drone-oss-08-agent
dockerfile: Dockerfile.agent.alpine
secrets: [ docker_username, docker_password ]
tag: [ 0.8.95-bitbucket-alpine ]
tag: "${DRONE_TAG}-alpine"
when:
event: tag
@ -126,7 +130,7 @@ pipeline:
image: plugins/docker
repo: laszlocloud/drone-oss-08-server
secrets: [ docker_username, docker_password ]
tag: [ 0.8.95-bitbucket ]
tag: ${DRONE_TAG}
when:
event: tag
@ -135,7 +139,7 @@ pipeline:
repo: laszlocloud/drone-oss-08-agent
dockerfile: Dockerfile.agent
secrets: [ docker_username, docker_password ]
tag: [ 0.8.95-bitbucket ]
tag: ${DRONE_TAG}
when:
event: tag

1
.gitignore vendored
View file

@ -8,4 +8,5 @@ release/
cli/release/
server/swagger/files/*.json
server/swagger/swagger_gen.go
.idea/

View file

@ -10,3 +10,20 @@
go install github.com/laszlocph/drone-oss-08/cmd/drone-agent
go install github.com/laszlocph/drone-oss-08/cmd/drone-server
---
0. To generate SQL files
go get github.com/vektra/mockery/.../
export download_url=$(curl -s https://api.github.com/repos/go-swagger/go-swagger/releases/latest | \
jq -r '.assets[] | select(.name | contains("'"$(uname | tr '[:upper:]' '[:lower:]')"'_amd64")) | .browser_download_url')
curl -o swagger -L'#' "$download_url"
chmod +x swagger
sudo mv swagger /usr/local/bin
go get github.com/laszlocph/togo
go generate

81
LICENSE
View file

@ -1,80 +1,3 @@
The Drone Community Edition (the "Community Edition") is licensed under the
Apache License, Version 2.0 (the "Apache License"). You may obtain a copy of
the Apache License at
Drone-OSS-08 is Apache 2.0 licensed with the source files in this repository having a header indicating which license they are under and what copyrights apply.
http://www.apache.org/licenses/LICENSE-2.0
The Drone Enterprise Edition (the "Enterprise Edition") is licensed under
the Drone Enterprise License, Version 1.1 (the "Enterprise License"). A copy
of the Enterprise License is provided below.
The source files in this repository have a header indicating which license
they are under. The BUILDING file provides instructions for creating the
Community Edition distribution subject to the terms of the Apache License.
-----------------------------------------------------------------------------
Licensor: Drone.IO, Inc
Licensed Work: Drone Enterprise Edition
Additional Use Grant: Usage of the software is free for entities with both:
(a) annual gross revenue under (USD) $1 million
(according to GAAP, or the equivalent in its country
of domicile); and (b) less than (USD) $5 million in
aggregate debt and equity funding.
Change Date: 2022-01-01
Change License: Apache-2.0
Notice
The Drone Enterprise License (this document, or the "License") is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
-----------------------------------------------------------------------------
Drone Enterprise License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
Files under the `docs/` folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 International Public License. They are a derivative work of the https://github.com/drone/docs git repository.

331
README.md
View file

@ -1,31 +1,326 @@
## Yes, it's a fork
# Drone-OSS-08
This repository is a hard fork of the Drone CI system.
An opinionated fork of the Drone CI system.
Forked at the `0.8.9` version https://github.com/drone/drone/commit/768ed784bd74b0e0c2d8d49c4c8b6dca99b25e96
- Based on the v0.8 code tree
- Focused on developer experience.
## Why fork?
[![Build Status](https://cloud.drone.io/api/badges/laszlocph/drone-oss-08/status.svg)](https://cloud.drone.io/laszlocph/drone-oss-08) [![Go Report Card](https://goreportcard.com/badge/github.com/laszlocph/drone-oss-08)](https://goreportcard.com/report/github.com/laszlocph/drone-oss-08) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
Drone has been an open-core project for many prior versions, with each source file indicating whether it falls under the Apache 2.0 license or the proprietary enterprise license. In the 0.8 line the enterprise features were limited to things like autoscaling and secret vaults.
![Drone-OSS-08](docs/drone.png)
However, in the 1.0 line, databases other than SQLite, TLS support and agent-based horizontal scaling were also moved under the enterprise license, limiting the open-source version to single-node, hobbyist deployments.
## Table of contents
The above feature reductions and the lack of clear communication about what is part of the open-source version led to this fork.
- [About this fork](#about-this-fork)
- [Motivation](#motivation)
- [The focus of this fork](#the-focus-of-this-fork)
- [Who uses this fork](#who-uses-this-fork)
- [Pipelines](#pipelines)
- [Getting started](#getting-started)
- [Pipeline documentation](#pipeline-documentation)
- [Plugins](#plugins)
- [Custom plugins](#custom-plugins)
- [Server setup](#server-setup)
- [Quickstart](#quickstart)
- [Authentication](#authentication)
- [Database](#database)
- [SSL](#ssl)
- [Metrics](#metrics)
- [Behind a proxy](#behind-a-proxy)
- [Contributing](#contributing)
- [License](#license)
## The focus of this fork
## About this fork
The focus of this fork is
#### Motivation
- Github
- Kubernetes and VM based backends
- Linux/amd64
- Some really good features that Drone 1.0 introduced: multiple pipelines, cron triggers
Why fork? See my [motivation](docs/motivation.md)
## Why should you use this fork?
#### The focus of this fork
You shouldn't necessarily. Paying for Drone 1.0 is a fine choice.
This fork is not meant to compete with Drone or reimplement its enterprise features in the open.
Check the issues and releases if you are evaluating this project.
You can also check the devlog for the nuances: https://laszlo.cloud/drone-oss-08-devlog-1
Instead, I'm taking a proven CI system (which Drone 0.8 is) and applying a distinct set of product ideas, focusing on:
The project is currently used by one user, with 50+ repos and 500+ builds a week.
- UI experience
- the developer feedback loop
- documentation and best practices
- tighter Github integration
- Kubernetes backend
with less focus on:
- niche git systems like gitea, gogs
- computing architectures like arm64
- new pipeline formats like jsonnet
#### Who uses this fork
Currently I know of one organization using this fork, with 50+ users, 130+ repos and more than 300 builds a week.
## Pipelines
#### Getting started
Place this snippet into a file called `.drone.yml`
```yaml
pipeline:
build:
image: debian:stable-slim
commands:
- echo "This is the build step"
a-test-step:
image: debian:stable-slim
commands:
- echo "Testing.."
```
The pipeline runs on the Drone CI server and is typically triggered by webhooks. One benefit of the container architecture is that it also runs on your laptop:
```sh
$ drone exec --local
stable-slim: Pulling from library/debian
a94641239323: Pull complete
Digest: sha256:d846d80f98c8aca7d3db0fadd14a0a4c51a2ce1eb2e9e14a550b3bd0c45ba941
Status: Downloaded newer image for debian:stable-slim
[build:L0:0s] + echo "This is the build step"
[build:L1:0s] This is the build step
[a-test-step:L0:0s] + echo "Testing.."
[a-test-step:L1:0s] Testing..
```
Pipeline steps are commands running in container images.
These containers are wired together and they share a volume with the source code on it.
#### Pipeline documentation
See all [pipeline features](docs/usage/pipeline.md).
## Plugins
Plugins are Docker containers that perform pre-defined tasks and are configured as steps in your pipeline. Plugins can be used to deploy code, publish artifacts, send notifications, and more.
Example pipeline using the Docker and Slack plugins:
```yaml
pipeline:
backend:
image: golang
commands:
- go get
- go build
- go test
docker:
image: plugins/docker
username: kevinbacon
password: pa55word
repo: foo/bar
tags: latest
notify:
image: plugins/slack
channel: developers
username: drone
```
#### Custom plugins
Plugins are Docker containers with their entrypoint set to a predefined script.
[See how an example plugin can be implemented in a bash script](docs/usage/bash_plugin.md).
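As a minimal sketch, the entrypoint script of a hypothetical notification plugin could look like the following. It assumes the usual convention that pipeline settings are exposed to the plugin as upper-cased `PLUGIN_*` environment variables, and that `DRONE_*` variables carry standard build metadata:
```sh
#!/bin/sh
# entrypoint of a hypothetical "webhook" notification plugin
set -e

# the `url` setting from the pipeline step arrives as PLUGIN_URL
[ -z "$PLUGIN_URL" ] && echo "missing url setting" && exit 1

# post basic build metadata to the configured endpoint
curl -fsS -X POST \
  -H "Content-Type: application/json" \
  -d "{\"repo\":\"$DRONE_REPO\",\"build\":\"$DRONE_BUILD_NUMBER\",\"status\":\"$DRONE_BUILD_STATUS\"}" \
  "$PLUGIN_URL"
```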
## Server setup
#### Quickstart
The below [docker-compose](https://docs.docker.com/compose/) configuration can be used to start the Drone server with a single agent. It relies on a number of environment variables that you must set before running `docker-compose up`. The variables are described below.
Each agent is able to process one build by default. If you have 4 agents installed and connected to the Drone server, your system will process 4 builds in parallel. You can add more agents to increase the number of parallel builds, or set the agent's `DRONE_MAX_PROCS` environment variable to a value greater than 1 to increase the number of parallel builds that agent can process (see the snippet after the compose file below).
```yaml
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:8000
- 9000
volumes:
- drone-server-data:/var/lib/drone/
restart: always
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
- DRONE_GITHUB=true
- DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
- DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
- DRONE_SECRET=${DRONE_SECRET}
drone-agent:
image: drone/agent:{{% version %}}
command: agent
restart: always
depends_on:
- drone-server
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
volumes:
drone-server-data:
```
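For example, to let a single agent run two builds in parallel, you could raise `DRONE_MAX_PROCS` on the agent (a sketch; the value 2 is only illustrative):
```diff
 services:
   drone-agent:
     image: drone/agent:{{% version %}}
     environment:
       - DRONE_SERVER=drone-server:9000
       - DRONE_SECRET=${DRONE_SECRET}
+      - DRONE_MAX_PROCS=2
```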
Drone needs to know its own address. You must therefore provide the address in `<scheme>://<hostname>` format. Please omit trailing slashes.
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
- DRONE_OPEN=true
+ - DRONE_HOST=${DRONE_HOST}
- DRONE_GITHUB=true
- DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
- DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
- DRONE_SECRET=${DRONE_SECRET}
```
Drone agents require access to the host machine Docker daemon.
```diff
services:
drone-agent:
image: drone/agent:{{% version %}}
command: agent
restart: always
depends_on: [ drone-server ]
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
```
Drone agents require the server address for agent-to-server communication.
```diff
services:
drone-agent:
image: drone/agent:{{% version %}}
command: agent
restart: always
depends_on: [ drone-server ]
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
+ - DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
```
Drone server and agents use a shared secret to authenticate communication. This should be a random string of your choosing and should be kept private. You can generate such a string with `openssl rand -hex 32` (see the example after the snippet below).
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
- DRONE_GITHUB=true
- DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
- DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
+ - DRONE_SECRET=${DRONE_SECRET}
drone-agent:
image: drone/agent:{{% version %}}
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_DEBUG=true
+ - DRONE_SECRET=${DRONE_SECRET}
```
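For example, you might generate and export the secret right before starting the stack (a sketch; it assumes the compose file above reads `DRONE_SECRET` from your shell environment):
```sh
# generate a random shared secret and start the server and agent
export DRONE_SECRET=$(openssl rand -hex 32)
docker-compose up -d
```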
Drone registration is closed by default. This example enables open registration for users that are members of approved GitHub organizations.
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
+ - DRONE_OPEN=true
+ - DRONE_ORGS=dolores,dogpatch
- DRONE_HOST=${DRONE_HOST}
- DRONE_GITHUB=true
- DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
- DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
- DRONE_SECRET=${DRONE_SECRET}
```
Drone administrators should also be enumerated in your configuration.
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
- DRONE_OPEN=true
- DRONE_ORGS=dolores,dogpatch
+ - DRONE_ADMIN=johnsmith,janedoe
- DRONE_HOST=${DRONE_HOST}
- DRONE_GITHUB=true
- DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
- DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
- DRONE_SECRET=${DRONE_SECRET}
```
#### Authentication
Authentication is done using OAuth and is delegated to one of the supported version control providers, configured using environment variables. The example above demonstrates basic GitHub integration.
See the complete reference for [Github](docs/administration/github.md), [Bitbucket Cloud](docs/administration/bitbucket.md), [Bitbucket Server](docs/administration/bitbucket_server.md) and [Gitlab](docs/administration/gitlab.md).
#### Database
Drone mounts a [data volume](https://docs.docker.com/storage/volumes/#create-and-manage-volumes) to persist the SQLite database.
See the [database settings](docs/administration/database.md) page to configure PostgreSQL or MySQL as the database.
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:8000
- 9000
+ volumes:
+ - drone-server-data:/var/lib/drone/
restart: always
```
#### SSL
Drone supports SSL configuration by mounting certificates into your container.
See the [SSL guide](docs/administration/ssl.md).
Automated [Let's Encrypt](docs/administration/lets_encrypt.md) is also supported.
#### Metrics
A [Prometheus endpoint](docs/administration/lets_encrypt.md) is exposed.
#### Behind a proxy
See the [proxy guide](docs/administration/proxy.md) if you want to run Drone behind Apache, Nginx, Caddy or ngrok.
## Contributing
Drone-OSS-08 is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
[How to build the project]()
## License
Drone-OSS-08 is Apache 2.0 licensed with the source files in this repository having a header indicating which license they are under and what copyrights apply.
Files under the `docs/` folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 International Public License. They are a derivative work of the https://github.com/drone/docs git repository.

View file

@ -7,7 +7,7 @@ import (
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
)

View file

@ -11,7 +11,7 @@ import (
"golang.org/x/net/proxy"
"golang.org/x/oauth2"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
)
// NewClient returns a new client from the CLI context.

View file

@ -5,7 +5,7 @@ import (
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
)

View file

@ -5,7 +5,7 @@ import (
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
)

View file

@ -5,7 +5,7 @@ import (
"time"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
)

View file

@ -5,7 +5,7 @@ import (
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
)

View file

@ -5,7 +5,7 @@ import (
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
)

View file

@ -3,7 +3,7 @@ package user
import (
"fmt"
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"

View file

@ -563,23 +563,17 @@ func server(c *cli.Context) error {
auther := &authorizer{
password: c.String("agent-secret"),
}
s := grpc.NewServer(
grpcServer := grpc.NewServer(
grpc.StreamInterceptor(auther.streamInterceptor),
grpc.UnaryInterceptor(auther.unaryIntercaptor),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: c.Duration("keepalive-min-time"),
}),
)
ss := new(droneserver.DroneServer)
ss.Queue = droneserver.Config.Services.Queue
ss.Logger = droneserver.Config.Services.Logs
ss.Pubsub = droneserver.Config.Services.Pubsub
ss.Remote = remote_
ss.Store = store_
ss.Host = droneserver.Config.Server.Host
proto.RegisterDroneServer(s, ss)
droneServer := droneserver.NewDroneServer(remote_, droneserver.Config.Services.Queue, droneserver.Config.Services.Logs, droneserver.Config.Services.Pubsub, store_, droneserver.Config.Server.Host)
proto.RegisterDroneServer(grpcServer, droneServer)
err = s.Serve(lis)
err = grpcServer.Serve(lis)
if err != nil {
logrus.Error(err)
return err
@ -651,11 +645,6 @@ func server(c *cli.Context) error {
return g.Wait()
}
// HACK please excuse the message during this period of heavy refactoring.
// We are currently transitioning from storing services (ie database, queue)
// in the gin.Context to storing them in a struct. We are also moving away
// from gin to gorilla. We will temporarily use global during our refactoring
// which will be removing in the final implementation.
func setupEvilGlobals(c *cli.Context, v store.Store, r remote.Remote) {
// storage

View file

@ -219,7 +219,7 @@ func setupMetrics(g *errgroup.Group, store_ store.Store) {
})
builds := promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "drone",
Name: "build_count",
Name: "build_total_count",
Help: "Total number of builds.",
})
users := promauto.NewGauge(prometheus.GaugeOpts{

View file

@ -30,7 +30,7 @@ func TestLogging(t *testing.T) {
logger.Tail(ctx, testPath, func(entry ...*Entry) { wg.Done() })
}()
<-time.After(time.Millisecond)
<-time.After(500 * time.Millisecond)
wg.Add(4)
go func() {
@ -45,7 +45,7 @@ func TestLogging(t *testing.T) {
logger.Tail(ctx, testPath, func(entry ...*Entry) { wg.Done() })
}()
<-time.After(time.Millisecond)
<-time.After(500 * time.Millisecond)
wg.Wait()
cancel()

View file

@ -222,3 +222,10 @@ func (m *Metadata) EnvironDrone() map[string]string {
}
var pullRegexp = regexp.MustCompile("\\d+")
func (m *Metadata) SetPlatform(platform string) {
if platform == "" {
platform = "linux/amd64"
}
m.Sys.Arch = platform
}

View file

@ -97,7 +97,7 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
}
// add default clone step
if c.local == false && len(conf.Clone.Containers) == 0 {
if c.local == false && len(conf.Clone.Containers) == 0 && !conf.SkipClone {
container := &yaml.Container{
Name: "clone",
Image: "plugins/git:latest",
@ -118,7 +118,7 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
stage.Steps = append(stage.Steps, step)
config.Stages = append(config.Stages, stage)
} else if c.local == false {
} else if c.local == false && !conf.SkipClone {
for i, container := range conf.Clone.Containers {
if !container.Constraints.Match(c.metadata) {
continue

View file

@ -22,6 +22,9 @@ type (
Networks Networks
Volumes Volumes
Labels libcompose.SliceorMap
DependsOn []string `yaml:"depends_on,omitempty"`
RunsOn []string `yaml:"runs_on,omitempty"`
SkipClone bool `yaml:"skip_clone"`
}
// Workspace defines a pipeline workspace.

View file

@ -7,7 +7,7 @@ import (
"github.com/franela/goblin"
)
func xTestParse(t *testing.T) {
func TestParse(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Parser", func() {
@ -35,9 +35,14 @@ func xTestParse(t *testing.T) {
g.Assert(out.Pipeline.Containers[1].Commands).Equal(yaml.Stringorslice{"go build"})
g.Assert(out.Pipeline.Containers[2].Name).Equal("notify")
g.Assert(out.Pipeline.Containers[2].Image).Equal("slack")
g.Assert(out.Pipeline.Containers[2].NetworkMode).Equal("container:name")
// g.Assert(out.Pipeline.Containers[2].NetworkMode).Equal("container:name")
g.Assert(out.Labels["com.example.team"]).Equal("frontend")
g.Assert(out.Labels["com.example.type"]).Equal("build")
g.Assert(out.DependsOn[0]).Equal("lint")
g.Assert(out.DependsOn[1]).Equal("test")
g.Assert(out.RunsOn[0]).Equal("success")
g.Assert(out.RunsOn[1]).Equal("failure")
g.Assert(out.SkipClone).Equal(false)
})
// Check to make sure variable expansion works in yaml.MapSlice
// g.It("Should unmarshal variables", func() {
@ -94,6 +99,12 @@ volumes:
labels:
com.example.type: "build"
com.example.team: "frontend"
depends_on:
- lint
- test
runs_on:
- success
- failure
`
var sampleVarYaml = `

View file

@ -40,9 +40,8 @@ func Parse(data []byte) ([]Axis, error) {
return nil, err
}
// if not a matrix build return an array with just the single axis.
if len(matrix) == 0 {
return nil, nil
return []Axis{}, nil
}
return calc(matrix), nil

View file

@ -25,10 +25,10 @@ func TestMatrix(t *testing.T) {
g.Assert(len(set)).Equal(24)
})
g.It("Should return nil if no matrix", func() {
g.It("Should return empty array if no matrix", func() {
axis, err := ParseString("")
g.Assert(err == nil).IsTrue()
g.Assert(axis == nil).IsTrue()
g.Assert(len(axis) == 0).IsTrue()
})
g.It("Should return included axis", func() {

View file

@ -30,7 +30,7 @@ func TestPubsub(t *testing.T) {
broker.Subscribe(ctx, testTopic, func(message Message) { wg.Done() })
}()
<-time.After(time.Millisecond)
<-time.After(500 * time.Millisecond)
if _, ok := broker.(*publisher).topics[testTopic]; !ok {
t.Errorf("Expect topic registered with publisher")
@ -86,7 +86,7 @@ func TestSubscriptionClosed(t *testing.T) {
wg.Done()
}()
<-time.After(time.Millisecond)
<-time.After(500 * time.Millisecond)
if _, ok := broker.(*publisher).topics[testTopic]; !ok {
t.Errorf("Expect topic registered with publisher")

View file

@ -7,6 +7,8 @@ import (
"runtime"
"sync"
"time"
"github.com/Sirupsen/logrus"
)
type entry struct {
@ -29,6 +31,7 @@ type fifo struct {
running map[string]*entry
pending *list.List
extension time.Duration
paused bool
}
// New returns a new fifo queue.
@ -38,6 +41,7 @@ func New() Queue {
running: map[string]*entry{},
pending: list.New(),
extension: time.Minute * 10,
paused: false,
}
}
@ -50,6 +54,17 @@ func (q *fifo) Push(c context.Context, task *Task) error {
return nil
}
// PushAtOnce pushes multiple items to the tail of this queue.
func (q *fifo) PushAtOnce(c context.Context, tasks []*Task) error {
q.Lock()
for _, task := range tasks {
q.pending.PushBack(task)
}
q.Unlock()
go q.process()
return nil
}
// Poll retrieves and removes the head of this queue.
func (q *fifo) Poll(c context.Context, f Filter) (*Task, error) {
q.Lock()
@ -82,11 +97,14 @@ func (q *fifo) Done(c context.Context, id string) error {
// Error signals that the item is done executing with error.
func (q *fifo) Error(c context.Context, id string, err error) error {
q.Lock()
state, ok := q.running[id]
taskEntry, ok := q.running[id]
if ok {
state.error = err
close(state.done)
q.updateDepStatusInQueue(id, err == nil)
taskEntry.error = err
close(taskEntry.done)
delete(q.running, id)
} else {
q.removeFromPending(id)
}
q.Unlock()
return nil
@ -151,14 +169,32 @@ func (q *fifo) Info(c context.Context) InfoT {
for _, entry := range q.running {
stats.Running = append(stats.Running, entry.item)
}
stats.Paused = q.paused
q.Unlock()
return stats
}
func (q *fifo) Pause() {
q.Lock()
q.paused = true
q.Unlock()
}
func (q *fifo) Resume() {
q.Lock()
q.paused = false
q.Unlock()
go q.process()
}
// helper function that loops through the queue and attempts to
// match the item to a single subscriber.
func (q *fifo) process() {
if q.paused {
return
}
defer func() {
// the risk of panic is low. This code can probably be removed
// once the code has been used in real world installs without issue.
@ -173,8 +209,44 @@ func (q *fifo) process() {
q.Lock()
defer q.Unlock()
// TODO(bradrydzewski) move this to a helper function
// push items to the front of the queue if the item expires.
q.resubmitExpiredBuilds()
for pending, worker := q.assignToWorker(); pending != nil && worker != nil; pending, worker = q.assignToWorker() {
task := pending.Value.(*Task)
delete(q.workers, worker)
q.pending.Remove(pending)
q.running[task.ID] = &entry{
item: task,
done: make(chan bool),
deadline: time.Now().Add(q.extension),
}
worker.channel <- task
}
}
func (q *fifo) assignToWorker() (*list.Element, *worker) {
var next *list.Element
for e := q.pending.Front(); e != nil; e = next {
next = e.Next()
task := e.Value.(*Task)
logrus.Debugf("queue: trying to assign task: %v with deps %v", task.ID, task.Dependencies)
if q.depsInQueue(task) {
logrus.Debugf("queue: skipping due to unmet dependencies %v", task.ID)
continue
}
for w := range q.workers {
if w.filter(task) {
logrus.Debugf("queue: assigned task: %v with deps %v", task.ID, task.Dependencies)
return e, w
}
}
}
return nil, nil
}
func (q *fifo) resubmitExpiredBuilds() {
for id, state := range q.running {
if time.Now().After(state.deadline) {
q.pending.PushFront(state.item)
@ -182,26 +254,61 @@ func (q *fifo) process() {
close(state.done)
}
}
}
func (q *fifo) depsInQueue(task *Task) bool {
var next *list.Element
loop:
for e := q.pending.Front(); e != nil; e = next {
next = e.Next()
item := e.Value.(*Task)
for w := range q.workers {
if w.filter(item) {
delete(q.workers, w)
q.pending.Remove(e)
possibleDep, ok := e.Value.(*Task)
logrus.Debugf("queue: pending right now: %v", possibleDep.ID)
for _, dep := range task.Dependencies {
if ok && possibleDep.ID == dep {
return true
}
}
}
for possibleDepID := range q.running {
logrus.Debugf("queue: running right now: %v", possibleDepID)
for _, dep := range task.Dependencies {
if possibleDepID == dep {
return true
}
}
}
return false
}
q.running[item.ID] = &entry{
item: item,
done: make(chan bool),
deadline: time.Now().Add(q.extension),
}
w.channel <- item
break loop
func (q *fifo) updateDepStatusInQueue(taskID string, success bool) {
var next *list.Element
for e := q.pending.Front(); e != nil; e = next {
next = e.Next()
pending, ok := e.Value.(*Task)
for _, dep := range pending.Dependencies {
if ok && taskID == dep {
pending.DepStatus[dep] = success
}
}
}
for _, running := range q.running {
for _, dep := range running.item.Dependencies {
if taskID == dep {
running.item.DepStatus[dep] = success
}
}
}
}
func (q *fifo) removeFromPending(taskID string) {
logrus.Debugf("queue: trying to remove %s", taskID)
var next *list.Element
for e := q.pending.Front(); e != nil; e = next {
next = e.Next()
task := e.Value.(*Task)
if task.ID == taskID {
logrus.Debugf("queue: %s is removed from pending", taskID)
q.pending.Remove(e)
return
}
}
}

View file

@ -2,6 +2,7 @@ package queue
import (
"context"
"fmt"
"sync"
"testing"
"time"
@ -117,3 +118,234 @@ func TestFifoEvict(t *testing.T) {
t.Errorf("expect not found error when evicting item not in queue, got %s", err)
}
}
func TestFifoDependencies(t *testing.T) {
task1 := &Task{
ID: "1",
}
task2 := &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: make(map[string]bool),
}
q := New().(*fifo)
q.Push(noContext, task2)
q.Push(noContext, task1)
got, _ := q.Poll(noContext, func(*Task) bool { return true })
if got != task1 {
t.Errorf("expect task1 returned from queue as task2 depends on it")
return
}
q.Done(noContext, got.ID)
got, _ = q.Poll(noContext, func(*Task) bool { return true })
if got != task2 {
t.Errorf("expect task2 returned from queue")
return
}
}
func TestFifoErrors(t *testing.T) {
task1 := &Task{
ID: "1",
}
task2 := &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: make(map[string]bool),
}
task3 := &Task{
ID: "3",
Dependencies: []string{"1"},
DepStatus: make(map[string]bool),
RunOn: []string{"success", "failure"},
}
q := New().(*fifo)
q.Push(noContext, task2)
q.Push(noContext, task3)
q.Push(noContext, task1)
got, _ := q.Poll(noContext, func(*Task) bool { return true })
if got != task1 {
t.Errorf("expect task1 returned from queue as task2 depends on it")
return
}
q.Error(noContext, got.ID, fmt.Errorf("exitcode 1, there was an error"))
got, _ = q.Poll(noContext, func(*Task) bool { return true })
if got != task2 {
t.Errorf("expect task2 returned from queue")
return
}
if got.ShouldRun() {
t.Errorf("expect task2 should not run, since task1 failed")
return
}
got, _ = q.Poll(noContext, func(*Task) bool { return true })
if got != task3 {
t.Errorf("expect task3 returned from queue")
return
}
if !got.ShouldRun() {
t.Errorf("expect task3 should run, task1 failed, but task3 runs on failure too")
return
}
}
func TestFifoCancel(t *testing.T) {
task1 := &Task{
ID: "1",
}
task2 := &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: make(map[string]bool),
}
task3 := &Task{
ID: "3",
Dependencies: []string{"1"},
DepStatus: make(map[string]bool),
RunOn: []string{"success", "failure"},
}
q := New().(*fifo)
q.Push(noContext, task2)
q.Push(noContext, task3)
q.Push(noContext, task1)
_, _ = q.Poll(noContext, func(*Task) bool { return true })
q.Error(noContext, task1.ID, fmt.Errorf("cancelled"))
q.Error(noContext, task2.ID, fmt.Errorf("cancelled"))
q.Error(noContext, task3.ID, fmt.Errorf("cancelled"))
info := q.Info(noContext)
if len(info.Pending) != 0 {
t.Errorf("All pipelines should be cancelled")
return
}
}
func TestFifoPause(t *testing.T) {
task1 := &Task{
ID: "1",
}
q := New().(*fifo)
var wg sync.WaitGroup
wg.Add(1)
go func() {
_, _ = q.Poll(noContext, func(*Task) bool { return true })
wg.Done()
}()
q.Pause()
t0 := time.Now()
q.Push(noContext, task1)
time.Sleep(20 * time.Millisecond)
q.Resume()
wg.Wait()
t1 := time.Now()
if t1.Sub(t0) < 20 * time.Millisecond {
t.Errorf("Should have waited til resume")
}
q.Pause()
q.Push(noContext, task1)
q.Resume()
_, _ = q.Poll(noContext, func(*Task) bool { return true })
}
func TestFifoPauseResume(t *testing.T) {
task1 := &Task{
ID: "1",
}
q := New().(*fifo)
q.Pause()
q.Push(noContext, task1)
q.Resume()
_, _ = q.Poll(noContext, func(*Task) bool { return true })
}
func TestShouldRun(t *testing.T) {
task := &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: map[string]bool{
"1": true,
},
RunOn: []string{"failure"},
}
if task.ShouldRun() {
t.Errorf("expect task to not run, it runs on failure only")
return
}
task = &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: map[string]bool{
"1": true,
},
RunOn: []string{"failure", "success"},
}
if !task.ShouldRun() {
t.Errorf("expect task to run")
return
}
task = &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: map[string]bool{
"1": false,
},
}
if task.ShouldRun() {
t.Errorf("expect task to not run")
return
}
task = &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: map[string]bool{
"1": true,
},
RunOn: []string{"success"},
}
if !task.ShouldRun() {
t.Errorf("expect task to run")
return
}
task = &Task{
ID: "2",
Dependencies: []string{"1"},
DepStatus: map[string]bool{
"1": false,
},
RunOn: []string{"failure"},
}
if !task.ShouldRun() {
t.Errorf("expect task to run")
return
}
}

View file

@ -23,6 +23,64 @@ type Task struct {
// Labels represents the key-value pairs the entry is labeled with.
Labels map[string]string `json:"labels,omitempty"`
// Dependencies lists the task IDs this task depends on
Dependencies []string
// DepStatus records whether each dependency finished successfully
DepStatus map[string]bool
// RunOn lists the dependency outcomes ("success", "failure") this task runs on
RunOn []string
}
// ShouldRun tells if a task should be run or skipped, based on dependencies
func (t *Task) ShouldRun() bool {
if runsOnFailure(t.RunOn) && runsOnSuccess(t.RunOn) {
return true
}
if !runsOnFailure(t.RunOn) && runsOnSuccess(t.RunOn) {
for _, success := range t.DepStatus {
if !success {
return false
}
}
return true
}
if runsOnFailure(t.RunOn) && !runsOnSuccess(t.RunOn) {
for _, success := range t.DepStatus {
if success {
return false
}
}
return true
}
return false
}
func runsOnFailure(runsOn []string) bool {
for _, status := range runsOn {
if status == "failure" {
return true
}
}
return false
}
func runsOnSuccess(runsOn []string) bool {
if len(runsOn) == 0 {
return true
}
for _, status := range runsOn {
if status == "success" {
return true
}
}
return false
}
// InfoT provides runtime information.
@ -35,6 +93,7 @@ type InfoT struct {
Running int `json:"running_count"`
Complete int `json:"completed_count"`
} `json:"stats"`
Paused bool
}
// Filter filters tasks in the queue. If the Filter returns false,
@ -44,9 +103,12 @@ type Filter func(*Task) bool
// Queue defines a task queue for scheduling tasks among
// a pool of workers.
type Queue interface {
// Push pushes an task to the tail of this queue.
// Push pushes a task to the tail of this queue.
Push(c context.Context, task *Task) error
// PushAtOnce pushes tasks to the tail of this queue.
PushAtOnce(c context.Context, tasks []*Task) error
// Poll retrieves and removes a task head of this queue.
Poll(c context.Context, f Filter) (*Task, error)
@ -67,47 +129,10 @@ type Queue interface {
// Info returns internal queue information.
Info(c context.Context) InfoT
}
// // global instance of the queue.
// var global = New()
//
// // Set sets the global queue.
// func Set(queue Queue) {
// global = queue
// }
//
// // Push pushes an task to the tail of the global queue.
// func Push(c context.Context, task *Task) error {
// return global.Push(c, task)
// }
//
// // Poll retrieves and removes a task head of the global queue.
// func Poll(c context.Context, f Filter) (*Task, error) {
// return global.Poll(c, f)
// }
//
// // Extend extends the deadline for a task.
// func Extend(c context.Context, id string) error {
// return global.Extend(c, id)
// }
//
// // Done signals the task is complete.
// func Done(c context.Context, id string) error {
// return global.Done(c, id)
// }
//
// // Error signals the task is complete with errors.
// func Error(c context.Context, id string, err error) {
// global.Error(c, id, err)
// }
//
// // Wait waits until the task is complete.
// func Wait(c context.Context, id string) error {
// return global.Wait(c, id)
// }
//
// // Info returns internal queue information.
// func Info(c context.Context) InfoT {
// return global.Info(c)
// }
// Pause stops the queue from handing out new work items in Poll
Pause()
// Resume starts the queue again; Poll returns new items
Resume()
}

View file

@ -28,4 +28,4 @@ services:
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
- DRONE_MAX_PROCS=1
- DRONE_MAX_PROCS=2

1
docs/LICENSE Normal file
View file

@ -0,0 +1 @@
Files in this folder are licensed under Creative Commons Attribution-ShareAlike 4.0 International Public License. It is a derivative work of the https://github.com/drone/docs git repository.

View file

@ -0,0 +1,70 @@
Drone comes with built-in support for Bitbucket Cloud. To enable Bitbucket Cloud you should configure the Drone container using the following environment variables:
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:8000
- 9000
volumes:
- /var/lib/drone:/var/lib/drone/
restart: always
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
+ - DRONE_BITBUCKET=true
+ - DRONE_BITBUCKET_CLIENT=95c0282573633eb25e82
+ - DRONE_BITBUCKET_SECRET=30f5064039e6b359e075
- DRONE_SECRET=${DRONE_SECRET}
drone-agent:
image: drone/agent:{{% version %}}
restart: always
depends_on:
- drone-server
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
```
# Configuration
This is a full list of configuration options. Please note that many of these options use default configuration values that should work for the majority of installations.
DRONE_BITBUCKET=true
: Set to true to enable the Bitbucket driver.
DRONE_BITBUCKET_CLIENT
: Bitbucket oauth2 client id
DRONE_BITBUCKET_SECRET
: Bitbucket oauth2 client secret
# Registration
You must register your application with Bitbucket in order to generate a client and secret. Navigate to your account settings and choose OAuth from the menu, and click Add Consumer.
Please use the Authorization callback URL:
```nohighlight
http://drone.mycompany.com/authorize
```
Please also be sure to check the following permissions:
```nohighlight
Account:Email
Account:Read
Team Membership:Read
Repositories:Read
Webhooks:Read and Write
```
# Missing Features
Merge requests are not currently supported. We are interested in patches to include this functionality. If you are interested in contributing to Drone and submitting a patch please [contact us](https://discourse.drone.io).

View file

@ -0,0 +1,133 @@
Drone comes with experimental support for Bitbucket Server, formerly known as Atlassian Stash. To enable Bitbucket Server you should configure the Drone container using the following environment variables:
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:8000
- 9000
volumes:
- /var/lib/drone:/var/lib/drone/
restart: always
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
+ - DRONE_STASH=true
+ - DRONE_STASH_GIT_USERNAME=foo
+ - DRONE_STASH_GIT_PASSWORD=bar
+ - DRONE_STASH_CONSUMER_KEY=95c0282573633eb25e82
+ - DRONE_STASH_CONSUMER_RSA=/etc/bitbucket/key.pem
+ - DRONE_STASH_URL=http://stash.mycompany.com
- DRONE_SECRET=${DRONE_SECRET}
volumes:
+ - /path/to/key.pem:/path/to/key.pem
drone-agent:
image: drone/agent:{{% version %}}
restart: always
depends_on:
- drone-server
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
```
# Private Key File
The OAuth process in Bitbucket Server requires a private and a public RSA key. This is how you create the private key:
```nohighlight
openssl genrsa -out /etc/bitbucket/key.pem 1024
```
This stores the private RSA key in `key.pem`. The next command derives the public key and stores it in `key.pub`.
```nohighlight
openssl rsa -in /etc/bitbucket/key.pem -pubout >> /etc/bitbucket/key.pub
```
Please note that the private key can either be mounted into your Drone container at runtime as a volume or provided as an environment variable.
Private key file mounted into your Drone container at runtime as a volume:
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
- DRONE_STASH=true
- DRONE_STASH_GIT_USERNAME=foo
- DRONE_STASH_GIT_PASSWORD=bar
- DRONE_STASH_CONSUMER_KEY=95c0282573633eb25e82
+ - DRONE_STASH_CONSUMER_RSA=/etc/bitbucket/key.pem
- DRONE_STASH_URL=http://stash.mycompany.com
- DRONE_SECRET=${DRONE_SECRET}
+ volumes:
+ - /etc/bitbucket/key.pem:/etc/bitbucket/key.pem
```
Private key provided as an environment variable:
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
- DRONE_STASH=true
- DRONE_STASH_GIT_USERNAME=foo
- DRONE_STASH_GIT_PASSWORD=bar
- DRONE_STASH_CONSUMER_KEY=95c0282573633eb25e82
+ - DRONE_STASH_CONSUMER_RSA_STRING=contentOfPemKeyAsString
- DRONE_STASH_URL=http://stash.mycompany.com
- DRONE_SECRET=${DRONE_SECRET}
```
# Service Account
Drone uses `git+https` to clone repositories; however, Bitbucket Server does not currently support cloning repositories with an OAuth token. To work around this limitation, you must create a service account and provide the username and password to Drone. This service account will be used to authenticate and clone private repositories.
# Registration
You must register your application with Bitbucket Server in order to generate a consumer key. Navigate to your account settings and choose Applications from the menu, and click Register new application. Now copy and paste the contents of `/etc/bitbucket/key.pub` into the `Public Key` field in the incoming link part of the application registration.
Please use http://drone.mycompany.com/authorize as the Authorization callback URL.
# Configuration
This is a full list of configuration options. Please note that many of these options use default configuration values that should work for the majority of installations.
DRONE_STASH=true
: Set to true to enable the Bitbucket Server (Stash) driver.
DRONE_STASH_URL
: Bitbucket Server address.
DRONE_STASH_CONSUMER_KEY
: Bitbucket Server oauth1 consumer key
DRONE_STASH_CONSUMER_RSA
: Bitbucket Server oauth1 private key file
DRONE_STASH_CONSUMER_RSA_STRING
: Bitbucket Server oauth1 private key as a string
DRONE_STASH_GIT_USERNAME
: Machine account username used to clone repositories.
DRONE_STASH_GIT_PASSWORD
: Machine account password used to clone repositories.

View file

@ -0,0 +1,48 @@
This guide provides instructions for using alternate storage engines. Please note this is optional. The default storage engine is an embedded SQLite database which requires zero installation or configuration.
# Configure MySQL
The below example demonstrates mysql database configuration. See the official driver [documentation](https://github.com/go-sql-driver/mysql#dsn-data-source-name) for configuration options and examples.
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
+ DRONE_DATABASE_DRIVER: mysql
+ DRONE_DATABASE_DATASOURCE: root:password@tcp(1.2.3.4:3306)/drone?parseTime=true
```
# Configure Postgres
The below example demonstrates postgres database configuration. See the official driver [documentation](https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING) for configuration options and examples.
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
environment:
+ DRONE_DATABASE_DRIVER: postgres
+ DRONE_DATABASE_DATASOURCE: postgres://root:password@1.2.3.4:5432/postgres?sslmode=disable
```
# Database Creation
Drone does not create your database automatically. If you are using the mysql or postgres driver you will need to manually create your database using `CREATE DATABASE`.
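For example, with the mysql command line client (a sketch; the database name must match the one used in your `DRONE_DATABASE_DATASOURCE`):
```nohighlight
mysql -u root -p -e "CREATE DATABASE drone;"
```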
# Database Migration
Drone automatically handles database migration, including the initial creation of tables and indexes. New versions of Drone will automatically upgrade the database unless otherwise specified in the release notes.
# Database Backups
Drone does not perform database backups. This should be handled by separate third party tools provided by your database vendor of choice.
# Database Archiving
Drone does not perform data archival; it is considered out-of-scope for the project. Drone is rather conservative with the amount of data it stores; however, you should expect the stored logs to grow the size of your database considerably.

View file

@ -0,0 +1,78 @@
Drone comes with built-in support for GitHub and GitHub Enterprise. To enable GitHub you should configure the Drone container using the following environment variables:
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:8000
- 9000
volumes:
- /var/lib/drone:/var/lib/drone/
restart: always
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
+ - DRONE_GITHUB=true
+ - DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
+ - DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
- DRONE_SECRET=${DRONE_SECRET}
drone-agent:
image: drone/agent:{{% version %}}
restart: always
depends_on:
- drone-server
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
```
# Registration
Register your application with GitHub to create your client id and secret. It is very important that the authorization callback URL matches your http(s) scheme and hostname exactly, with `<scheme>://<host>/authorize` as the path.
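For example, if Drone is served at `http://drone.mycompany.com` (a placeholder hostname), the authorization callback URL would be:
```nohighlight
http://drone.mycompany.com/authorize
```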
Please use this screenshot for reference:
![github oauth setup](github_oauth.png)
# Configuration
This is a full list of configuration options. Please note that many of these options use default configuration values that should work for the majority of installations.
DRONE_GITHUB=true
: Set to true to enable the GitHub driver.
DRONE_GITHUB_URL=`https://github.com`
: GitHub server address.
DRONE_GITHUB_CLIENT
: Github oauth2 client id.
DRONE_GITHUB_SECRET
: Github oauth2 client secret.
DRONE_GITHUB_SCOPE=repo,repo:status,user:email,read:org
: Comma-separated Github oauth scope.
DRONE_GITHUB_GIT_USERNAME
: Optional. Use a single machine account username to clone all repositories.
DRONE_GITHUB_GIT_PASSWORD
: Optional. Use a single machine account password to clone all repositories.
DRONE_GITHUB_PRIVATE_MODE=false
: Set to true if Github is running in private mode.
DRONE_GITHUB_MERGE_REF=true
: Set to true to use the `refs/pulls/%d/merge` ref instead of `refs/pulls/%d/head`.
DRONE_GITHUB_CONTEXT=continuous-integration/drone
: Customize the GitHub status message context
DRONE_GITHUB_SKIP_VERIFY=false
: Set to true to disable SSL verification.

Binary file not shown (new image, 154 KiB).

View file

@ -0,0 +1,66 @@
Drone comes with built-in support for GitLab version 8.2 and higher. To enable GitLab you should configure the Drone container using the following environment variables:
```diff
version: '2'
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:8000
- 9000
volumes:
- /var/lib/drone:/var/lib/drone/
restart: always
environment:
+ - DRONE_GITLAB=true
+ - DRONE_GITLAB_CLIENT=95c0282573633eb25e82
+ - DRONE_GITLAB_SECRET=30f5064039e6b359e075
+ - DRONE_GITLAB_URL=http://gitlab.mycompany.com
- DRONE_SECRET=${DRONE_SECRET}
drone-agent:
image: drone/agent:{{% version %}}
restart: always
depends_on:
- drone-server
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_SERVER=drone-server:9000
- DRONE_SECRET=${DRONE_SECRET}
```
# Configuration
This is a full list of configuration options. Please note that many of these options use default configuration values that should work for the majority of installations.
DRONE_GITLAB=true
: Set to true to enable the GitLab driver.
DRONE_GITLAB_URL=`https://gitlab.com`
: GitLab Server address.
DRONE_GITLAB_CLIENT
: GitLab oauth2 client id.
DRONE_GITLAB_SECRET
: GitLab oauth2 client secret.
DRONE_GITLAB_GIT_USERNAME
: Optional. Use a single machine account username to clone all repositories.
DRONE_GITLAB_GIT_PASSWORD
: Optional. Use a single machine account password to clone all repositories.
DRONE_GITLAB_SKIP_VERIFY=false
: Set to true to disable SSL verification.
DRONE_GITLAB_PRIVATE_MODE=false
: Set to true if GitLab is running in private mode.
# Registration
You must register your application with GitLab in order to generate a Client and Secret. Navigate to your account settings and choose Applications from the menu, and click New Application.
Please use `http://drone.mycompany.com/authorize` as the Authorization callback URL. Grant `api` scope to the application.

View file

@ -0,0 +1,38 @@
Drone supports automated SSL configuration and updates using Let's Encrypt. You can enable Let's Encrypt by making the following modifications to your server configuration:
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
+ - 80:80
+ - 443:443
- 9000:9000
volumes:
- /var/lib/drone:/var/lib/drone/
restart: always
environment:
- DRONE_OPEN=true
- DRONE_HOST=${DRONE_HOST}
- DRONE_GITHUB=true
- DRONE_GITHUB_CLIENT=${DRONE_GITHUB_CLIENT}
- DRONE_GITHUB_SECRET=${DRONE_GITHUB_SECRET}
- DRONE_SECRET=${DRONE_SECRET}
+ - DRONE_LETS_ENCRYPT=true
```
Note that Drone uses the hostname from the `DRONE_HOST` environment variable when requesting certificates. For example, if `DRONE_HOST=https://foo.com` the certificate is requested for `foo.com`.
> Once enabled, you can visit your website at both the http and the https addresses.
# Certificate Cache
Drone writes the certificates to the below directory:
```
/var/lib/drone/golang-autocert
```
# Certificate Updates
Drone uses the official Go acme library, which will handle certificate updates. There should be no additional configuration or management required.

View file

@ -0,0 +1,162 @@
Drone is compatible with Prometheus and exposes a `/metrics` endpoint. Please note that access to the metrics endpoint is restricted and requires an authorization token with administrative privileges.
```nohighlight
global:
scrape_interval: 60s
scrape_configs:
- job_name: 'drone'
bearer_token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...
static_configs:
- targets: ['drone.domain.com']
```
# Authorization
An administrator will need to generate a user API token and configure it in the Prometheus configuration file as a bearer token. Please see the following example:
```diff
global:
scrape_interval: 60s
scrape_configs:
- job_name: 'drone'
+ bearer_token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...
static_configs:
- targets: ['drone.domain.com']
```
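To check the token before wiring it into Prometheus, you can query the endpoint directly; this is a sketch with a placeholder host and a truncated token:
```nohighlight
curl -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." http://drone.domain.com/metrics
```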
# Metric Reference
List of prometheus metrics specific to Drone:
```
# HELP drone_build_count Total number of builds.
# TYPE drone_build_count gauge
drone_build_count 7275
# HELP drone_pending_jobs Total number of pending build processes.
# TYPE drone_pending_jobs gauge
drone_pending_jobs 0
# HELP drone_repo_count Total number of registered repositories.
# TYPE drone_repo_count gauge
drone_repo_count 133
# HELP drone_running_jobs Total number of running build processes.
# TYPE drone_running_jobs gauge
drone_running_jobs 0
# HELP drone_user_count Total number of active users.
# TYPE drone_user_count gauge
drone_user_count 15
```
List of prometheus metrics for server resource usage:
```
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0.000189189
go_gc_duration_seconds{quantile="0.25"} 0.000391444
go_gc_duration_seconds{quantile="0.5"} 0.001895967
go_gc_duration_seconds{quantile="0.75"} 0.003075854
go_gc_duration_seconds{quantile="1"} 0.004224575
go_gc_duration_seconds_sum 0.019922696
go_gc_duration_seconds_count 10
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 24
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 2.556344e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 2.0479656e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.45144e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 200332
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 8.821705133777562e-05
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 557056
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 2.556344e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 3.842048e+06
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 4.972544e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 19986
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 0
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 8.814592e+06
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.4941783810383117e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 325
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 220318
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 2400
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 81016
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 98304
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 4.819216e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 672584
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 622592
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 622592
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 1.2232952e+07
# HELP go_threads Number of OS threads created
# TYPE go_threads gauge
go_threads 9
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.9
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 524288
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 17
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 2.5296896e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.494177893e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 4.23243776e+08
```

View file

@ -0,0 +1,148 @@
**Table of Contents**
- [Apache](#apache)
- [Nginx](#nginx)
- [Caddy](#caddy)
- [Ngrok](#ngrok)
# Apache
This guide provides a brief overview of installing the Drone server behind the Apache2 webserver. This is an example configuration:
```nohighlight
ProxyPreserveHost On
RequestHeader set X-Forwarded-Proto "https"
ProxyPass / http://127.0.0.1:8000/
ProxyPassReverse / http://127.0.0.1:8000/
```
You must have the following Apache modules installed.
```nohighlight
a2enmod proxy
a2enmod proxy_http
```
You must configure Apache to set `X-Forwarded-Proto` when using HTTPS.
```diff
ProxyPreserveHost On
+RequestHeader set X-Forwarded-Proto "https"
ProxyPass / http://127.0.0.1:8000/
ProxyPassReverse / http://127.0.0.1:8000/
```
# Nginx
This guide provides a basic overview of installing the Drone server behind the nginx webserver. For more advanced configuration options please consult the official nginx [documentation](https://www.nginx.com/resources/admin-guide/).
Example configuration:
```nginx
server {
listen 80;
server_name drone.example.com;
location / {
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_pass http://127.0.0.1:8000;
proxy_redirect off;
proxy_http_version 1.1;
proxy_buffering off;
chunked_transfer_encoding off;
}
}
```
You must configure the proxy to set `X-Forwarded` proxy headers:
```diff
server {
listen 80;
server_name drone.example.com;
location / {
+ proxy_set_header X-Forwarded-For $remote_addr;
+ proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass http://127.0.0.1:8000;
proxy_redirect off;
proxy_http_version 1.1;
proxy_buffering off;
chunked_transfer_encoding off;
}
}
```
# Caddy
This guide provides a brief overview of installing the Drone server behind the [Caddy webserver](https://caddyserver.com/). This is an example caddyfile proxy configuration:
```nohighlight
drone.mycompany.com {
gzip {
not /stream/
}
proxy / localhost:8000 {
websocket
transparent
}
}
```
You must disable gzip compression for streamed data, otherwise live updates won't be instant:
```diff
drone.mycompany.com {
+ gzip {
+ not /stream/
+ }
proxy / localhost:8000 {
websocket
transparent
}
}
```
You must configure the proxy to enable websocket upgrades:
```diff
drone.mycompany.com {
gzip {
not /stream/
}
proxy / localhost:8000 {
+ websocket
transparent
}
}
```
You must configure the proxy to include `X-Forwarded` headers using the `transparent` directive:
```diff
drone.mycompany.com {
gzip {
not /stream/
}
proxy / localhost:8000 {
websocket
+ transparent
}
}
```
# Ngrok
After installing [ngrok](https://ngrok.com/), open a new console and run:
```
ngrok http 80
```
Set `DRONE_HOST` (for example in `docker-compose.yml`) to the ngrok URL (usually `xxx.ngrok.io`) and start the server.
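A minimal sketch, assuming ngrok printed the hypothetical subdomain `abc123.ngrok.io` and that your `docker-compose.yml` reads `DRONE_HOST` from the shell environment:
```nohighlight
# abc123.ngrok.io is a placeholder; use the forwarding URL printed by ngrok
export DRONE_HOST=http://abc123.ngrok.io
docker-compose up -d
```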

View file

@ -0,0 +1,77 @@
Drone supports SSL configuration by mounting certificates into your container.
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
+ - 80:80
+ - 443:443
- 9000:9000
volumes:
- /var/lib/drone:/var/lib/drone/
+ - /etc/certs/drone.foo.com/server.crt:/etc/certs/drone.foo.com/server.crt
+ - /etc/certs/drone.foo.com/server.key:/etc/certs/drone.foo.com/server.key
restart: always
environment:
+ - DRONE_SERVER_CERT=/etc/certs/drone.foo.com/server.crt
+ - DRONE_SERVER_KEY=/etc/certs/drone.foo.com/server.key
```
Update your configuration to expose the following ports:
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
+ - 80:80
+ - 443:443
- 9000:9000
```
Update your configuration to mount your certificate and key:
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:80
- 443:443
- 9000:9000
volumes:
- /var/lib/drone:/var/lib/drone/
+ - /etc/certs/drone.foo.com/server.crt:/etc/certs/drone.foo.com/server.crt
+ - /etc/certs/drone.foo.com/server.key:/etc/certs/drone.foo.com/server.key
```
Update your configuration to provide the paths of your certificate and key:
```diff
services:
drone-server:
image: drone/drone:{{% version %}}
ports:
- 80:80
- 443:443
- 9000:9000
volumes:
- /var/lib/drone:/var/lib/drone/
- /etc/certs/drone.foo.com/server.crt:/etc/certs/drone.foo.com/server.crt
- /etc/certs/drone.foo.com/server.key:/etc/certs/drone.foo.com/server.key
restart: always
environment:
+ - DRONE_SERVER_CERT=/etc/certs/drone.foo.com/server.crt
+ - DRONE_SERVER_KEY=/etc/certs/drone.foo.com/server.key
```
# Certificate Chain
The most common problem encountered is providing a certificate file without the intermediate chain.
> LoadX509KeyPair reads and parses a public/private key pair from a pair of files. The files must contain PEM encoded data. The certificate file may contain intermediate certificates following the leaf certificate to form a certificate chain.
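A minimal sketch of assembling such a chained certificate file, assuming the placeholder filenames `leaf.crt` and `intermediate.crt`:
```nohighlight
# leaf certificate first, then the intermediate(s); the output path matches the volume mount above
cat leaf.crt intermediate.crt > /etc/certs/drone.foo.com/server.crt
```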
# Certificate Errors
SSL support is provided using the [ListenAndServeTLS](https://golang.org/pkg/net/http/#ListenAndServeTLS) function from the Go standard library. If you receive certificate errors or warnings, please examine your configuration more closely. Please do not create issues claiming SSL is broken.
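Before filing an issue, you can inspect the chain your server actually presents, for example with openssl (the hostname is a placeholder):
```nohighlight
openssl s_client -connect drone.foo.com:443 -servername drone.foo.com -showcerts
```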

BIN
docs/drone.png Normal file

Binary file not shown.


18
docs/motivation.md Normal file
View file

@ -0,0 +1,18 @@
# Motivation
I used Drone for two years with great satisfaction. The container architecture, the speedy backend and UI, and the simple plugin system made it a flexible and simple platform. Kudos to the author, Brad, for making it such a joy to use.
It wasn't without flaws:
- inconsistencies in variables and CLI features
- lack of documentation
- lack of published best practices
- UI/UX issues
- stuck builds
These could be circumvented by reading the codebase. Over time, however, they started to annoy me, and PRs that tried to address them were not merged. Instead, the development of Drone headed towards a 1.0 release with features less interesting to me.
1.0 landed and it came with a license change. Drone had been an open-core project for many prior versions, but the enterprise features were limited to things like autoscaling and secret vaults.
In the 1.0 line, however, PostgreSQL, MySQL and TLS support, along with agent-based horizontal scaling, were also moved under the enterprise license, limiting the open source version to single-node, hobbyist deployments.
These feature reductions, my long-standing UX annoyances, and a general dissatisfaction with the CI space led to this fork.

49
docs/usage/bash_plugin.md Normal file
View file

@ -0,0 +1,49 @@
This provides a brief tutorial for creating a Drone webhook plugin, using simple shell scripting, to make HTTP requests during the build pipeline. The example below demonstrates how we might configure a webhook plugin in the YAML file:
```yaml
pipeline:
webhook:
image: foo/webhook
url: http://foo.com
method: post
body: |
hello world
```
Create a simple shell script that invokes curl using the YAML configuration parameters, which are passed to the script as environment variables in uppercase and prefixed with `PLUGIN_`.
```bash
#!/bin/sh
curl \
  -X "${PLUGIN_METHOD}" \
  -d "${PLUGIN_BODY}" \
  "${PLUGIN_URL}"
```
Create a Dockerfile that adds your shell script to the image, and configures the image to execute your shell script as the main entrypoint.
```dockerfile
FROM alpine
ADD script.sh /bin/
RUN chmod +x /bin/script.sh
RUN apk -Uuv add curl ca-certificates
ENTRYPOINT /bin/script.sh
```
Build and publish your plugin to the Docker registry. Once published your plugin can be shared with the broader Drone community.
```nohighlight
docker build -t foo/webhook .
docker push foo/webhook
```
Execute your plugin locally from the command line to verify it is working:
```nohighlight
docker run --rm \
-e PLUGIN_METHOD=post \
-e PLUGIN_URL=http://foo.com \
-e PLUGIN_BODY="hello world" \
foo/webhook
```

1400
docs/usage/pipeline.md Normal file

File diff suppressed because it is too large

BIN
docs/usage/repo_list.png Normal file

Binary file not shown.


View file

@ -2,7 +2,7 @@
```Go
import (
"github.com/drone/drone-go/drone"
"github.com/laszlocph/drone-oss-08/drone-go/drone"
"golang.org/x/oauth2"
)

View file

@ -16,10 +16,11 @@ package model
// ConfigStore persists pipeline configuration to storage.
type ConfigStore interface {
ConfigLoad(int64) (*Config, error)
ConfigFind(*Repo, string) (*Config, error)
ConfigsForBuild(buildID int64) ([]*Config, error)
ConfigFindIdentical(repoID int64, sha string) (*Config, error)
ConfigFindApproved(*Config) (bool, error)
ConfigCreate(*Config) error
BuildConfigCreate(*BuildConfig) error
}
// Config represents a pipeline configuration.
@ -28,4 +29,11 @@ type Config struct {
RepoID int64 `json:"-" meddler:"config_repo_id"`
Data string `json:"data" meddler:"config_data"`
Hash string `json:"hash" meddler:"config_hash"`
Name string `json:"name" meddler:"config_name"`
}
// BuildConfig is the n:n relation between Build and Config
type BuildConfig struct {
ConfigID int64 `json:"-" meddler:"config_id"`
BuildID int64 `json:"-" meddler:"build_id"`
}

View file

@ -14,6 +14,8 @@
package model
import "fmt"
// ProcStore persists process information to storage.
type ProcStore interface {
ProcLoad(int64) (*Proc, error)
@ -57,18 +59,24 @@ func (p *Proc) Failing() bool {
// Tree creates a process tree from a flat process list.
func Tree(procs []*Proc) []*Proc {
var (
nodes []*Proc
parent *Proc
)
var nodes []*Proc
for _, proc := range procs {
if proc.PPID == 0 {
nodes = append(nodes, proc)
parent = proc
continue
} else {
parent, _ := findNode(nodes, proc.PPID)
parent.Children = append(parent.Children, proc)
}
}
return nodes
}
func findNode(nodes []*Proc, pid int) (*Proc, error) {
for _, node := range nodes {
if node.PID == pid {
return node, nil
}
}
return nil, fmt.Errorf("Corrupt proc structure")
}

View file

@ -23,9 +23,11 @@ import (
// Task defines scheduled pipeline Task.
type Task struct {
ID string `meddler:"task_id"`
Data []byte `meddler:"task_data"`
Labels map[string]string `meddler:"task_labels,json"`
ID string `meddler:"task_id"`
Data []byte `meddler:"task_data"`
Labels map[string]string `meddler:"task_labels,json"`
Dependencies []string `meddler:"task_dependencies,json"`
RunOn []string `meddler:"task_run_on,json"`
}
// TaskStore defines storage for scheduled Tasks.
@ -39,13 +41,18 @@ type TaskStore interface {
// ensures the task Queue can be restored when the system starts.
func WithTaskStore(q queue.Queue, s TaskStore) queue.Queue {
tasks, _ := s.TaskList()
toEnqueue := []*queue.Task{}
for _, task := range tasks {
q.Push(context.Background(), &queue.Task{
ID: task.ID,
Data: task.Data,
Labels: task.Labels,
toEnqueue = append(toEnqueue, &queue.Task{
ID: task.ID,
Data: task.Data,
Labels: task.Labels,
Dependencies: task.Dependencies,
RunOn: task.RunOn,
DepStatus: make(map[string]bool),
})
}
q.PushAtOnce(context.Background(), toEnqueue)
return &persistentQueue{q, s}
}
@ -54,12 +61,14 @@ type persistentQueue struct {
store TaskStore
}
// Push pushes an task to the tail of this queue.
// Push pushes a task to the tail of this queue.
func (q *persistentQueue) Push(c context.Context, task *queue.Task) error {
q.store.TaskInsert(&Task{
ID: task.ID,
Data: task.Data,
Labels: task.Labels,
ID: task.ID,
Data: task.Data,
Labels: task.Labels,
Dependencies: task.Dependencies,
RunOn: task.RunOn,
})
err := q.Queue.Push(c, task)
if err != nil {
@ -68,6 +77,26 @@ func (q *persistentQueue) Push(c context.Context, task *queue.Task) error {
return err
}
// PushAtOnce pushes multiple tasks to the tail of this queue.
func (q *persistentQueue) PushAtOnce(c context.Context, tasks []*queue.Task) error {
for _, task := range tasks {
q.store.TaskInsert(&Task{
ID: task.ID,
Data: task.Data,
Labels: task.Labels,
Dependencies: task.Dependencies,
RunOn: task.RunOn,
})
}
err := q.Queue.PushAtOnce(c, tasks)
if err != nil {
for _, task := range tasks {
q.store.TaskDelete(task.ID)
}
}
return err
}
// Poll retrieves and removes a task head of this queue.
func (q *persistentQueue) Poll(c context.Context, f queue.Filter) (*queue.Task, error) {
task, err := q.Queue.Poll(c, f)

View file

@ -55,6 +55,7 @@ type Repo struct {
Config string `json:"config_file" meddler:"repo_config_path"`
Hash string `json:"-" meddler:"repo_hash"`
Perm *Perm `json:"-" meddler:"-"`
Fallback bool `json:"fallback" meddler:"repo_fallback"`
}
func (r *Repo) ResetVisibility() {
@ -105,4 +106,5 @@ type RepoPatch struct {
AllowDeploy *bool `json:"allow_deploy,omitempty"`
AllowTag *bool `json:"allow_tag,omitempty"`
BuildCounter *int `json:"build_counter,omitempty"`
Fallback *bool `json:"fallback,omitempty"`
}

View file

@ -202,20 +202,19 @@ func (c *config) Perm(u *model.User, owner, name string) (*model.Perm, error) {
// File fetches the file from the Bitbucket repository and returns its contents.
func (c *config) File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error) {
return c.FileRef(u, r, b.Commit, f)
}
// FileRef fetches the file from the Bitbucket repository and returns its contents.
func (c *config) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
config, err := c.newClient(u).FindSource(r.Owner, r.Name, ref, f)
config, err := c.newClient(u).FindSource(r.Owner, r.Name, b.Commit, f)
if err != nil {
return nil, err
}
return []byte(*config), err
}
func (c *config) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// Status creates a build status for the Bitbucket commit.
func (c *config) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *config) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
status := internal.BuildStatus{
State: convertStatus(b.Status),
Desc: convertDesc(b.Status),

View file

@ -283,7 +283,7 @@ func Test_bitbucket(t *testing.T) {
})
g.It("Should update the status", func() {
err := c.Status(fakeUser, fakeRepo, fakeBuild, "http://127.0.0.1")
err := c.Status(fakeUser, fakeRepo, fakeBuild, "http://127.0.0.1", nil)
g.Assert(err == nil).IsTrue()
})

View file

@ -179,14 +179,12 @@ func (c *Config) File(u *model.User, r *model.Repo, b *model.Build, f string) ([
return client.FindFileForRepo(r.Owner, r.Name, f, b.Ref)
}
func (c *Config) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
client := internal.NewClientWithToken(c.URL, c.Consumer, u.Token)
return client.FindFileForRepo(r.Owner, r.Name, f, ref)
func (c *Config) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// Status is not supported by the bitbucketserver driver.
func (c *Config) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *Config) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
status := internal.BuildStatus{
State: convertStatus(b.Status),
Desc: convertDesc(b.Status),

View file

@ -238,18 +238,12 @@ func (c *Coding) File(u *model.User, r *model.Repo, b *model.Build, f string) ([
return data, nil
}
// FileRef fetches a file from the remote repository for the given ref
// and returns in string format.
func (c *Coding) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
data, err := c.newClient(u).GetFile(r.Owner, r.Name, ref, f)
if err != nil {
return nil, err
}
return data, nil
func (c *Coding) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// Status sends the commit status to the remote system.
func (c *Coding) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *Coding) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
// EMPTY: not implemented in Coding OAuth API
return nil
}

View file

@ -184,11 +184,6 @@ func Test_coding(t *testing.T) {
g.Assert(err == nil).IsTrue()
g.Assert(string(data)).Equal("pipeline:\n test:\n image: golang:1.6\n commands:\n - go test\n")
})
g.It("Should return file for specified ref", func() {
data, err := c.FileRef(fakeUser, fakeRepo, "master", ".drone.yml")
g.Assert(err == nil).IsTrue()
g.Assert(string(data)).Equal("pipeline:\n test:\n image: golang:1.6\n commands:\n - go test\n")
})
})
g.Describe("When requesting a netrc config", func() {

View file

@ -103,13 +103,12 @@ func (c *client) File(u *model.User, r *model.Repo, b *model.Build, f string) ([
return nil, nil
}
// File is not supported by the Gerrit driver.
func (c *client) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
return nil, nil
func (c *client) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// Status is not supported by the Gogs driver.
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
return nil
}

View file

@ -249,13 +249,12 @@ func (c *client) File(u *model.User, r *model.Repo, b *model.Build, f string) ([
return cfg, err
}
// FileRef fetches the file from the Gitea repository and returns its contents.
func (c *client) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
return c.newClientToken(u.Token).GetFile(r.Owner, r.Name, ref, f)
func (c *client) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// Status is supported by the Gitea driver.
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
client := c.newClientToken(u.Token)
status := getStatus(b.Status)

View file

@ -18,10 +18,10 @@ import (
"net/http/httptest"
"testing"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote/gitea/fixtures"
"github.com/franela/goblin"
"github.com/gin-gonic/gin"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote/gitea/fixtures"
)
func Test_gitea(t *testing.T) {
@ -149,7 +149,7 @@ func Test_gitea(t *testing.T) {
})
g.It("Should return nil from send build status", func() {
err := c.Status(fakeUser, fakeRepo, fakeBuild, "http://gitea.io")
err := c.Status(fakeUser, fakeRepo, fakeBuild, "http://gitea.io", nil)
g.Assert(err == nil).IsTrue()
})

View file

@ -51,7 +51,7 @@ const (
// GitHub commit status.
func convertStatus(status string) string {
switch status {
case model.StatusPending, model.StatusRunning, model.StatusBlocked:
case model.StatusPending, model.StatusRunning, model.StatusBlocked, model.StatusSkipped:
return statusPending
case model.StatusFailure, model.StatusDeclined:
return statusFailure

View file

@ -23,6 +23,7 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote"
@ -104,7 +105,7 @@ type client struct {
// Login authenticates the session and returns the remote user details.
func (c *client) Login(res http.ResponseWriter, req *http.Request) (*model.User, error) {
config := c.newConfig(httputil.GetURL(req))
config := c.newConfig(req)
// get the OAuth errors
if err := req.FormValue("error"); err != "" {
@ -225,22 +226,77 @@ func (c *client) Perm(u *model.User, owner, name string) (*model.Perm, error) {
// File fetches the file from the GitHub repository and returns its contents.
func (c *client) File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error) {
return c.FileRef(u, r, b.Commit, f)
}
// FileRef fetches the file from the GitHub repository and returns its contents.
func (c *client) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
client := c.newClientToken(u.Token)
opts := new(github.RepositoryContentGetOptions)
opts.Ref = ref
opts.Ref = b.Commit
data, _, _, err := client.Repositories.GetContents(r.Owner, r.Name, f, opts)
if err != nil {
return nil, err
}
if data == nil {
return nil, fmt.Errorf("%s is a folder not a file use Dir(..)", f)
}
return data.Decode()
}
func (c *client) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
client := c.newClientToken(u.Token)
opts := new(github.RepositoryContentGetOptions)
opts.Ref = b.Commit
_, data, _, err := client.Repositories.GetContents(r.Owner, r.Name, f, opts)
if err != nil {
return nil, err
}
fc := make(chan *remote.FileMeta)
errc := make(chan error)
wg := &sync.WaitGroup{}
wg.Add(len(data))
for _, file := range data {
go func(path string) {
content, err := c.File(u, r, b, path)
if err != nil {
errc <- err
} else {
fc <- &remote.FileMeta{
Name: path,
Data: content,
}
}
}(f + "/" + *file.Name)
}
var files []*remote.FileMeta
var errors []error
go func() {
for {
select {
case err, open := <-errc:
if open {
errors = append(errors, err)
wg.Done()
}
case fileMeta, open := <-fc:
if open {
files = append(files, fileMeta)
wg.Done()
}
}
}
}()
wg.Wait()
close(fc)
close(errc)
return files, nil
}
// Netrc returns a netrc file capable of authenticating GitHub requests and
// cloning GitHub repositories. The netrc will use the global machine account
// when configured.
@ -292,7 +348,16 @@ func (c *client) newContext() context.Context {
}
// helper function to return the GitHub oauth2 config
func (c *client) newConfig(redirect string) *oauth2.Config {
func (c *client) newConfig(req *http.Request) *oauth2.Config {
var redirect string
intendedURL := req.URL.Query()["url"]
if len(intendedURL) > 0 {
redirect = fmt.Sprintf("%s/authorize?url=%s", httputil.GetURL(req), intendedURL[0])
} else {
redirect = fmt.Sprintf("%s/authorize", httputil.GetURL(req))
}
return &oauth2.Config{
ClientID: c.Client,
ClientSecret: c.Secret,
@ -301,7 +366,7 @@ func (c *client) newConfig(redirect string) *oauth2.Config {
AuthURL: fmt.Sprintf("%s/login/oauth/authorize", c.URL),
TokenURL: fmt.Sprintf("%s/login/oauth/access_token", c.URL),
},
RedirectURL: fmt.Sprintf("%s/authorize", redirect),
RedirectURL: redirect,
}
}
@ -374,17 +439,17 @@ func matchingHooks(hooks []github.Hook, rawurl string) *github.Hook {
// Status sends the commit status to the remote system.
// An example would be the GitHub pull request status.
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
client := c.newClientToken(u.Token)
switch b.Event {
case "deployment":
return deploymentStatus(client, r, b, link)
default:
return repoStatus(client, r, b, link, c.Context)
return repoStatus(client, r, b, link, c.Context, proc)
}
}
func repoStatus(client *github.Client, r *model.Repo, b *model.Build, link, ctx string) error {
func repoStatus(client *github.Client, r *model.Repo, b *model.Build, link, ctx string, proc *model.Proc) error {
context := ctx
switch b.Event {
case model.EventPull:
@ -395,10 +460,19 @@ func repoStatus(client *github.Client, r *model.Repo, b *model.Build, link, ctx
}
}
status := github.String(convertStatus(b.Status))
desc := github.String(convertDesc(b.Status))
if proc != nil {
context += "/" + proc.Name
status = github.String(convertStatus(proc.State))
desc = github.String(convertDesc(proc.State))
}
data := github.RepoStatus{
Context: github.String(context),
State: github.String(convertStatus(b.Status)),
Description: github.String(convertDesc(b.Status)),
State: status,
Description: desc,
TargetURL: github.String(link),
}
_, _, err := client.Repositories.CreateStatus(r.Owner, r.Name, b.Commit, &data)

View file

@ -325,28 +325,27 @@ func (g *Gitlab) Perm(u *model.User, owner, name string) (*model.Perm, error) {
// File fetches a file from the remote repository and returns in string format.
func (g *Gitlab) File(user *model.User, repo *model.Repo, build *model.Build, f string) ([]byte, error) {
return g.FileRef(user, repo, build.Commit, f)
}
// FileRef fetches the file from the GitHub repository and returns its contents.
func (g *Gitlab) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
var client = NewClient(g.URL, u.Token, g.SkipVerify)
id, err := GetProjectId(g, client, r.Owner, r.Name)
var client = NewClient(g.URL, user.Token, g.SkipVerify)
id, err := GetProjectId(g, client, repo.Owner, repo.Name)
if err != nil {
return nil, err
}
out, err := client.RepoRawFileRef(id, ref, f)
out, err := client.RepoRawFileRef(id, build.Commit, f)
if err != nil {
return nil, err
}
return out, err
}
func (c *Gitlab) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// NOTE Currently gitlab doesn't support status for commits and events,
// also if we want to get MR status in gitlab we need to implement a special plugin for gitlab,
// gitlab uses API to fetch build status on client side. But for now we skip this.
func (g *Gitlab) Status(u *model.User, repo *model.Repo, b *model.Build, link string) error {
func (g *Gitlab) Status(u *model.User, repo *model.Repo, b *model.Build, link string, proc *model.Proc) error {
client := NewClient(g.URL, u.Token, g.SkipVerify)
status := getStatus(b.Status)

View file

@ -338,25 +338,14 @@ func (g *Gitlab) File(user *model.User, repo *model.Repo, build *model.Build, f
return out, err
}
// FileRef fetches the file from the GitHub repository and returns its contents.
func (g *Gitlab) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
var client = NewClient(g.URL, u.Token, g.SkipVerify)
id, err := GetProjectId(g, client, r.Owner, r.Name)
if err != nil {
return nil, err
}
out, err := client.RepoRawFileRef(id, ref, f)
if err != nil {
return nil, err
}
return out, err
func (c *Gitlab) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// NOTE Currently gitlab doesn't support status for commits and events,
// also if we want to get MR status in gitlab we need to implement a special plugin for gitlab,
// gitlab uses API to fetch build status on client side. But for now we skip this.
func (g *Gitlab) Status(u *model.User, repo *model.Repo, b *model.Build, link string) error {
func (g *Gitlab) Status(u *model.User, repo *model.Repo, b *model.Build, link string, proc *model.Proc) error {
client := NewClient(g.URL, u.Token, g.SkipVerify)
status := getStatus(b.Status)

View file

@ -22,9 +22,9 @@ import (
"net/url"
"strings"
"github.com/gogits/go-gogs-client"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote"
"github.com/gogits/go-gogs-client"
)
// Opts defines configuration options.
@ -202,13 +202,12 @@ func (c *client) File(u *model.User, r *model.Repo, b *model.Build, f string) ([
return cfg, err
}
// FileRef fetches the file from the Gogs repository and returns its contents.
func (c *client) FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error) {
return c.newClientToken(u.Token).GetFile(r.Owner, r.Name, ref, f)
func (c *client) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
return nil, fmt.Errorf("Not implemented")
}
// Status is not supported by the Gogs driver.
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string) error {
func (c *client) Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
return nil
}

View file

@ -163,7 +163,7 @@ func Test_gogs(t *testing.T) {
g.It("Should return no-op for usupporeted features", func() {
_, err1 := c.Auth("octocat", "4vyW6b49Z")
err2 := c.Status(nil, nil, nil, "")
err2 := c.Status(nil, nil, nil, "", nil)
err3 := c.Deactivate(nil, nil, "")
g.Assert(err1 != nil).IsTrue()
g.Assert(err2 == nil).IsTrue()

View file

@ -1,25 +1,11 @@
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mock
package mocks
import (
"net/http"
"github.com/laszlocph/drone-oss-08/model"
"github.com/stretchr/testify/mock"
)
import http "net/http"
import mock "github.com/stretchr/testify/mock"
import model "github.com/laszlocph/drone-oss-08/model"
import remote "github.com/laszlocph/drone-oss-08/remote"
// Remote is an autogenerated mock type for the Remote type
type Remote struct {
@ -75,6 +61,29 @@ func (_m *Remote) Deactivate(u *model.User, r *model.Repo, link string) error {
return r0
}
// Dir provides a mock function with given fields: u, r, b, f
func (_m *Remote) Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*remote.FileMeta, error) {
ret := _m.Called(u, r, b, f)
var r0 []*remote.FileMeta
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, *model.Build, string) []*remote.FileMeta); ok {
r0 = rf(u, r, b, f)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*remote.FileMeta)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, *model.Repo, *model.Build, string) error); ok {
r1 = rf(u, r, b, f)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// File provides a mock function with given fields: u, r, b, f
func (_m *Remote) File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error) {
ret := _m.Called(u, r, b, f)
@ -98,29 +107,6 @@ func (_m *Remote) File(u *model.User, r *model.Repo, b *model.Build, f string) (
return r0, r1
}
// FileRef provides a mock function with given fields: u, r, ref, f
func (_m *Remote) FileRef(u *model.User, r *model.Repo, ref string, f string) ([]byte, error) {
ret := _m.Called(u, r, ref, f)
var r0 []byte
if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string, string) []byte); ok {
r0 = rf(u, r, ref, f)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, *model.Repo, string, string) error); ok {
r1 = rf(u, r, ref, f)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Hook provides a mock function with given fields: r
func (_m *Remote) Hook(r *http.Request) (*model.Repo, *model.Build, error) {
ret := _m.Called(r)
@ -246,15 +232,15 @@ func (_m *Remote) Repo(u *model.User, owner string, repo string) (*model.Repo, e
}
// Repos provides a mock function with given fields: u
func (_m *Remote) Repos(u *model.User) ([]*model.RepoLite, error) {
func (_m *Remote) Repos(u *model.User) ([]*model.Repo, error) {
ret := _m.Called(u)
var r0 []*model.RepoLite
if rf, ok := ret.Get(0).(func(*model.User) []*model.RepoLite); ok {
var r0 []*model.Repo
if rf, ok := ret.Get(0).(func(*model.User) []*model.Repo); ok {
r0 = rf(u)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*model.RepoLite)
r0 = ret.Get(0).([]*model.Repo)
}
}
@ -282,29 +268,6 @@ func (_m *Remote) Status(u *model.User, r *model.Repo, b *model.Build, link stri
return r0
}
// TeamPerm provides a mock function with given fields: u, org
func (_m *Remote) TeamPerm(u *model.User, org string) (*model.Perm, error) {
ret := _m.Called(u, org)
var r0 *model.Perm
if rf, ok := ret.Get(0).(func(*model.User, string) *model.Perm); ok {
r0 = rf(u, org)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*model.Perm)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(*model.User, string) error); ok {
r1 = rf(u, org)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Teams provides a mock function with given fields: u
func (_m *Remote) Teams(u *model.User) ([]*model.Team, error) {
ret := _m.Called(u)

View file

@ -18,7 +18,6 @@ package remote
import (
"net/http"
"time"
"github.com/laszlocph/drone-oss-08/model"
@ -51,13 +50,12 @@ type Remote interface {
// format.
File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error)
// FileRef fetches a file from the remote repository for the given ref
// and returns in string format.
FileRef(u *model.User, r *model.Repo, ref, f string) ([]byte, error)
// Dir fetches a folder from the remote repository
Dir(u *model.User, r *model.Repo, b *model.Build, f string) ([]*FileMeta, error)
// Status sends the commit status to the remote system.
// An example would be the GitHub pull request status.
Status(u *model.User, r *model.Repo, b *model.Build, link string) error
Status(u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error
// Netrc returns a .netrc file that can be used to clone
// private repositories from a remote system.
@ -75,6 +73,18 @@ type Remote interface {
Hook(r *http.Request) (*model.Repo, *model.Build, error)
}
// FileMeta represents a file in version control
type FileMeta struct {
Name string
Data []byte
}
type ByName []*FileMeta
func (a ByName) Len() int { return len(a) }
func (a ByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Refresher refreshes an oauth token and expiration for the given user. It
// returns true if the token was refreshed, false if the token was not refreshed,
// and error if it failed to refresh.
@ -115,22 +125,10 @@ func Perm(c context.Context, u *model.User, owner, repo string) (*model.Perm, er
return FromContext(c).Perm(u, owner, repo)
}
// File fetches a file from the remote repository and returns in string format.
func File(c context.Context, u *model.User, r *model.Repo, b *model.Build, f string) (out []byte, err error) {
for i := 0; i < 12; i++ {
out, err = FromContext(c).File(u, r, b, f)
if err == nil {
return
}
time.Sleep(5 * time.Second)
}
return
}
// Status sends the commit status to the remote system.
// An example would be the GitHub pull request status.
func Status(c context.Context, u *model.User, r *model.Repo, b *model.Build, link string) error {
return FromContext(c).Status(u, r, b, link)
func Status(c context.Context, u *model.User, r *model.Repo, b *model.Build, link string, proc *model.Proc) error {
return FromContext(c).Status(u, r, b, link, proc)
}
// Netrc returns a .netrc file that can be used to clone
@ -168,18 +166,3 @@ func Refresh(c context.Context, u *model.User) (bool, error) {
}
return refresher.Refresh(u)
}
// FileBackoff fetches the file using an exponential backoff.
// TODO replace this with a proper backoff
func FileBackoff(remote Remote, u *model.User, r *model.Repo, b *model.Build, f string) (out []byte, err error) {
for i := 0; i < 5; i++ {
select {
case <-time.After(time.Second * time.Duration(i)):
out, err = remote.File(u, r, b, f)
if err == nil {
return
}
}
}
return
}

View file

@ -148,6 +148,22 @@ func Load(mux *httptreemux.ContextMux, middleware ...gin.HandlerFunc) http.Handl
)
}
queue := e.Group("/api/queue")
{
queue.GET("/pause",
session.MustAdmin(),
server.PauseQueue,
)
queue.GET("/resume",
session.MustAdmin(),
server.ResumeQueue,
)
queue.GET("/norunningbuilds",
session.MustAdmin(),
server.BlockTilQueueHasRunningItem,
)
}
auth := e.Group("/authorize")
{
auth.GET("", server.HandleAuth)

View file

@ -156,13 +156,12 @@ func GetProcLogs(c *gin.Context) {
io.Copy(c.Writer, rc)
}
// DeleteBuild cancels a build
func DeleteBuild(c *gin.Context) {
repo := session.Repo(c)
// parse the build number and job sequence number from
// the repquest parameter.
// parse the build number from the request parameter.
num, _ := strconv.Atoi(c.Params.ByName("number"))
seq, _ := strconv.Atoi(c.Params.ByName("job"))
build, err := store.GetBuildNumber(c, repo, num)
if err != nil {
@ -170,27 +169,40 @@ func DeleteBuild(c *gin.Context) {
return
}
proc, err := store.FromContext(c).ProcFind(build, seq)
procs, err := store.FromContext(c).ProcList(build)
if err != nil {
c.AbortWithError(404, err)
return
}
if proc.State != model.StatusRunning {
cancelled := false
for _, proc := range procs {
if proc.PPID != 0 {
continue
}
if proc.State != model.StatusRunning && proc.State != model.StatusPending {
continue
}
proc.State = model.StatusKilled
proc.Stopped = time.Now().Unix()
if proc.Started == 0 {
proc.Started = proc.Stopped
}
proc.ExitCode = 137
// TODO cancel child procs
store.FromContext(c).ProcUpdate(proc)
Config.Services.Queue.Error(context.Background(), fmt.Sprint(proc.ID), queue.ErrCancel)
cancelled = true
}
if !cancelled {
c.String(400, "Cannot cancel a non-running build")
return
}
proc.State = model.StatusKilled
proc.Stopped = time.Now().Unix()
if proc.Started == 0 {
proc.Started = proc.Stopped
}
proc.ExitCode = 137
// TODO cancel child procs
store.FromContext(c).ProcUpdate(proc)
Config.Services.Queue.Error(context.Background(), fmt.Sprint(proc.ID), queue.ErrCancel)
c.String(204, "")
}
@ -268,7 +280,7 @@ func PostApproval(c *gin.Context) {
build.Reviewer = user.Login
// fetch the build file from the database
conf, err := Config.Storage.Config.ConfigLoad(build.ConfigID)
configs, err := Config.Storage.Config.ConfigsForBuild(build.ID)
if err != nil {
logrus.Errorf("failure to get build config for %s. %s", repo.FullName, err)
c.AbortWithError(404, err)
@ -307,13 +319,10 @@ func PostApproval(c *gin.Context) {
}
}
defer func() {
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, uri)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
}()
var yamls []*remote.FileMeta
for _, y := range configs {
yamls = append(yamls, &remote.FileMeta{Data: []byte(y.Data), Name: y.Name})
}
b := procBuilder{
Repo: repo,
@ -323,7 +332,7 @@ func PostApproval(c *gin.Context) {
Secs: secs,
Regs: regs,
Link: httputil.GetURL(c.Request),
Yaml: conf.Data,
Yamls: yamls,
Envs: envs,
}
buildItems, err := b.Build()
@ -336,12 +345,25 @@ func PostApproval(c *gin.Context) {
return
}
setBuildProcs(build, buildItems)
err = store.FromContext(c).ProcCreate(build.Procs)
if err != nil {
logrus.Errorf("error persisting procs %s/%d: %s", repo.FullName, build.Number, err)
}
defer func() {
for _, item := range buildItems {
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
if len(buildItems) > 1 {
err = remote_.Status(user, repo, build, uri, item.Proc)
} else {
err = remote_.Status(user, repo, build, uri, nil)
}
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
}
}()
publishToTopic(c, build, repo)
queueBuild(build, repo, buildItems)
}
@ -376,7 +398,7 @@ func PostDecline(c *gin.Context) {
}
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, uri)
err = remote_.Status(user, repo, build, uri, nil)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
@ -436,7 +458,7 @@ func PostBuild(c *gin.Context) {
}
// fetch the .drone.yml file from the database
conf, err := Config.Storage.Config.ConfigLoad(build.ConfigID)
configs, err := Config.Storage.Config.ConfigsForBuild(build.ID)
if err != nil {
logrus.Errorf("failure to get build config for %s. %s", repo.FullName, err)
c.AbortWithError(404, err)
@ -474,6 +496,13 @@ func PostBuild(c *gin.Context) {
return
}
err = persistBuildConfigs(configs, build.ID)
if err != nil {
logrus.Errorf("failure to persist build config for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
// Read query string parameters into buildParams, exclude reserved params
var buildParams = map[string]string{}
for key, val := range c.Request.URL.Query() {
@ -504,6 +533,11 @@ func PostBuild(c *gin.Context) {
}
}
var yamls []*remote.FileMeta
for _, y := range configs {
yamls = append(yamls, &remote.FileMeta{Data: []byte(y.Data), Name: y.Name})
}
b := procBuilder{
Repo: repo,
Curr: build,
@ -512,7 +546,7 @@ func PostBuild(c *gin.Context) {
Secs: secs,
Regs: regs,
Link: httputil.GetURL(c.Request),
Yaml: conf.Data,
Yamls: yamls,
Envs: buildParams,
}
buildItems, err := b.Build()
@ -525,8 +559,6 @@ func PostBuild(c *gin.Context) {
return
}
setBuildProcs(build, buildItems)
err = store.FromContext(c).ProcCreate(build.Procs)
if err != nil {
logrus.Errorf("cannot restart %s#%d: %s", repo.FullName, build.Number, err)
@ -582,6 +614,20 @@ func DeleteBuildLogs(c *gin.Context) {
c.String(204, "")
}
func persistBuildConfigs(configs []*model.Config, buildID int64) error {
for _, conf := range configs {
buildConfig := &model.BuildConfig{
ConfigID: conf.ID,
BuildID: buildID,
}
err := Config.Storage.Config.BuildConfigCreate(buildConfig)
if err != nil {
return err
}
}
return nil
}
var deleteStr = `[
{
"proc": %q,

53
server/configFetcher.go Normal file
View file

@ -0,0 +1,53 @@
package server
import (
"strings"
"time"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote"
)
type configFetcher struct {
remote_ remote.Remote
user *model.User
repo *model.Repo
build *model.Build
}
func (cf *configFetcher) Fetch() ([]*remote.FileMeta, error) {
for i := 0; i < 5; i++ {
select {
case <-time.After(time.Second * time.Duration(i)):
// either a file
file, fileerr := cf.remote_.File(cf.user, cf.repo, cf.build, cf.repo.Config)
if fileerr == nil {
return []*remote.FileMeta{&remote.FileMeta{
Name: cf.repo.Config,
Data: file,
}}, nil
}
// or a folder
dir, direrr := cf.remote_.Dir(cf.user, cf.repo, cf.build, strings.TrimSuffix(cf.repo.Config, "/"))
if direrr == nil {
return dir, nil
} else if !cf.repo.Fallback {
return nil, direrr
}
// or fallback
file, fileerr = cf.remote_.File(cf.user, cf.repo, cf.build, ".drone.yml")
if fileerr != nil {
return nil, fileerr
}
return []*remote.FileMeta{&remote.FileMeta{
Name: cf.repo.Config,
Data: file,
}}, nil
}
}
return []*remote.FileMeta{}, nil
}

View file

@ -0,0 +1,22 @@
package server
import (
"testing"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote/github"
)
func TestFetchGithub(t *testing.T) {
github, err := github.New(github.Opts{URL: "https://github.com"})
if err != nil {
t.Fatal(err)
}
configFetcher := &configFetcher{
remote_: github,
user: &model.User{Token: "xxx"},
repo: &model.Repo{Owner: "laszlocph", Name: "drone-multipipeline", Config: ".drone"},
build: &model.Build{Commit: "89ab7b2d6bfb347144ac7c557e638ab402848fee"},
}
configFetcher.Fetch()
}

View file

@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"math/rand"
"net/http"
"regexp"
"strconv"
"time"
@ -51,6 +52,26 @@ func GetQueueInfo(c *gin.Context) {
)
}
func PauseQueue(c *gin.Context) {
Config.Services.Queue.Pause()
c.Status(http.StatusOK)
}
func ResumeQueue(c *gin.Context) {
Config.Services.Queue.Resume()
c.Status(http.StatusOK)
}
func BlockTilQueueHasRunningItem(c *gin.Context) {
for {
info := Config.Services.Queue.Info(c)
if info.Stats.Running == 0 {
break
}
}
c.Status(http.StatusOK)
}
func PostHook(c *gin.Context) {
remote_ := remote.FromContext(c)
@ -143,34 +164,21 @@ func PostHook(c *gin.Context) {
}
// fetch the build file from the remote
remoteYamlConfig, err := remote.FileBackoff(remote_, user, repo, build, repo.Config)
configFetcher := &configFetcher{remote_: remote_, user: user, repo: repo, build: build}
remoteYamlConfigs, err := configFetcher.Fetch()
if err != nil {
logrus.Errorf("error: %s: cannot find %s in %s: %s", repo.FullName, repo.Config, build.Ref, err)
c.AbortWithError(404, err)
return
}
conf, err := findOrPersistPipelineConfig(repo, remoteYamlConfig)
if err != nil {
logrus.Errorf("failure to find or persist build config for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
if branchFiltered(build, remoteYamlConfigs) {
c.String(200, "Branch does not match restrictions defined in yaml")
return
}
build.ConfigID = conf.ID
// verify that pipeline can be built at all
parsedPipelineConfig, err := yaml.ParseString(conf.Data)
if err == nil {
if !parsedPipelineConfig.Branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
c.String(200, "Branch does not match restrictions defined in yaml")
return
}
}
if repo.IsGated {
allowed, _ := Config.Services.Senders.SenderAllowed(user, repo, build, conf)
if !allowed {
build.Status = model.StatusBlocked
}
if repo.IsGated { // This feature is not clear to me. Reenabling once better understood
build.Status = model.StatusBlocked
}
// update some build fields
@ -185,6 +193,16 @@ func PostHook(c *gin.Context) {
return
}
// persist the build config for historical correctness, restarts, etc
for _, remoteYamlConfig := range remoteYamlConfigs {
_, err := findOrPersistPipelineConfig(build, remoteYamlConfig)
if err != nil {
logrus.Errorf("failure to find or persist build config for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
}
c.JSON(200, build)
if build.Status == model.StatusBlocked {
@ -218,14 +236,6 @@ func PostHook(c *gin.Context) {
// get the previous build so that we can send status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
defer func() {
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, uri)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
}()
b := procBuilder{
Repo: repo,
Curr: build,
@ -235,7 +245,7 @@ func PostHook(c *gin.Context) {
Regs: regs,
Envs: envs,
Link: httputil.GetURL(c.Request),
Yaml: conf.Data,
Yamls: remoteYamlConfigs,
}
buildItems, err := b.Build()
if err != nil {
@ -247,66 +257,75 @@ func PostHook(c *gin.Context) {
return
}
setBuildProcs(build, buildItems)
err = store.FromContext(c).ProcCreate(build.Procs)
if err != nil {
logrus.Errorf("error persisting procs %s/%d: %s", repo.FullName, build.Number, err)
}
defer func() {
for _, item := range buildItems {
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
if len(buildItems) > 1 {
err = remote_.Status(user, repo, build, uri, item.Proc)
} else {
err = remote_.Status(user, repo, build, uri, nil)
}
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
}
}()
publishToTopic(c, build, repo)
queueBuild(build, repo, buildItems)
}
func findOrPersistPipelineConfig(repo *model.Repo, remoteYamlConfig []byte) (*model.Config, error) {
sha := shasum(remoteYamlConfig)
conf, err := Config.Storage.Config.ConfigFind(repo, sha)
func branchFiltered(build *model.Build, remoteYamlConfigs []*remote.FileMeta) bool {
for _, remoteYamlConfig := range remoteYamlConfigs {
parsedPipelineConfig, err := yaml.ParseString(string(remoteYamlConfig.Data))
if err == nil {
if !parsedPipelineConfig.Branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
} else {
return false
}
}
}
return true
}
func findOrPersistPipelineConfig(build *model.Build, remoteYamlConfig *remote.FileMeta) (*model.Config, error) {
sha := shasum(remoteYamlConfig.Data)
conf, err := Config.Storage.Config.ConfigFindIdentical(build.RepoID, sha)
if err != nil {
conf = &model.Config{
RepoID: repo.ID,
Data: string(remoteYamlConfig),
RepoID: build.RepoID,
Data: string(remoteYamlConfig.Data),
Hash: sha,
Name: sanitizePath(remoteYamlConfig.Name),
}
err = Config.Storage.Config.ConfigCreate(conf)
if err != nil {
// retry in case we receive two hooks at the same time
conf, err = Config.Storage.Config.ConfigFind(repo, sha)
conf, err = Config.Storage.Config.ConfigFindIdentical(build.RepoID, sha)
if err != nil {
return nil, err
}
}
}
buildConfig := &model.BuildConfig{
ConfigID: conf.ID,
BuildID: build.ID,
}
err = Config.Storage.Config.BuildConfigCreate(buildConfig)
if err != nil {
return nil, err
}
return conf, nil
}
func setBuildProcs(build *model.Build, buildItems []*buildItem) {
pcounter := len(buildItems)
for _, item := range buildItems {
build.Procs = append(build.Procs, item.Proc)
item.Proc.BuildID = build.ID
for _, stage := range item.Config.Stages {
var gid int
for _, step := range stage.Steps {
pcounter++
if gid == 0 {
gid = pcounter
}
proc := &model.Proc{
BuildID: build.ID,
Name: step.Alias,
PID: pcounter,
PPID: item.Proc.PID,
PGID: gid,
State: model.StatusPending,
}
build.Procs = append(build.Procs, proc)
}
}
}
}
// publishes message to UI clients
func publishToTopic(c *gin.Context, build *model.Build, repo *model.Repo) {
message := pubsub.Message{
Labels: map[string]string{
@ -325,7 +344,11 @@ func publishToTopic(c *gin.Context, build *model.Build, repo *model.Repo) {
}
func queueBuild(build *model.Build, repo *model.Repo, buildItems []*buildItem) {
var tasks []*queue.Task
for _, item := range buildItems {
if item.Proc.State == model.StatusSkipped {
continue
}
task := new(queue.Task)
task.ID = fmt.Sprint(item.Proc.ID)
task.Labels = map[string]string{}
@ -334,6 +357,9 @@ func queueBuild(build *model.Build, repo *model.Repo, buildItems []*buildItem) {
}
task.Labels["platform"] = item.Platform
task.Labels["repo"] = repo.FullName
task.Dependencies = taskIds(item.DependsOn, buildItems)
task.RunOn = item.RunsOn
task.DepStatus = make(map[string]bool)
task.Data, _ = json.Marshal(rpc.Pipeline{
ID: fmt.Sprint(item.Proc.ID),
@ -342,8 +368,21 @@ func queueBuild(build *model.Build, repo *model.Repo, buildItems []*buildItem) {
})
Config.Services.Logs.Open(context.Background(), task.ID)
Config.Services.Queue.Push(context.Background(), task)
tasks = append(tasks, task)
}
Config.Services.Queue.PushAtOnce(context.Background(), tasks)
}
func taskIds(dependsOn []string, buildItems []*buildItem) []string {
taskIds := []string{}
for _, dep := range dependsOn {
for _, buildItem := range buildItems {
if buildItem.Proc.Name == dep {
taskIds = append(taskIds, fmt.Sprint(buildItem.Proc.ID))
}
}
}
return taskIds
}
func shasum(raw []byte) string {

View file

@ -1,45 +0,0 @@
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"testing"
"github.com/laszlocph/drone-oss-08/model"
)
func TestMultilineEnvsubst(t *testing.T) {
b := procBuilder{
Repo: &model.Repo{},
Curr: &model.Build{
Message: `aaa
bbb`,
},
Last: &model.Build{},
Netrc: &model.Netrc{},
Secs: []*model.Secret{},
Regs: []*model.Registry{},
Link: "",
Yaml: `pipeline:
xxx:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
`,
}
if _, err := b.Build(); err != nil {
t.Fatal(err)
}
}

View file

@ -38,7 +38,12 @@ func HandleLogin(c *gin.Context) {
if err := r.FormValue("error"); err != "" {
http.Redirect(w, r, "/login/error?code="+err, 303)
} else {
http.Redirect(w, r, "/authorize", 303)
intendedURL := r.URL.Query()["url"]
if len(intendedURL) > 0 {
http.Redirect(w, r, "/authorize?url="+intendedURL[0], 303)
} else {
http.Redirect(w, r, "/authorize", 303)
}
}
}
@ -136,8 +141,13 @@ func HandleAuth(c *gin.Context) {
}
httputil.SetCookie(c.Writer, c.Request, "user_sess", tokenstr)
c.Redirect(303, "/")
intendedURL := c.Request.URL.Query()["url"]
if len(intendedURL) > 0 {
c.Redirect(303, intendedURL[0])
} else {
c.Redirect(303, "/")
}
}
func GetLogout(c *gin.Context) {

View file

@ -18,6 +18,7 @@ import (
"fmt"
"math/rand"
"net/url"
"sort"
"strings"
"github.com/drone/envsubst"
@ -28,6 +29,7 @@ import (
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/frontend/yaml/linter"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/frontend/yaml/matrix"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote"
)
// Takes the hook data and the yaml and returns the internal data model
@ -39,156 +41,198 @@ type procBuilder struct {
Secs []*model.Secret
Regs []*model.Registry
Link string
Yaml string
Yamls []*remote.FileMeta
Envs map[string]string
}
type buildItem struct {
Proc *model.Proc
Platform string
Labels map[string]string
Config *backend.Config
Proc *model.Proc
Platform string
Labels map[string]string
DependsOn []string
RunsOn []string
Config *backend.Config
}
func (b *procBuilder) Build() ([]*buildItem, error) {
axes, err := matrix.ParseString(b.Yaml)
if err != nil {
return nil, err
}
if len(axes) == 0 {
axes = append(axes, matrix.Axis{})
}
var items []*buildItem
for i, axis := range axes {
proc := &model.Proc{
BuildID: b.Curr.ID,
PID: i + 1,
PGID: i + 1,
State: model.StatusPending,
Environ: axis,
}
metadata := metadataFromStruct(b.Repo, b.Curr, b.Last, proc, b.Link)
environ := metadata.Environ()
for k, v := range metadata.EnvironDrone() {
environ[k] = v
}
for k, v := range axis {
environ[k] = v
}
sort.Sort(remote.ByName(b.Yamls))
var secrets []compiler.Secret
for _, sec := range b.Secs {
if !sec.Match(b.Curr.Event) {
continue
}
secrets = append(secrets, compiler.Secret{
Name: sec.Name,
Value: sec.Value,
Match: sec.Images,
})
}
y := b.Yaml
s, err := envsubst.Eval(y, func(name string) string {
env := environ[name]
if strings.Contains(env, "\n") {
env = fmt.Sprintf("%q", env)
}
return env
})
for j, y := range b.Yamls {
// matrix axes
axes, err := matrix.ParseString(string(y.Data))
if err != nil {
return nil, err
}
y = s
parsed, err := yaml.ParseString(y)
if err != nil {
return nil, err
}
metadata.Sys.Arch = parsed.Platform
if metadata.Sys.Arch == "" {
metadata.Sys.Arch = "linux/amd64"
if len(axes) == 0 {
axes = append(axes, matrix.Axis{})
}
lerr := linter.New(
linter.WithTrusted(b.Repo.IsTrusted),
).Lint(parsed)
if lerr != nil {
return nil, lerr
}
for i, axis := range axes {
proc := &model.Proc{
BuildID: b.Curr.ID,
PID: j + i + 1,
PGID: j + i + 1,
State: model.StatusPending,
Environ: axis,
Name: sanitizePath(y.Name),
}
b.Curr.Procs = append(b.Curr.Procs, proc)
var registries []compiler.Registry
for _, reg := range b.Regs {
registries = append(registries, compiler.Registry{
Hostname: reg.Address,
Username: reg.Username,
Password: reg.Password,
Email: reg.Email,
})
}
metadata := metadataFromStruct(b.Repo, b.Curr, b.Last, proc, b.Link)
environ := b.environmentVariables(metadata, axis)
ir := compiler.New(
compiler.WithEnviron(environ),
compiler.WithEnviron(b.Envs),
compiler.WithEscalated(Config.Pipeline.Privileged...),
compiler.WithResourceLimit(Config.Pipeline.Limits.MemSwapLimit, Config.Pipeline.Limits.MemLimit, Config.Pipeline.Limits.ShmSize, Config.Pipeline.Limits.CPUQuota, Config.Pipeline.Limits.CPUShares, Config.Pipeline.Limits.CPUSet),
compiler.WithVolumes(Config.Pipeline.Volumes...),
compiler.WithNetworks(Config.Pipeline.Networks...),
compiler.WithLocal(false),
compiler.WithOption(
compiler.WithNetrc(
b.Netrc.Login,
b.Netrc.Password,
b.Netrc.Machine,
),
b.Repo.IsPrivate,
),
compiler.WithRegistry(registries...),
compiler.WithSecret(secrets...),
compiler.WithPrefix(
fmt.Sprintf(
"%d_%d",
proc.ID,
rand.Int(),
),
),
compiler.WithEnviron(proc.Environ),
compiler.WithProxy(),
compiler.WithWorkspaceFromURL("/drone", b.Repo.Link),
compiler.WithMetadata(metadata),
).Compile(parsed)
// substitute vars
substituted, err := b.envsubst_(string(y.Data), environ)
if err != nil {
return nil, err
}
// for _, sec := range b.Secs {
// if !sec.MatchEvent(b.Curr.Event) {
// continue
// }
// if b.Curr.Verified || sec.SkipVerify {
// ir.Secrets = append(ir.Secrets, &backend.Secret{
// Mask: sec.Conceal,
// Name: sec.Name,
// Value: sec.Value,
// })
// }
// }
// parse yaml pipeline
parsed, err := yaml.ParseString(substituted)
if err != nil {
return nil, err
}
item := &buildItem{
Proc: proc,
Config: ir,
Labels: parsed.Labels,
Platform: metadata.Sys.Arch,
// lint pipeline
lerr := linter.New(
linter.WithTrusted(b.Repo.IsTrusted),
).Lint(parsed)
if lerr != nil {
return nil, lerr
}
if !parsed.Branches.Match(b.Curr.Branch) {
proc.State = model.StatusSkipped
}
metadata.SetPlatform(parsed.Platform)
ir := b.toInternalRepresentation(parsed, environ, metadata, proc.ID)
item := &buildItem{
Proc: proc,
Config: ir,
Labels: parsed.Labels,
DependsOn: parsed.DependsOn,
RunsOn: parsed.RunsOn,
Platform: metadata.Sys.Arch,
}
if item.Labels == nil {
item.Labels = map[string]string{}
}
items = append(items, item)
}
if item.Labels == nil {
item.Labels = map[string]string{}
}
items = append(items, item)
}
setBuildSteps(b.Curr, items)
return items, nil
}
func (b *procBuilder) envsubst_(y string, environ map[string]string) (string, error) {
return envsubst.Eval(y, func(name string) string {
env := environ[name]
if strings.Contains(env, "\n") {
env = fmt.Sprintf("%q", env)
}
return env
})
}
func (b *procBuilder) environmentVariables(metadata frontend.Metadata, axis matrix.Axis) map[string]string {
environ := metadata.Environ()
for k, v := range metadata.EnvironDrone() {
environ[k] = v
}
for k, v := range axis {
environ[k] = v
}
return environ
}
func (b *procBuilder) toInternalRepresentation(parsed *yaml.Config, environ map[string]string, metadata frontend.Metadata, procID int64) *backend.Config {
var secrets []compiler.Secret
for _, sec := range b.Secs {
if !sec.Match(b.Curr.Event) {
continue
}
secrets = append(secrets, compiler.Secret{
Name: sec.Name,
Value: sec.Value,
Match: sec.Images,
})
}
var registries []compiler.Registry
for _, reg := range b.Regs {
registries = append(registries, compiler.Registry{
Hostname: reg.Address,
Username: reg.Username,
Password: reg.Password,
Email: reg.Email,
})
}
return compiler.New(
compiler.WithEnviron(environ),
compiler.WithEnviron(b.Envs),
compiler.WithEscalated(Config.Pipeline.Privileged...),
compiler.WithResourceLimit(Config.Pipeline.Limits.MemSwapLimit, Config.Pipeline.Limits.MemLimit, Config.Pipeline.Limits.ShmSize, Config.Pipeline.Limits.CPUQuota, Config.Pipeline.Limits.CPUShares, Config.Pipeline.Limits.CPUSet),
compiler.WithVolumes(Config.Pipeline.Volumes...),
compiler.WithNetworks(Config.Pipeline.Networks...),
compiler.WithLocal(false),
compiler.WithOption(
compiler.WithNetrc(
b.Netrc.Login,
b.Netrc.Password,
b.Netrc.Machine,
),
b.Repo.IsPrivate,
),
compiler.WithRegistry(registries...),
compiler.WithSecret(secrets...),
compiler.WithPrefix(
fmt.Sprintf(
"%d_%d",
procID,
rand.Int(),
),
),
compiler.WithProxy(),
compiler.WithWorkspaceFromURL("/drone", b.Repo.Link),
compiler.WithMetadata(metadata),
).Compile(parsed)
}
func setBuildSteps(build *model.Build, buildItems []*buildItem) {
pcounter := len(buildItems)
for _, item := range buildItems {
for _, stage := range item.Config.Stages {
var gid int
for _, step := range stage.Steps {
pcounter++
if gid == 0 {
gid = pcounter
}
proc := &model.Proc{
BuildID: build.ID,
Name: step.Alias,
PID: pcounter,
PPID: item.Proc.PID,
PGID: gid,
State: model.StatusPending,
}
if item.Proc.State == model.StatusSkipped {
proc.State = model.StatusSkipped
}
build.Procs = append(build.Procs, proc)
}
}
}
}
// return the metadata from the cli context.
func metadataFromStruct(repo *model.Repo, build, last *model.Build, proc *model.Proc, link string) frontend.Metadata {
host := link
@ -261,3 +305,10 @@ func metadataFromStruct(repo *model.Repo, build, last *model.Build, proc *model.
},
}
}
func sanitizePath(path string) string {
path = strings.TrimSuffix(path, ".yml")
path = strings.TrimPrefix(path, ".drone/")
path = strings.TrimPrefix(path, ".")
return path
}
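
A minimal, hypothetical test (not part of this diff) sketching the trimming behaviour of sanitizePath above; the case table is illustrative only.

package server

import "testing"

// Hypothetical example: expected output of sanitizePath for common config paths.
func TestSanitizePathExamples(t *testing.T) {
	cases := map[string]string{
		".drone.yml":        "drone",
		".drone/deploy.yml": "deploy",
		"build.yml":         "build",
	}
	for in, want := range cases {
		if got := sanitizePath(in); got != want {
			t.Errorf("sanitizePath(%q) = %q, want %q", in, got, want)
		}
	}
}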

server/procBuilder_test.go (new file)

@ -0,0 +1,206 @@
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"testing"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote"
)
func TestMultilineEnvsubst(t *testing.T) {
b := procBuilder{
Repo: &model.Repo{},
Curr: &model.Build{
Message: `aaa
bbb`,
},
Last: &model.Build{},
Netrc: &model.Netrc{},
Secs: []*model.Secret{},
Regs: []*model.Registry{},
Link: "",
Yamls: []*remote.FileMeta{
&remote.FileMeta{Data: []byte(`
pipeline:
xxx:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
`)},
&remote.FileMeta{Data: []byte(`
pipeline:
build:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
`)},
}}
if buildItems, err := b.Build(); err != nil {
t.Fatal(err)
} else {
fmt.Println(buildItems)
}
}
func TestMultiPipeline(t *testing.T) {
b := procBuilder{
Repo: &model.Repo{},
Curr: &model.Build{},
Last: &model.Build{},
Netrc: &model.Netrc{},
Secs: []*model.Secret{},
Regs: []*model.Registry{},
Link: "",
Yamls: []*remote.FileMeta{
&remote.FileMeta{Data: []byte(`
pipeline:
xxx:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
`)},
&remote.FileMeta{Data: []byte(`
pipeline:
build:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
`)},
},
}
buildItems, err := b.Build()
if err != nil {
t.Fatal(err)
}
if len(buildItems) != 2 {
t.Fatal("Should have generated 2 buildItems")
}
}
func TestDependsOn(t *testing.T) {
b := procBuilder{
Repo: &model.Repo{},
Curr: &model.Build{},
Last: &model.Build{},
Netrc: &model.Netrc{},
Secs: []*model.Secret{},
Regs: []*model.Registry{},
Link: "",
Yamls: []*remote.FileMeta{
&remote.FileMeta{Data: []byte(`
pipeline:
deploy:
image: scratch
depends_on:
- lint
- test
- build
`)},
},
}
buildItems, err := b.Build()
if err != nil {
t.Fatal(err)
}
if len(buildItems[0].DependsOn) != 3 {
t.Fatal("Should have 3 dependencies")
}
if buildItems[0].DependsOn[1] != "test" {
t.Fatal("Should depend on test")
}
}
func TestRunsOn(t *testing.T) {
b := procBuilder{
Repo: &model.Repo{},
Curr: &model.Build{},
Last: &model.Build{},
Netrc: &model.Netrc{},
Secs: []*model.Secret{},
Regs: []*model.Registry{},
Link: "",
Yamls: []*remote.FileMeta{
&remote.FileMeta{Data: []byte(`
pipeline:
deploy:
image: scratch
runs_on:
- success
- failure
`)},
},
}
buildItems, err := b.Build()
if err != nil {
t.Fatal(err)
}
if len(buildItems[0].RunsOn) != 2 {
t.Fatal("Should run on success and failure")
}
if buildItems[0].RunsOn[1] != "failure" {
t.Fatal("Should run on failure")
}
}
func TestBranchFilter(t *testing.T) {
b := procBuilder{
Repo: &model.Repo{},
Curr: &model.Build{Branch: "dev"},
Last: &model.Build{},
Netrc: &model.Netrc{},
Secs: []*model.Secret{},
Regs: []*model.Registry{},
Link: "",
Yamls: []*remote.FileMeta{
&remote.FileMeta{Data: []byte(`
pipeline:
xxx:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
branches: master
`)},
&remote.FileMeta{Data: []byte(`
pipeline:
build:
image: scratch
yyy: ${DRONE_COMMIT_MESSAGE}
`)},
},
}
buildItems, err := b.Build()
if err != nil {
t.Fatal(err)
}
if len(buildItems) != 2 {
t.Fatal("Should have generated 2 buildItems")
}
if buildItems[0].Proc.State != model.StatusSkipped {
t.Fatal("Should not run on dev branch")
}
for _, child := range buildItems[0].Proc.Children {
if child.State != model.StatusSkipped {
t.Fatal("Children should skipped status too")
}
}
if buildItems[1].Proc.State != model.StatusPending {
t.Fatal("Should not run on dev branch")
}
}


@ -150,6 +150,9 @@ func PatchRepo(c *gin.Context) {
if in.BuildCounter != nil {
repo.Counter = *in.BuildCounter
}
if in.Fallback != nil {
repo.Fallback = *in.Fallback
}
err := store.UpdateRepo(c, repo)
if err != nil {


@ -33,6 +33,8 @@ import (
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/rpc/proto"
"github.com/laszlocph/drone-oss-08/cncd/pubsub"
"github.com/laszlocph/drone-oss-08/cncd/queue"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/laszlocph/drone-oss-08/model"
"github.com/laszlocph/drone-oss-08/remote"
@ -41,11 +43,6 @@ import (
"github.com/drone/expr"
)
// This file is a complete disaster because I'm trying to wedge in some
// experimental code. Please pardon our appearance during renovations.
// Config is an evil global configuration that will be used as we transition /
// refactor the codebase to move away from storing these values in the Context.
var Config = struct {
Services struct {
Pubsub pubsub.Publisher
@ -91,12 +88,14 @@ var Config = struct {
}{}
type RPC struct {
remote remote.Remote
queue queue.Queue
pubsub pubsub.Publisher
logger logging.Log
store store.Store
host string
remote remote.Remote
queue queue.Queue
pubsub pubsub.Publisher
logger logging.Log
store store.Store
host string
buildTime *prometheus.GaugeVec
buildCount *prometheus.CounterVec
}
// Next implements the rpc.Next function
@ -113,26 +112,22 @@ func (s *RPC) Next(c context.Context, filter rpc.Filter) (*rpc.Pipeline, error)
if err != nil {
return nil, err
}
task, err := s.queue.Poll(c, fn)
if err != nil {
return nil, err
} else if task == nil {
return nil, nil
for {
task, err := s.queue.Poll(c, fn)
if err != nil {
return nil, err
} else if task == nil {
return nil, nil
}
if task.ShouldRun() {
pipeline := new(rpc.Pipeline)
err = json.Unmarshal(task.Data, pipeline)
return pipeline, err
} else {
s.Done(c, task.ID, rpc.State{})
}
}
pipeline := new(rpc.Pipeline)
// check if the process was previously cancelled
// cancelled, _ := s.checkCancelled(pipeline)
// if cancelled {
// logrus.Debugf("ignore pid %v: cancelled by user", pipeline.ID)
// if derr := s.queue.Done(c, pipeline.ID); derr != nil {
// logrus.Errorf("error: done: cannot ack proc_id %v: %s", pipeline.ID, err)
// }
// return nil, nil
// }
err = json.Unmarshal(task.Data, pipeline)
return pipeline, err
}
// Wait implements the rpc.Wait function
@ -383,76 +378,147 @@ func (s *RPC) Done(c context.Context, id string, state rpc.State) error {
return err
}
proc.Stopped = state.Finished
proc.Error = state.Error
proc.ExitCode = state.ExitCode
proc.State = model.StatusSuccess
if proc.ExitCode != 0 || proc.Error != "" {
proc.State = model.StatusFailure
}
if err := s.store.ProcUpdate(proc); err != nil {
log.Printf("error: done: cannot update proc_id %d state: %s", procID, err)
}
s.updateProcState(proc, state)
if err := s.queue.Done(c, id); err != nil {
var queueErr error
if proc.Failing() {
queueErr = s.queue.Error(c, id, fmt.Errorf("Proc finished with exitcode %d, %s", state.ExitCode, state.Error))
} else {
queueErr = s.queue.Done(c, id)
}
if queueErr != nil {
log.Printf("error: done: cannot ack proc_id %d: %s", procID, err)
}
// TODO handle this error
procs, _ := s.store.ProcList(build)
for _, p := range procs {
if p.Running() && p.PPID == proc.PID {
p.State = model.StatusSkipped
if p.Started != 0 {
p.State = model.StatusSuccess // for daemons that are killed
p.Stopped = proc.Stopped
}
if err := s.store.ProcUpdate(p); err != nil {
log.Printf("error: done: cannot update proc_id %d child state: %s", p.ID, err)
}
}
}
s.completeChildrenIfParentCompleted(procs, proc)
running := false
status := model.StatusSuccess
for _, p := range procs {
if p.PPID == 0 {
if p.Running() {
running = true
}
if p.Failing() {
status = p.State
}
}
}
if !running {
build.Status = status
if !isThereRunningStage(procs) {
build.Status = buildStatus(procs)
build.Finished = proc.Stopped
if err := s.store.UpdateBuild(build); err != nil {
log.Printf("error: done: cannot update build_id %d final state: %s", build.ID, err)
}
// update the status
user, err := s.store.GetUser(repo.UserID)
if err == nil {
if refresher, ok := s.remote.(remote.Refresher); ok {
ok, _ := refresher.Refresh(user)
if ok {
s.store.UpdateUser(user)
}
}
uri := fmt.Sprintf("%s/%s/%d", s.host, repo.FullName, build.Number)
err = s.remote.Status(user, repo, build, uri)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
if !isMultiPipeline(procs) {
s.updateRemoteStatus(repo, build, nil)
}
}
if isMultiPipeline(procs) {
s.updateRemoteStatus(repo, build, proc)
}
if err := s.logger.Close(c, id); err != nil {
log.Printf("error: done: cannot close build_id %d logger: %s", proc.ID, err)
}
s.notify(c, repo, build, procs)
if build.Status == model.StatusSuccess || build.Status == model.StatusFailure {
s.buildCount.WithLabelValues(repo.FullName, build.Branch, build.Status, "total").Inc()
s.buildTime.WithLabelValues(repo.FullName, build.Branch, build.Status, "total").Set(float64(build.Finished - build.Started))
}
if isMultiPipeline(procs) {
s.buildTime.WithLabelValues(repo.FullName, build.Branch, proc.State, proc.Name).Set(float64(proc.Stopped - proc.Started))
}
return nil
}
func isMultiPipeline(procs []*model.Proc) bool {
countPPIDZero := 0
for _, proc := range procs {
if proc.PPID == 0 {
countPPIDZero++
}
}
return countPPIDZero > 1
}
// Log implements the rpc.Log function
func (s *RPC) Log(c context.Context, id string, line *rpc.Line) error {
entry := new(logging.Entry)
entry.Data, _ = json.Marshal(line)
s.logger.Write(c, id, entry)
return nil
}
func (s *RPC) updateProcState(proc *model.Proc, state rpc.State) {
proc.Stopped = state.Finished
proc.Error = state.Error
proc.ExitCode = state.ExitCode
if state.Started == 0 {
proc.State = model.StatusSkipped
} else {
proc.State = model.StatusSuccess
}
if proc.ExitCode != 0 || proc.Error != "" {
proc.State = model.StatusFailure
}
if err := s.store.ProcUpdate(proc); err != nil {
log.Printf("error: done: cannot update proc_id %d state: %s", proc.ID, err)
}
}
func (s *RPC) completeChildrenIfParentCompleted(procs []*model.Proc, completedProc *model.Proc) {
for _, p := range procs {
if p.Running() && p.PPID == completedProc.PID {
p.State = model.StatusSkipped
if p.Started != 0 {
p.State = model.StatusSuccess // for daemons that are killed
p.Stopped = completedProc.Stopped
}
if err := s.store.ProcUpdate(p); err != nil {
log.Printf("error: done: cannot update proc_id %d child state: %s", p.ID, err)
}
}
}
}
func isThereRunningStage(procs []*model.Proc) bool {
for _, p := range procs {
if p.PPID == 0 {
if p.Running() {
return true
}
}
}
return false
}
func buildStatus(procs []*model.Proc) string {
status := model.StatusSuccess
for _, p := range procs {
if p.PPID == 0 {
if p.Failing() {
status = p.State
}
}
}
return status
}
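A hypothetical example (not part of this diff) of how isThereRunningStage and buildStatus aggregate only the top-level procs (PPID == 0); the proc values are made up for illustration.

package server

import (
	"testing"

	"github.com/laszlocph/drone-oss-08/model"
)

// Hypothetical example: one failed and one successful top-level pipeline means
// the build is finished and its aggregated status is the failing one.
func TestBuildStatusAggregation(t *testing.T) {
	procs := []*model.Proc{
		{PID: 1, PPID: 0, State: model.StatusFailure},
		{PID: 2, PPID: 0, State: model.StatusSuccess},
		{PID: 3, PPID: 1, State: model.StatusRunning}, // child step, ignored by both helpers
	}
	if isThereRunningStage(procs) {
		t.Error("expected no running top-level stage")
	}
	if got := buildStatus(procs); got != model.StatusFailure {
		t.Errorf("expected %s, got %s", model.StatusFailure, got)
	}
}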
func (s *RPC) updateRemoteStatus(repo *model.Repo, build *model.Build, proc *model.Proc) {
user, err := s.store.GetUser(repo.UserID)
if err == nil {
if refresher, ok := s.remote.(remote.Refresher); ok {
ok, _ := refresher.Refresh(user)
if ok {
s.store.UpdateUser(user)
}
}
uri := fmt.Sprintf("%s/%s/%d", s.host, repo.FullName, build.Number)
err = s.remote.Status(user, repo, build, uri, proc)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d: %v", repo.FullName, build.Number, err)
}
}
}
func (s *RPC) notify(c context.Context, repo *model.Repo, build *model.Build, procs []*model.Proc) {
build.Procs = model.Tree(procs)
message := pubsub.Message{
Labels: map[string]string{
@ -465,31 +531,6 @@ func (s *RPC) Done(c context.Context, id string, state rpc.State) error {
Build: *build,
})
s.pubsub.Publish(c, "topic/events", message)
return nil
}
// Log implements the rpc.Log function
func (s *RPC) Log(c context.Context, id string, line *rpc.Line) error {
entry := new(logging.Entry)
entry.Data, _ = json.Marshal(line)
s.logger.Write(c, id, entry)
return nil
}
func (s *RPC) checkCancelled(pipeline *rpc.Pipeline) (bool, error) {
pid, err := strconv.ParseInt(pipeline.ID, 10, 64)
if err != nil {
return false, err
}
proc, err := s.store.ProcLoad(pid)
if err != nil {
return false, err
}
if proc.State == model.StatusKilled {
return true, nil
}
return false, err
}
func createFilterFunc(filter rpc.Filter) (queue.Filter, error) {
@ -524,30 +565,41 @@ func createFilterFunc(filter rpc.Filter) (queue.Filter, error) {
// DroneServer is a grpc server implementation.
type DroneServer struct {
Remote remote.Remote
Queue queue.Queue
Pubsub pubsub.Publisher
Logger logging.Log
Store store.Store
Host string
peer RPC
}
func NewDroneServer(remote remote.Remote, queue queue.Queue, logger logging.Log, pubsub pubsub.Publisher, store store.Store, host string) *DroneServer {
buildTime := promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "drone",
Name: "build_time",
Help: "Build time.",
}, []string{"repo", "branch", "status", "pipeline"})
buildCount := promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "drone",
Name: "build_count",
Help: "Build count.",
}, []string{"repo", "branch", "status", "pipeline"})
peer := RPC{
remote: remote,
store: store,
queue: queue,
pubsub: pubsub,
logger: logger,
host: host,
buildTime: buildTime,
buildCount: buildCount,
}
return &DroneServer{peer: peer}
}
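The two promauto vectors above register themselves on the default Prometheus registry, so the resulting drone_build_time and drone_build_count series are served by the stock handler. Below is a minimal, hypothetical sketch of exposing them for scraping; the /metrics route and :9000 address are assumptions, not the server's actual wiring.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serves every metric registered on the default registry, including the
	// promauto-created build gauge/counter above (route and port are illustrative).
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9000", nil))
}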
func (s *DroneServer) Next(c oldcontext.Context, req *proto.NextRequest) (*proto.NextReply, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
filter := rpc.Filter{
Labels: req.GetFilter().GetLabels(),
Expr: req.GetFilter().GetExpr(),
}
res := new(proto.NextReply)
pipeline, err := peer.Next(c, filter)
pipeline, err := s.peer.Next(c, filter)
if err != nil {
return res, err
}
@ -561,53 +613,9 @@ func (s *DroneServer) Next(c oldcontext.Context, req *proto.NextRequest) (*proto
res.Pipeline.Payload, _ = json.Marshal(pipeline.Config)
return res, err
// fn := func(task *queue.Task) bool {
// for k, v := range req.GetFilter().Labels {
// if task.Labels[k] != v {
// return false
// }
// }
// return true
// }
// task, err := s.Queue.Poll(c, fn)
// if err != nil {
// return nil, err
// } else if task == nil {
// return nil, nil
// }
//
// pipeline := new(rpc.Pipeline)
// json.Unmarshal(task.Data, pipeline)
//
// res := new(proto.NextReply)
// res.Pipeline = new(proto.Pipeline)
// res.Pipeline.Id = pipeline.ID
// res.Pipeline.Timeout = pipeline.Timeout
// res.Pipeline.Payload, _ = json.Marshal(pipeline.Config)
//
// // check if the process was previously cancelled
// // cancelled, _ := s.checkCancelled(pipeline)
// // if cancelled {
// // logrus.Debugf("ignore pid %v: cancelled by user", pipeline.ID)
// // if derr := s.queue.Done(c, pipeline.ID); derr != nil {
// // logrus.Errorf("error: done: cannot ack proc_id %v: %s", pipeline.ID, err)
// // }
// // return nil, nil
// // }
//
// return res, nil
}
func (s *DroneServer) Init(c oldcontext.Context, req *proto.InitRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
state := rpc.State{
Error: req.GetState().GetError(),
ExitCode: int(req.GetState().GetExitCode()),
@ -617,19 +625,11 @@ func (s *DroneServer) Init(c oldcontext.Context, req *proto.InitRequest) (*proto
Exited: req.GetState().GetExited(),
}
res := new(proto.Empty)
err := peer.Init(c, req.GetId(), state)
err := s.peer.Init(c, req.GetId(), state)
return res, err
}
func (s *DroneServer) Update(c oldcontext.Context, req *proto.UpdateRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
state := rpc.State{
Error: req.GetState().GetError(),
ExitCode: int(req.GetState().GetExitCode()),
@ -639,19 +639,11 @@ func (s *DroneServer) Update(c oldcontext.Context, req *proto.UpdateRequest) (*p
Exited: req.GetState().GetExited(),
}
res := new(proto.Empty)
err := peer.Update(c, req.GetId(), state)
err := s.peer.Update(c, req.GetId(), state)
return res, err
}
func (s *DroneServer) Upload(c oldcontext.Context, req *proto.UploadRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
file := &rpc.File{
Data: req.GetFile().GetData(),
Mime: req.GetFile().GetMime(),
@ -663,19 +655,11 @@ func (s *DroneServer) Upload(c oldcontext.Context, req *proto.UploadRequest) (*p
}
res := new(proto.Empty)
err := peer.Upload(c, req.GetId(), file)
err := s.peer.Upload(c, req.GetId(), file)
return res, err
}
func (s *DroneServer) Done(c oldcontext.Context, req *proto.DoneRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
state := rpc.State{
Error: req.GetState().GetError(),
ExitCode: int(req.GetState().GetExitCode()),
@ -685,47 +669,23 @@ func (s *DroneServer) Done(c oldcontext.Context, req *proto.DoneRequest) (*proto
Exited: req.GetState().GetExited(),
}
res := new(proto.Empty)
err := peer.Done(c, req.GetId(), state)
err := s.peer.Done(c, req.GetId(), state)
return res, err
}
func (s *DroneServer) Wait(c oldcontext.Context, req *proto.WaitRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
res := new(proto.Empty)
err := peer.Wait(c, req.GetId())
err := s.peer.Wait(c, req.GetId())
return res, err
}
func (s *DroneServer) Extend(c oldcontext.Context, req *proto.ExtendRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
res := new(proto.Empty)
err := peer.Extend(c, req.GetId())
err := s.peer.Extend(c, req.GetId())
return res, err
}
func (s *DroneServer) Log(c oldcontext.Context, req *proto.LogRequest) (*proto.Empty, error) {
peer := RPC{
remote: s.Remote,
store: s.Store,
queue: s.Queue,
pubsub: s.Pubsub,
logger: s.Logger,
host: s.Host,
}
line := &rpc.Line{
Out: req.GetLine().GetOut(),
Pos: int(req.GetLine().GetPos()),
@ -733,6 +693,6 @@ func (s *DroneServer) Log(c oldcontext.Context, req *proto.LogRequest) (*proto.E
Proc: req.GetLine().GetProc(),
}
res := new(proto.Empty)
err := peer.Log(c, req.GetId(), line)
err := s.peer.Log(c, req.GetId(), line)
return res, err
}


@ -77,7 +77,6 @@ func EventStreamSSE(c *gin.Context) {
}()
go func() {
// TODO remove this from global config
Config.Services.Pubsub.Subscribe(ctx, "topic/events", func(m pubsub.Message) {
defer func() {
recover() // fix #2480


@ -18,8 +18,8 @@ import (
"fmt"
"testing"
"github.com/laszlocph/drone-oss-08/model"
"github.com/franela/goblin"
"github.com/laszlocph/drone-oss-08/model"
)
func TestBuilds(t *testing.T) {


@ -22,17 +22,17 @@ import (
"github.com/russross/meddler"
)
func (db *datastore) ConfigLoad(id int64) (*model.Config, error) {
func (db *datastore) ConfigsForBuild(buildID int64) ([]*model.Config, error) {
stmt := sql.Lookup(db.driver, "config-find-id")
conf := new(model.Config)
err := meddler.QueryRow(db, conf, stmt, id)
return conf, err
var configs = []*model.Config{}
err := meddler.QueryAll(db, &configs, stmt, buildID)
return configs, err
}
func (db *datastore) ConfigFind(repo *model.Repo, hash string) (*model.Config, error) {
func (db *datastore) ConfigFindIdentical(repoID int64, hash string) (*model.Config, error) {
stmt := sql.Lookup(db.driver, "config-find-repo-hash")
conf := new(model.Config)
err := meddler.QueryRow(db, conf, stmt, repo.ID, hash)
err := meddler.QueryRow(db, conf, stmt, repoID, hash)
return conf, err
}
@ -51,3 +51,7 @@ func (db *datastore) ConfigFindApproved(config *model.Config) (bool, error) {
func (db *datastore) ConfigCreate(config *model.Config) error {
return meddler.Insert(db, "config", config)
}
func (db *datastore) BuildConfigCreate(buildConfig *model.BuildConfig) error {
return meddler.Insert(db, "build_config", buildConfig)
}


@ -23,6 +23,9 @@ import (
func TestConfig(t *testing.T) {
s := newTest()
defer func() {
s.Exec("delete from repos")
s.Exec("delete from builds")
s.Exec("delete from procs")
s.Exec("delete from config")
s.Close()
}()
@ -32,18 +35,49 @@ func TestConfig(t *testing.T) {
hash = "8d8647c9aa90d893bfb79dddbe901f03e258588121e5202632f8ae5738590b26"
)
if err := s.ConfigCreate(
&model.Config{
RepoID: 2,
Data: data,
Hash: hash,
},
); err != nil {
repo := &model.Repo{
UserID: 1,
FullName: "bradrydzewski/drone",
Owner: "bradrydzewski",
Name: "drone",
}
if err := s.CreateRepo(repo); err != nil {
t.Errorf("Unexpected error: insert repo: %s", err)
return
}
config := &model.Config{
RepoID: repo.ID,
Data: data,
Hash: hash,
Name: "default",
}
if err := s.ConfigCreate(config); err != nil {
t.Errorf("Unexpected error: insert config: %s", err)
return
}
config, err := s.ConfigFind(&model.Repo{ID: 2}, hash)
build := &model.Build{
RepoID: repo.ID,
Status: model.StatusRunning,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
}
if err := s.CreateBuild(build); err != nil {
t.Errorf("Unexpected error: insert build: %s", err)
return
}
if err := s.BuildConfigCreate(
&model.BuildConfig{
ConfigID: config.ID,
BuildID: build.ID,
},
); err != nil {
t.Errorf("Unexpected error: insert build config: %s", err)
return
}
config, err := s.ConfigFindIdentical(repo.ID, hash)
if err != nil {
t.Error(err)
return
@ -51,7 +85,7 @@ func TestConfig(t *testing.T) {
if got, want := config.ID, int64(1); got != want {
t.Errorf("Want config id %d, got %d", want, got)
}
if got, want := config.RepoID, int64(2); got != want {
if got, want := config.RepoID, repo.ID; got != want {
t.Errorf("Want config repo id %d, got %d", want, got)
}
if got, want := config.Data, data; got != want {
@ -60,13 +94,16 @@ func TestConfig(t *testing.T) {
if got, want := config.Hash, hash; got != want {
t.Errorf("Want config hash %s, got %s", want, got)
}
if got, want := config.Name, "default"; got != want {
t.Errorf("Want config name %s, got %s", want, got)
}
loaded, err := s.ConfigLoad(config.ID)
loaded, err := s.ConfigsForBuild(build.ID)
if err != nil {
t.Errorf("Want config by id, got error %q", err)
return
}
if got, want := loaded.ID, config.ID; got != want {
if got, want := loaded[0].ID, config.ID; got != want {
t.Errorf("Want config by id %d, got %d", want, got)
}
}
@ -74,9 +111,10 @@ func TestConfig(t *testing.T) {
func TestConfigApproved(t *testing.T) {
s := newTest()
defer func() {
s.Exec("delete from config")
s.Exec("delete from builds")
s.Exec("delete from repos")
s.Exec("delete from builds")
s.Exec("delete from procs")
s.Exec("delete from config")
s.Close()
}()
@ -86,49 +124,83 @@ func TestConfigApproved(t *testing.T) {
Owner: "bradrydzewski",
Name: "drone",
}
s.CreateRepo(repo)
if err := s.CreateRepo(repo); err != nil {
t.Errorf("Unexpected error: insert repo: %s", err)
return
}
var (
data = "pipeline: [ { image: golang, commands: [ go build, go test ] } ]"
hash = "8d8647c9aa90d893bfb79dddbe901f03e258588121e5202632f8ae5738590b26"
conf = &model.Config{
data = "pipeline: [ { image: golang, commands: [ go build, go test ] } ]"
hash = "8d8647c9aa90d893bfb79dddbe901f03e258588121e5202632f8ae5738590b26"
buildBlocked = &model.Build{
RepoID: repo.ID,
Data: data,
Hash: hash,
Status: model.StatusBlocked,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
}
buildPending = &model.Build{
RepoID: repo.ID,
Status: model.StatusPending,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
}
buildRunning = &model.Build{
RepoID: repo.ID,
Status: model.StatusRunning,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
}
)
if err := s.CreateBuild(buildBlocked); err != nil {
t.Errorf("Unexpected error: insert build: %s", err)
return
}
if err := s.CreateBuild(buildPending); err != nil {
t.Errorf("Unexpected error: insert build: %s", err)
return
}
conf := &model.Config{
RepoID: repo.ID,
Data: data,
Hash: hash,
}
if err := s.ConfigCreate(conf); err != nil {
t.Errorf("Unexpected error: insert config: %s", err)
return
}
s.CreateBuild(&model.Build{
RepoID: repo.ID,
buildConfig := &model.BuildConfig{
ConfigID: conf.ID,
Status: model.StatusBlocked,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
})
s.CreateBuild(&model.Build{
RepoID: repo.ID,
ConfigID: conf.ID,
Status: model.StatusPending,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
})
if ok, _ := s.ConfigFindApproved(conf); ok == true {
t.Errorf("Want config not approved, when blocked or pending")
BuildID: buildBlocked.ID,
}
if err := s.BuildConfigCreate(buildConfig); err != nil {
t.Errorf("Unexpected error: insert build_config: %s", err)
return
}
s.CreateBuild(&model.Build{
RepoID: repo.ID,
ConfigID: conf.ID,
Status: model.StatusRunning,
Commit: "85f8c029b902ed9400bc600bac301a0aadb144ac",
})
if approved, err := s.ConfigFindApproved(conf); approved != false || err != nil {
t.Errorf("Want config not approved, when blocked or pending. %v", err)
return
}
if ok, _ := s.ConfigFindApproved(conf); ok == false {
t.Errorf("Want config approved, when running.")
s.CreateBuild(buildRunning)
conf2 := &model.Config{
RepoID: repo.ID,
Data: data,
Hash: "xxx",
}
if err := s.ConfigCreate(conf2); err != nil {
t.Errorf("Unexpected error: insert config: %s", err)
return
}
buildConfig2 := &model.BuildConfig{
ConfigID: conf2.ID,
BuildID: buildRunning.ID,
}
if err := s.BuildConfigCreate(buildConfig2); err != nil {
t.Errorf("Unexpected error: insert config: %s", err)
return
}
if approved, err := s.ConfigFindApproved(conf2); approved != true || err != nil {
t.Errorf("Want config approved, when running. %v", err)
return
}
}
@ -136,6 +208,9 @@ func TestConfigApproved(t *testing.T) {
func TestConfigIndexes(t *testing.T) {
s := newTest()
defer func() {
s.Exec("delete from repos")
s.Exec("delete from builds")
s.Exec("delete from procs")
s.Exec("delete from config")
s.Close()
}()


@ -1,17 +1,3 @@
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mysql
import (
@ -170,6 +156,38 @@ var migrations = []struct {
name: "alter-table-update-file-meta",
stmt: alterTableUpdateFileMeta,
},
{
name: "create-table-build-config",
stmt: createTableBuildConfig,
},
{
name: "alter-table-add-config-name",
stmt: alterTableAddConfigName,
},
{
name: "update-table-set-config-name",
stmt: updateTableSetConfigName,
},
{
name: "populate-build-config",
stmt: populateBuildConfig,
},
{
name: "alter-table-add-task-dependencies",
stmt: alterTableAddTaskDependencies,
},
{
name: "alter-table-add-task-run-on",
stmt: alterTableAddTaskRunOn,
},
{
name: "alter-table-add-repo-fallback",
stmt: alterTableAddRepoFallback,
},
{
name: "update-table-set-repo-fallback",
stmt: updateTableSetRepoFallback,
},
}
// Migrate performs the database migration. If the migration fails
@ -636,3 +654,62 @@ UPDATE files SET
,file_meta_failed=0
,file_meta_skipped=0
`
//
// 019_create_table_build_config.sql
//
var createTableBuildConfig = `
CREATE TABLE IF NOT EXISTS build_config (
config_id INTEGER NOT NULL
,build_id INTEGER NOT NULL
,PRIMARY KEY (config_id, build_id)
,FOREIGN KEY (config_id) REFERENCES config (config_id)
,FOREIGN KEY (build_id) REFERENCES builds (build_id)
);
`
//
// 020_add_column_config_name.sql
//
var alterTableAddConfigName = `
ALTER TABLE config ADD COLUMN config_name TEXT
`
var updateTableSetConfigName = `
UPDATE config SET config_name = "drone"
`
//
// 021_populate_build_config.sql
//
var populateBuildConfig = `
INSERT INTO build_config (config_id, build_id)
SELECT build_config_id, build_id FROM builds
`
//
// 022_add_task_columns.sql
//
var alterTableAddTaskDependencies = `
ALTER TABLE tasks ADD COLUMN task_dependencies MEDIUMBLOB
`
var alterTableAddTaskRunOn = `
ALTER TABLE tasks ADD COLUMN task_run_on MEDIUMBLOB
`
//
// 023_add_repo_fallback_column.sql
//
var alterTableAddRepoFallback = `
ALTER TABLE repos ADD COLUMN repo_fallback BOOLEAN
`
var updateTableSetRepoFallback = `
UPDATE repos SET repo_fallback='false'
`


@ -0,0 +1,9 @@
-- name: create-table-build-config
CREATE TABLE IF NOT EXISTS build_config (
config_id INTEGER NOT NULL
,build_id INTEGER NOT NULL
,PRIMARY KEY (config_id, build_id)
,FOREIGN KEY (config_id) REFERENCES config (config_id)
,FOREIGN KEY (build_id) REFERENCES builds (build_id)
);


@ -0,0 +1,7 @@
-- name: alter-table-add-config-name
ALTER TABLE config ADD COLUMN config_name TEXT
-- name: update-table-set-config-name
UPDATE config SET config_name = "drone"


@ -0,0 +1,4 @@
-- name: populate-build-config
INSERT INTO build_config (config_id, build_id)
SELECT build_config_id, build_id FROM builds


@ -0,0 +1,6 @@
-- name: alter-table-add-task-dependencies
ALTER TABLE tasks ADD COLUMN task_dependencies MEDIUMBLOB
-- name: alter-table-add-task-run-on
ALTER TABLE tasks ADD COLUMN task_run_on MEDIUMBLOB


@ -0,0 +1,5 @@
-- name: alter-table-add-repo-fallback
ALTER TABLE repos ADD COLUMN repo_fallback BOOLEAN
-- name: update-table-set-repo-fallback
UPDATE repos SET repo_fallback='false'


@ -1,17 +1,3 @@
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package postgres
import (
@ -170,6 +156,38 @@ var migrations = []struct {
name: "alter-table-update-file-meta",
stmt: alterTableUpdateFileMeta,
},
{
name: "create-table-build-config",
stmt: createTableBuildConfig,
},
{
name: "alter-table-add-config-name",
stmt: alterTableAddConfigName,
},
{
name: "update-table-set-config-name",
stmt: updateTableSetConfigName,
},
{
name: "populate-build-config",
stmt: populateBuildConfig,
},
{
name: "alter-table-add-task-dependencies",
stmt: alterTableAddTaskDependencies,
},
{
name: "alter-table-add-task-run-on",
stmt: alterTableAddTaskRunOn,
},
{
name: "alter-table-add-repo-fallback",
stmt: alterTableAddRepoFallback,
},
{
name: "update-table-set-repo-fallback",
stmt: updateTableSetRepoFallback,
},
}
// Migrate performs the database migration. If the migration fails
@ -530,7 +548,7 @@ CREATE INDEX IF NOT EXISTS sender_repo_ix ON senders (sender_repo_id);
//
var alterTableAddRepoVisibility = `
ALTER TABLE repos ADD COLUMN repo_visibility VARCHAR(50)
ALTER TABLE repos ADD COLUMN repo_visibility VARCHAR(50);
`
var updateTableSetRepoVisibility = `
@ -538,7 +556,7 @@ UPDATE repos
SET repo_visibility = (CASE
WHEN repo_private = false THEN 'public'
ELSE 'private'
END)
END);
`
//
@ -554,12 +572,13 @@ UPDATE repos SET repo_counter = (
SELECT max(build_number)
FROM builds
WHERE builds.build_repo_id = repos.repo_id
)
);
`
var updateTableSetRepoSeqDefault = `
UPDATE repos SET repo_counter = 0
WHERE repo_counter IS NULL
;
`
//
@ -567,11 +586,11 @@ WHERE repo_counter IS NULL
//
var alterTableAddRepoActive = `
ALTER TABLE repos ADD COLUMN repo_active BOOLEAN
ALTER TABLE repos ADD COLUMN repo_active BOOLEAN;
`
var updateTableSetRepoActive = `
UPDATE repos SET repo_active = true
UPDATE repos SET repo_active = true;
`
//
@ -583,7 +602,7 @@ ALTER TABLE users ADD COLUMN user_synced INTEGER;
`
var updateTableSetUserSynced = `
UPDATE users SET user_synced = 0
UPDATE users SET user_synced = 0;
`
//
@ -615,19 +634,19 @@ CREATE INDEX IF NOT EXISTS ix_perms_user ON perms (perm_user_id);
//
var alterTableAddFilePid = `
ALTER TABLE files ADD COLUMN file_pid INTEGER
ALTER TABLE files ADD COLUMN file_pid INTEGER;
`
var alterTableAddFileMetaPassed = `
ALTER TABLE files ADD COLUMN file_meta_passed INTEGER
ALTER TABLE files ADD COLUMN file_meta_passed INTEGER;
`
var alterTableAddFileMetaFailed = `
ALTER TABLE files ADD COLUMN file_meta_failed INTEGER
ALTER TABLE files ADD COLUMN file_meta_failed INTEGER;
`
var alterTableAddFileMetaSkipped = `
ALTER TABLE files ADD COLUMN file_meta_skipped INTEGER
ALTER TABLE files ADD COLUMN file_meta_skipped INTEGER;
`
var alterTableUpdateFileMeta = `
@ -635,4 +654,64 @@ UPDATE files SET
file_meta_passed=0
,file_meta_failed=0
,file_meta_skipped=0
;
`
//
// 019_create_table_build_config.sql
//
var createTableBuildConfig = `
CREATE TABLE IF NOT EXISTS build_config (
config_id INTEGER NOT NULL
,build_id INTEGER NOT NULL
,PRIMARY KEY (config_id, build_id)
,FOREIGN KEY (config_id) REFERENCES config (config_id)
,FOREIGN KEY (build_id) REFERENCES builds (build_id)
);
`
//
// 020_add_column_config_name.sql
//
var alterTableAddConfigName = `
ALTER TABLE config ADD COLUMN config_name TEXT
`
var updateTableSetConfigName = `
UPDATE config SET config_name = 'drone'
`
//
// 021_populate_build_config.sql
//
var populateBuildConfig = `
INSERT INTO build_config (config_id, build_id)
SELECT build_config_id, build_id FROM builds
`
//
// 022_add_task_columns.sql
//
var alterTableAddTaskDependencies = `
ALTER TABLE tasks ADD COLUMN task_dependencies BYTEA
`
var alterTableAddTaskRunOn = `
ALTER TABLE tasks ADD COLUMN task_run_on BYTEA
`
//
// 023_add_repo_fallback_column.sql
//
var alterTableAddRepoFallback = `
ALTER TABLE repos ADD COLUMN repo_fallback BOOLEAN
`
var updateTableSetRepoFallback = `
UPDATE repos SET repo_fallback='false'
`


@ -0,0 +1,9 @@
-- name: create-table-build-config
CREATE TABLE IF NOT EXISTS build_config (
config_id INTEGER NOT NULL
,build_id INTEGER NOT NULL
,PRIMARY KEY (config_id, build_id)
,FOREIGN KEY (config_id) REFERENCES config (config_id)
,FOREIGN KEY (build_id) REFERENCES builds (build_id)
);


@ -0,0 +1,7 @@
-- name: alter-table-add-config-name
ALTER TABLE config ADD COLUMN config_name TEXT
-- name: update-table-set-config-name
UPDATE config SET config_name = 'drone'


@ -0,0 +1,4 @@
-- name: populate-build-config
INSERT INTO build_config (config_id, build_id)
SELECT build_config_id, build_id FROM builds


@ -0,0 +1,6 @@
-- name: alter-table-add-task-dependencies
ALTER TABLE tasks ADD COLUMN task_dependencies BYTEA
-- name: alter-table-add-task-run-on
ALTER TABLE tasks ADD COLUMN task_run_on BYTEA


@ -0,0 +1,5 @@
-- name: alter-table-add-repo-fallback
ALTER TABLE repos ADD COLUMN repo_fallback BOOLEAN
-- name: update-table-set-repo-fallback
UPDATE repos SET repo_fallback='false'


@ -1,17 +1,3 @@
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqlite
import (
@ -174,6 +160,38 @@ var migrations = []struct {
name: "alter-table-update-file-meta",
stmt: alterTableUpdateFileMeta,
},
{
name: "create-table-build-config",
stmt: createTableBuildConfig,
},
{
name: "alter-table-add-config-name",
stmt: alterTableAddConfigName,
},
{
name: "update-table-set-config-name",
stmt: updateTableSetConfigName,
},
{
name: "populate-build-config",
stmt: populateBuildConfig,
},
{
name: "alter-table-add-task-dependencies",
stmt: alterTableAddTaskDependencies,
},
{
name: "alter-table-add-task-run-on",
stmt: alterTableAddTaskRunOn,
},
{
name: "alter-table-add-repo-fallback",
stmt: alterTableAddRepoFallback,
},
{
name: "update-table-set-repo-fallback",
stmt: updateTableSetRepoFallback,
},
}
// Migrate performs the database migration. If the migration fails
@ -637,3 +655,62 @@ UPDATE files SET
,file_meta_failed=0
,file_meta_skipped=0
`
//
// 019_create_table_build_config.sql
//
var createTableBuildConfig = `
CREATE TABLE IF NOT EXISTS build_config (
config_id INTEGER NOT NULL
,build_id INTEGER NOT NULL
,PRIMARY KEY (config_id, build_id)
,FOREIGN KEY (config_id) REFERENCES config (config_id)
,FOREIGN KEY (build_id) REFERENCES builds (build_id)
);
`
//
// 020_add_column_config_name.sql
//
var alterTableAddConfigName = `
ALTER TABLE config ADD COLUMN config_name TEXT
`
var updateTableSetConfigName = `
UPDATE config SET config_name = "drone"
`
//
// 021_populate_build_config.sql
//
var populateBuildConfig = `
INSERT INTO build_config (config_id, build_id)
SELECT build_config_id, build_id FROM builds
`
//
// 022_add_task_columns.sql
//
var alterTableAddTaskDependencies = `
ALTER TABLE tasks ADD COLUMN task_dependencies BLOB
`
var alterTableAddTaskRunOn = `
ALTER TABLE tasks ADD COLUMN task_run_on BLOB
`
//
// 023_add_repo_fallback_column.sql
//
var alterTableAddRepoFallback = `
ALTER TABLE repos ADD COLUMN repo_fallback BOOLEAN
`
var updateTableSetRepoFallback = `
UPDATE repos SET repo_fallback='false'
`


@ -0,0 +1,9 @@
-- name: create-table-build-config
CREATE TABLE IF NOT EXISTS build_config (
config_id INTEGER NOT NULL
,build_id INTEGER NOT NULL
,PRIMARY KEY (config_id, build_id)
,FOREIGN KEY (config_id) REFERENCES config (config_id)
,FOREIGN KEY (build_id) REFERENCES builds (build_id)
);


@ -0,0 +1,7 @@
-- name: alter-table-add-config-name
ALTER TABLE config ADD COLUMN config_name TEXT
-- name: update-table-set-config-name
UPDATE config SET config_name = "drone"

Some files were not shown because too many files have changed in this diff.