
envfile 0.1.0
Ximo Guanter 3 years ago
commit aed887b584

@@ -0,0 +1,2 @@
target
.ci

@@ -0,0 +1,56 @@
name: CI
on:
  pull_request:
    branches:
      - master
  push:
    branches:
      - master

jobs:
  test:
    name: Unit tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v2

      - name: Install Rust stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      - name: Run cargo test
        uses: actions-rs/cargo@v1
        with:
          command: test

  lints:
    name: Lints
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v2

      - name: Install stable toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
          components: rustfmt, clippy

      - name: Run cargo fmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

      - name: Run cargo clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --all-features

1 .gitignore vendored

@@ -0,0 +1 @@
target

1621 Cargo.lock generated

File diff suppressed because it is too large

@@ -0,0 +1,6 @@
[workspace]
members = [
"buildkit-proto",
"buildkit-llb",
"dockerfile-plus",
]

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,26 @@
Copyright (c) 2019 Denys Zariaiev
Copyright (c) 2020 Ximo Guanter
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,82 @@
# Dockerfile+
This project provides Dockerfile syntax extensions that have been rejected by the moby project or haven't been addressed in a long time.
Currently, the project adds an `INCLUDE+` Dockerfile directive that allows you to import the content of another file into your Dockerfile. There are plans to add more features in the near future.
- [Getting started](#getting-started)
- [Features](#features)
- [INCLUDE+](#include)
- [Roadmap](#roadmap)
- [Feedback](#feedback)
## Getting started
First, you need to make sure you are running a compatible version of Docker:
- if you are using Docker 20.10+, you're all set!
- if you are using Docker 18.09+, then you need to export the following environment variable: `DOCKER_BUILDKIT=1`
- if you are using an older version of Docker, you are out of luck. Sorry!
Once Docker is set up, you just need to add the following line at the top of your Dockerfile:
```Dockerfile
# syntax = edrevo/dockerfile-plus
```
That's it!
## Features
### INCLUDE+
Right now there is just one extra instruction: `INCLUDE+`. All Dockerfile+ instructions end with a `+` sign to avoid potential future collisions with standard Dockerfile instructions.
`INCLUDE+` will import the verbatim contents of another file into your Dockerfile. Here's an example Dockerfile which uses the `INCLUDE+` instruction:
```Dockerfile
# syntax = edrevo/dockerfile-plus
FROM alpine
INCLUDE+ Dockerfile.common
ENTRYPOINT [ "mybin" ]
```
If `Dockerfile.common` contained a single line that said `RUN echo "Hello World"`, then the resulting Docker image would be identical to the one generated by this Dockerfile:
```Dockerfile
FROM alpine
RUN echo "Hello World"
ENTRYPOINT [ "mybin" ]
```
## Roadmap
The next features in line would be:
- `ENVFILE+` command, which would read a `.env` file and import all of its environment variable definitions into the Dockerfile
- `RUN+ --no-cache`, which would disable the cache only for a specific `RUN` step (useful for non-idempotent commands, for example those that clone git repos)
- `TAG` command
- improvements to `.dockerignore`, like recursive `.dockerignore` files
## Feedback
Found a bug? Want to contribute a PR? Want to improve documentation or add a cool logo for the project? All contributions are welcome!
### Development environment
Install cargo (you can use [rustup.rs](https://rustup.rs/)) and run:
```bash
$ cargo build
```
### Creating a local release of the Buildkit frontend
```bash
$ docker build -f dockerfile-plus/Dockerfile .
```

@@ -0,0 +1,30 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.2.0] - 2020-03-04
### Changed
- Update `buildkit-proto` dependency to use `tonic` for gRPC.
## [0.1.3] - 2020-01-24
### Added
- `Mount::OptionalSshAgent` to mount the host SSH agent socket with `docker build --ssh=default`.
## [0.1.2] - 2019-11-20
### Added
- `ImageSource::with_tag` method.
### Changed
- `Source::image` behavior to conform to Docker.
## [0.1.1] - 2019-10-22
### Added
- `GitSource::with_reference` method.
- HTTP source.
## [0.1.0] - 2019-09-24
Initial release.

@@ -0,0 +1,27 @@
[package]
name = "buildkit-llb"
version = "0.2.0"
authors = ["Denys Zariaiev <denys.zariaiev@gmail.com>"]
edition = "2018"
description = "Idiomatic high-level API to create BuildKit LLB graphs"
documentation = "https://docs.rs/buildkit-llb"
repository = "https://github.com/denzp/rust-buildkit"
readme = "README.md"
keywords = ["buildkit", "docker", "llb"]
categories = ["development-tools::build-utils", "api-bindings"]
license = "MIT/Apache-2.0"
[dependencies]
either = "1.6"
failure = "0.1"
lazy_static = "1"
log = "0.4"
prost = "0.6"
regex = "1"
serde_json = "1.0"
sha2 = "0.8"
[dependencies.buildkit-proto]
version = "0.2"
path = "../buildkit-proto"

@@ -0,0 +1,36 @@
`buildkit-llb` - high-level API to create BuildKit LLB graphs
=======
[![Actions Status]][Actions Link]
[![buildkit-llb Crates Badge]][buildkit-llb Crates Link]
[![buildkit-llb Docs Badge]][buildkit-llb Docs Link]
# Usage
Please check the [docs][buildkit-llb Docs Link] or the examples to learn how to use the crate.
The LLB graph written to stdout can then be fed to `buildctl`:
```
cargo run --example=scratch | buildctl build
```
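For a quick taste of the API, here is a minimal sketch adapted from the `simple` example bundled with the crate (see the examples for complete, authoritative usage):
```rust
use std::io::stdout;

use buildkit_llb::prelude::*;

fn main() {
    // Run one command on top of an alpine layer and expose its
    // "/out" directory as the build result.
    let image = Source::image("library/alpine:latest");

    let command = Command::run("/bin/sh")
        .args(&["-c", "echo 'hello LLB' > /out/greeting"])
        .mount(Mount::ReadOnlyLayer(image.output(), "/"))
        .mount(Mount::Scratch(OutputIdx(0), "/out"));

    // Write the serialized graph to stdout so it can be piped
    // into `buildctl build`.
    Terminal::with(command.output(0))
        .write_definition(stdout())
        .unwrap()
}
```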
# License
`buildkit-llb` is primarily distributed under the terms of both the MIT license and
the Apache License (Version 2.0), with portions covered by various BSD-like
licenses.
See LICENSE-APACHE and LICENSE-MIT for details.
# Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in `buildkit-llb` by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.
[Actions Link]: https://github.com/denzp/rust-buildkit/actions
[Actions Status]: https://github.com/denzp/rust-buildkit/workflows/CI/badge.svg
[buildkit-llb Docs Badge]: https://docs.rs/buildkit-llb/badge.svg
[buildkit-llb Docs Link]: https://docs.rs/buildkit-llb/
[buildkit-llb Crates Badge]: https://img.shields.io/crates/v/buildkit-llb.svg
[buildkit-llb Crates Link]: https://crates.io/crates/buildkit-llb

@@ -0,0 +1,93 @@
use std::io::stdout;
use buildkit_llb::ops::source::ImageSource;
use buildkit_llb::prelude::*;
fn main() {
let image = Source::image("library/alpine:latest");
let commands = build_init_commands(&image);
let commands = build_modify_commands(&image, commands);
let base_fs = FileSystem::sequence()
.custom_name("assemble outputs")
.append(FileSystem::mkdir(
OutputIdx(0),
LayerPath::Scratch("/files"),
));
let (final_fs, final_output) =
commands
.into_iter()
.zip(0..)
.fold((base_fs, 0), |(fs, last_output), (output, idx)| {
let layer = fs.append(
FileSystem::copy()
.from(LayerPath::Other(output, format!("/file-{}.out", idx)))
.to(
OutputIdx(idx + 1),
LayerPath::Own(
OwnOutputIdx(last_output),
format!("/files/file-{}.out", idx),
),
),
);
(layer, idx + 1)
});
Terminal::with(final_fs.output(final_output))
.write_definition(stdout())
.unwrap()
}
fn build_init_commands(image: &ImageSource) -> Vec<OperationOutput> {
(0..100)
.map(|idx| {
let base_dir = format!("/file/{}", idx);
let shell = format!("echo 'test {}' > /out{}/file.out", idx, base_dir);
let output_mount = FileSystem::mkdir(OutputIdx(0), LayerPath::Scratch(&base_dir))
.make_parents(true)
.into_operation()
.ignore_cache(true)
.ref_counted();
Command::run("/bin/sh")
.args(&["-c", &shell])
.mount(Mount::ReadOnlyLayer(image.output(), "/"))
.mount(Mount::Layer(OutputIdx(0), output_mount.output(0), "/out"))
.ignore_cache(true)
.ref_counted()
.output(0)
})
.collect()
}
fn build_modify_commands<'a>(
image: &'a ImageSource,
layers: Vec<OperationOutput<'a>>,
) -> Vec<OperationOutput<'a>> {
layers
.into_iter()
.zip(0..)
.map(|(output, idx)| {
let shell = format!(
"sed s/test/modified/ < /in/file/{}/file.in > /out/file-{}.out",
idx, idx
);
Command::run("/bin/sh")
.args(&["-c", &shell])
.mount(Mount::ReadOnlyLayer(image.output(), "/"))
.mount(Mount::Scratch(OutputIdx(0), "/out"))
.mount(Mount::ReadOnlySelector(
output,
format!("/in/file/{}/file.in", idx),
format!("file/{}/file.out", idx),
))
.ignore_cache(true)
.ref_counted()
.output(0)
})
.collect()
}

@@ -0,0 +1,47 @@
use std::io::stdout;
use buildkit_llb::prelude::*;
fn main() {
let bitflags_archive = Source::http("https://crates.io/api/v1/crates/bitflags/1.0.4/download")
.with_file_name("bitflags.tar");
let alpine = Source::image("library/alpine:latest");
let bitflags_unpacked = {
Command::run("/bin/tar")
.args(&[
"-xvzC",
"/out",
"--strip-components=1",
"-f",
"/in/bitflags.tar",
])
.mount(Mount::ReadOnlyLayer(alpine.output(), "/"))
.mount(Mount::ReadOnlyLayer(bitflags_archive.output(), "/in"))
.mount(Mount::Scratch(OutputIdx(0), "/out"))
};
let env_logger_repo = Source::git("https://github.com/sebasmagri/env_logger.git")
.with_reference("ebf4829f3c04ce9b6d3e5d59fa8770bb71bffca3");
let fs = {
FileSystem::sequence()
.append(
FileSystem::copy()
.from(LayerPath::Other(bitflags_unpacked.output(0), "/Cargo.toml"))
.to(OutputIdx(0), LayerPath::Scratch("/bitflags.toml")),
)
.append(
FileSystem::copy()
.from(LayerPath::Other(env_logger_repo.output(), "/Cargo.toml"))
.to(
OutputIdx(1),
LayerPath::Own(OwnOutputIdx(0), "/env_logger.toml"),
),
)
};
Terminal::with(fs.output(1))
.write_definition(stdout())
.unwrap()
}

@@ -0,0 +1,41 @@
use std::io::stdout;
use buildkit_llb::prelude::*;
fn main() {
Terminal::with(build_graph())
.write_definition(stdout())
.unwrap()
}
fn build_graph() -> OperationOutput<'static> {
let builder_image = Source::image("library/alpine:latest")
.custom_name("Using alpine:latest as a builder")
.ref_counted();
let command = {
Command::run("/bin/sh")
.args(&["-c", "echo 'test string 5' > /out/file0"])
.custom_name("create a dummy file")
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::Scratch(OutputIdx(0), "/out"))
.ref_counted()
};
let fs = {
FileSystem::sequence()
.custom_name("do multiple file system manipulations")
.append(
FileSystem::copy()
.from(LayerPath::Other(command.output(0), "/file0"))
.to(OutputIdx(0), LayerPath::Other(command.output(0), "/file1")),
)
.append(
FileSystem::copy()
.from(LayerPath::Own(OwnOutputIdx(0), "/file0"))
.to(OutputIdx(1), LayerPath::Own(OwnOutputIdx(0), "/file2")),
)
};
fs.ref_counted().output(1)
}

@@ -0,0 +1,35 @@
use std::io::stdout;
use buildkit_llb::prelude::*;
fn main() {
let builder_image =
Source::image("library/alpine:latest").custom_name("Using alpine:latest as a builder");
let command = {
Command::run("/bin/sh")
.args(&["-c", "echo 'test string 5' > /out/file0"])
.custom_name("create a dummy file")
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::Scratch(OutputIdx(0), "/out"))
};
let fs = {
FileSystem::sequence()
.custom_name("do multiple file system manipulations")
.append(
FileSystem::copy()
.from(LayerPath::Other(command.output(0), "/file0"))
.to(OutputIdx(0), LayerPath::Other(command.output(0), "/file1")),
)
.append(
FileSystem::copy()
.from(LayerPath::Own(OwnOutputIdx(0), "/file0"))
.to(OutputIdx(1), LayerPath::Own(OwnOutputIdx(0), "/file2")),
)
};
Terminal::with(fs.output(1))
.write_definition(stdout())
.unwrap()
}

@@ -0,0 +1,23 @@
#![deny(warnings)]
#![deny(clippy::all)]
// FIXME: get rid of the unwraps
// TODO: implement warnings for op hash collisions (this will greatly help to debug problems).
// TODO: implement an efficient `std::fmt::Debug` for the ops (a naive implementation can't handle huge nested graphs).
mod serialization;
/// Supported operations - building blocks of the LLB definition graph.
pub mod ops;
/// Various helpers and types.
pub mod utils;
/// Convenient re-export of commonly used things.
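///
/// The bundled examples bring everything into scope with
/// `use buildkit_llb::prelude::*;`.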
pub mod prelude {
pub use crate::ops::exec::Mount;
pub use crate::ops::fs::LayerPath;
pub use crate::ops::source::ResolveMode;
pub use crate::ops::*;
pub use crate::utils::{OperationOutput, OutputIdx, OwnOutputIdx};
}

@@ -0,0 +1,319 @@
use std::collections::HashMap;
use std::iter::{empty, once};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use buildkit_proto::pb::{
self, op::Op, ExecOp, Input, MountType, NetMode, OpMetadata, SecurityMode,
};
use either::Either;
use super::context::Context;
use super::mount::Mount;
use crate::ops::{MultiBorrowedOutput, MultiOwnedOutput, OperationBuilder};
use crate::serialization::{Context as SerializationCtx, Node, Operation, OperationId, Result};
use crate::utils::{OperationOutput, OutputIdx};
/// Command execution operation. This is what a Dockerfile's `RUN` directive is translated to.
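///
/// As a sketch of typical usage (mirroring the bundled examples), the builder
/// methods are chained, e.g. `Command::run("/bin/sh").args(&["-c", "..."])`
/// plus one `mount` call per layer, and the produced layers are then
/// referenced through `output(index)`.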
#[derive(Debug, Clone)]
pub struct Command<'a> {
id: OperationId,
context: Context,
root_mount: Option<Mount<'a, PathBuf>>,
other_mounts: Vec<Mount<'a, PathBuf>>,
description: HashMap<String, String>,
caps: HashMap<String, bool>,
ignore_cache: bool,
}
impl<'a> Command<'a> {
pub fn run<S>(name: S) -> Self
where
S: Into<String>,
{
Self {
id: OperationId::default(),
context: Context::new(name),
root_mount: None,
other_mounts: vec![],
description: Default::default(),
caps: Default::default(),
ignore_cache: false,
}
}
pub fn args<A, S>(mut self, args: A) -> Self
where
A: IntoIterator<Item = S>,
S: AsRef<str>,
{
self.context.args = args.into_iter().map(|item| item.as_ref().into()).collect();
self
}
pub fn env<S, Q>(mut self, name: S, value: Q) -> Self
where
S: AsRef<str>,
Q: AsRef<str>,
{
let env = format!("{}={}", name.as_ref(), value.as_ref());
self.context.env.push(env);
self
}
pub fn env_iter<I, S, Q>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = (S, Q)>,
S: AsRef<str>,
Q: AsRef<str>,
{
for (name, value) in iter.into_iter() {
let env = format!("{}={}", name.as_ref(), value.as_ref());
self.context.env.push(env);
}
self
}
pub fn cwd<P>(mut self, path: P) -> Self
where
P: Into<PathBuf>,
{
self.context.cwd = path.into();
self
}
pub fn user<S>(mut self, user: S) -> Self
where
S: Into<String>,
{
self.context.user = user.into();
self
}
pub fn mount<P>(mut self, mount: Mount<'a, P>) -> Self
where
P: AsRef<Path>,
{
match mount {
Mount::Layer(..) | Mount::ReadOnlyLayer(..) | Mount::Scratch(..) => {
self.caps.insert("exec.mount.bind".into(), true);
}
Mount::ReadOnlySelector(..) => {
self.caps.insert("exec.mount.bind".into(), true);
self.caps.insert("exec.mount.selector".into(), true);
}
Mount::SharedCache(..) => {
self.caps.insert("exec.mount.cache".into(), true);
self.caps.insert("exec.mount.cache.sharing".into(), true);
}
Mount::OptionalSshAgent(..) => {
self.caps.insert("exec.mount.ssh".into(), true);
}
}
if mount.is_root() {
self.root_mount = Some(mount.into_owned());
} else {
self.other_mounts.push(mount.into_owned());
}
self
}
}
impl<'a, 'b: 'a> MultiBorrowedOutput<'b> for Command<'b> {
fn output(&'b self, index: u32) -> OperationOutput<'b> {
// TODO: check if the requested index is available.
OperationOutput::borrowed(self, OutputIdx(index))
}
}
impl<'a> MultiOwnedOutput<'a> for Arc<Command<'a>> {
fn output(&self, index: u32) -> OperationOutput<'a> {
// TODO: check if the requested index is available.
OperationOutput::owned(self.clone(), OutputIdx(index))
}
}
impl<'a> OperationBuilder<'a> for Command<'a> {
fn custom_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.description
.insert("llb.customname".into(), name.into());
self
}
fn ignore_cache(mut self, ignore: bool) -> Self {
self.ignore_cache = ignore;
self
}
}
impl<'a> Operation for Command<'a> {
fn id(&self) -> &OperationId {
&self.id
}
fn serialize(&self, cx: &mut SerializationCtx) -> Result<Node> {
let (inputs, mounts): (Vec<_>, Vec<_>) = {
let mut last_input_index = 0;
self.root_mount
.as_ref()
.into_iter()
.chain(self.other_mounts.iter())
.map(|mount| {
let inner_mount = match mount {
Mount::ReadOnlyLayer(_, destination) => pb::Mount {
input: last_input_index,
dest: destination.to_string_lossy().into(),
output: -1,
readonly: true,
mount_type: MountType::Bind as i32,
..Default::default()
},
Mount::ReadOnlySelector(_, destination, source) => pb::Mount {
input: last_input_index,
dest: destination.to_string_lossy().into(),
output: -1,
readonly: true,
selector: source.to_string_lossy().into(),
mount_type: MountType::Bind as i32,
..Default::default()
},
Mount::Layer(output, _, path) => pb::Mount {
input: last_input_index,
dest: path.to_string_lossy().into(),
output: output.into(),
mount_type: MountType::Bind as i32,
..Default::default()
},
Mount::Scratch(output, path) => {
let mount = pb::Mount {
input: -1,
dest: path.to_string_lossy().into(),
output: output.into(),
mount_type: MountType::Bind as i32,
..Default::default()
};
return (Either::Right(empty()), mount);
}
Mount::SharedCache(path) => {
use buildkit_proto::pb::{CacheOpt, CacheSharingOpt};
let mount = pb::Mount {
input: -1,
dest: path.to_string_lossy().into(),
output: -1,
mount_type: MountType::Cache as i32,
cache_opt: Some(CacheOpt {
id: path.display().to_string(),
sharing: CacheSharingOpt::Shared as i32,
}),
..Default::default()
};
return (Either::Right(empty()), mount);
}
Mount::OptionalSshAgent(path) => {
use buildkit_proto::pb::SshOpt;
let mount = pb::Mount {
input: -1,
dest: path.to_string_lossy().into(),
output: -1,
mount_type: MountType::Ssh as i32,
ssh_opt: Some(SshOpt {
mode: 0o600,
optional: true,
..Default::default()
}),
..Default::default()
};
return (Either::Right(empty()), mount);
}
};
let input = match mount {
Mount::ReadOnlyLayer(input, ..) => input,
Mount::ReadOnlySelector(input, ..) => input,
Mount::Layer(_, input, ..) => input,
Mount::SharedCache(..) => {
unreachable!();
}
Mount::Scratch(..) => {
unreachable!();
}
Mount::OptionalSshAgent(..) => {
unreachable!();
}
};
let serialized = cx.register(input.operation()).unwrap();
let input = Input {
digest: serialized.digest.clone(),
index: input.output().into(),
};
last_input_index += 1;
(Either::Left(once(input)), inner_mount)
})
.unzip()
};
let head = pb::Op {
op: Some(Op::Exec(ExecOp {
mounts,
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(self.context.clone().into()),
})),
inputs: inputs.into_iter().flatten().collect(),
..Default::default()
};
let metadata = OpMetadata {
description: self.description.clone(),
caps: self.caps.clone(),
ignore_cache: self.ignore_cache,
..Default::default()
};
Ok(Node::new(head, metadata))
}
}

@@ -0,0 +1,49 @@
use std::iter::once;
use std::path::PathBuf;
use buildkit_proto::pb::Meta;
#[derive(Debug, Clone)]
pub(crate) struct Context {
pub name: String,
pub args: Vec<String>,
pub env: Vec<String>,
pub cwd: PathBuf,
pub user: String,
}
impl Context {
pub fn new<S>(name: S) -> Self
where
S: Into<String>,
{
Self {
name: name.into(),
cwd: PathBuf::from("/"),
user: "root".into(),
args: vec![],
env: vec![],
}
}
}
impl Into<Meta> for Context {
fn into(self) -> Meta {
Meta {
args: {
once(self.name.clone())
.chain(self.args.iter().cloned())
.collect()
},
env: self.env,
cwd: self.cwd.to_string_lossy().into(),
user: self.user,
..Default::default()
}
}
}

@@ -0,0 +1,440 @@
mod command;
mod context;
mod mount;
pub use command::Command;
pub use mount::Mount;
#[test]
fn serialization() {
use crate::prelude::*;
use buildkit_proto::pb::{op::Op, ExecOp, Meta, NetMode, SecurityMode};
crate::check_op!(
{
Command::run("/bin/sh")
.args(&["-c", "echo 'test string' > /out/file0"])
.env("HOME", "/root")
.custom_name("exec custom name")
},
|digest| { "sha256:dc9a5a3cd84bb1c7b633f1750fdfccd9d0a69d060f8e3babb297bc190e2d7484" },
|description| { vec![("llb.customname", "exec custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Exec(ExecOp {
mounts: vec![],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec![
"/bin/sh",
"-c",
"echo 'test string' > /out/file0",
]),
env: crate::utils::test::to_vec(vec!["HOME=/root"]),
cwd: "/".into(),
user: "root".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}
#[test]
fn serialization_with_env_iter() {
use crate::prelude::*;
use buildkit_proto::pb::{op::Op, ExecOp, Meta, NetMode, SecurityMode};
crate::check_op!(
{
Command::run("cargo").args(&["build"]).env_iter(vec![
("HOME", "/root"),
("PATH", "/bin"),
("CARGO_HOME", "/root/.cargo"),
])
},
|digest| { "sha256:7675be0b02acb379d57bafee5dc749fca7e795fb1e0a92748ccc59a7bc3b491e" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Exec(ExecOp {
mounts: vec![],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec!["cargo", "build"]),
env: crate::utils::test::to_vec(vec![
"HOME=/root",
"PATH=/bin",
"CARGO_HOME=/root/.cargo",
]),
cwd: "/".into(),
user: "root".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}
#[test]
fn serialization_with_cwd() {
use crate::prelude::*;
use buildkit_proto::pb::{op::Op, ExecOp, Meta, NetMode, SecurityMode};
crate::check_op!(
Command::run("cargo").args(&["build"]).cwd("/rust-src"),
|digest| { "sha256:b8120a0e1d1f7fcaa3d6c95db292d064524dc92c6cae8b97672d4e1eafcd03fa" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Exec(ExecOp {
mounts: vec![],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec!["cargo", "build"]),
env: vec![],
cwd: "/rust-src".into(),
user: "root".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}
#[test]
fn serialization_with_user() {
use crate::prelude::*;
use buildkit_proto::pb::{op::Op, ExecOp, Meta, NetMode, SecurityMode};
crate::check_op!(
Command::run("cargo").args(&["build"]).user("builder"),
|digest| { "sha256:7631ea645e2126e9dbc5d9ae789e34301d9d5c80ce89bfa72bc9b82aa43b57c0" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Exec(ExecOp {
mounts: vec![],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec!["cargo", "build"]),
env: vec![],
cwd: "/".into(),
user: "builder".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}
#[test]
fn serialization_with_mounts() {
use crate::prelude::*;
use buildkit_proto::pb::{
op::Op, CacheOpt, CacheSharingOpt, ExecOp, Meta, MountType, NetMode, SecurityMode,
};
let context = Source::local("context");
let builder_image = Source::image("rustlang/rust:nightly");
let final_image = Source::image("library/alpine:latest");
let command = Command::run("cargo")
.args(&["build"])
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::Scratch(OutputIdx(1), "/tmp"))
.mount(Mount::ReadOnlySelector(
context.output(),
"/buildkit-frontend",
"/frontend-sources",
))
.mount(Mount::Layer(OutputIdx(0), final_image.output(), "/output"))
.mount(Mount::SharedCache("/root/.cargo"));
crate::check_op!(
command,
|digest| { "sha256:54a66b514361b13b17f8b5aaaa2392a4c07b55ac53303e4f50584f3dfef6add0" },
|description| { vec![] },
|caps| {
vec![
"exec.mount.bind",
"exec.mount.cache",
"exec.mount.cache.sharing",
"exec.mount.selector",
]
},
|cached_tail| {
vec![
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
"sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220",
]
},
|inputs| {
vec![
(
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
0,
),
(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
),
(
"sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220",
0,
),
]
},
|op| {
Op::Exec(ExecOp {
mounts: vec![
pb::Mount {
input: 0,
selector: "".into(),
dest: "/".into(),
output: -1,
readonly: true,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: -1,
selector: "".into(),
dest: "/tmp".into(),
output: 1,
readonly: false,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: 1,
selector: "/frontend-sources".into(),
dest: "/buildkit-frontend".into(),
output: -1,
readonly: true,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: 2,
selector: "".into(),
dest: "/output".into(),
output: 0,
readonly: false,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: -1,
selector: "".into(),
dest: "/root/.cargo".into(),
output: -1,
readonly: false,
mount_type: MountType::Cache.into(),
cache_opt: Some(CacheOpt {
id: "/root/.cargo".into(),
sharing: CacheSharingOpt::Shared.into(),
}),
secret_opt: None,
ssh_opt: None,
},
],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec!["cargo", "build"]),
env: vec![],
cwd: "/".into(),
user: "root".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}
#[test]
fn serialization_with_several_root_mounts() {
use crate::prelude::*;
use buildkit_proto::pb::{op::Op, ExecOp, Meta, MountType, NetMode, SecurityMode};
let builder_image = Source::image("rustlang/rust:nightly");
let final_image = Source::image("library/alpine:latest");
let command = Command::run("cargo")
.args(&["build"])
.mount(Mount::Scratch(OutputIdx(0), "/tmp"))
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::Scratch(OutputIdx(1), "/var"))
.mount(Mount::ReadOnlyLayer(final_image.output(), "/"));
crate::check_op!(
command,
|digest| { "sha256:baa1bf591d2c47058b7361a0284fa8a3f1bd0fac8a93c87affa77ddc0a5026fd" },
|description| { vec![] },
|caps| { vec!["exec.mount.bind"] },
|cached_tail| {
vec!["sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220"]
},
|inputs| {
vec![(
"sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220",
0,
)]
},
|op| {
Op::Exec(ExecOp {
mounts: vec![
pb::Mount {
input: 0,
selector: "".into(),
dest: "/".into(),
output: -1,
readonly: true,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: -1,
selector: "".into(),
dest: "/tmp".into(),
output: 0,
readonly: false,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: -1,
selector: "".into(),
dest: "/var".into(),
output: 1,
readonly: false,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec!["cargo", "build"]),
env: vec![],
cwd: "/".into(),
user: "root".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}
#[test]
fn serialization_with_ssh_mounts() {
use crate::prelude::*;
use buildkit_proto::pb::{op::Op, ExecOp, Meta, MountType, NetMode, SecurityMode, SshOpt};
let builder_image = Source::image("rustlang/rust:nightly");
let command = Command::run("cargo")
.args(&["build"])
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::OptionalSshAgent("/run/buildkit/ssh_agent.0"));
crate::check_op!(
command,
|digest| { "sha256:1ac1438c67a153878f21fe8067383fd7544901261374eb53ba8bf26e9a5821a5" },
|description| { vec![] },
|caps| { vec!["exec.mount.bind", "exec.mount.ssh"] },
|cached_tail| {
vec!["sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a"]
},
|inputs| {
vec![(
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
0,
)]
},
|op| {
Op::Exec(ExecOp {
mounts: vec![
pb::Mount {
input: 0,
selector: "".into(),
dest: "/".into(),
output: -1,
readonly: true,
mount_type: MountType::Bind.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: None,
},
pb::Mount {
input: -1,
selector: "".into(),
dest: "/run/buildkit/ssh_agent.0".into(),
output: -1,
readonly: false,
mount_type: MountType::Ssh.into(),
cache_opt: None,
secret_opt: None,
ssh_opt: Some(SshOpt {
mode: 0o600,
optional: true,
..Default::default()
}),
},
],
network: NetMode::Unset.into(),
security: SecurityMode::Sandbox.into(),
meta: Some(Meta {
args: crate::utils::test::to_vec(vec!["cargo", "build"]),
env: vec![],
cwd: "/".into(),
user: "root".into(),
extra_hosts: vec![],
proxy_env: None,
}),
})
},
);
}

@@ -0,0 +1,59 @@
use std::path::{Path, PathBuf};
use crate::utils::{OperationOutput, OutputIdx};
/// Operand of a *command execution operation* that specifies how input sources are mounted.
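///
/// For example (as in the bundled examples), `Mount::ReadOnlyLayer(image.output(), "/")`
/// mounts another operation's output as the read-only root, while
/// `Mount::Scratch(OutputIdx(0), "/out")` exposes an empty writable layer as output 0.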
#[derive(Debug, Clone)]
pub enum Mount<'a, P: AsRef<Path>> {
/// Read-only output of another operation.
ReadOnlyLayer(OperationOutput<'a>, P),
/// Read-only output of another operation with a selector.
ReadOnlySelector(OperationOutput<'a>, P, P),
/// Empty layer that produces an output.
Scratch(OutputIdx, P),
/// Writable output of another operation.
Layer(OutputIdx, OperationOutput<'a>, P),
/// Writable persistent cache.
SharedCache(P),
/// Optional SSH agent socket at the specified path.
OptionalSshAgent(P),
}
impl<'a, P: AsRef<Path>> Mount<'a, P> {
/// Transform the mount into an owned variant (that is, with `PathBuf` as the path).
pub fn into_owned(self) -> Mount<'a, PathBuf> {
use Mount::*;
match self {
ReadOnlySelector(op, path, selector) => {
ReadOnlySelector(op, path.as_ref().into(), selector.as_ref().into())
}
ReadOnlyLayer(op, path) => ReadOnlyLayer(op, path.as_ref().into()),
Scratch(output, path) => Scratch(output, path.as_ref().into()),
Layer(output, input, path) => Layer(output, input, path.as_ref().into()),
SharedCache(path) => SharedCache(path.as_ref().into()),
OptionalSshAgent(path) => OptionalSshAgent(path.as_ref().into()),
}
}
pub fn is_root(&self) -> bool {
use Mount::*;
let path = match self {
ReadOnlySelector(_, path, ..) => path,
ReadOnlyLayer(_, path) => path,
Scratch(_, path) => path,
Layer(_, _, path) => path,
SharedCache(path) => path,
OptionalSshAgent(_) => return false,
};
path.as_ref() == Path::new("/")
}
}

@@ -0,0 +1,214 @@
use std::collections::HashMap;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
use buildkit_proto::pb;
use super::path::{LayerPath, UnsetPath};
use super::FileOperation;
use crate::serialization::{Context, Result};
use crate::utils::OutputIdx;
#[derive(Debug)]
pub struct CopyOperation<From: Debug, To: Debug> {
source: From,
destination: To,
follow_symlinks: bool,
recursive: bool,
create_path: bool,
wildcard: bool,
description: HashMap<String, String>,
caps: HashMap<String, bool>,
}
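// Type-state aliases: a copy operation progresses from "no source" through
// "source set" to "source and destination set"; only the last state
// implements `FileOperation`.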
type OpWithoutSource = CopyOperation<UnsetPath, UnsetPath>;
type OpWithSource<'a> = CopyOperation<LayerPath<'a, PathBuf>, UnsetPath>;
type OpWithDestination<'a> =
CopyOperation<LayerPath<'a, PathBuf>, (OutputIdx, LayerPath<'a, PathBuf>)>;
impl OpWithoutSource {
pub(crate) fn new() -> OpWithoutSource {
let mut caps = HashMap::<String, bool>::new();
caps.insert("file.base".into(), true);
CopyOperation {
source: UnsetPath,
destination: UnsetPath,
follow_symlinks: false,
recursive: false,
create_path: false,
wildcard: false,
caps,
description: Default::default(),
}
}
pub fn from<P>(self, source: LayerPath<'_, P>) -> OpWithSource
where
P: AsRef<Path>,
{
CopyOperation {
source: source.into_owned(),
destination: UnsetPath,
follow_symlinks: self.follow_symlinks,
recursive: self.recursive,
create_path: self.create_path,
wildcard: self.wildcard,
description: self.description,
caps: self.caps,
}
}
}
impl<'a> OpWithSource<'a> {
pub fn to<P>(self, output: OutputIdx, destination: LayerPath<'a, P>) -> OpWithDestination<'a>
where
P: AsRef<Path>,
{
CopyOperation {
source: self.source,
destination: (output, destination.into_owned()),
follow_symlinks: self.follow_symlinks,
recursive: self.recursive,
create_path: self.create_path,
wildcard: self.wildcard,
description: self.description,
caps: self.caps,
}
}
}
impl<'a> OpWithDestination<'a> {
pub fn into_operation(self) -> super::sequence::SequenceOperation<'a> {
super::sequence::SequenceOperation::new().append(self)
}
}
impl<From, To> CopyOperation<From, To>
where
From: Debug,
To: Debug,
{
pub fn follow_symlinks(mut self, value: bool) -> Self {
self.follow_symlinks = value;
self
}
pub fn recursive(mut self, value: bool) -> Self {
self.recursive = value;
self
}
pub fn create_path(mut self, value: bool) -> Self {
self.create_path = value;
self
}
pub fn wildcard(mut self, value: bool) -> Self {
self.wildcard = value;
self
}
}
impl<'a> FileOperation for OpWithDestination<'a> {
fn output(&self) -> i32 {
self.destination.0.into()
}
fn serialize_inputs(&self, cx: &mut Context) -> Result<Vec<pb::Input>> {
let mut inputs = if let LayerPath::Other(ref op, ..) = self.source {
let serialized_from_head = cx.register(op.operation())?;
vec![pb::Input {
digest: serialized_from_head.digest.clone(),
index: op.output().into(),
}]
} else {
vec![]
};
if let LayerPath::Other(ref op, ..) = self.destination.1 {
let serialized_to_head = cx.register(op.operation())?;
inputs.push(pb::Input {
digest: serialized_to_head.digest.clone(),
index: op.output().into(),
});
}
Ok(inputs)
}
fn serialize_action(
&self,
inputs_count: usize,
inputs_offset: usize,
) -> Result<pb::FileAction> {
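// Resolve the protobuf input index of the source path: -1 for a scratch
// layer, the next free input slot for another operation's output, or an
// index past all regular inputs for one of this sequence's own outputs.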
let (src_idx, src_offset, src) = match self.source {
LayerPath::Scratch(ref path) => (-1, 0, path.to_string_lossy().into()),
LayerPath::Other(_, ref path) => {
(inputs_offset as i64, 1, path.to_string_lossy().into())
}
LayerPath::Own(ref output, ref path) => {
let output: i64 = output.into();
(
inputs_count as i64 + output,
0,
path.to_string_lossy().into(),
)
}
};
let (dest_idx, dest) = match self.destination.1 {
LayerPath::Scratch(ref path) => (-1, path.to_string_lossy().into()),
LayerPath::Other(_, ref path) => (
inputs_offset as i32 + src_offset,
path.to_string_lossy().into(),
),
LayerPath::Own(ref output, ref path) => {
let output: i32 = output.into();
(inputs_count as i32 + output, path.to_string_lossy().into())
}
};
Ok(pb::FileAction {
input: i64::from(dest_idx),
secondary_input: src_idx,
output: i64::from(self.output()),
action: Some(pb::file_action::Action::Copy(pb::FileActionCopy {
src,
dest,
follow_symlink: self.follow_symlinks,
dir_copy_contents: self.recursive,
create_dest_path: self.create_path,
allow_wildcard: self.wildcard,
// TODO: make this configurable
mode: -1,
// TODO: make this configurable
timestamp: -1,
..Default::default()
})),
})
}
}

@@ -0,0 +1,110 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use buildkit_proto::pb;
use super::path::LayerPath;
use super::FileOperation;
use crate::serialization::{Context, Result};
use crate::utils::OutputIdx;
#[derive(Debug)]
pub struct MakeDirOperation<'a> {
path: LayerPath<'a, PathBuf>,
output: OutputIdx,
make_parents: bool,
description: HashMap<String, String>,
caps: HashMap<String, bool>,
}
impl<'a> MakeDirOperation<'a> {
pub(crate) fn new<P>(output: OutputIdx, path: LayerPath<'a, P>) -> Self
where
P: AsRef<Path>,
{
let mut caps = HashMap::<String, bool>::new();
caps.insert("file.base".into(), true);
MakeDirOperation {
path: path.into_owned(),
output,
make_parents: false,
caps,
description: Default::default(),
}
}
pub fn make_parents(mut self, value: bool) -> Self {
self.make_parents = value;
self
}
pub fn into_operation(self) -> super::sequence::SequenceOperation<'a> {
super::sequence::SequenceOperation::new().append(self)
}
}
impl<'a> FileOperation for MakeDirOperation<'a> {
fn output(&self) -> i32 {
self.output.into()
}
fn serialize_inputs(&self, cx: &mut Context) -> Result<Vec<pb::Input>> {
if let LayerPath::Other(ref op, ..) = self.path {
let serialized_from_head = cx.register(op.operation())?;
let inputs = vec![pb::Input {
digest: serialized_from_head.digest.clone(),
index: op.output().into(),
}];
Ok(inputs)
} else {
Ok(Vec::with_capacity(0))
}
}
fn serialize_action(
&self,
inputs_count: usize,
inputs_offset: usize,
) -> Result<pb::FileAction> {
let (src_idx, path) = match self.path {
LayerPath::Scratch(ref path) => (-1, path.to_string_lossy().into()),
LayerPath::Other(_, ref path) => (inputs_offset as i64, path.to_string_lossy().into()),
LayerPath::Own(ref output, ref path) => {
let output: i64 = output.into();
(inputs_count as i64 + output, path.to_string_lossy().into())
}
};
Ok(pb::FileAction {
input: src_idx,
secondary_input: -1,
output: i64::from(self.output()),
action: Some(pb::file_action::Action::Mkdir(pb::FileActionMkDir {
path,
make_parents: self.make_parents,
// TODO: make this configurable
mode: -1,
// TODO: make this configurable
timestamp: -1,
// TODO: make this configurable
owner: None,
})),
})
}
}

@@ -0,0 +1,110 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use buildkit_proto::pb;
use super::path::LayerPath;
use super::FileOperation;
use crate::serialization::{Context, Result};
use crate::utils::OutputIdx;
#[derive(Debug)]
pub struct MakeFileOperation<'a> {
path: LayerPath<'a, PathBuf>,
output: OutputIdx,
data: Option<Vec<u8>>,
description: HashMap<String, String>,
caps: HashMap<String, bool>,
}
impl<'a> MakeFileOperation<'a> {
pub(crate) fn new<P>(output: OutputIdx, path: LayerPath<'a, P>) -> Self
where
P: AsRef<Path>,
{
let mut caps = HashMap::<String, bool>::new();
caps.insert("file.base".into(), true);
MakeFileOperation {
path: path.into_owned(),
output,
data: None,
caps,
description: Default::default(),
}
}
pub fn data(mut self, bytes: Vec<u8>) -> Self {
self.data = Some(bytes);
self
}
pub fn into_operation(self) -> super::sequence::SequenceOperation<'a> {
super::sequence::SequenceOperation::new().append(self)
}
}
impl<'a> FileOperation for MakeFileOperation<'a> {
fn output(&self) -> i32 {
self.output.into()
}
fn serialize_inputs(&self, cx: &mut Context) -> Result<Vec<pb::Input>> {
if let LayerPath::Other(ref op, ..) = self.path {
let serialized_from_head = cx.register(op.operation())?;
let inputs = vec![pb::Input {
digest: serialized_from_head.digest.clone(),
index: op.output().into(),
}];
Ok(inputs)
} else {
Ok(Vec::with_capacity(0))
}
}
fn serialize_action(
&self,
inputs_count: usize,
inputs_offset: usize,
) -> Result<pb::FileAction> {
let (src_idx, path) = match self.path {
LayerPath::Scratch(ref path) => (-1, path.to_string_lossy().into()),
LayerPath::Other(_, ref path) => (inputs_offset as i64, path.to_string_lossy().into()),
LayerPath::Own(ref output, ref path) => {
let output: i64 = output.into();
(inputs_count as i64 + output, path.to_string_lossy().into())
}
};
Ok(pb::FileAction {
input: src_idx,
secondary_input: -1,
output: i64::from(self.output()),
action: Some(pb::file_action::Action::Mkfile(pb::FileActionMkFile {
path,
data: self.data.clone().unwrap_or_else(|| Vec::with_capacity(0)),
// TODO: make this configurable
mode: -1,
// TODO: make this configurable
timestamp: -1,
// TODO: make this configurable
owner: None,
})),
})
}
}

@@ -0,0 +1,475 @@
use std::fmt::Debug;
use std::path::Path;
use buildkit_proto::pb;
use crate::serialization::{Context, Result};
use crate::utils::OutputIdx;
mod copy;
mod mkdir;
mod mkfile;
mod path;
mod sequence;
pub use self::copy::CopyOperation;
pub use self::mkdir::MakeDirOperation;
pub use self::mkfile::MakeFileOperation;
pub use self::path::{LayerPath, UnsetPath};
pub use self::sequence::SequenceOperation;
/// Umbrella operation that handles file system related routines.
/// Dockerfile's `COPY` directive is a special case of this.
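///
/// Operations are typically composed through `FileSystem::sequence()`, e.g.
/// `FileSystem::sequence().append(FileSystem::copy().from(...).to(...))`;
/// the serialization tests below show complete invocations.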
pub struct FileSystem;
impl FileSystem {
pub fn sequence() -> SequenceOperation<'static> {
SequenceOperation::new()
}
pub fn copy() -> copy::CopyOperation<UnsetPath, UnsetPath> {
CopyOperation::new()
}
pub fn mkdir<P>(output: OutputIdx, layer: LayerPath<P>) -> MakeDirOperation
where
P: AsRef<Path>,
{
MakeDirOperation::new(output, layer)
}
pub fn mkfile<P>(output: OutputIdx, layer: LayerPath<P>) -> MakeFileOperation
where
P: AsRef<Path>,
{
MakeFileOperation::new(output, layer)
}
}
pub trait FileOperation: Debug + Send + Sync {
fn output(&self) -> i32;
fn serialize_inputs(&self, cx: &mut Context) -> Result<Vec<pb::Input>>;
fn serialize_action(&self, inputs_count: usize, inputs_offset: usize)
-> Result<pb::FileAction>;
}
#[test]
fn copy_serialization() {
use crate::prelude::*;
use buildkit_proto::pb::{file_action::Action, op::Op, FileAction, FileActionCopy, FileOp};
let context = Source::local("context");
let builder_image = Source::image("rustlang/rust:nightly");
let operation = FileSystem::sequence()
.append(
FileSystem::copy()
.from(LayerPath::Other(context.output(), "Cargo.toml"))
.to(OutputIdx(0), LayerPath::Scratch("Cargo.toml")),
)
.append(
FileSystem::copy()
.from(LayerPath::Other(builder_image.output(), "/bin/sh"))
.to(OutputIdx(1), LayerPath::Own(OwnOutputIdx(0), "/bin/sh")),
)
.append(
FileSystem::copy()
.from(LayerPath::Own(OwnOutputIdx(1), "Cargo.toml"))
.to(OutputIdx(2), LayerPath::Scratch("Cargo.toml")),
);
crate::check_op!(
operation,
|digest| { "sha256:c4f7fb723fa87f03788aaf660dc9110ad8748fc9971e13713f103b632c05ae96" },
|description| { vec![] },
|caps| { vec!["file.base"] },
|cached_tail| {
vec![
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
]
},
|inputs| {
vec![
(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
),
(
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
0,
),
]
},
|op| {
Op::File(FileOp {
actions: vec![
FileAction {
input: -1,
secondary_input: 0,
output: 0,
action: Some(Action::Copy(FileActionCopy {
src: "Cargo.toml".into(),
dest: "Cargo.toml".into(),
owner: None,
mode: -1,
follow_symlink: false,
dir_copy_contents: false,
attempt_unpack_docker_compatibility: false,
create_dest_path: false,
allow_wildcard: false,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
FileAction {
input: 2,
secondary_input: 1,
output: 1,
action: Some(Action::Copy(FileActionCopy {
src: "/bin/sh".into(),
dest: "/bin/sh".into(),
owner: None,
mode: -1,
follow_symlink: false,
dir_copy_contents: false,
attempt_unpack_docker_compatibility: false,
create_dest_path: false,
allow_wildcard: false,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
FileAction {
input: -1,
secondary_input: 3,
output: 2,
action: Some(Action::Copy(FileActionCopy {
src: "Cargo.toml".into(),
dest: "Cargo.toml".into(),
owner: None,
mode: -1,
follow_symlink: false,
dir_copy_contents: false,
attempt_unpack_docker_compatibility: false,
create_dest_path: false,
allow_wildcard: false,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
],
})
},
);
}
#[test]
fn copy_with_params_serialization() {
use crate::prelude::*;
use buildkit_proto::pb::{file_action::Action, op::Op, FileAction, FileActionCopy, FileOp};
let context = Source::local("context");
let operation = FileSystem::sequence()
.append(
FileSystem::copy()
.from(LayerPath::Other(context.output(), "Cargo.toml"))
.to(OutputIdx(0), LayerPath::Scratch("Cargo.toml"))
.follow_symlinks(true),
)
.append(
FileSystem::copy()
.from(LayerPath::Other(context.output(), "Cargo.toml"))
.to(OutputIdx(1), LayerPath::Scratch("Cargo.toml"))
.recursive(true),
)
.append(
FileSystem::copy()
.from(LayerPath::Other(context.output(), "Cargo.toml"))
.to(OutputIdx(2), LayerPath::Scratch("Cargo.toml"))
.create_path(true),
)
.append(
FileSystem::copy()
.from(LayerPath::Other(context.output(), "Cargo.toml"))
.to(OutputIdx(3), LayerPath::Scratch("Cargo.toml"))
.wildcard(true),
);
crate::check_op!(
operation,
|digest| { "sha256:8be9c1c8335d53c894d0f5848ef354c69a96a469a72b00aadae704b23d465022" },
|description| { vec![] },
|caps| { vec!["file.base"] },
|cached_tail| {
vec!["sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702"]
},
|inputs| {
// TODO: improve the correct, but inefficient serialization
vec![
(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
),
(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
),
(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
),
(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
),
]
},
|op| {
Op::File(FileOp {
actions: vec![
FileAction {
input: -1,
secondary_input: 0,
output: 0,
action: Some(Action::Copy(FileActionCopy {
src: "Cargo.toml".into(),
dest: "Cargo.toml".into(),
owner: None,
mode: -1,
follow_symlink: true,
dir_copy_contents: false,
attempt_unpack_docker_compatibility: false,
create_dest_path: false,
allow_wildcard: false,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
FileAction {
input: -1,
secondary_input: 1,
output: 1,
action: Some(Action::Copy(FileActionCopy {
src: "Cargo.toml".into(),
dest: "Cargo.toml".into(),
owner: None,
mode: -1,
follow_symlink: false,
dir_copy_contents: true,
attempt_unpack_docker_compatibility: false,
create_dest_path: false,
allow_wildcard: false,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
FileAction {
input: -1,
secondary_input: 2,
output: 2,
action: Some(Action::Copy(FileActionCopy {
src: "Cargo.toml".into(),
dest: "Cargo.toml".into(),
owner: None,
mode: -1,
follow_symlink: false,
dir_copy_contents: false,
attempt_unpack_docker_compatibility: false,
create_dest_path: true,
allow_wildcard: false,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
FileAction {
input: -1,
secondary_input: 3,
output: 3,
action: Some(Action::Copy(FileActionCopy {
src: "Cargo.toml".into(),
dest: "Cargo.toml".into(),
owner: None,
mode: -1,
follow_symlink: false,
dir_copy_contents: false,
attempt_unpack_docker_compatibility: false,
create_dest_path: false,
allow_wildcard: true,
allow_empty_wildcard: false,
timestamp: -1,
})),
},
],
})
},
);
}
#[test]
fn mkdir_serialization() {
use crate::prelude::*;
use buildkit_proto::pb::{file_action::Action, op::Op, FileAction, FileActionMkDir, FileOp};
let context = Source::local("context");
let operation = FileSystem::sequence()
.append(
FileSystem::mkdir(
OutputIdx(0),
LayerPath::Other(context.output(), "/new-crate"),
)
.make_parents(true),
)
.append(FileSystem::mkdir(
OutputIdx(1),
LayerPath::Scratch("/new-crate"),
))
.append(FileSystem::mkdir(
OutputIdx(2),
LayerPath::Own(OwnOutputIdx(1), "/another-crate/deep/directory"),
));
crate::check_op!(
operation,
|digest| { "sha256:bfcd58256cba441c6d9e89c439bc6640b437d47213472cf8491646af4f0aa5b2" },
|description| { vec![] },
|caps| { vec!["file.base"] },
|cached_tail| {
vec!["sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702"]
},
|inputs| {
vec![(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
)]
},
|op| {
Op::File(FileOp {
actions: vec![
FileAction {
input: 0,
secondary_input: -1,
output: 0,
action: Some(Action::Mkdir(FileActionMkDir {
path: "/new-crate".into(),
owner: None,
mode: -1,
timestamp: -1,
make_parents: true,
})),
},
FileAction {
input: -1,
secondary_input: -1,
output: 1,
action: Some(Action::Mkdir(FileActionMkDir {
path: "/new-crate".into(),
owner: None,
mode: -1,
timestamp: -1,
make_parents: false,
})),
},
FileAction {
input: 2,
secondary_input: -1,
output: 2,
action: Some(Action::Mkdir(FileActionMkDir {
path: "/another-crate/deep/directory".into(),
owner: None,
mode: -1,
timestamp: -1,
make_parents: false,
})),
},
],
})
},
);
}
#[test]
fn mkfile_serialization() {
use crate::prelude::*;
use buildkit_proto::pb::{file_action::Action, op::Op, FileAction, FileActionMkFile, FileOp};
let context = Source::local("context");
let operation = FileSystem::sequence()
.append(
FileSystem::mkfile(
OutputIdx(0),
LayerPath::Other(context.output(), "/build-plan.json"),
)
.data(b"any bytes".to_vec()),
)
.append(FileSystem::mkfile(
OutputIdx(1),
LayerPath::Scratch("/build-graph.json"),
))
.append(FileSystem::mkfile(
OutputIdx(2),
LayerPath::Own(OwnOutputIdx(1), "/llb.pb"),
));
crate::check_op!(
operation,
|digest| { "sha256:9c0d9f741dfc9b4ea8d909ebf388bc354da0ee401eddf5633e8e4ece7e87d22d" },
|description| { vec![] },
|caps| { vec!["file.base"] },
|cached_tail| {
vec!["sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702"]
},
|inputs| {
vec![(
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
0,
)]
},
|op| {
Op::File(FileOp {
actions: vec![
FileAction {
input: 0,
secondary_input: -1,
output: 0,
action: Some(Action::Mkfile(FileActionMkFile {
path: "/build-plan.json".into(),
owner: None,
mode: -1,
timestamp: -1,
data: b"any bytes".to_vec(),
})),
},
FileAction {
input: -1,
secondary_input: -1,
output: 1,
action: Some(Action::Mkfile(FileActionMkFile {
path: "/build-graph.json".into(),
owner: None,
mode: -1,
timestamp: -1,
data: vec![],
})),
},
FileAction {
input: 2,
secondary_input: -1,
output: 2,
action: Some(Action::Mkfile(FileActionMkFile {
path: "/llb.pb".into(),
owner: None,
mode: -1,
timestamp: -1,
data: vec![],
})),
},
],
})
},
);
}

@ -0,0 +1,33 @@
use std::path::{Path, PathBuf};
use crate::utils::{OperationOutput, OwnOutputIdx};
/// Internal representation for a not-yet-specified path.
#[derive(Debug)]
pub struct UnsetPath;
/// Operand of *file system operations* that defines either a source or a destination layer together with a path.
#[derive(Debug)]
pub enum LayerPath<'a, P: AsRef<Path>> {
/// References one of the *current operation outputs* and a path.
Own(OwnOutputIdx, P),
/// References an *output of another operation* and a path.
Other(OperationOutput<'a>, P),
/// A path in an *empty* layer (the equivalent of Dockerfile's `scratch` source).
Scratch(P),
}
impl<'a, P: AsRef<Path>> LayerPath<'a, P> {
/// Transforms the layer path into an owned variant (i.e. with `PathBuf` as the path type).
pub fn into_owned(self) -> LayerPath<'a, PathBuf> {
use LayerPath::*;
match self {
Other(input, path) => Other(input, path.as_ref().into()),
Own(output, path) => Own(output, path.as_ref().into()),
Scratch(path) => Scratch(path.as_ref().into()),
}
}
}
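// A sketch (illustrative, mirroring the tests elsewhere in this crate) of how
// the three variants combine: `Other` reads another operation's output,
// `Scratch` targets an empty layer, and `Own` refers back to an output that
// the same file-system sequence has already declared.
#[cfg(test)]
#[allow(dead_code)]
fn layer_path_sketch() {
    use crate::prelude::*;
    let context = Source::local("context");
    let _op = FileSystem::sequence()
        .append(
            FileSystem::copy()
                .from(LayerPath::Other(context.output(), "Cargo.toml"))
                .to(OutputIdx(0), LayerPath::Scratch("Cargo.toml")),
        )
        .append(FileSystem::mkdir(
            OutputIdx(1),
            LayerPath::Own(OwnOutputIdx(0), "/dir"),
        ));
}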

@ -0,0 +1,140 @@
use std::collections::HashMap;
use std::sync::Arc;
use buildkit_proto::pb::{self, op::Op};
use super::FileOperation;
use crate::ops::*;
use crate::serialization::{Context, Node, Operation, OperationId, Result};
use crate::utils::{OperationOutput, OutputIdx};
#[derive(Debug)]
pub struct SequenceOperation<'a> {
id: OperationId,
inner: Vec<Box<dyn FileOperation + 'a>>,
description: HashMap<String, String>,
caps: HashMap<String, bool>,
ignore_cache: bool,
}
impl<'a> SequenceOperation<'a> {
pub(crate) fn new() -> Self {
let mut caps = HashMap::<String, bool>::new();
caps.insert("file.base".into(), true);
Self {
id: OperationId::default(),
inner: vec![],
caps,
description: Default::default(),
ignore_cache: false,
}
}
pub fn append<T>(mut self, op: T) -> Self
where
T: FileOperation + 'a,
{
// TODO: verify no duplicated outputs
self.inner.push(Box::new(op));
self
}
pub fn last_output_index(&self) -> Option<u32> {
// TODO: make sure the `inner` elements have monotonic indexes
self.inner
.iter()
.filter(|fs| fs.output() >= 0)
.last()
.map(|fs| fs.output() as u32)
}
}
impl<'a, 'b: 'a> MultiBorrowedOutput<'b> for SequenceOperation<'b> {
fn output(&'b self, index: u32) -> OperationOutput<'b> {
// TODO: check if the requested index is available.
OperationOutput::borrowed(self, OutputIdx(index))
}
}
impl<'a> MultiOwnedOutput<'a> for Arc<SequenceOperation<'a>> {
fn output(&self, index: u32) -> OperationOutput<'a> {
// TODO: check if the requested index is available.
OperationOutput::owned(self.clone(), OutputIdx(index))
}
}
impl<'a, 'b: 'a> MultiBorrowedLastOutput<'b> for SequenceOperation<'b> {
fn last_output(&'b self) -> Option<OperationOutput<'b>> {
self.last_output_index().map(|index| self.output(index))
}
}
impl<'a> MultiOwnedLastOutput<'a> for Arc<SequenceOperation<'a>> {
fn last_output(&self) -> Option<OperationOutput<'a>> {
self.last_output_index().map(|index| self.output(index))
}
}
impl<'a> OperationBuilder<'a> for SequenceOperation<'a> {
fn custom_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.description
.insert("llb.customname".into(), name.into());
self
}
fn ignore_cache(mut self, ignore: bool) -> Self {
self.ignore_cache = ignore;
self
}
}
impl<'a> Operation for SequenceOperation<'a> {
fn id(&self) -> &OperationId {
&self.id
}
fn serialize(&self, cx: &mut Context) -> Result<Node> {
let mut inputs = vec![];
let mut input_offsets = vec![];
for item in &self.inner {
let mut inner_inputs = item.serialize_inputs(cx)?;
input_offsets.push(inputs.len());
inputs.append(&mut inner_inputs);
}
let mut actions = vec![];
for (item, offset) in self.inner.iter().zip(input_offsets.into_iter()) {
actions.push(item.serialize_action(inputs.len(), offset)?);
}
let head = pb::Op {
inputs,
op: Some(Op::File(pb::FileOp { actions })),
..Default::default()
};
let metadata = pb::OpMetadata {
description: self.description.clone(),
caps: self.caps.clone(),
ignore_cache: self.ignore_cache,
..Default::default()
};
Ok(Node::new(head, metadata))
}
}
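// Usage sketch (illustrative; the concrete actions are placeholders): each
// appended action declares its own output index, and `last_output` resolves
// to the output of the most recently appended action that has one.
#[cfg(test)]
#[allow(dead_code)]
fn sequence_sketch() {
    use crate::prelude::*;
    let sequence = FileSystem::sequence()
        .append(FileSystem::mkdir(OutputIdx(0), LayerPath::Scratch("/out")))
        .append(FileSystem::mkfile(
            OutputIdx(1),
            LayerPath::Own(OwnOutputIdx(0), "/out/file"),
        ));
    // Here this is the `mkfile` output, i.e. index 1.
    let _last = sequence.last_output().unwrap();
}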

@ -0,0 +1,56 @@
use std::sync::Arc;
pub mod exec;
pub mod fs;
pub mod source;
pub mod terminal;
pub use self::exec::Command;
pub use self::fs::FileSystem;
pub use self::source::Source;
pub use self::terminal::Terminal;
use crate::utils::OperationOutput;
pub trait MultiBorrowedOutput<'a> {
fn output(&'a self, number: u32) -> OperationOutput<'a>;
}
pub trait MultiBorrowedLastOutput<'a> {
fn last_output(&'a self) -> Option<OperationOutput<'a>>;
}
pub trait MultiOwnedOutput<'a> {
fn output(&self, number: u32) -> OperationOutput<'a>;
}
pub trait MultiOwnedLastOutput<'a> {
fn last_output(&self) -> Option<OperationOutput<'a>>;
}
pub trait SingleBorrowedOutput<'a> {
fn output(&'a self) -> OperationOutput<'a>;
}
pub trait SingleOwnedOutput<'a> {
fn output(&self) -> OperationOutput<'a>;
}
/// Common operation methods.
pub trait OperationBuilder<'a> {
/// Sets an operation display name.
fn custom_name<S>(self, name: S) -> Self
where
S: Into<String>;
/// Sets caching behavior.
fn ignore_cache(self, ignore: bool) -> Self;
/// Converts the operation into an `Arc` so it can be shared when efficient borrowing is not possible.
fn ref_counted(self) -> Arc<Self>
where
Self: Sized + 'a,
{
Arc::new(self)
}
}
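// Sketch of when `ref_counted` is useful (illustrative): an `Arc`-wrapped
// operation hands out owned outputs, so a single source can feed several
// later operations without tying their lifetimes to a borrow of it.
#[cfg(test)]
#[allow(dead_code)]
fn ref_counted_sketch() {
    let image = Source::image("library/alpine:latest").ref_counted();
    let first = image.output();
    let second = image.output();
    let _ = (first, second);
}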

@ -0,0 +1,230 @@
use std::collections::HashMap;
use std::sync::Arc;
use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp};
use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput};
use crate::serialization::{Context, Node, Operation, OperationId, Result};
use crate::utils::{OperationOutput, OutputIdx};
#[derive(Default, Debug)]
pub struct GitSource {
id: OperationId,
remote: String,
reference: Option<String>,
description: HashMap<String, String>,
ignore_cache: bool,
}
impl GitSource {
pub(crate) fn new<S>(url: S) -> Self
where
S: Into<String>,
{
let mut raw_url = url.into();
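// Strip a known scheme prefix so that `serialize` below can render the
// identifier uniformly as `git://<remote>` (see the `prefixes` test).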
let remote = if raw_url.starts_with("http://") {
raw_url.split_off(7)
} else if raw_url.starts_with("https://") {
raw_url.split_off(8)
} else if raw_url.starts_with("git://") {
raw_url.split_off(6)
} else if raw_url.starts_with("git@") {
raw_url.split_off(4)
} else {
raw_url
};
Self {
id: OperationId::default(),
remote,
reference: None,
description: Default::default(),
ignore_cache: false,
}
}
}
impl GitSource {
pub fn with_reference<S>(mut self, reference: S) -> Self
where
S: Into<String>,
{
self.reference = Some(reference.into());
self
}
}
impl<'a> SingleBorrowedOutput<'a> for GitSource {
fn output(&'a self) -> OperationOutput<'a> {
OperationOutput::borrowed(self, OutputIdx(0))
}
}
impl<'a> SingleOwnedOutput<'static> for Arc<GitSource> {
fn output(&self) -> OperationOutput<'static> {
OperationOutput::owned(self.clone(), OutputIdx(0))
}
}
impl OperationBuilder<'static> for GitSource {
fn custom_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.description
.insert("llb.customname".into(), name.into());
self
}
fn ignore_cache(mut self, ignore: bool) -> Self {
self.ignore_cache = ignore;
self
}
}
impl Operation for GitSource {
fn id(&self) -> &OperationId {
&self.id
}
fn serialize(&self, _: &mut Context) -> Result<Node> {
let identifier = if let Some(ref reference) = self.reference {
format!("git://{}#{}", self.remote, reference)
} else {
format!("git://{}", self.remote)
};
let head = pb::Op {
op: Some(Op::Source(SourceOp {
identifier,
attrs: Default::default(),
})),
..Default::default()
};
let metadata = OpMetadata {
description: self.description.clone(),
ignore_cache: self.ignore_cache,
..Default::default()
};
Ok(Node::new(head, metadata))
}
}
#[test]
fn serialization() {
crate::check_op!(
GitSource::new("any.url"),
|digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
GitSource::new("any.url").custom_name("git custom name"),
|digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" },
|description| { vec![("llb.customname", "git custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url".into(),
attrs: Default::default(),
})
},
);
}
#[test]
fn prefixes() {
crate::check_op!(
GitSource::new("http://any.url"),
|digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
GitSource::new("https://any.url"),
|digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
GitSource::new("git://any.url"),
|digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
GitSource::new("git@any.url"),
|digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url".into(),
attrs: Default::default(),
})
},
);
}
#[test]
fn with_reference() {
crate::check_op!(
GitSource::new("any.url").with_reference("abcdef"),
|digest| { "sha256:f59aa7f8db62e0b5c2a1da396752ba8a2bb0b5d28ddcfdd1d4f822d26ebfe3cf" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "git://any.url#abcdef".into(),
attrs: Default::default(),
})
},
);
}

@ -0,0 +1,153 @@
use std::collections::HashMap;
use std::sync::Arc;
use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp};
use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput};
use crate::serialization::{Context, Node, Operation, OperationId, Result};
use crate::utils::{OperationOutput, OutputIdx};
#[derive(Default, Debug)]
pub struct HttpSource {
id: OperationId,
url: String,
file_name: Option<String>,
description: HashMap<String, String>,
ignore_cache: bool,
}
impl HttpSource {
pub(crate) fn new<S>(url: S) -> Self
where
S: Into<String>,
{
Self {
id: OperationId::default(),
url: url.into(),
file_name: None,
description: Default::default(),
ignore_cache: false,
}
}
}
impl HttpSource {
pub fn with_file_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.file_name = Some(name.into());
self
}
}
impl<'a> SingleBorrowedOutput<'a> for HttpSource {
fn output(&'a self) -> OperationOutput<'a> {
OperationOutput::borrowed(self, OutputIdx(0))
}
}
impl<'a> SingleOwnedOutput<'static> for Arc<HttpSource> {
fn output(&self) -> OperationOutput<'static> {
OperationOutput::owned(self.clone(), OutputIdx(0))
}
}
impl OperationBuilder<'static> for HttpSource {
fn custom_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.description
.insert("llb.customname".into(), name.into());
self
}
fn ignore_cache(mut self, ignore: bool) -> Self {
self.ignore_cache = ignore;
self
}
}
impl Operation for HttpSource {
fn id(&self) -> &OperationId {
&self.id
}
fn serialize(&self, _: &mut Context) -> Result<Node> {
let mut attrs = HashMap::default();
if let Some(ref file_name) = self.file_name {
attrs.insert("http.filename".into(), file_name.into());
}
let head = pb::Op {
op: Some(Op::Source(SourceOp {
identifier: self.url.clone(),
attrs,
})),
..Default::default()
};
let metadata = OpMetadata {
description: self.description.clone(),
ignore_cache: self.ignore_cache,
..Default::default()
};
Ok(Node::new(head, metadata))
}
}
#[test]
fn serialization() {
crate::check_op!(
HttpSource::new("http://any.url/with/path"),
|digest| { "sha256:22ec64461f39dd3b54680fc240b459248b1ced597f113b5d692abe9695860d12" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "http://any.url/with/path".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
HttpSource::new("http://any.url/with/path").custom_name("git custom name"),
|digest| { "sha256:22ec64461f39dd3b54680fc240b459248b1ced597f113b5d692abe9695860d12" },
|description| { vec![("llb.customname", "git custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "http://any.url/with/path".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
HttpSource::new("http://any.url/with/path").with_file_name("file.name"),
|digest| { "sha256:e1fe6584287dfa2b065ed29fcf4f77bcf86fb54781832d2f45074fa1671df692" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "http://any.url/with/path".into(),
attrs: vec![("http.filename".to_string(), "file.name".to_string())]
.into_iter()
.collect(),
})
},
);
}

@ -0,0 +1,477 @@
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp};
use lazy_static::*;
use regex::Regex;
use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput};
use crate::serialization::{Context, Node, Operation, OperationId, Result};
use crate::utils::{OperationOutput, OutputIdx};
#[derive(Debug)]
pub struct ImageSource {
id: OperationId,
domain: Option<String>,
name: String,
tag: Option<String>,
digest: Option<String>,
description: HashMap<String, String>,
ignore_cache: bool,
resolve_mode: Option<ResolveMode>,
}
#[derive(Debug, Clone, Copy)]
pub enum ResolveMode {
Default,
ForcePull,
PreferLocal,
}
impl fmt::Display for ResolveMode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ResolveMode::Default => write!(f, "default"),
ResolveMode::ForcePull => write!(f, "pull"),
ResolveMode::PreferLocal => write!(f, "local"),
}
}
}
impl Default for ResolveMode {
fn default() -> Self {
ResolveMode::Default
}
}
lazy_static! {
static ref TAG_EXPR: Regex = Regex::new(r":[\w][\w.-]+$").unwrap();
}
impl ImageSource {
// The implementation is based on:
// https://github.com/containerd/containerd/blob/614c0858f2a8db9ee0c788a9164870069f3e53ed/reference/docker/reference.go
pub(crate) fn new<S>(name: S) -> Self
where
S: Into<String>,
{
let mut name = name.into();
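// Split off the `@digest` and `:tag` suffixes first; what remains is the
// `[domain/]name` part that the match below decomposes.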
let (digest, digest_separator) = match name.find('@') {
Some(pos) => (Some(name[pos + 1..].into()), pos),
None => (None, name.len()),
};
name.truncate(digest_separator);
let (tag, tag_separator) = match TAG_EXPR.find(&name) {
Some(found) => (Some(name[found.start() + 1..].into()), found.start()),
None => (None, name.len()),
};
name.truncate(tag_separator);
let (domain, mut name) = match name.find('/') {
// The input has a canonical-like format.
Some(separator_pos) if &name[..separator_pos] == "docker.io" => {
(None, name[separator_pos + 1..].into())
}
// Special case when domain is "localhost".
Some(separator_pos) if &name[..separator_pos] == "localhost" => {
(Some("localhost".into()), name[separator_pos + 1..].into())
}
// General case for a common domain.
Some(separator_pos) if name[..separator_pos].find('.').is_some() => (
Some(name[..separator_pos].into()),
name[separator_pos + 1..].into(),
),
// General case for a domain with a port number.
Some(separator_pos) if name[..separator_pos].find(':').is_some() => (
Some(name[..separator_pos].into()),
name[separator_pos + 1..].into(),
),
// Fallback if the first component is not a domain name.
Some(_) => (None, name),
// Fallback if only a single URL component is present.
None => (None, name),
};
if domain.is_none() && name.find('/').is_none() {
name = format!("library/{}", name);
}
Self {
id: OperationId::default(),
domain,
name,
tag,
digest,
description: Default::default(),
ignore_cache: false,
resolve_mode: None,
}
}
pub fn with_resolve_mode(mut self, mode: ResolveMode) -> Self {
self.resolve_mode = Some(mode);
self
}
pub fn resolve_mode(&self) -> Option<ResolveMode> {
self.resolve_mode
}
pub fn with_digest<S>(mut self, digest: S) -> Self
where
S: Into<String>,
{
self.digest = Some(digest.into());
self
}
pub fn with_tag<S>(mut self, tag: S) -> Self
where
S: Into<String>,
{
self.tag = Some(tag.into());
self
}
pub fn canonical_name(&self) -> String {
let domain = match self.domain {
Some(ref domain) => domain,
None => "docker.io",
};
let tag = match self.tag {
Some(ref tag) => tag,
None => "latest",
};
match self.digest {
Some(ref digest) => format!("{}/{}:{}@{}", domain, self.name, tag, digest),
None => format!("{}/{}:{}", domain, self.name, tag),
}
}
}
impl<'a> SingleBorrowedOutput<'a> for ImageSource {
fn output(&'a self) -> OperationOutput<'a> {
OperationOutput::borrowed(self, OutputIdx(0))
}
}
impl<'a> SingleOwnedOutput<'static> for Arc<ImageSource> {
fn output(&self) -> OperationOutput<'static> {
OperationOutput::owned(self.clone(), OutputIdx(0))
}
}
impl OperationBuilder<'static> for ImageSource {
fn custom_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.description
.insert("llb.customname".into(), name.into());
self
}
fn ignore_cache(mut self, ignore: bool) -> Self {
self.ignore_cache = ignore;
self
}
}
impl Operation for ImageSource {
fn id(&self) -> &OperationId {
&self.id
}
fn serialize(&self, _: &mut Context) -> Result<Node> {
let mut attrs = HashMap::default();
if let Some(ref mode) = self.resolve_mode {
attrs.insert("image.resolvemode".into(), mode.to_string());
}
let head = pb::Op {
op: Some(Op::Source(SourceOp {
identifier: format!("docker-image://{}", self.canonical_name()),
attrs,
})),
..Default::default()
};
let metadata = OpMetadata {
description: self.description.clone(),
ignore_cache: self.ignore_cache,
..Default::default()
};
Ok(Node::new(head, metadata))
}
}
#[test]
fn serialization() {
crate::check_op!(
ImageSource::new("rustlang/rust:nightly"),
|digest| { "sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:nightly".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
ImageSource::new("library/alpine:latest"),
|digest| { "sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/alpine:latest".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
ImageSource::new("rustlang/rust:nightly").custom_name("image custom name"),
|digest| { "sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a" },
|description| { vec![("llb.customname", "image custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:nightly".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
ImageSource::new("rustlang/rust:nightly").with_digest("sha256:123456"),
|digest| { "sha256:a9837e26998d165e7b6433f8d40b36d259905295860fcbbc62bbce75a6c991c6" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:nightly@sha256:123456".into(),
attrs: Default::default(),
})
},
);
}
#[test]
fn resolve_mode() {
crate::check_op!(
ImageSource::new("rustlang/rust:nightly").with_resolve_mode(ResolveMode::Default),
|digest| { "sha256:792e246751e84b9a5e40c28900d70771a07e8cc920c1039cdddfc6bf69256dfe" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:nightly".into(),
attrs: crate::utils::test::to_map(vec![("image.resolvemode", "default")]),
})
},
);
crate::check_op!(
ImageSource::new("rustlang/rust:nightly").with_resolve_mode(ResolveMode::ForcePull),
|digest| { "sha256:0bd920010eab701bdce44c61d220e6943d56d3fb9a9fa4e773fc060c0d746122" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:nightly".into(),
attrs: crate::utils::test::to_map(vec![("image.resolvemode", "pull")]),
})
},
);
crate::check_op!(
ImageSource::new("rustlang/rust:nightly").with_resolve_mode(ResolveMode::PreferLocal),
|digest| { "sha256:bd6797c8644d2663b29c36a8b3b63931e539be44ede5e56aca2da4f35f241f18" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:nightly".into(),
attrs: crate::utils::test::to_map(vec![("image.resolvemode", "local")]),
})
},
);
}
#[test]
fn image_name() {
crate::check_op!(ImageSource::new("rustlang/rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/rustlang/rust:latest".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("rust:nightly"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:nightly".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:latest".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("library/rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:latest".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("rust:obj@sha256:abcdef"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:obj@sha256:abcdef".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("rust@sha256:abcdef"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:latest@sha256:abcdef".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("rust:obj@abcdef"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:obj@abcdef".into(),
attrs: Default::default(),
})
});
crate::check_op!(
ImageSource::new("b.gcr.io/test.example.com/my-app:test.example.com"),
|op| {
Op::Source(SourceOp {
identifier: "docker-image://b.gcr.io/test.example.com/my-app:test.example.com"
.into(),
attrs: Default::default(),
})
}
);
crate::check_op!(
ImageSource::new("sub-dom1.foo.com/bar/baz/quux:some-long-tag"),
|op| {
Op::Source(SourceOp {
identifier: "docker-image://sub-dom1.foo.com/bar/baz/quux:some-long-tag".into(),
attrs: Default::default(),
})
}
);
crate::check_op!(
ImageSource::new("sub-dom1.foo.com/quux:some-long-tag"),
|op| {
Op::Source(SourceOp {
identifier: "docker-image://sub-dom1.foo.com/quux:some-long-tag".into(),
attrs: Default::default(),
})
}
);
crate::check_op!(ImageSource::new("localhost/rust:obj"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://localhost/rust:obj".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("127.0.0.1/rust:obj"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://127.0.0.1/rust:obj".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("localhost:5000/rust:obj"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://localhost:5000/rust:obj".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("127.0.0.1:5000/rust:obj"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://127.0.0.1:5000/rust:obj".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("localhost:5000/rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://localhost:5000/rust:latest".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("127.0.0.1:5000/rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://127.0.0.1:5000/rust:latest".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("docker.io/rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:latest".into(),
attrs: Default::default(),
})
});
crate::check_op!(ImageSource::new("docker.io/library/rust"), |op| {
Op::Source(SourceOp {
identifier: "docker-image://docker.io/library/rust:latest".into(),
attrs: Default::default(),
})
});
}

@ -0,0 +1,202 @@
use std::collections::HashMap;
use std::sync::Arc;
use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp};
use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput};
use crate::serialization::{Context, Node, Operation, OperationId, Result};
use crate::utils::{OperationOutput, OutputIdx};
#[derive(Default, Debug)]
pub struct LocalSource {
id: OperationId,
name: String,
description: HashMap<String, String>,
ignore_cache: bool,
exclude: Vec<String>,
include: Vec<String>,
}
impl LocalSource {
pub(crate) fn new<S>(name: S) -> Self
where
S: Into<String>,
{
Self {
id: OperationId::default(),
name: name.into(),
ignore_cache: false,
..Default::default()
}
}
pub fn add_include_pattern<S>(mut self, include: S) -> Self
where
S: Into<String>,
{
// TODO: add `source.local.includepatterns` capability
self.include.push(include.into());
self
}
pub fn add_exclude_pattern<S>(mut self, exclude: S) -> Self
where
S: Into<String>,
{
// TODO: add `source.local.excludepatterns` capability
self.exclude.push(exclude.into());
self
}
}
impl<'a> SingleBorrowedOutput<'a> for LocalSource {
fn output(&'a self) -> OperationOutput<'a> {
OperationOutput::borrowed(self, OutputIdx(0))
}
}
impl<'a> SingleOwnedOutput<'static> for Arc<LocalSource> {
fn output(&self) -> OperationOutput<'static> {
OperationOutput::owned(self.clone(), OutputIdx(0))
}
}
impl OperationBuilder<'static> for LocalSource {
fn custom_name<S>(mut self, name: S) -> Self
where
S: Into<String>,
{
self.description
.insert("llb.customname".into(), name.into());
self
}
fn ignore_cache(mut self, ignore: bool) -> Self {
self.ignore_cache = ignore;
self
}
}
impl Operation for LocalSource {
fn id(&self) -> &OperationId {
&self.id
}
fn serialize(&self, _: &mut Context) -> Result<Node> {
let mut attrs = HashMap::default();
if !self.exclude.is_empty() {
attrs.insert(
"local.excludepatterns".into(),
serde_json::to_string(&self.exclude).unwrap(),
);
}
if !self.include.is_empty() {
attrs.insert(
"local.includepattern".into(),
serde_json::to_string(&self.include).unwrap(),
);
}
let head = pb::Op {
op: Some(Op::Source(SourceOp {
identifier: format!("local://{}", self.name),
attrs,
})),
..Default::default()
};
let metadata = OpMetadata {
description: self.description.clone(),
ignore_cache: self.ignore_cache,
..Default::default()
};
Ok(Node::new(head, metadata))
}
}
#[test]
fn serialization() {
crate::check_op!(
LocalSource::new("context"),
|digest| { "sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702" },
|description| { vec![] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "local://context".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
LocalSource::new("context").custom_name("context custom name"),
|digest| { "sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702" },
|description| { vec![("llb.customname", "context custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "local://context".into(),
attrs: Default::default(),
})
},
);
crate::check_op!(
{
LocalSource::new("context")
.custom_name("context custom name")
.add_exclude_pattern("**/target")
.add_exclude_pattern("Dockerfile")
},
|digest| { "sha256:f6962b8bb1659c63a2c2c3e2a7ccf0326c87530dd70c514343f127e4c20460c4" },
|description| { vec![("llb.customname", "context custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "local://context".into(),
attrs: crate::utils::test::to_map(vec![(
"local.excludepatterns",
r#"["**/target","Dockerfile"]"#,
)]),
})
},
);
crate::check_op!(
{
LocalSource::new("context")
.custom_name("context custom name")
.add_include_pattern("Cargo.toml")
.add_include_pattern("inner/Cargo.toml")
},
|digest| { "sha256:a7e628333262b810572f83193bbf8554e688abfb51d44ac30bdad7fa425f3839" },
|description| { vec![("llb.customname", "context custom name")] },
|caps| { vec![] },
|cached_tail| { vec![] },
|inputs| { vec![] },
|op| {
Op::Source(SourceOp {
identifier: "local://context".into(),
attrs: crate::utils::test::to_map(vec![(
"local.includepattern",
r#"["Cargo.toml","inner/Cargo.toml"]"#,
)]),
})
},
);
}

@ -0,0 +1,43 @@
mod git;
mod http;
mod image;
mod local;
pub use self::git::GitSource;
pub use self::http::HttpSource;
pub use self::image::{ImageSource, ResolveMode};
pub use self::local::LocalSource;
/// Provides an input for other operations. For example: the `FROM` directive in a Dockerfile.
#[derive(Debug)]
pub struct Source;
impl Source {
pub fn image<S>(name: S) -> ImageSource
where
S: Into<String>,
{
ImageSource::new(name)
}
pub fn git<S>(url: S) -> GitSource
where
S: Into<String>,
{
GitSource::new(url)
}
pub fn local<S>(name: S) -> LocalSource
where
S: Into<String>,
{
LocalSource::new(name)
}
pub fn http<S>(name: S) -> HttpSource
where
S: Into<String>,
{
HttpSource::new(name)
}
}
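// Constructor sketch (illustrative; the names and URLs are placeholders).
// Each helper corresponds to one LLB source identifier scheme:
#[cfg(test)]
#[allow(dead_code)]
fn constructors_sketch() {
    let _image = Source::image("library/alpine:latest"); // docker-image://
    let _repo = Source::git("github.com/user/repo.git"); // git://
    let _context = Source::local("context"); // local://
    let _file = Source::http("http://any.url/file"); // http://
}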

@ -0,0 +1,141 @@
use std::io::{self, Write};
use std::iter::once;
use buildkit_proto::pb::{self, Input};
use prost::Message;
use crate::serialization::{Context, Node, Result};
use crate::utils::OperationOutput;
/// Final operation in the graph. Responsible for emitting the complete LLB definition.
#[derive(Debug)]
pub struct Terminal<'a> {
input: OperationOutput<'a>,
}
impl<'a> Terminal<'a> {
pub fn with(input: OperationOutput<'a>) -> Self {
Self { input }
}
pub fn into_definition(self) -> pb::Definition {
let mut cx = Context::default();
let final_node_iter = once(self.serialize(&mut cx).unwrap());
let (def, metadata) = {
cx.into_registered_nodes()
.chain(final_node_iter)
.map(|node| (node.bytes, (node.digest, node.metadata)))
.unzip()
};
pb::Definition { def, metadata }
}
pub fn write_definition(self, mut writer: impl Write) -> io::Result<()> {
let mut bytes = Vec::new();
self.into_definition().encode(&mut bytes).unwrap();
writer.write_all(&bytes)
}
fn serialize(&self, cx: &mut Context) -> Result<Node> {
let final_op = pb::Op {
inputs: vec![Input {
digest: cx.register(self.input.operation())?.digest.clone(),
index: self.input.output().into(),
}],
..Default::default()
};
Ok(Node::new(final_op, Default::default()))
}
}
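// A minimal end-to-end sketch (illustrative): a finished graph is wired into
// `Terminal`, which serializes the whole LLB definition so it can be piped
// into BuildKit (e.g. `buildctl build`).
#[cfg(test)]
#[allow(dead_code)]
fn write_to_stdout_sketch() {
    use crate::prelude::*;
    let context = Source::local("context");
    Terminal::with(context.output())
        .write_definition(std::io::stdout())
        .expect("failed to write the LLB definition");
}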
#[test]
fn serialization() {
use crate::prelude::*;
let context = Source::local("context");
let builder_image = Source::image("rustlang/rust:nightly");
let final_image = Source::image("library/alpine:latest");
let first_command = Command::run("rustc")
.args(&["--crate-name", "crate-1"])
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::ReadOnlyLayer(context.output(), "/context"))
.mount(Mount::Scratch(OutputIdx(0), "/target"));
let second_command = Command::run("rustc")
.args(&["--crate-name", "crate-2"])
.mount(Mount::ReadOnlyLayer(builder_image.output(), "/"))
.mount(Mount::ReadOnlyLayer(context.output(), "/context"))
.mount(Mount::Scratch(OutputIdx(0), "/target"));
let assembly_op = FileSystem::sequence()
.append(FileSystem::mkdir(
OutputIdx(0),
LayerPath::Other(final_image.output(), "/output"),
))
.append(
FileSystem::copy()
.from(LayerPath::Other(first_command.output(0), "/target/crate-1"))
.to(
OutputIdx(1),
LayerPath::Own(OwnOutputIdx(0), "/output/crate-1"),
),
)
.append(
FileSystem::copy()
.from(LayerPath::Other(
second_command.output(0),
"/target/crate-2",
))
.to(
OutputIdx(2),
LayerPath::Own(OwnOutputIdx(1), "/output/crate-2"),
),
);
let definition = Terminal::with(assembly_op.output(0)).into_definition();
assert_eq!(
definition
.def
.iter()
.map(|bytes| Node::get_digest(&bytes))
.collect::<Vec<_>>(),
crate::utils::test::to_vec(vec![
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
"sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220",
"sha256:782f343f8f4ee33e4f342ed4209ad1a9eb4582485e45251595a5211ebf2b3cbf",
"sha256:3418ad515958b5e68fd45c9d6fbc8d2ce7d567a956150d22ff529a3fea401aa2",
"sha256:13bb644e4ec0cabe836392649a04551686e69613b1ea9c89a1a8f3bc86181791",
"sha256:d13a773a61236be3c7d539f3ef6d583095c32d2a2a60deda86e71705f2dbc99b",
])
);
let mut metadata_digests = {
definition
.metadata
.iter()
.map(|(digest, _)| digest.as_str())
.collect::<Vec<_>>()
};
metadata_digests.sort();
assert_eq!(
metadata_digests,
vec![
"sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220",
"sha256:13bb644e4ec0cabe836392649a04551686e69613b1ea9c89a1a8f3bc86181791",
"sha256:3418ad515958b5e68fd45c9d6fbc8d2ce7d567a956150d22ff529a3fea401aa2",
"sha256:782f343f8f4ee33e4f342ed4209ad1a9eb4582485e45251595a5211ebf2b3cbf",
"sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702",
"sha256:d13a773a61236be3c7d539f3ef6d583095c32d2a2a60deda86e71705f2dbc99b",
"sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a",
]
);
}

@ -0,0 +1,27 @@
use std::ops::Deref;
use std::sync::atomic::{AtomicU64, Ordering};
static LAST_ID: AtomicU64 = AtomicU64::new(0);
#[derive(Debug)]
pub(crate) struct OperationId(u64);
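// Note: cloning deliberately allocates a fresh id (via `Default`), so a cloned
// operation is registered as a distinct graph node during serialization rather
// than aliasing the original.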
impl Clone for OperationId {
fn clone(&self) -> Self {
OperationId::default()
}
}
impl Default for OperationId {
fn default() -> Self {
Self(LAST_ID.fetch_add(1, Ordering::Relaxed))
}
}
impl Deref for OperationId {
type Target = u64;
fn deref(&self) -> &u64 {
&self.0
}
}

@ -0,0 +1,39 @@
use std::collections::BTreeMap;
mod id;
mod operation;
mod output;
pub(crate) use self::id::OperationId;
pub(crate) use self::operation::Operation;
pub(crate) use self::output::Node;
pub(crate) type Result<T> = std::result::Result<T, ()>;
#[derive(Default)]
pub struct Context {
inner: BTreeMap<u64, Node>,
}
impl Context {
#[allow(clippy::map_entry)]
pub(crate) fn register<'a>(&'a mut self, op: &dyn Operation) -> Result<&'a Node> {
let id = **op.id();
if !self.inner.contains_key(&id) {
let node = op.serialize(self)?;
self.inner.insert(id, node);
}
Ok(self.inner.get(&id).unwrap())
}
#[cfg(test)]
pub(crate) fn registered_nodes_iter(&self) -> impl Iterator<Item = &Node> {
self.inner.iter().map(|pair| pair.1)
}
pub(crate) fn into_registered_nodes(self) -> impl Iterator<Item = Node> {
self.inner.into_iter().map(|pair| pair.1)
}
}

@ -0,0 +1,10 @@
use std::fmt::Debug;
use super::{Context, OperationId};
use super::{Node, Result};
pub(crate) trait Operation: Debug + Send + Sync {
fn id(&self) -> &OperationId;
fn serialize(&self, cx: &mut Context) -> Result<Node>;
}

@ -0,0 +1,30 @@
use buildkit_proto::pb;
use prost::Message;
use sha2::{Digest, Sha256};
#[derive(Debug, Default, Clone)]
pub(crate) struct Node {
pub bytes: Vec<u8>,
pub digest: String,
pub metadata: pb::OpMetadata,
}
impl Node {
pub fn new(message: pb::Op, metadata: pb::OpMetadata) -> Self {
let mut bytes = Vec::new();
message.encode(&mut bytes).unwrap();
Self {
digest: Self::get_digest(&bytes),
bytes,
metadata,
}
}
pub fn get_digest(bytes: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.input(&bytes);
format!("sha256:{:x}", hasher.result())
}
}

@ -0,0 +1,191 @@
use std::sync::Arc;
use crate::serialization::Operation;
#[derive(Copy, Clone, Debug)]
pub struct OutputIdx(pub u32);
#[derive(Copy, Clone, Debug)]
pub struct OwnOutputIdx(pub u32);
#[derive(Debug, Clone)]
pub struct OperationOutput<'a> {
kind: OperationOutputKind<'a>,
}
#[derive(Debug, Clone)]
enum OperationOutputKind<'a> {
Owned(Arc<dyn Operation + 'a>, OutputIdx),
Borrowed(&'a dyn Operation, OutputIdx),
}
impl<'a> OperationOutput<'a> {
pub(crate) fn owned(op: Arc<dyn Operation + 'a>, idx: OutputIdx) -> Self {
Self {
kind: OperationOutputKind::Owned(op, idx),
}
}
pub(crate) fn borrowed(op: &'a dyn Operation, idx: OutputIdx) -> Self {
Self {
kind: OperationOutputKind::Borrowed(op, idx),
}
}
pub(crate) fn operation(&self) -> &dyn Operation {
match self.kind {
OperationOutputKind::Owned(ref op, ..) => op.as_ref(),
OperationOutputKind::Borrowed(ref op, ..) => *op,
}
}
pub(crate) fn output(&self) -> OutputIdx {
match self.kind {
OperationOutputKind::Owned(_, output) | OperationOutputKind::Borrowed(_, output) => {
output
}
}
}
}
impl Into<i64> for OutputIdx {
fn into(self) -> i64 {
self.0.into()
}
}
impl Into<i64> for &OutputIdx {
fn into(self) -> i64 {
self.0.into()
}
}
impl Into<i64> for OwnOutputIdx {
fn into(self) -> i64 {
self.0.into()
}
}
impl Into<i64> for &OwnOutputIdx {
fn into(self) -> i64 {
self.0.into()
}
}
impl Into<i32> for OutputIdx {
fn into(self) -> i32 {
self.0 as i32
}
}
impl Into<i32> for &OutputIdx {
fn into(self) -> i32 {
self.0 as i32
}
}
impl Into<i32> for OwnOutputIdx {
fn into(self) -> i32 {
self.0 as i32
}
}
impl Into<i32> for &OwnOutputIdx {
fn into(self) -> i32 {
self.0 as i32
}
}
#[cfg(test)]
pub mod test {
#[macro_export]
macro_rules! check_op {
($op:expr, $(|$name:ident| $value:expr,)*) => ($crate::check_op!($op, $(|$name| $value),*));
($op:expr, $(|$name:ident| $value:expr),*) => {{
#[allow(unused_imports)]
use crate::serialization::{Context, Operation};
let mut context = Context::default();
let serialized = $op.serialize(&mut context).unwrap();
$(crate::check_op_property!(serialized, context, $name, $value));*
}};
}
#[macro_export]
macro_rules! check_op_property {
($serialized:expr, $context:expr, op, $value:expr) => {{
use std::io::Cursor;
use buildkit_proto::pb;
use prost::Message;
assert_eq!(
pb::Op::decode(Cursor::new(&$serialized.bytes)).unwrap().op,
Some($value)
);
}};
($serialized:expr, $context:expr, inputs, $value:expr) => {{
use std::io::Cursor;
use buildkit_proto::pb;
use prost::Message;
assert_eq!(
pb::Op::decode(Cursor::new(&$serialized.bytes))
.unwrap()
.inputs
.into_iter()
.map(|input| (input.digest, input.index))
.collect::<Vec<_>>(),
$value
.into_iter()
.map(|input: (&str, i64)| (String::from(input.0), input.1))
.collect::<Vec<_>>()
);
}};
($serialized:expr, $context:expr, cached_tail, $value:expr) => {
assert_eq!(
$context
.registered_nodes_iter()
.map(|node| node.digest.clone())
.collect::<Vec<_>>(),
crate::utils::test::to_vec($value),
);
};
($serialized:expr, $context:expr, caps, $value:expr) => {{
let mut caps = $serialized
.metadata
.caps
.into_iter()
.map(|pair| pair.0)
.collect::<Vec<_>>();
caps.sort();
assert_eq!(caps, crate::utils::test::to_vec($value));
}};
($serialized:expr, $context:expr, description, $value:expr) => {
assert_eq!(
$serialized.metadata.description,
crate::utils::test::to_map($value),
);
};
($serialized:expr, $context:expr, digest, $value:expr) => {
assert_eq!($serialized.digest, $value);
};
}
use std::collections::HashMap;
pub fn to_map(pairs: Vec<(&str, &str)>) -> HashMap<String, String> {
pairs
.into_iter()
.map(|(key, value): (&str, &str)| (key.into(), value.into()))
.collect()
}
pub fn to_vec(items: Vec<&str>) -> Vec<String> {
items.into_iter().map(String::from).collect()
}
}

@ -0,0 +1,14 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.2.0] - 2020-03-04
### Changed
- Use `tonic` instead of `tower-grpc` for codegen.
## [0.1.0] - 2019-09-24
Initial release.

@ -0,0 +1,24 @@
[package]
name = "buildkit-proto"
version = "0.2.0"
authors = ["Denys Zariaiev <denys.zariaiev@gmail.com>"]
edition = "2018"
description = "Protobuf interfaces to BuildKit"
documentation = "https://docs.rs/buildkit-proto"
repository = "https://github.com/denzp/rust-buildkit"
readme = "README.md"
keywords = ["buildkit", "docker", "protobuf", "prost"]
categories = ["development-tools::build-utils", "api-bindings"]
license = "MIT/Apache-2.0"
[dependencies]
prost = "0.6"
prost-types = "0.6"
tonic = { git = "https://github.com/edrevo/tonic", branch = "unimplemented-content-type" }
[build-dependencies.tonic-build]
git = "https://github.com/edrevo/tonic"
branch = "unimplemented-content-type"
default-features = false
features = ["prost", "transport"]

@ -0,0 +1,33 @@
`buildkit-proto` - protobuf interfaces to BuildKit
=======
[![Actions Status]][Actions Link]
[![buildkit-proto Crates Badge]][buildkit-proto Crates Link]
[![buildkit-proto Docs Badge]][buildkit-proto Docs Link]
# Usage
The crate is not intended to be used alone.
The idiomatic high-level API provided by [`buildkit-llb`][buildkit-llb Crates Link] is the preferred way to build LLB graphs.
# License
`buildkit-proto` is primarily distributed under the terms of both the MIT license and
the Apache License (Version 2.0), with portions covered by various BSD-like
licenses.
See LICENSE-APACHE, and LICENSE-MIT for details.
# Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in `buildkit-proto` by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.
[Actions Link]: https://github.com/denzp/rust-buildkit/actions
[Actions Status]: https://github.com/denzp/rust-buildkit/workflows/CI/badge.svg
[buildkit-proto Docs Badge]: https://docs.rs/buildkit-proto/badge.svg
[buildkit-proto Docs Link]: https://docs.rs/buildkit-proto/
[buildkit-proto Crates Badge]: https://img.shields.io/crates/v/buildkit-proto.svg
[buildkit-proto Crates Link]: https://crates.io/crates/buildkit-proto
[buildkit-llb Crates Link]: https://crates.io/crates/buildkit-llb

@ -0,0 +1,11 @@
const DEFS: &[&str] = &["proto/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto"];
const PATHS: &[&str] = &["proto"];
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::configure()
.build_client(true)
.build_server(true)
.compile(DEFS, PATHS)?;
Ok(())
}

@ -0,0 +1,47 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.rpc;
import "google/protobuf/any.proto";
option cc_enable_arenas = true;
option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
option java_multiple_files = true;
option java_outer_classname = "StatusProto";
option java_package = "com.google.rpc";
option objc_class_prefix = "RPC";
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
// used by [gRPC](https://github.com/grpc). Each `Status` message contains
// three pieces of data: error code, error message, and error details.
//
// You can find out more about this error model and how to work with it in the
// [API Design Guide](https://cloud.google.com/apis/design/errors).
message Status {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
int32 code = 1;
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
string message = 2;
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
repeated google.protobuf.Any details = 3;
}

@ -0,0 +1,144 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package gogoproto;
import "google/protobuf/descriptor.proto";
option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/gogo/protobuf/gogoproto";
extend google.protobuf.EnumOptions {
optional bool goproto_enum_prefix = 62001;
optional bool goproto_enum_stringer = 62021;
optional bool enum_stringer = 62022;
optional string enum_customname = 62023;
optional bool enumdecl = 62024;
}
extend google.protobuf.EnumValueOptions {
optional string enumvalue_customname = 66001;
}
extend google.protobuf.FileOptions {
optional bool goproto_getters_all = 63001;
optional bool goproto_enum_prefix_all = 63002;
optional bool goproto_stringer_all = 63003;
optional bool verbose_equal_all = 63004;
optional bool face_all = 63005;
optional bool gostring_all = 63006;
optional bool populate_all = 63007;
optional bool stringer_all = 63008;
optional bool onlyone_all = 63009;
optional bool equal_all = 63013;
optional bool description_all = 63014;
optional bool testgen_all = 63015;
optional bool benchgen_all = 63016;
optional bool marshaler_all = 63017;
optional bool unmarshaler_all = 63018;
optional bool stable_marshaler_all = 63019;
optional bool sizer_all = 63020;
optional bool goproto_enum_stringer_all = 63021;
optional bool enum_stringer_all = 63022;
optional bool unsafe_marshaler_all = 63023;
optional bool unsafe_unmarshaler_all = 63024;
optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
optional bool compare_all = 63029;
optional bool typedecl_all = 63030;
optional bool enumdecl_all = 63031;
optional bool goproto_registration = 63032;
optional bool messagename_all = 63033;
optional bool goproto_sizecache_all = 63034;
optional bool goproto_unkeyed_all = 63035;
}
extend google.protobuf.MessageOptions {
optional bool goproto_getters = 64001;
optional bool goproto_stringer = 64003;
optional bool verbose_equal = 64004;
optional bool face = 64005;
optional bool gostring = 64006;
optional bool populate = 64007;
optional bool stringer = 67008;
optional bool onlyone = 64009;
optional bool equal = 64013;
optional bool description = 64014;
optional bool testgen = 64015;
optional bool benchgen = 64016;
optional bool marshaler = 64017;
optional bool unmarshaler = 64018;
optional bool stable_marshaler = 64019;
optional bool sizer = 64020;
optional bool unsafe_marshaler = 64023;
optional bool unsafe_unmarshaler = 64024;
optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
optional bool compare = 64029;
optional bool typedecl = 64030;
optional bool messagename = 64033;
optional bool goproto_sizecache = 64034;
optional bool goproto_unkeyed = 64035;
}
extend google.protobuf.FieldOptions {
optional bool nullable = 65001;
optional bool embed = 65002;
optional string customtype = 65003;
optional string customname = 65004;
optional string jsontag = 65005;
optional string moretags = 65006;
optional string casttype = 65007;
optional string castkey = 65008;
optional string castvalue = 65009;
optional bool stdtime = 65010;
optional bool stdduration = 65011;
optional bool wktpointer = 65012;
}

@ -0,0 +1,24 @@
syntax = "proto3";
package moby.buildkit.v1.types;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
message WorkerRecord {
string ID = 1;
map<string, string> Labels = 2;
repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
repeated GCPolicy GCPolicy = 4;
}
message GCPolicy {
bool all = 1;
int64 keepDuration = 2;
int64 keepBytes = 3;
repeated string filters = 4;
}

@ -0,0 +1,164 @@
syntax = "proto3";
package moby.buildkit.v1.frontend;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/gogo/googleapis/google/rpc/status.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
import "github.com/moby/buildkit/api/types/worker.proto";
import "github.com/moby/buildkit/util/apicaps/pb/caps.proto";
import "github.com/tonistiigi/fsutil/types/stat.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service LLBBridge {
// apicaps:CapResolveImage
rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
// apicaps:CapSolveBase
rpc Solve(SolveRequest) returns (SolveResponse);
// apicaps:CapReadFile
rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
// apicaps:CapReadDir
rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
// apicaps:CapStatFile
rpc StatFile(StatFileRequest) returns (StatFileResponse);
rpc Ping(PingRequest) returns (PongResponse);
rpc Return(ReturnRequest) returns (ReturnResponse);
// apicaps:CapFrontendInputs
rpc Inputs(InputsRequest) returns (InputsResponse);
}
message Result {
oneof result {
// Deprecated non-array refs.
string refDeprecated = 1;
RefMapDeprecated refsDeprecated = 2;
Ref ref = 3;
RefMap refs = 4;
}
map<string, bytes> metadata = 10;
}
message RefMapDeprecated {
map<string, string> refs = 1;
}
message Ref {
string id = 1;
pb.Definition def = 2;
}
message RefMap {
map<string, Ref> refs = 1;
}
message ReturnRequest {
Result result = 1;
google.rpc.Status error = 2;
}
message ReturnResponse {
}
message InputsRequest {
}
message InputsResponse {
map<string, pb.Definition> Definitions = 1;
}
message ResolveImageConfigRequest {
string Ref = 1;
pb.Platform Platform = 2;
string ResolveMode = 3;
string LogName = 4;
}
message ResolveImageConfigResponse {
string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
bytes Config = 2;
}
message SolveRequest {
pb.Definition Definition = 1;
string Frontend = 2;
map<string, string> FrontendOpt = 3;
// ImportCacheRefsDeprecated is deprecated in favor of the new Imports since BuildKit v0.4.0.
// When ImportCacheRefsDeprecated is set, the solver appends
// {.Type = "registry", .Attrs = {"ref": importCacheRef}}
// for each of the ImportCacheRefs entries to CacheImports for compatibility. (planned to be removed)
repeated string ImportCacheRefsDeprecated = 4;
bool allowResultReturn = 5;
bool allowResultArrayRef = 6;
// apicaps.CapSolveInlineReturn deprecated
bool Final = 10;
bytes ExporterAttr = 11;
// CacheImports was added in BuildKit v0.4.0.
// apicaps:CapImportCaches
repeated CacheOptionsEntry CacheImports = 12;
// apicaps:CapFrontendInputs
map<string, pb.Definition> FrontendInputs = 13;
}
// CacheOptionsEntry corresponds to the control.CacheOptionsEntry
message CacheOptionsEntry {
string Type = 1;
map<string, string> Attrs = 2;
}
message SolveResponse {
// deprecated
string ref = 1; // can be used by readfile request
// deprecated
/* bytes ExporterAttr = 2;*/
// these fields are returned when allowMapReturn was set
Result result = 3;
}
message ReadFileRequest {
string Ref = 1;
string FilePath = 2;
FileRange Range = 3;
}
message FileRange {
int64 Offset = 1;
int64 Length = 2;
}
message ReadFileResponse {
bytes Data = 1;
}
message ReadDirRequest {
string Ref = 1;
string DirPath = 2;
string IncludePattern = 3;
}
message ReadDirResponse {
repeated fsutil.types.Stat entries = 1;
}
message StatFileRequest {
string Ref = 1;
string Path = 2;
}
message StatFileResponse {
fsutil.types.Stat stat = 1;
}
message PingRequest{
}
message PongResponse{
repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false];
repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
}

@ -0,0 +1,305 @@
syntax = "proto3";
// Package pb provides the protobuf definition of LLB: low-level builder instruction.
// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph.
package pb;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Op represents a vertex of the LLB DAG.
message Op {
// inputs is a set of input edges.
repeated Input inputs = 1;
oneof op {
ExecOp exec = 2;
SourceOp source = 3;
FileOp file = 4;
BuildOp build = 5;
}
Platform platform = 10;
WorkerConstraints constraints = 11;
}
// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform
message Platform {
string Architecture = 1;
string OS = 2;
string Variant = 3;
string OSVersion = 4; // unused
repeated string OSFeatures = 5; // unused
}
// Input represents an input edge for an Op.
message Input {
// digest of the marshaled input Op
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// output index of the input Op
int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
}
// ExecOp executes a command in a container.
message ExecOp {
Meta meta = 1;
repeated Mount mounts = 2;
NetMode network = 3;
SecurityMode security = 4;
}
// Meta is a set of arguments for ExecOp.
// Meta is unrelated to LLB metadata.
// FIXME: rename (ExecContext? ExecArgs?)
message Meta {
repeated string args = 1;
repeated string env = 2;
string cwd = 3;
string user = 4;
ProxyEnv proxy_env = 5;
repeated HostIP extraHosts = 6;
}
enum NetMode {
UNSET = 0; // sandbox
HOST = 1;
NONE = 2;
}
enum SecurityMode {
SANDBOX = 0;
INSECURE = 1; // privileged mode
}
// Mount specifies how to mount an input Op as a filesystem.
message Mount {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
string selector = 2;
string dest = 3;
int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
bool readonly = 5;
MountType mountType = 6;
CacheOpt cacheOpt = 20;
SecretOpt secretOpt = 21;
SSHOpt SSHOpt = 22;
}
// MountType defines the type of a mount from the supported set
enum MountType {
BIND = 0;
SECRET = 1;
SSH = 2;
CACHE = 3;
TMPFS = 4;
}
// CacheOpt defines options specific to cache mounts
message CacheOpt {
// ID is an optional namespace for the mount
string ID = 1;
// Sharing is the sharing mode for the mount
CacheSharingOpt sharing = 2;
}
// CacheSharingOpt defines the different sharing modes for cache mounts
enum CacheSharingOpt {
// SHARED cache mount can be used concurrently by multiple writers
SHARED = 0;
// PRIVATE creates a new mount if there are multiple writers
PRIVATE = 1;
// LOCKED pauses a second writer until the first one releases the mount
LOCKED = 2;
}
// SecretOpt defines options describing secret mounts
message SecretOpt {
// ID of secret. Used for querying the value.
string ID = 1;
// UID of secret file
uint32 uid = 2;
// GID of secret file
uint32 gid = 3;
// Mode is the filesystem mode of secret file
uint32 mode = 4;
// Optional defines whether the secret value is required. An error is produced
// if the value is not found and optional is false.
bool optional = 5;
}
// SSHOpt defines options describing ssh mounts
message SSHOpt {
// ID of exposed ssh rule. Used for querying the value.
string ID = 1;
// UID of agent socket
uint32 uid = 2;
// GID of agent socket
uint32 gid = 3;
// Mode is the filesystem mode of agent socket
uint32 mode = 4;
// Optional defines whether the ssh socket is required. An error is produced
// if the client does not expose ssh.
bool optional = 5;
}
// SourceOp specifies a source, such as a build context or an image.
message SourceOp {
// TODO: use source type or any type instead of URL protocol.
// identifier e.g. local://, docker-image://, git://, https://...
string identifier = 1;
// attrs are defined in attr.go
map<string, string> attrs = 2;
}
// BuildOp is used for nested build invocation.
// BuildOp is experimental and may change without preserving backwards compatibility
message BuildOp {
int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
map<string, BuildInput> inputs = 2;
Definition def = 3;
map<string, string> attrs = 4;
// outputs
}
// BuildInput is used for BuildOp.
message BuildInput {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and overridden at run time.
message OpMetadata {
// ignore_cache specifies that the cache should be ignored for this Op.
bool ignore_cache = 1;
// Description can hold any text fields that the builder doesn't parse
map<string, string> description = 2;
// index 3 reserved for WorkerConstraint in previous versions
// WorkerConstraint worker_constraint = 3;
ExportCache export_cache = 4;
map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false];
}
message ExportCache {
bool Value = 1;
}
message ProxyEnv {
string http_proxy = 1;
string https_proxy = 2;
string ftp_proxy = 3;
string no_proxy = 4;
}
// WorkerConstraints defines conditions for the worker
message WorkerConstraints {
repeated string filter = 1; // containerd-style filter
}
// Definition is the LLB definition structure with per-vertex metadata entries
message Definition {
// def is a list of marshaled Op messages
repeated bytes def = 1;
// metadata contains metadata for each of the Op messages.
// A key must be an LLB op digest string. Currently, an empty string is not expected as a key, but this may change in the future.
map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message HostIP {
string Host = 1;
string IP = 2;
}
message FileOp {
repeated FileAction actions = 2;
}
message FileAction {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index)
int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//--
int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
oneof action {
// FileActionCopy copies files from secondaryInput on top of input
FileActionCopy copy = 4;
// FileActionMkFile creates a new file
FileActionMkFile mkfile = 5;
// FileActionMkDir creates a new directory
FileActionMkDir mkdir = 6;
// FileActionRm removes a file
FileActionRm rm = 7;
}
}
message FileActionCopy {
// src is the source path
string src = 1;
// dest path
string dest = 2;
// optional owner override
ChownOpt owner = 3;
// optional permission bits override
int32 mode = 4;
// followSymlink resolves symlinks in src
bool followSymlink = 5;
// dirCopyContents only copies contents if src is a directory
bool dirCopyContents = 6;
// attemptUnpackDockerCompatibility detects whether src is an archive and, if so, unpacks it instead
bool attemptUnpackDockerCompatibility = 7;
// createDestPath creates dest path directories if needed
bool createDestPath = 8;
// allowWildcard allows filepath.Match wildcards in src path
bool allowWildcard = 9;
// allowEmptyWildcard doesn't fail the whole copy if the wildcard doesn't resolve to any files
bool allowEmptyWildcard = 10;
// optional created time override
int64 timestamp = 11;
}
message FileActionMkFile {
// path for the new file
string path = 1;
// permission bits
int32 mode = 2;
// data is the new file contents
bytes data = 3;
// optional owner for the new file
ChownOpt owner = 4;
// optional created time override
int64 timestamp = 5;
}
message FileActionMkDir {
// path for the new directory
string path = 1;
// permission bits
int32 mode = 2;
// makeParents creates parent directories as well if needed
bool makeParents = 3;
// optional owner for the new directory
ChownOpt owner = 4;
// optional created time override
int64 timestamp = 5;
}
message FileActionRm {
// path to remove
string path = 1;
// allowNotFound doesn't fail the rm if the file is not found
bool allowNotFound = 2;
// allowWildcard allows filepath.Match wildcards in path
bool allowWildcard = 3;
}
message ChownOpt {
UserOpt user = 1;
UserOpt group = 2;
}
message UserOpt {
oneof user {
NamedUserOpt byName = 1;
uint32 byID = 2;
}
}
message NamedUserOpt {
string name = 1;
int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
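The `buildkit-llb` crate in this workspace wraps these messages in a typed builder API. A minimal sketch of a one-vertex graph, with signatures assumed from the usage in `main.rs` below:

```rust
use buildkit_llb::prelude::*;

// Build a single-SourceOp LLB graph for the local build context and
// marshal it into the pb.Definition message defined above.
// Return type assumed from SolveRequest's definition field.
fn context_definition() -> buildkit_proto::pb::Definition {
    let context = Source::local("context"); // SourceOp with a local:// identifier
    Terminal::with(context.output()) // marks the DAG's result edge
        .into_definition() // serialized Op list plus per-digest metadata
}
```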

@ -0,0 +1,19 @@
syntax = "proto3";
package moby.buildkit.v1.apicaps;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
// APICap defines a capability supported by the service
message APICap {
string ID = 1;
bool Enabled = 2;
bool Deprecated = 3; // Unused. May be used for warnings in the future
string DisabledReason = 4; // Reason key for detection code
string DisabledReasonMsg = 5; // Message to the user
string DisabledAlternative = 6; // Identifier that an updated client could pick up.
}

@ -0,0 +1,19 @@
syntax = "proto3";
package fsutil.types;
option go_package = "types";
message Stat {
string path = 1;
uint32 mode = 2;
uint32 uid = 3;
uint32 gid = 4;
int64 size = 5;
int64 modTime = 6;
// int32 typeflag = 7;
string linkname = 7;
int64 devmajor = 8;
int64 devminor = 9;
map<string, bytes> xattrs = 10;
}

@ -0,0 +1,35 @@
#[allow(clippy::all)]
pub mod moby {
pub mod buildkit {
pub mod v1 {
pub mod frontend {
include!(concat!(env!("OUT_DIR"), "/moby.buildkit.v1.frontend.rs"));
}
pub mod apicaps {
include!(concat!(env!("OUT_DIR"), "/moby.buildkit.v1.apicaps.rs"));
}
pub mod types {
include!(concat!(env!("OUT_DIR"), "/moby.buildkit.v1.types.rs"));
}
}
}
}
pub mod google {
pub mod rpc {
include!(concat!(env!("OUT_DIR"), "/google.rpc.rs"));
}
}
pub mod pb {
include!(concat!(env!("OUT_DIR"), "/pb.rs"));
}
pub mod fsutil {
pub mod types {
include!(concat!(env!("OUT_DIR"), "/fsutil.types.rs"));
}
}
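The `include!` paths above point at code generated into `OUT_DIR` at build time. The crate's `build.rs` is not part of this view; the sketch below shows the kind of `tonic-build` invocation that would produce these modules, with proto paths assumed from the refresh script that follows:

```rust
// build.rs (illustrative sketch, not the crate's actual build script)
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_build::configure()
        .build_server(true) // the frontend also serves the LLBBridge API
        .compile(
            &["proto/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto"],
            &["proto"], // include root for the vendored imports
        )?;
    Ok(())
}
```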

@ -0,0 +1,13 @@
#!/bin/sh
set -e
export BUILDKIT_VERSION="v0.7.2"
curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/api/types/worker.proto" > proto/github.com/moby/buildkit/api/types/worker.proto
curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/frontend/gateway/pb/gateway.proto" > proto/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/solver/pb/ops.proto" > proto/github.com/moby/buildkit/solver/pb/ops.proto
curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/util/apicaps/pb/caps.proto" > proto/github.com/moby/buildkit/util/apicaps/pb/caps.proto
curl "https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/status.proto" > proto/github.com/gogo/googleapis/google/rpc/status.proto
curl "https://raw.githubusercontent.com/gogo/protobuf/v1.2.1/gogoproto/gogo.proto" > proto/github.com/gogo/protobuf/gogoproto/gogo.proto
curl "https://raw.githubusercontent.com/tonistiigi/fsutil/master/types/stat.proto" > proto/github.com/tonistiigi/fsutil/types/stat.proto

@ -0,0 +1,27 @@
[package]
name = "dockerfile-plus"
version = "0.1.0"
authors = ["Ximo Guanter <ximo.guanter@gmail.com>"]
edition = "2018"
[dependencies]
anyhow = "1"
async-trait = "0.1"
crossbeam = "0.7"
either = "1"
env_logger = "0.8"
futures = "0.3"
libc = "0.2"
mio = "0.6"
pin-project = "1"
prost = "0.6"
prost-types = "0.6"
regex = "1.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
tokio = { version = "0.2", features = ["macros", "rt-core", "rt-threaded"] }
tonic = { git = "https://github.com/edrevo/tonic", branch = "unimplemented-content-type" }
tower = "0.3"
url = "2.2"
buildkit-llb = { version = "0.2", path = "../buildkit-llb" }
buildkit-proto = { version = "0.2", path = "../buildkit-proto" }

@ -0,0 +1,19 @@
# syntax = docker/dockerfile:1.2.1
FROM clux/muslrust:stable as builder
USER root
WORKDIR /rust-src
COPY . /rust-src
RUN --mount=type=cache,target=/rust-src/target \
--mount=type=cache,target=/root/.cargo/git \
--mount=type=cache,target=/root/.cargo/registry \
["cargo", "build", "--release", "--target", "x86_64-unknown-linux-musl", "-p", "dockerfile-plus"]
RUN --mount=type=cache,target=/rust-src/target \
["cp", "/rust-src/target/x86_64-unknown-linux-musl/release/dockerfile-plus", "/usr/local/bin/dockerfile-plus"]
FROM docker/dockerfile:1.2.1
COPY --from=builder /usr/local/bin/dockerfile-plus /usr/local/bin/dockerfile-plus
ENTRYPOINT ["/usr/local/bin/dockerfile-plus"]
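Because of the `# syntax` directive and the `RUN --mount` cache mounts, this image must be built with BuildKit enabled; the tag is illustrative:

```sh
DOCKER_BUILDKIT=1 docker build -t dockerfile-plus .
```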

@ -0,0 +1,21 @@
# Examples
In this folder you can find some example Dockerfile syntax extensions.
## Noop
This is the most basic example. It just adds a new instruction, `NOOP`, which does nothing (i.e. it is ignored). With this extension, the following Dockerfile would successfully compile:
```dockerfile
# syntax = edrevo/noop-dockerfile
NOOP
FROM alpine
NOOP
WORKDIR /
RUN echo "Hello World"
```

@ -0,0 +1,9 @@
# syntax = edrevo/dockerfile-plus:0.1
FROM alpine
INCLUDE+ Dockerfile.common
WORKDIR /
RUN echo "Hello World"
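`Dockerfile.common` itself is not reproduced in this view; as a purely hypothetical stand-in, it would hold the shared instructions to be inlined, for example:

```dockerfile
# Hypothetical Dockerfile.common contents (illustrative only)
RUN apk add --no-cache ca-certificates
ENV LANG=C.UTF-8
```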

@ -0,0 +1,5 @@
FROM alpine
ENV RUST_LOG=debug
ENTRYPOINT ["/usr/local/bin/noop"]

@ -0,0 +1,175 @@
use std::{process::Stdio, sync::Arc};
use crate::stdio::StdioSocket;
use anyhow::Result;
use buildkit_proto::moby::buildkit::v1::frontend::{
self, llb_bridge_client::LlbBridgeClient, llb_bridge_server::LlbBridge,
};
use crossbeam::{channel, Sender};
use frontend::{llb_bridge_server::LlbBridgeServer, ReadFileResponse};
use tokio::sync::RwLock;
use tonic::{transport::Channel, transport::Server, Request, Response};
pub struct DockerfileFrontend {
client: LlbBridgeClient<Channel>,
dockerfile_name: String,
}
impl DockerfileFrontend {
pub fn new(client: LlbBridgeClient<Channel>, dockerfile_name: &str) -> DockerfileFrontend {
DockerfileFrontend {
client,
dockerfile_name: dockerfile_name.to_string(),
}
}
pub async fn solve(&self, dockerfile_contents: &str) -> Result<frontend::ReturnRequest> {
let mut dockerfile_front = std::process::Command::new("/bin/dockerfile-frontend")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.envs(std::env::vars())
.spawn()?;
let (tx, rx) = channel::bounded(1);
Server::builder()
.add_service(LlbBridgeServer::new(ProxyLlbServer::new(
self.client.clone(),
tx,
self.dockerfile_name.clone(),
dockerfile_contents.as_bytes().to_vec(),
)))
.serve_with_incoming(tokio::stream::once(StdioSocket::try_new_rw(
dockerfile_front.stdout.take().unwrap(),
dockerfile_front.stdin.take().unwrap(),
)))
.await?;
dockerfile_front.wait()?;
Ok(rx.recv()?)
}
}
struct ProxyLlbServer {
client: Arc<RwLock<LlbBridgeClient<Channel>>>,
result_sender: Sender<frontend::ReturnRequest>,
dockerfile_name: String,
dockerfile_contents: Vec<u8>,
}
impl ProxyLlbServer {
fn new(
client: LlbBridgeClient<Channel>,
result_sender: Sender<frontend::ReturnRequest>,
dockerfile_name: String,
dockerfile_contents: Vec<u8>,
) -> Self {
ProxyLlbServer {
client: Arc::new(RwLock::new(client)),
result_sender,
dockerfile_name,
dockerfile_contents,
}
}
}
#[tonic::async_trait]
impl LlbBridge for ProxyLlbServer {
async fn resolve_image_config(
&self,
request: Request<frontend::ResolveImageConfigRequest>,
) -> Result<Response<frontend::ResolveImageConfigResponse>, tonic::Status> {
eprintln!("Resolve image config: {:?}", request);
let result = self
.client
.write()
.await
.resolve_image_config(request)
.await;
eprintln!("{:?}", result);
result
}
async fn solve(
&self,
request: Request<frontend::SolveRequest>,
) -> Result<Response<frontend::SolveResponse>, tonic::Status> {
eprintln!("Solve: {:?}", request);
let result = self.client.write().await.solve(request).await;
eprintln!("{:?}", result);
result
}
async fn read_file(
&self,
request: Request<frontend::ReadFileRequest>,
) -> Result<Response<frontend::ReadFileResponse>, tonic::Status> {
eprintln!("Read file: {:?}", request);
let inner = request.into_inner();
let request = Request::new(inner.clone());
let result = if inner.file_path == self.dockerfile_name {
eprintln!("ITS A TRAP!");
eprintln!(
"{}",
std::str::from_utf8(&self.dockerfile_contents).unwrap()
);
Ok(Response::new(ReadFileResponse {
data: self.dockerfile_contents.clone(),
}))
} else {
self.client.write().await.read_file(request).await
};
eprintln!("{:?}", result);
result
}
async fn read_dir(
&self,
request: Request<frontend::ReadDirRequest>,
) -> Result<Response<frontend::ReadDirResponse>, tonic::Status> {
eprintln!("Read dir: {:?}", request);
let result = self.client.write().await.read_dir(request).await;
eprintln!("{:?}", result);
result
}
async fn stat_file(
&self,
request: Request<frontend::StatFileRequest>,
) -> Result<Response<frontend::StatFileResponse>, tonic::Status> {
eprintln!("Stat file: {:?}", request);
let result = self.client.write().await.stat_file(request).await;
eprintln!("{:?}", result);
result
}
async fn ping(
&self,
request: Request<frontend::PingRequest>,
) -> Result<Response<frontend::PongResponse>, tonic::Status> {
eprintln!("Ping: {:?}", request);
let result = self.client.write().await.ping(request).await;
eprintln!("{:?}", result);
result
}
async fn r#return(
&self,
request: Request<frontend::ReturnRequest>,
) -> Result<Response<frontend::ReturnResponse>, tonic::Status> {
// Do not send return request to buildkit
let inner = request.into_inner();
self.result_sender.send(inner).unwrap();
Ok(Response::new(frontend::ReturnResponse {}))
}
async fn inputs(
&self,
request: Request<frontend::InputsRequest>,
) -> Result<Response<frontend::InputsResponse>, tonic::Status> {
eprintln!("Inputs: {:?}", request);
let result = self.client.write().await.inputs(request).await;
eprintln!("{:?}", result);
result
}
}

@ -0,0 +1,130 @@
use std::path::PathBuf;
use dockerfile_frontend::DockerfileFrontend;
use anyhow::{Context, Result};
use buildkit_llb::prelude::*;
use buildkit_proto::{
google::rpc::Status,
moby::buildkit::v1::frontend::{
llb_bridge_client::LlbBridgeClient, result::Result as RefResult, FileRange,
ReadFileRequest, ReturnRequest, SolveRequest,
},
};
use serde::Deserialize;
use tonic::{transport::Channel, transport::Endpoint};
use tower::service_fn;
mod dockerfile_frontend;
mod options;
mod stdio;
async fn read_file<P>(
client: &mut LlbBridgeClient<Channel>,
layer: &str,
path: P,
range: Option<FileRange>,
) -> Result<Vec<u8>>
where
P: Into<PathBuf>,
{
let file_path = path.into().display().to_string();
let request = ReadFileRequest {
r#ref: layer.to_string(),
file_path,
range,
};
let response = client.read_file(request).await?.into_inner().data;
Ok(response)
}
async fn solve<'a>(client: &mut LlbBridgeClient<Channel>, graph: Terminal<'a>) -> Result<String> {
let solve_request = SolveRequest {
definition: Some(graph.into_definition()),
exporter_attr: vec![],
allow_result_return: true,
..Default::default()
};
let temp_result = client
.solve(solve_request)
.await?
.into_inner()
.result
.unwrap()
.result
.unwrap();
match temp_result {
RefResult::RefDeprecated(inner) => Ok(inner),
_ => panic!("Unexpected result"),
}
}
async fn run() -> Result<()> {
let channel = {
Endpoint::from_static("http://[::]:50051")
.connect_with_connector(service_fn(stdio::stdio_connector))
.await?
};
let mut client = LlbBridgeClient::new(channel);
let opts: DockerfileOptions = options::from_env(std::env::vars())?;
let dockerfile_path = opts
.filename
.as_ref()
.and_then(|p| p.to_str())
.unwrap_or("Dockerfile");
let dockerfile_source = Source::local("dockerfile");
let dockerfile_layer = solve(&mut client, Terminal::with(dockerfile_source.output())).await?;
let dockerfile_contents =
String::from_utf8(read_file(&mut client, &dockerfile_layer, dockerfile_path, None).await?)?;
let dockerfile_frontend = DockerfileFrontend::new(client.clone(), dockerfile_path);
let result = dockerfile_trap(client.clone(), dockerfile_frontend, dockerfile_contents)
.await
.unwrap_or_else(|e| ReturnRequest {
result: None,
error: Some(Status {
code: 128,
message: e.to_string(),
details: vec![],
}),
});
client.r#return(result).await?;
Ok(())
}
#[tokio::main]
async fn main() {
env_logger::init();
run().await.unwrap();
}
#[derive(Debug, Deserialize)]
struct DockerfileOptions {
filename: Option<PathBuf>,
}
const INCLUDE_COMMAND: &str = "INCLUDE+";
async fn dockerfile_trap(
mut client: LlbBridgeClient<Channel>,
dockerfile_frontend: DockerfileFrontend,
dockerfile_contents: String,
) -> Result<ReturnRequest> {
let mut result: Vec<String> = vec![];
let context_source = Source::local("context");
let context_layer = solve(&mut client, Terminal::with(context_source.output())).await?;
for line in dockerfile_contents.lines() {
if let Some(file_path) = line.trim().strip_prefix(INCLUDE_COMMAND) {
let bytes = read_file(&mut client, &context_layer, file_path.trim_start().to_string(), None)
.await
.with_context(|| format!("Could not read file \"{}\". Remember that the file path is relative to the build context, not the Dockerfile path.", file_path))?;
result.push(std::str::from_utf8(&bytes)?.to_string());
} else {
result.push(line.to_string());
}
}
let dockerfile_contents = result.join("\n");
dockerfile_frontend.solve(&dockerfile_contents).await
}
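The `INCLUDE+` rewrite in `dockerfile_trap` boils down to a line-by-line substitution. A self-contained sketch of just that transformation, with the async `read_file` call against the context layer stubbed out as `read` (names illustrative):

```rust
// Inline every `INCLUDE+ <path>` line by substituting the file's contents.
fn inline_includes(dockerfile: &str, read: impl Fn(&str) -> String) -> String {
    dockerfile
        .lines()
        .map(|line| match line.trim().strip_prefix("INCLUDE+") {
            Some(path) => read(path.trim_start()), // replace the line wholesale
            None => line.to_string(),              // pass other lines through
        })
        .collect::<Vec<_>>()
        .join("\n")
}
```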

@ -0,0 +1,40 @@
use std::collections::HashMap;
use buildkit_proto::moby::buildkit::v1::frontend::CacheOptionsEntry as CacheOptionsEntryProto;
use serde::Deserialize;
#[derive(Clone, Debug, Deserialize, PartialEq)]
pub struct CacheOptionsEntry {
#[serde(rename = "Type")]
pub cache_type: CacheType,
#[serde(rename = "Attrs")]
pub attrs: HashMap<String, String>,
}
#[derive(Clone, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum CacheType {
Local,
Registry,
Inline,
}
impl From<CacheOptionsEntry> for CacheOptionsEntryProto {
fn from(entry: CacheOptionsEntry) -> Self {
CacheOptionsEntryProto {
r#type: entry.cache_type.into(),
attrs: entry.attrs,
}
}
}
impl From<CacheType> for String {
fn from(cache_type: CacheType) -> Self {
match cache_type {
CacheType::Local => "local".into(),
CacheType::Registry => "registry".into(),
CacheType::Inline => "inline".into(),
}
}
}

@ -0,0 +1,9 @@
use serde::Deserialize;
#[derive(Debug, PartialEq, Deserialize)]
#[serde(untagged)]
enum OptionValue {
Flag(bool),
Single(String),
Multiple(Vec<String>),
}

@ -0,0 +1,272 @@
use std::io::Cursor;
use std::iter::empty;
use anyhow::Result;
use serde::de::value::{MapDeserializer, SeqDeserializer};
use serde::de::{self, DeserializeOwned, IntoDeserializer, Visitor};
use serde::forward_to_deserialize_any;
pub fn from_env<T, I>(pairs: I) -> Result<T>
where
T: DeserializeOwned,
I: IntoIterator<Item = (String, String)>,
{
let owned_pairs = pairs.into_iter().collect::<Vec<_>>();
let pairs = {
owned_pairs.iter().filter_map(|(name, value)| {
if name.starts_with("BUILDKIT_FRONTEND_OPT_") {
Some(value)
} else {
None
}
})
};
let deserializer = EnvDeserializer {
vals: pairs.map(|value| extract_name_and_value(&value)),
};
Ok(T::deserialize(deserializer)?)
}
#[derive(Debug)]
struct EnvDeserializer<P> {
vals: P,
}
#[derive(Debug)]
enum EnvValue<'de> {
Flag,
Json(&'de str),
Text(&'de str),
}
#[derive(Debug)]
struct EnvItem<'de>(&'de str);
fn extract_name_and_value(mut raw_value: &str) -> (&str, EnvValue) {
if let Some(stripped) = raw_value.strip_prefix("build-arg:") {
raw_value = stripped;
}
let mut parts = raw_value.splitn(2, '=');
let name = parts.next().unwrap();
match parts.next() {
None => (name, EnvValue::Flag),
Some(text) if text.is_empty() => (name, EnvValue::Flag),
Some(text) if text.starts_with('[') || text.starts_with('{') => (name, EnvValue::Json(text)),
Some(text) => (name, EnvValue::Text(text)),
}
}
impl<'de> IntoDeserializer<'de, serde::de::value::Error> for EnvValue<'de> {
type Deserializer = Self;
fn into_deserializer(self) -> Self::Deserializer {
self
}
}
impl<'de> IntoDeserializer<'de, serde::de::value::Error> for EnvItem<'de> {
type Deserializer = Self;
fn into_deserializer(self) -> Self::Deserializer {
self
}
}
impl<'de> EnvItem<'de> {
fn infer<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, serde::de::value::Error> {
match self.0 {
"true" => visitor.visit_bool(true),
"false" => visitor.visit_bool(false),
_ => visitor.visit_str(self.0),
}
}
fn json<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, serde::de::value::Error> {
use serde::de::Deserializer;
use serde::de::Error;
serde_json::Deserializer::from_reader(Cursor::new(self.0))
.deserialize_any(visitor)
.map_err(serde::de::value::Error::custom)
}
}
impl<'de, P> de::Deserializer<'de> for EnvDeserializer<P>
where
P: Iterator<Item = (&'de str, EnvValue<'de>)>,
{
type Error = serde::de::value::Error;
fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
visitor.visit_map(MapDeserializer::new(self.vals))
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
bytes byte_buf option unit unit_struct newtype_struct seq tuple
tuple_struct map struct enum identifier ignored_any
}
}
// The approach is shamelessly borrowed from https://github.com/softprops/envy/blob/master/src/lib.rs#L113
macro_rules! forward_parsed_values_env_value {
($($ty:ident => $method:ident,)*) => {
$(
fn $method<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor<'de>
{
match self {
EnvValue::Flag => self.deserialize_any(visitor),
EnvValue::Json(_) => self.deserialize_any(visitor),
EnvValue::Text(contents) => {
match contents.parse::<$ty>() {
Ok(val) => val.into_deserializer().$method(visitor),
Err(e) => Err(de::Error::custom(format_args!("{} while parsing value '{}'", e, contents)))
}
}
}
}
)*
}
}
macro_rules! forward_parsed_values_env_item {
($($ty:ident => $method:ident,)*) => {
$(
fn $method<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor<'de>
{
match self.0.parse::<$ty>() {
Ok(val) => val.into_deserializer().$method(visitor),
Err(e) => Err(de::Error::custom(format_args!("{} while parsing value '{}'", e, self.0)))
}
}
)*
}
}
impl<'de> de::Deserializer<'de> for EnvValue<'de> {
type Error = serde::de::value::Error;
fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
match self {
EnvValue::Flag => visitor.visit_bool(true),
EnvValue::Json(contents) => EnvItem(contents).json(visitor),
EnvValue::Text(contents) => {
if !contents.contains(',') {
EnvItem(contents).infer(visitor)
} else {
SeqDeserializer::new(contents.split(',')).deserialize_seq(visitor)
}
}
}
}
fn deserialize_seq<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
match self {
EnvValue::Flag => SeqDeserializer::new(empty::<&'de str>()).deserialize_seq(visitor),
EnvValue::Json(contents) => EnvItem(contents).json(visitor),
EnvValue::Text(contents) => {
SeqDeserializer::new(contents.split(',')).deserialize_seq(visitor)
}
}
}
fn deserialize_option<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
visitor.visit_some(self)
}
forward_parsed_values_env_value! {
bool => deserialize_bool,
u8 => deserialize_u8,
u16 => deserialize_u16,
u32 => deserialize_u32,
u64 => deserialize_u64,
u128 => deserialize_u128,
i8 => deserialize_i8,
i16 => deserialize_i16,
i32 => deserialize_i32,
i64 => deserialize_i64,
i128 => deserialize_i128,
f32 => deserialize_f32,
f64 => deserialize_f64,
}
forward_to_deserialize_any! {
byte_buf
bytes
char
enum
identifier
ignored_any
map
newtype_struct
str
string
struct
tuple
tuple_struct
unit
unit_struct
}
}
impl<'de> de::Deserializer<'de> for EnvItem<'de> {
type Error = serde::de::value::Error;
fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
self.0.into_deserializer().deserialize_any(visitor)
}
fn deserialize_map<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value, Self::Error> {
self.json(visitor)
}
fn deserialize_struct<V: Visitor<'de>>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error> {
self.json(visitor)
}
forward_parsed_values_env_item! {
bool => deserialize_bool,
u8 => deserialize_u8,
u16 => deserialize_u16,
u32 => deserialize_u32,
u64 => deserialize_u64,
u128 => deserialize_u128,
i8 => deserialize_i8,
i16 => deserialize_i16,
i32 => deserialize_i32,
i64 => deserialize_i64,
i128 => deserialize_i128,
f32 => deserialize_f32,
f64 => deserialize_f64,
}
forward_to_deserialize_any! {
byte_buf
bytes
char
enum
identifier
ignored_any
newtype_struct
option
seq
str
string
tuple
tuple_struct
unit
unit_struct
}
}
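Driving this deserializer looks like the tests in the next file; a compact sketch with an illustrative options struct, assuming `from_env` is in scope from this module:

```rust
use serde::Deserialize;
use std::path::PathBuf;

#[derive(Debug, Deserialize)]
struct Options {
    filename: Option<PathBuf>,
}

fn demo() -> anyhow::Result<()> {
    // BuildKit passes frontend options as BUILDKIT_FRONTEND_OPT_<n>=name=value.
    let env = vec![(
        "BUILDKIT_FRONTEND_OPT_0".to_string(),
        "filename=Dockerfile.dev".to_string(),
    )];
    let opts: Options = from_env(env)?;
    assert_eq!(opts.filename, Some(PathBuf::from("Dockerfile.dev")));
    Ok(())
}
```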

@ -0,0 +1,123 @@
mod default;
mod deserializer;
pub use self::deserializer::from_env;
pub mod common;
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::*;
use serde::Deserialize;
#[derive(Debug, Deserialize, PartialEq)]
#[serde(untagged)]
#[serde(field_identifier, rename_all = "lowercase")]
enum Debug {
All,
LLB,
Frontend,
}
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
struct CustomOptions {
filename: Option<PathBuf>,
verbosity: u32,
#[serde(default)]
debug: Vec<Debug>,
#[serde(default)]
cache_imports: Vec<common::CacheOptionsEntry>,
}
#[test]
fn custom_options() {
let env = vec![
(
"BUILDKIT_FRONTEND_OPT_0".into(),
"filename=/path/to/Dockerfile".into(),
),
(
"BUILDKIT_FRONTEND_OPT_1".into(),
"debug=llb,frontend".into(),
),
(
"BUILDKIT_FRONTEND_OPT_2".into(),
r#"cache-imports=[{"Type":"local","Attrs":{"src":"cache"}}]"#.into(),
),
(
"BUILDKIT_FRONTEND_OPT_3".into(),
"verbosity=12345678".into(),
),
];
assert_eq!(
from_env::<CustomOptions, _>(env.into_iter()).unwrap(),
CustomOptions {
filename: Some(PathBuf::from("/path/to/Dockerfile")),
verbosity: 12_345_678,
debug: vec![Debug::LLB, Debug::Frontend],
cache_imports: vec![common::CacheOptionsEntry {
cache_type: common::CacheType::Local,
attrs: vec![("src".into(), "cache".into())].into_iter().collect()
}],
}
);
}
#[test]
fn env_variable_names() {
let env = vec![
(
"ANOTHER_OPT_0".into(),
"filename=/path/to/Dockerfile".into(),
),
(
"ANOTHER_OPT_2".into(),
r#"cache-imports=[{"Type":"local","Attrs":{"src":"cache"}}]"#.into(),
),
("BUILDKIT_FRONTEND_OPT_1".into(), "debug=all".into()),
(
"BUILDKIT_FRONTEND_OPT_2".into(),
"verbosity=12345678".into(),
),
];
assert_eq!(
from_env::<CustomOptions, _>(env.into_iter()).unwrap(),
CustomOptions {
filename: None,
verbosity: 12_345_678,
debug: vec![Debug::All],
cache_imports: vec![],
}
);
}
#[test]
fn empty_cache() {
let env = vec![
("BUILDKIT_FRONTEND_OPT_1".into(), "cache-imports=".into()),
(
"BUILDKIT_FRONTEND_OPT_2".into(),
"verbosity=12345678".into(),
),
];
assert_eq!(
from_env::<CustomOptions, _>(env.into_iter()).unwrap(),
CustomOptions {
filename: None,
verbosity: 12_345_678,
debug: vec![],
cache_imports: vec![],
}
);
}
}

@ -0,0 +1,182 @@
use std::io::{self, stdin, stdout, Read, Write};
use std::os::unix::io::AsRawFd;
use std::pin::Pin;
use std::task::{Context, Poll};
use pin_project::pin_project;
use std::{
io::{Stdin, Stdout},
net::{IpAddr, Ipv4Addr, SocketAddr},
};
use tokio::io::*;
use tonic::transport::{server::Connected, Uri};
#[pin_project]
pub struct StdioSocket<R: Read + AsRawFd, W: Write + AsRawFd> {
#[pin]
reader: PollEvented<async_stdio::EventedStdin<R>>,
#[pin]
writer: PollEvented<async_stdio::EventedStdout<W>>,
}
pub async fn stdio_connector(_: Uri) -> io::Result<StdioSocket<Stdin, Stdout>> {
StdioSocket::try_new()
}
impl StdioSocket<Stdin, Stdout> {
pub fn try_new() -> io::Result<Self> {
Self::try_new_rw(stdin(), stdout())
}
}
impl<R: Read + AsRawFd, W: Write + AsRawFd> Connected for StdioSocket<R, W> {
fn remote_addr(&self) -> Option<SocketAddr> {
Some(SocketAddr::new(IpAddr::from(Ipv4Addr::UNSPECIFIED), 8080))
}
}
impl<R: Read + AsRawFd, W: Write + AsRawFd> StdioSocket<R, W> {
pub fn try_new_rw(read: R, write: W) -> io::Result<Self> {
Ok(StdioSocket {
reader: PollEvented::new(async_stdio::EventedStdin::try_new(read)?)?,
writer: PollEvented::new(async_stdio::EventedStdout::try_new(write)?)?,
})
}
}
impl<R: Read + AsRawFd + Unpin, W: Write + AsRawFd + Unpin> AsyncRead for StdioSocket<R, W> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<Result<usize>> {
self.project().reader.poll_read(cx, buf)
}
}
impl<R: Read + AsRawFd + Unpin, W: Write + AsRawFd + Unpin> AsyncWrite for StdioSocket<R, W> {
fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
self.project().writer.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
self.project().writer.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
self.project().writer.poll_shutdown(cx)
}
}
mod async_stdio {
use std::io::{self, Read, Write};
use std::os::unix::io::AsRawFd;
use mio::event::Evented;
use mio::unix::EventedFd;
use mio::{Poll, PollOpt, Ready, Token};
use libc::{fcntl, F_GETFL, F_SETFL, O_NONBLOCK};
pub struct EventedStdin<T: Read + AsRawFd>(T);
pub struct EventedStdout<T: Write + AsRawFd>(T);
impl<T: Read + AsRawFd> EventedStdin<T> {
pub fn try_new(stdin: T) -> io::Result<Self> {
set_non_blocking_flag(&stdin)?;
Ok(EventedStdin(stdin))
}
}
impl<T: Write + AsRawFd> EventedStdout<T> {
pub fn try_new(stdout: T) -> io::Result<Self> {
set_non_blocking_flag(&stdout)?;
Ok(EventedStdout(stdout))
}
}
impl<T: Read + AsRawFd> Evented for EventedStdin<T> {
fn register(
&self,
poll: &Poll,
token: Token,
interest: Ready,
opts: PollOpt,
) -> io::Result<()> {
EventedFd(&self.0.as_raw_fd()).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &Poll,
token: Token,
interest: Ready,
opts: PollOpt,
) -> io::Result<()> {
EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
EventedFd(&self.0.as_raw_fd()).deregister(poll)
}
}
impl<T: Read + AsRawFd> Read for EventedStdin<T> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
}
impl<T: Write + AsRawFd> Evented for EventedStdout<T> {
fn register(
&self,
poll: &Poll,
token: Token,
interest: Ready,
opts: PollOpt,
) -> io::Result<()> {
EventedFd(&self.0.as_raw_fd()).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &Poll,
token: Token,
interest: Ready,
opts: PollOpt,
) -> io::Result<()> {
EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
EventedFd(&self.0.as_raw_fd()).deregister(poll)
}
}
impl<T: Write + AsRawFd> Write for EventedStdout<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
}
fn set_non_blocking_flag<T: AsRawFd>(stream: &T) -> io::Result<()> {
let flags = unsafe { fcntl(stream.as_raw_fd(), F_GETFL, 0) };
if flags < 0 {
return Err(std::io::Error::last_os_error());
}
if unsafe { fcntl(stream.as_raw_fd(), F_SETFL, flags | O_NONBLOCK) } != 0 {
return Err(std::io::Error::last_os_error());
}
Ok(())
}
}