Diffstat (limited to 'tools/nixery/builder')
-rw-r--r--   tools/nixery/builder/archive.go        104
-rw-r--r--   tools/nixery/builder/builder.go        527
-rw-r--r--   tools/nixery/builder/builder_test.go   112
-rw-r--r--   tools/nixery/builder/cache.go          225
4 files changed, 968 insertions, 0 deletions
diff --git a/tools/nixery/builder/archive.go b/tools/nixery/builder/archive.go
new file mode 100644
index 000000000000..8763e4cb8566
--- /dev/null
+++ b/tools/nixery/builder/archive.go
@@ -0,0 +1,104 @@
+// Copyright 2022 The TVL Contributors
+// SPDX-License-Identifier: Apache-2.0
+package builder
+
+// This file implements logic for walking through a directory and creating a
+// tarball of it.
+//
+// The tarball is written straight to the supplied writer, which makes it
+// possible to create an image layer from the specified store paths, hash it and
+// upload it in one reading pass.
+import (
+	"archive/tar"
+	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/google/nixery/layers"
+)
+
+// Create a new compressed tarball from each of the paths in the list
+// and write it to the supplied writer.
+//
+// The uncompressed tarball is hashed because image manifests must
+// contain both the hashes of compressed and uncompressed layers.
+func packStorePaths(l *layers.Layer, w io.Writer) (string, error) {
+	shasum := sha256.New()
+	gz := gzip.NewWriter(w)
+	multi := io.MultiWriter(shasum, gz)
+	t := tar.NewWriter(multi)
+
+	for _, path := range l.Contents {
+		err := filepath.Walk(path, tarStorePath(t))
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if err := t.Close(); err != nil {
+		return "", err
+	}
+
+	if err := gz.Close(); err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("sha256:%x", shasum.Sum([]byte{})), nil
+}
+
+func tarStorePath(w *tar.Writer) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// If the entry is not a symlink or regular file, skip it.
+		if info.Mode()&os.ModeSymlink == 0 && !info.Mode().IsRegular() {
+			return nil
+		}
+
+		// the symlink target is read if this entry is a symlink, as it
+		// is required when creating the file header
+		var link string
+		if info.Mode()&os.ModeSymlink != 0 {
+			link, err = os.Readlink(path)
+			if err != nil {
+				return err
+			}
+		}
+
+		header, err := tar.FileInfoHeader(info, link)
+		if err != nil {
+			return err
+		}
+
+		// The name retrieved from os.FileInfo only contains the file's
+		// basename, but the full path is required within the layer
+		// tarball.
+		header.Name = path
+		if err = w.WriteHeader(header); err != nil {
+			return err
+		}
+
+		// At this point, return if no file content needs to be written
+		if !info.Mode().IsRegular() {
+			return nil
+		}
+
+		f, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+
+		if _, err := io.Copy(w, f); err != nil {
+			return err
+		}
+
+		f.Close()
+
+		return nil
+	}
+}
diff --git a/tools/nixery/builder/builder.go b/tools/nixery/builder/builder.go
new file mode 100644
index 000000000000..7f0bd7fffdb9
--- /dev/null
+++ b/tools/nixery/builder/builder.go
@@ -0,0 +1,527 @@
+// Copyright 2022 The TVL Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package builder implements the logic for assembling container
+// images. It shells out to Nix to retrieve all required Nix-packages
+// and assemble the symlink layer and then creates the required
+// tarballs in-process.
+package builder
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"sort"
+	"strings"
+
+	"github.com/google/nixery/config"
+	"github.com/google/nixery/layers"
+	"github.com/google/nixery/manifest"
+	"github.com/google/nixery/storage"
+	"github.com/im7mortal/kmutex"
+	log "github.com/sirupsen/logrus"
+)
+
+// The maximum number of layers in an image is 125. To allow for
+// extensibility, the actual number of layers Nixery is "allowed" to
+// use up is set at a lower point.
+const LayerBudget int = 94
+
+// State holds the runtime state that is carried around in Nixery and
+// passed to builder functions.
+type State struct {
+	Storage     storage.Backend
+	Cache       *LocalCache
+	Cfg         config.Config
+	Pop         layers.Popularity
+	UploadMutex *kmutex.Kmutex
+}
+
+// Architecture represents the possible CPU architectures for which
+// container images can be built.
+//
+// The default architecture is amd64, but support for ARM platforms is
+// available within nixpkgs and can be toggled via meta-packages.
+type Architecture struct {
+	// Name of the system tuple to pass to Nix
+	nixSystem string
+
+	// Name of the architecture as used in the OCI manifests
+	imageArch string
+}
+
+var amd64 = Architecture{"x86_64-linux", "amd64"}
+var arm64 = Architecture{"aarch64-linux", "arm64"}
+
+// Image represents the information necessary for building a container image.
+// This can be either a list of package names (corresponding to keys in the
+// nixpkgs set) or a Nix expression that results in a *list* of derivations.
+type Image struct {
+	Name string
+	Tag  string
+
+	// Names of packages to include in the image. These must correspond
+	// directly to top-level names of Nix packages in the nixpkgs tree.
+	Packages []string
+
+	// Architecture for which to build the image. Nixery defaults
+	// this to amd64 if not specified via meta-packages.
+	Arch *Architecture
+}
+
+// BuildResult represents the data returned from the server to the
+// HTTP handlers. Error information is propagated straight from Nix
+// for errors inside of the build that should be fed back to the
+// client (such as missing packages).
+type BuildResult struct {
+	Error    string          `json:"error"`
+	Pkgs     []string        `json:"pkgs"`
+	Manifest json.RawMessage `json:"manifest"`
+}
+
+// ImageFromName parses an image name into the corresponding structure which can
+// be used to invoke Nix.
+//
+// It will expand convenience names under the hood (see the `metaPackages`
+// function below) and append packages that are always included (cacert, iana-etc).
+//
+// Once assembled the image structure uses a sorted representation of
+// the name. This is to avoid unnecessarily cache-busting images if
+// only the order of requested packages has changed.
+func ImageFromName(name string, tag string) Image {
+	pkgs := strings.Split(name, "/")
+	arch, expanded := metaPackages(pkgs)
+	expanded = append(expanded, "cacert", "iana-etc")
+
+	sort.Strings(pkgs)
+	sort.Strings(expanded)
+
+	return Image{
+		Name:     strings.Join(pkgs, "/"),
+		Tag:      tag,
+		Packages: expanded,
+		Arch:     arch,
+	}
+}
+
+// ImageResult represents the output of calling the Nix derivation
+// responsible for preparing an image.
+type ImageResult struct {
+	// These fields are populated in case of an error
+	Error string   `json:"error"`
+	Pkgs  []string `json:"pkgs"`
+
+	// These fields are populated in case of success
+	Graph        layers.RuntimeGraph `json:"runtimeGraph"`
+	SymlinkLayer struct {
+		Size    int    `json:"size"`
+		TarHash string `json:"tarHash"`
+		Path    string `json:"path"`
+	} `json:"symlinkLayer"`
+}
+
+// metaPackages expands package names defined by Nixery which either
+// include sets of packages or trigger certain image-building
+// behaviour.
+//
+// Meta-packages must be specified as the first packages in an image
+// name.
+//
+// Currently defined meta-packages are:
+//
+// * `shell`: Includes bash, coreutils and other common command-line tools
+// * `arm64`: Causes Nixery to build images for the ARM64 architecture
+func metaPackages(packages []string) (*Architecture, []string) {
+	arch := &amd64
+
+	var metapkgs []string
+	lastMeta := 0
+	for idx, p := range packages {
+		if p == "shell" || p == "arm64" {
+			metapkgs = append(metapkgs, p)
+			lastMeta = idx + 1
+		} else {
+			break
+		}
+	}
+
+	// Chop off the meta-packages from the front of the package
+	// list
+	packages = packages[lastMeta:]
+
+	for _, p := range metapkgs {
+		switch p {
+		case "shell":
+			packages = append(packages, "bashInteractive", "coreutils", "moreutils", "nano")
+		case "arm64":
+			arch = &arm64
+		}
+	}
+
+	return arch, packages
+}
+
+// logNix logs each output line from Nix. It runs in a goroutine per
+// output channel that should be live-logged.
+func logNix(image, cmd string, r io.ReadCloser) {
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		log.WithFields(log.Fields{
+			"image": image,
+			"cmd":   cmd,
+		}).Info("[nix] " + scanner.Text())
+	}
+}
+
+func callNix(program, image string, args []string) ([]byte, error) {
+	cmd := exec.Command(program, args...)
+
+	outpipe, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	errpipe, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	go logNix(image, program, errpipe)
+
+	if err = cmd.Start(); err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"image": image,
+			"cmd":   program,
+		}).Error("error invoking Nix")
+
+		return nil, err
+	}
+
+	log.WithFields(log.Fields{
+		"cmd":   program,
+		"image": image,
+	}).Info("invoked Nix build")
+
+	stdout, _ := ioutil.ReadAll(outpipe)
+
+	if err = cmd.Wait(); err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"image":  image,
+			"cmd":    program,
+			"stdout": stdout,
+		}).Info("failed to invoke Nix")
+
+		return nil, err
+	}
+
+	resultFile := strings.TrimSpace(string(stdout))
+	buildOutput, err := ioutil.ReadFile(resultFile)
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"image": image,
+			"file":  resultFile,
+		}).Info("failed to read Nix result file")
+
+		return nil, err
+	}
+
+	return buildOutput, nil
+}
+
+// Call out to Nix and request metadata for the image to be built. All
+// required store paths for the image will be realised, but layers
+// will not yet be created from them.
+//
+// This function is only invoked if the manifest is not found in any
+// cache.
+func prepareImage(s *State, image *Image) (*ImageResult, error) {
+	packages, err := json.Marshal(image.Packages)
+	if err != nil {
+		return nil, err
+	}
+
+	srcType, srcArgs := s.Cfg.Pkgs.Render(image.Tag)
+
+	args := []string{
+		"--timeout", s.Cfg.Timeout,
+		"--argstr", "packages", string(packages),
+		"--argstr", "srcType", srcType,
+		"--argstr", "srcArgs", srcArgs,
+		"--argstr", "system", image.Arch.nixSystem,
+	}
+
+	output, err := callNix("nixery-prepare-image", image.Name, args)
+	if err != nil {
+		// granular error logging is performed in callNix already
+		return nil, err
+	}
+
+	log.WithFields(log.Fields{
+		"image": image.Name,
+		"tag":   image.Tag,
+	}).Info("finished image preparation via Nix")
+
+	var result ImageResult
+	err = json.Unmarshal(output, &result)
+	if err != nil {
+		return nil, err
+	}
+
+	return &result, nil
+}
+
+// Groups layers and checks whether they are present in the cache
+// already, otherwise calls out to Nix to assemble layers.
+//
+// Newly built layers are uploaded to the bucket. Cache entries are
+// added only after successful uploads, which guarantees that entries
+// retrieved from the cache are present in the bucket.
+func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageResult) ([]manifest.Entry, error) {
+	grouped := layers.GroupLayers(&result.Graph, &s.Pop, LayerBudget)
+
+	var entries []manifest.Entry
+
+	// Splits the layers into those which are already present in
+	// the cache, and those that are missing.
+	//
+	// Missing layers are built and uploaded to the storage
+	// bucket.
+	for _, l := range grouped {
+		lh := l.Hash()
+
+		// While packing store paths, the SHA sum of
+		// the uncompressed layer is computed and
+		// written to `tarhash`.
+		//
+		// TODO(tazjin): Refactor this to make the
+		// flow of data cleaner.
+		lw := func(w io.Writer) (string, error) {
+			tarhash, err := packStorePaths(&l, w)
+			if err != nil {
+				return "", err
+			}
+
+			var pkgs []string
+			for _, p := range l.Contents {
+				pkgs = append(pkgs, layers.PackageFromPath(p))
+			}
+
+			log.WithFields(log.Fields{
+				"layer":    lh,
+				"packages": pkgs,
+				"tarhash":  tarhash,
+			}).Info("created image layer")
+
+			return tarhash, err
+		}
+
+		entry, err := uploadHashLayer(ctx, s, lh, l.MergeRating, lw)
+		if err != nil {
+			return nil, err
+		}
+
+		entries = append(entries, *entry)
+	}
+
+	// Symlink layer (built in the first Nix build) needs to be
+	// included here manually:
+	slkey := result.SymlinkLayer.TarHash
+	entry, err := uploadHashLayer(ctx, s, slkey, 0, func(w io.Writer) (string, error) {
+		f, err := os.Open(result.SymlinkLayer.Path)
+		if err != nil {
+			log.WithError(err).WithFields(log.Fields{
+				"image": image.Name,
+				"tag":   image.Tag,
+				"layer": slkey,
+			}).Error("failed to open symlink layer")
+
+			return "", err
+		}
+		defer f.Close()
+
+		gz := gzip.NewWriter(w)
+		_, err = io.Copy(gz, f)
+		if err != nil {
+			log.WithError(err).WithFields(log.Fields{
+				"image": image.Name,
+				"tag":   image.Tag,
+				"layer": slkey,
+			}).Error("failed to upload symlink layer")
+
+			return "", err
+		}
+
+		return "sha256:" + slkey, gz.Close()
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	entries = append(entries, *entry)
+
+	return entries, nil
+}
+
+// layerWriter is the type for functions that can write a layer to the
+// multiwriter used for uploading & hashing.
+//
+// This type exists to avoid duplication between the handling of
+// symlink layers and store path layers.
+type layerWriter func(w io.Writer) (string, error)
+
+// byteCounter is a special io.Writer that counts all bytes written to
+// it and does nothing else.
+//
+// This is required because the ad-hoc writing of tarballs leaves no
+// single place to count the final tarball size otherwise.
+type byteCounter struct {
+	count int64
+}
+
+func (b *byteCounter) Write(p []byte) (n int, err error) {
+	b.count += int64(len(p))
+	return len(p), nil
+}
+
+// Upload a layer tarball to the storage bucket, while hashing it at
+// the same time. The supplied function is expected to provide the
+// layer data to the writer.
+//
+// The initial upload is performed in a 'staging' folder, as the
+// SHA256-hash is not yet available when the upload is initiated.
+//
+// After a successful upload, the file is moved to its final location
+// in the bucket and the build cache is populated.
+//
+// The return value is the layer's SHA256 hash, which is used in the
+// image manifest.
+func uploadHashLayer(ctx context.Context, s *State, key string, mrating uint64, lw layerWriter) (*manifest.Entry, error) {
+	s.UploadMutex.Lock(key)
+	defer s.UploadMutex.Unlock(key)
+
+	if entry, cached := layerFromCache(ctx, s, key); cached {
+		return entry, nil
+	}
+
+	path := "staging/" + key
+	var tarhash string
+	sha256sum, size, err := s.Storage.Persist(ctx, path, manifest.LayerType, func(sw io.Writer) (string, int64, error) {
+		// Sets up a "multiwriter" that simultaneously runs both hash
+		// algorithms and uploads to the storage backend.
+		shasum := sha256.New()
+		counter := &byteCounter{}
+		multi := io.MultiWriter(sw, shasum, counter)
+
+		var err error
+		tarhash, err = lw(multi)
+		sha256sum := fmt.Sprintf("%x", shasum.Sum([]byte{}))
+
+		return sha256sum, counter.count, err
+	})
+
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"layer":   key,
+			"backend": s.Storage.Name(),
+		}).Error("failed to create and store layer")
+
+		return nil, err
+	}
+
+	// Hashes are now known and the object is in the bucket, what
+	// remains is to move it to the correct location and cache it.
+	err = s.Storage.Move(ctx, "staging/"+key, "layers/"+sha256sum)
+	if err != nil {
+		log.WithError(err).WithField("layer", key).
+			Error("failed to move layer from staging")
+
+		return nil, err
+	}
+
+	log.WithFields(log.Fields{
+		"layer":  key,
+		"sha256": sha256sum,
+		"size":   size,
+	}).Info("created and persisted layer")
+
+	entry := manifest.Entry{
+		Digest:      "sha256:" + sha256sum,
+		Size:        size,
+		TarHash:     tarhash,
+		MergeRating: mrating,
+	}
+
+	cacheLayer(ctx, s, key, entry)
+
+	return &entry, nil
+}
+
+func BuildImage(ctx context.Context, s *State, image *Image) (*BuildResult, error) {
+	key := s.Cfg.Pkgs.CacheKey(image.Packages, image.Tag)
+	if key != "" {
+		if m, c := manifestFromCache(ctx, s, key); c {
+			return &BuildResult{
+				Manifest: m,
+			}, nil
+		}
+	}
+
+	imageResult, err := prepareImage(s, image)
+	if err != nil {
+		return nil, err
+	}
+
+	if imageResult.Error != "" {
+		return &BuildResult{
+			Error: imageResult.Error,
+			Pkgs:  imageResult.Pkgs,
+		}, nil
+	}
+
+	layers, err := prepareLayers(ctx, s, image, imageResult)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the requested packages include a shell,
+	// set cmd accordingly.
+	cmd := ""
+	for _, pkg := range image.Packages {
+		if pkg == "bashInteractive" {
+			cmd = "bash"
+		}
+	}
+	m, c := manifest.Manifest(image.Arch.imageArch, layers, cmd)
+
+	lw := func(w io.Writer) (string, error) {
+		r := bytes.NewReader(c.Config)
+		_, err := io.Copy(w, r)
+		return "", err
+	}
+
+	if _, err = uploadHashLayer(ctx, s, c.SHA256, 0, lw); err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"image": image.Name,
+			"tag":   image.Tag,
+		}).Error("failed to upload config")
+
+		return nil, err
+	}
+
+	if key != "" {
+		go cacheManifest(ctx, s, key, m)
+	}
+
+	result := BuildResult{
+		Manifest: m,
+	}
+	return &result, nil
+}
diff --git a/tools/nixery/builder/builder_test.go b/tools/nixery/builder/builder_test.go
new file mode 100644
index 000000000000..507f3eb15a83
--- /dev/null
+++ b/tools/nixery/builder/builder_test.go
@@ -0,0 +1,112 @@
+// Copyright 2022 The TVL Contributors
+// SPDX-License-Identifier: Apache-2.0
+package builder
+
+import (
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"testing"
+)
+
+var ignoreArch = cmpopts.IgnoreFields(Image{}, "Arch")
+
+func TestImageFromNameSimple(t *testing.T) {
+	image := ImageFromName("hello", "latest")
+	expected := Image{
+		Name: "hello",
+		Tag:  "latest",
+		Packages: []string{
+			"cacert",
+			"hello",
+			"iana-etc",
+		},
+	}
+
+	if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
+		t.Fatalf("Image(\"hello\", \"latest\") mismatch:\n%s", diff)
+	}
+}
+
+func TestImageFromNameMultiple(t *testing.T) {
+	image := ImageFromName("hello/git/htop", "latest")
+	expected := Image{
+		Name: "git/hello/htop",
+		Tag:  "latest",
+		Packages: []string{
+			"cacert",
+			"git",
+			"hello",
+			"htop",
+			"iana-etc",
+		},
+	}
+
+	if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
+		t.Fatalf("Image(\"hello/git/htop\", \"latest\") mismatch:\n%s", diff)
+	}
+}
+
+func TestImageFromNameShell(t *testing.T) {
+	image := ImageFromName("shell", "latest")
+	expected := Image{
+		Name: "shell",
+		Tag:  "latest",
+		Packages: []string{
+			"bashInteractive",
+			"cacert",
+			"coreutils",
+			"iana-etc",
+			"moreutils",
+			"nano",
+		},
+	}
+
+	if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
+		t.Fatalf("Image(\"shell\", \"latest\") mismatch:\n%s", diff)
+	}
+}
+
+func TestImageFromNameShellMultiple(t *testing.T) {
+	image := ImageFromName("shell/htop", "latest")
+	expected := Image{
+		Name: "htop/shell",
+		Tag:  "latest",
+		Packages: []string{
+			"bashInteractive",
+			"cacert",
+			"coreutils",
+			"htop",
+			"iana-etc",
+			"moreutils",
+			"nano",
+		},
+	}
+
+	if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
+		t.Fatalf("Image(\"shell/htop\", \"latest\") mismatch:\n%s", diff)
+	}
+}
+
+func TestImageFromNameShellArm64(t *testing.T) {
+	image := ImageFromName("shell/arm64", "latest")
+	expected := Image{
+		Name: "arm64/shell",
+		Tag:  "latest",
+		Packages: []string{
+			"bashInteractive",
+			"cacert",
+			"coreutils",
+			"iana-etc",
+			"moreutils",
+			"nano",
		},
+	}
+
+	if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
+		t.Fatalf("Image(\"shell/arm64\", \"latest\") mismatch:\n%s", diff)
+	}
+
+	if image.Arch.imageArch != "arm64" {
+		t.Fatal("Image(\"shell/arm64\"): Expected arch arm64")
+	}
+}
diff --git a/tools/nixery/builder/cache.go b/tools/nixery/builder/cache.go
new file mode 100644
index 000000000000..9e4283c0e5bb
--- /dev/null
+++ b/tools/nixery/builder/cache.go
@@ -0,0 +1,225 @@
+// Copyright 2022 The TVL Contributors
+// SPDX-License-Identifier: Apache-2.0
+package builder
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"github.com/google/nixery/manifest"
+	log "github.com/sirupsen/logrus"
+)
+
+// LocalCache implements the structure used for local caching of
+// manifests and layer uploads.
+type LocalCache struct {
+	// Manifest cache
+	mmtx sync.RWMutex
+	mdir string
+
+	// Layer cache
+	lmtx   sync.RWMutex
+	lcache map[string]manifest.Entry
+}
+
+// Creates an in-memory cache and ensures that the local file path for
+// manifest caching exists.
+func NewCache() (LocalCache, error) {
+	path := os.TempDir() + "/nixery"
+	err := os.MkdirAll(path, 0755)
+	if err != nil {
+		return LocalCache{}, err
+	}
+
+	return LocalCache{
+		mdir:   path + "/",
+		lcache: make(map[string]manifest.Entry),
+	}, nil
+}
+
+// Retrieve a cached manifest if the build is cacheable and it exists.
+func (c *LocalCache) manifestFromLocalCache(key string) (json.RawMessage, bool) {
+	c.mmtx.RLock()
+	defer c.mmtx.RUnlock()
+
+	f, err := os.Open(c.mdir + key)
+	if err != nil {
+		// This is a debug log statement because failure to
+		// read the manifest key is currently expected if it
+		// is not cached.
+		log.WithError(err).WithField("manifest", key).
+			Debug("failed to read manifest from local cache")
+
+		return nil, false
+	}
+	defer f.Close()
+
+	m, err := ioutil.ReadAll(f)
+	if err != nil {
+		log.WithError(err).WithField("manifest", key).
+			Error("failed to read manifest from local cache")
+
+		return nil, false
+	}
+
+	return json.RawMessage(m), true
+}
+
+// Adds the result of a manifest build to the local cache, if the
+// manifest is considered cacheable.
+//
+// Manifests can be quite large and are cached on disk instead of in
+// memory.
+func (c *LocalCache) localCacheManifest(key string, m json.RawMessage) {
+	c.mmtx.Lock()
+	defer c.mmtx.Unlock()
+
+	err := ioutil.WriteFile(c.mdir+key, []byte(m), 0644)
+	if err != nil {
+		log.WithError(err).WithField("manifest", key).
+			Error("failed to locally cache manifest")
+	}
+}
+
+// Retrieve a layer build from the local cache.
+func (c *LocalCache) layerFromLocalCache(key string) (*manifest.Entry, bool) {
+	c.lmtx.RLock()
+	e, ok := c.lcache[key]
+	c.lmtx.RUnlock()
+
+	return &e, ok
+}
+
+// Add a layer build result to the local cache.
+func (c *LocalCache) localCacheLayer(key string, e manifest.Entry) {
+	c.lmtx.Lock()
+	c.lcache[key] = e
+	c.lmtx.Unlock()
+}
+
+// Retrieve a manifest from the cache(s). First the local cache is
+// checked, then the storage backend.
+func manifestFromCache(ctx context.Context, s *State, key string) (json.RawMessage, bool) {
+	if m, cached := s.Cache.manifestFromLocalCache(key); cached {
+		return m, true
+	}
+
+	r, err := s.Storage.Fetch(ctx, "manifests/"+key)
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"manifest": key,
+			"backend":  s.Storage.Name(),
+		}).Error("failed to fetch manifest from cache")
+
+		return nil, false
+	}
+	defer r.Close()
+
+	m, err := ioutil.ReadAll(r)
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"manifest": key,
+			"backend":  s.Storage.Name(),
+		}).Error("failed to read cached manifest from storage backend")
+
+		return nil, false
+	}
+
+	go s.Cache.localCacheManifest(key, m)
+	log.WithField("manifest", key).Info("retrieved manifest from GCS")
+
+	return json.RawMessage(m), true
+}
+
+// Add a manifest to the bucket & local caches
+func cacheManifest(ctx context.Context, s *State, key string, m json.RawMessage) {
+	go s.Cache.localCacheManifest(key, m)
+
+	path := "manifests/" + key
+	_, size, err := s.Storage.Persist(ctx, path, manifest.ManifestType, func(w io.Writer) (string, int64, error) {
+		size, err := io.Copy(w, bytes.NewReader([]byte(m)))
+		return "", size, err
+	})
+
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"manifest": key,
+			"backend":  s.Storage.Name(),
+		}).Error("failed to cache manifest to storage backend")
+
+		return
+	}
+
+	log.WithFields(log.Fields{
+		"manifest": key,
+		"size":     size,
+		"backend":  s.Storage.Name(),
+	}).Info("cached manifest to storage backend")
+}
+
+// Retrieve a layer build from the cache, first checking the local
+// cache followed by the bucket cache.
+func layerFromCache(ctx context.Context, s *State, key string) (*manifest.Entry, bool) {
+	if entry, cached := s.Cache.layerFromLocalCache(key); cached {
+		return entry, true
+	}
+
+	r, err := s.Storage.Fetch(ctx, "builds/"+key)
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"layer":   key,
+			"backend": s.Storage.Name(),
+		}).Debug("failed to retrieve cached layer from storage backend")
+
+		return nil, false
+	}
+	defer r.Close()
+
+	jb := bytes.NewBuffer([]byte{})
+	_, err = io.Copy(jb, r)
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"layer":   key,
+			"backend": s.Storage.Name(),
+		}).Error("failed to read cached layer from storage backend")
+
+		return nil, false
+	}
+
+	var entry manifest.Entry
+	err = json.Unmarshal(jb.Bytes(), &entry)
+	if err != nil {
+		log.WithError(err).WithField("layer", key).
+			Error("failed to unmarshal cached layer")
+
+		return nil, false
+	}
+
+	go s.Cache.localCacheLayer(key, entry)
+	return &entry, true
+}
+
+func cacheLayer(ctx context.Context, s *State, key string, entry manifest.Entry) {
+	s.Cache.localCacheLayer(key, entry)
+
+	j, _ := json.Marshal(&entry)
+	path := "builds/" + key
+	_, _, err := s.Storage.Persist(ctx, path, "", func(w io.Writer) (string, int64, error) {
+		size, err := io.Copy(w, bytes.NewReader(j))
+		return "", size, err
+	})
+
+	if err != nil {
+		log.WithError(err).WithFields(log.Fields{
+			"layer":   key,
+			"backend": s.Storage.Name(),
+		}).Error("failed to cache layer")
+	}
+
+	return
+}
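
For orientation, the sketch below shows how a caller (such as Nixery's HTTP frontend) could wire this package together. It is a minimal, hypothetical example rather than part of this change: the config.Config, storage.Backend and layers.Popularity values are assumed to be constructed elsewhere, buildExample is an invented helper, and the kmutex.New() constructor is assumed to be provided by the im7mortal/kmutex package.

package example

import (
	"context"
	"log"

	"github.com/google/nixery/builder"
	"github.com/google/nixery/config"
	"github.com/google/nixery/layers"
	"github.com/google/nixery/storage"
	"github.com/im7mortal/kmutex"
)

// buildExample builds a single image and logs the resulting manifest.
// cfg, backend and pop are assumed to be set up by the caller.
func buildExample(ctx context.Context, cfg config.Config, backend storage.Backend, pop layers.Popularity) error {
	// Local cache: manifests on disk (under $TMPDIR/nixery), layers in memory.
	cache, err := builder.NewCache()
	if err != nil {
		return err
	}

	state := builder.State{
		Storage:     backend,
		Cache:       &cache,
		Cfg:         cfg,
		Pop:         pop,
		UploadMutex: kmutex.New(),
	}

	// "shell/htop" expands the `shell` meta-package and yields the
	// sorted image name "htop/shell".
	image := builder.ImageFromName("shell/htop", "latest")

	result, err := builder.BuildImage(ctx, &state, &image)
	if err != nil {
		return err
	}

	if result.Error != "" {
		// Build-level failures (e.g. unknown packages) are reported
		// in-band rather than as Go errors.
		log.Printf("build failed: %s (packages: %v)", result.Error, result.Pkgs)
		return nil
	}

	log.Printf("built manifest: %s", result.Manifest)
	return nil
}

Note that uploadHashLayer serialises work per layer key via UploadMutex and only populates the cache after a successful move out of the staging prefix, so concurrent builds that share layers do not race against each other in the storage backend.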