Diffstat (limited to 'tools')
-rw-r--r--  tools/nixery/server/builder/builder.go | 94
-rw-r--r--  tools/nixery/server/builder/cache.go   | 95
-rw-r--r--  tools/nixery/server/main.go            |  5
3 files changed, 152 insertions, 42 deletions
diff --git a/tools/nixery/server/builder/builder.go b/tools/nixery/server/builder/builder.go
index c53b702e0537..e241d3d0b004 100644
--- a/tools/nixery/server/builder/builder.go
+++ b/tools/nixery/server/builder/builder.go
@@ -69,10 +69,10 @@ func ImageFromName(name string, tag string) Image {
 //
 // The latter field is simply treated as opaque JSON and passed through.
 type BuildResult struct {
-	Error string   `json:"error"`
-	Pkgs  []string `json:"pkgs"`
+	Error    string          `json:"error"`
+	Pkgs     []string        `json:"pkgs"`
+	Manifest json.RawMessage `json:"manifest"`
 
-	Manifest json.RawMessage `json:"manifest"`
 	LayerLocations map[string]struct {
 		Path string `json:"path"`
 		Md5  []byte `json:"md5"`
@@ -99,50 +99,57 @@ func convenienceNames(packages []string) []string {
 
 // Call out to Nix and request that an image be built. Nix will, upon success,
 // return a manifest for the container image.
-func BuildImage(ctx *context.Context, cfg *config.Config, image *Image, bucket *storage.BucketHandle) (*BuildResult, error) {
-	packages, err := json.Marshal(image.Packages)
-	if err != nil {
-		return nil, err
-	}
+func BuildImage(ctx *context.Context, cfg *config.Config, cache *BuildCache, image *Image, bucket *storage.BucketHandle) (*BuildResult, error) {
+	resultFile, cached := cache.manifestFromCache(image)
 
-	args := []string{
-		"--argstr", "name", image.Name,
-		"--argstr", "packages", string(packages),
-	}
+	if !cached {
+		packages, err := json.Marshal(image.Packages)
+		if err != nil {
+			return nil, err
+		}
 
-	if cfg.Pkgs != nil {
-		args = append(args, "--argstr", "pkgSource", cfg.Pkgs.Render(image.Tag))
-	}
-	cmd := exec.Command("nixery-build-image", args...)
+		args := []string{
+			"--argstr", "name", image.Name,
+			"--argstr", "packages", string(packages),
+		}
 
-	outpipe, err := cmd.StdoutPipe()
-	if err != nil {
-		return nil, err
-	}
+		if cfg.Pkgs != nil {
+			args = append(args, "--argstr", "pkgSource", cfg.Pkgs.Render(image.Tag))
+		}
+		cmd := exec.Command("nixery-build-image", args...)
 
-	errpipe, err := cmd.StderrPipe()
-	if err != nil {
-		return nil, err
-	}
+		outpipe, err := cmd.StdoutPipe()
+		if err != nil {
+			return nil, err
+		}
 
-	if err = cmd.Start(); err != nil {
-		log.Println("Error starting nix-build:", err)
-		return nil, err
-	}
-	log.Printf("Started Nix image build for '%s'", image.Name)
+		errpipe, err := cmd.StderrPipe()
+		if err != nil {
+			return nil, err
+		}
 
-	stdout, _ := ioutil.ReadAll(outpipe)
-	stderr, _ := ioutil.ReadAll(errpipe)
+		if err = cmd.Start(); err != nil {
+			log.Println("Error starting nix-build:", err)
+			return nil, err
+		}
+		log.Printf("Started Nix image build for '%s'", image.Name)
 
-	if err = cmd.Wait(); err != nil {
-		// TODO(tazjin): Propagate errors upwards in a usable format.
-		log.Printf("nix-build execution error: %s\nstdout: %s\nstderr: %s\n", err, stdout, stderr)
-		return nil, err
-	}
+		stdout, _ := ioutil.ReadAll(outpipe)
+		stderr, _ := ioutil.ReadAll(errpipe)
 
-	log.Println("Finished Nix image build")
+		if err = cmd.Wait(); err != nil {
+			// TODO(tazjin): Propagate errors upwards in a usable format.
+			log.Printf("nix-build execution error: %s\nstdout: %s\nstderr: %s\n", err, stdout, stderr)
+			return nil, err
+		}
+
+		log.Println("Finished Nix image build")
+
+		resultFile = strings.TrimSpace(string(stdout))
+		cache.cacheManifest(image, resultFile)
+	}
 
-	buildOutput, err := ioutil.ReadFile(strings.TrimSpace(string(stdout)))
+	buildOutput, err := ioutil.ReadFile(resultFile)
 	if err != nil {
 		return nil, err
 	}
@@ -151,15 +158,20 @@ func BuildImage(ctx *context.Context, cfg *config.Config, image *Image, bucket *storage.BucketHandle) (*BuildResult, error) {
 	// contained layers to the bucket. Only the manifest itself is
 	// re-serialised to JSON and returned.
 	var result BuildResult
+
 	err = json.Unmarshal(buildOutput, &result)
 	if err != nil {
 		return nil, err
 	}
 
 	for layer, meta := range result.LayerLocations {
-		err = uploadLayer(ctx, bucket, layer, meta.Path, meta.Md5)
-		if err != nil {
-			return nil, err
+		if !cache.hasSeenLayer(layer) {
+			err = uploadLayer(ctx, bucket, layer, meta.Path, meta.Md5)
+			if err != nil {
+				return nil, err
+			}
+
+			cache.sawLayer(layer)
 		}
 	}
 
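With this change, `BuildImage` consults a shared `BuildCache` before shelling out to Nix: a fresh-enough manifest hit skips the Nix build entirely, and layers the instance has already uploaded are skipped in the upload loop. A minimal sketch of a caller under the new five-argument signature (`buildManifest` is a hypothetical helper, not part of this commit; imports and the `ctx`/`cfg`/`bucket` wiring are assumed to match `main.go` below):

```go
// Hypothetical caller of the cache-aware BuildImage: one long-lived
// cache, created at startup, is threaded through every build.
func buildManifest(ctx *context.Context, cfg *config.Config, cache *builder.BuildCache,
	bucket *storage.BucketHandle, name, tag string) (json.RawMessage, error) {
	image := builder.ImageFromName(name, tag)

	// On a cache hit this returns without invoking Nix at all; on a
	// miss it builds, uploads any unseen layers and caches the result.
	result, err := builder.BuildImage(ctx, cfg, cache, &image, bucket)
	if err != nil {
		return nil, err
	}

	return result.Manifest, nil
}
```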
+ log.Printf("nix-build execution error: %s\nstdout: %s\nstderr: %s\n", err, stdout, stderr) + return nil, err + } + + log.Println("Finished Nix image build") + + resultFile = strings.TrimSpace(string(stdout)) + cache.cacheManifest(image, resultFile) + } - buildOutput, err := ioutil.ReadFile(strings.TrimSpace(string(stdout))) + buildOutput, err := ioutil.ReadFile(resultFile) if err != nil { return nil, err } @@ -151,15 +158,20 @@ func BuildImage(ctx *context.Context, cfg *config.Config, image *Image, bucket * // contained layers to the bucket. Only the manifest itself is // re-serialised to JSON and returned. var result BuildResult + err = json.Unmarshal(buildOutput, &result) if err != nil { return nil, err } for layer, meta := range result.LayerLocations { - err = uploadLayer(ctx, bucket, layer, meta.Path, meta.Md5) - if err != nil { - return nil, err + if !cache.hasSeenLayer(layer) { + err = uploadLayer(ctx, bucket, layer, meta.Path, meta.Md5) + if err != nil { + return nil, err + } + + cache.sawLayer(layer) } } diff --git a/tools/nixery/server/builder/cache.go b/tools/nixery/server/builder/cache.go new file mode 100644 index 000000000000..0014789afff5 --- /dev/null +++ b/tools/nixery/server/builder/cache.go @@ -0,0 +1,95 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. +package builder + +import ( + "sync" + "time" +) + +// recencyThreshold is the amount of time that a manifest build will be cached +// for. When using the channel mechanism for retrieving nixpkgs, Nix will +// occasionally re-fetch the channel so things can in fact change while the +// instance is running. +const recencyThreshold = time.Duration(6) * time.Hour + +type manifestEntry struct { + built time.Time + path string +} + +type void struct{} + +type BuildCache struct { + mmtx sync.RWMutex + mcache map[string]manifestEntry + + lmtx sync.RWMutex + lcache map[string]void +} + +func NewCache() BuildCache { + return BuildCache{ + mcache: make(map[string]manifestEntry), + lcache: make(map[string]void), + } +} + +// Has this layer hash already been seen by this Nixery instance? If +// yes, we can skip upload checking and such because it has already +// been done. +func (c *BuildCache) hasSeenLayer(hash string) bool { + c.lmtx.RLock() + defer c.lmtx.RUnlock() + _, seen := c.lcache[hash] + return seen +} + +// Layer has now been seen and should be stored. +func (c *BuildCache) sawLayer(hash string) { + c.lmtx.Lock() + defer c.lmtx.Unlock() + c.lcache[hash] = void{} +} + +// Has this manifest been built already? If yes, we can reuse the +// result given that the build happened recently enough. +func (c *BuildCache) manifestFromCache(image *Image) (string, bool) { + c.mmtx.RLock() + + entry, ok := c.mcache[image.Name+image.Tag] + c.mmtx.RUnlock() + + if !ok { + return "", false + } + + if time.Since(entry.built) > recencyThreshold { + return "", false + } + + return entry.path, true +} + +// Adds the result of a manifest build to the cache. 
diff --git a/tools/nixery/server/main.go b/tools/nixery/server/main.go
index 5d7dcd2adfc2..fd307f79d4b4 100644
--- a/tools/nixery/server/main.go
+++ b/tools/nixery/server/main.go
@@ -125,6 +125,7 @@ type registryHandler struct {
 	cfg    *config.Config
 	ctx    *context.Context
 	bucket *storage.BucketHandle
+	cache  *builder.BuildCache
 }
 
 func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -140,7 +141,7 @@ func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		imageTag := manifestMatches[2]
 		log.Printf("Requesting manifest for image %q at tag %q", imageName, imageTag)
 		image := builder.ImageFromName(imageName, imageTag)
-		buildResult, err := builder.BuildImage(h.ctx, h.cfg, &image, h.bucket)
+		buildResult, err := builder.BuildImage(h.ctx, h.cfg, h.cache, &image, h.bucket)
 
 		if err != nil {
 			writeError(w, 500, "UNKNOWN", "image build failure")
@@ -192,6 +193,7 @@ func main() {
 	cfg := config.FromEnv()
 	ctx := context.Background()
 	bucket := prepareBucket(&ctx, cfg)
+	cache := builder.NewCache()
 
 	log.Printf("Starting Kubernetes Nix controller on port %s\n", cfg.Port)
 
@@ -200,6 +202,7 @@ func main() {
 		cfg:    cfg,
 		ctx:    &ctx,
 		bucket: bucket,
+		cache:  &cache,
 	})
 
 	// All other roots are served by the static file server.
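`main` creates a single `BuildCache` and hands a pointer to it to every request-handling goroutine, so the per-map `sync.RWMutex` pairs in `cache.go` are what keep concurrent requests safe. A sketch of that shared-access pattern, again as a hypothetical test in `package builder` (illustrative only; loop bounds are arbitrary):

```go
package builder

import (
	"fmt"
	"sync"
	"testing"
)

// Hypothetical test: many goroutines marking and checking layers on one
// shared cache, mirroring how registryHandler shares h.cache across requests.
func TestBuildCacheConcurrentLayers(t *testing.T) {
	c := NewCache()
	var wg sync.WaitGroup

	for i := 0; i < 32; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			hash := fmt.Sprintf("sha256:%04d", n)
			c.sawLayer(hash)
			if !c.hasSeenLayer(hash) {
				t.Errorf("layer %s was lost", hash)
			}
		}(i)
	}

	wg.Wait()
}
```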