author    | Vincent Ambo <tazjin@google.com> | 2019-10-06T02·18+0100
committer | Vincent Ambo <github@tazj.in>    | 2019-10-06T22·05+0100
commit    | 6f148f789f43bf753b345b039d01d8a429f194e9 (patch)
tree      | 87569dd6d0d351822c926c250a20ed99e1f4452c /tools/nixery
parent    | f77c93b6aeb3e847fe00099ea5c52dc98cf74b4d (diff)
refactor(server): Convert existing log entries to structured format
This rewrites all existing log statements into the structured logrus format. For consistency, errors are always logged in a separate field called `error` rather than being interpolated into the primary message. Only the "info", "error" and "warn" severities are used.
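As an illustration of this convention, the following minimal sketch shows the logrus call pattern used throughout the patch. The package layout, the example image name and the error value are hypothetical and only demonstrate the field-based style; they are not taken from the patch itself:

package main

import (
	"errors"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Hypothetical failure; in Nixery this would come from a build or
	// storage operation.
	err := errors.New("example failure")

	// Errors are attached as a dedicated `error` field instead of being
	// formatted into the message string.
	log.WithFields(log.Fields{
		"image": "shell/git",
		"error": err,
	}).Error("failed to build image")

	// Plain status messages use the "info" severity.
	log.Info("GCS URL signing disabled")
}

Depending on the configured formatter, logrus renders these fields as key=value pairs or as JSON, which keeps the `error` field queryable separately from the message text.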
Diffstat (limited to 'tools/nixery')
-rw-r--r-- | tools/nixery/server/builder/archive.go  |   2
-rw-r--r-- | tools/nixery/server/builder/builder.go  | 105
-rw-r--r-- | tools/nixery/server/builder/cache.go    |  85
-rw-r--r-- | tools/nixery/server/config/config.go    |  17
-rw-r--r-- | tools/nixery/server/config/pkgsource.go |  15
-rw-r--r-- | tools/nixery/server/layers/grouping.go  |  13
-rw-r--r-- | tools/nixery/server/main.go             |  60
7 files changed, 243 insertions, 54 deletions
diff --git a/tools/nixery/server/builder/archive.go b/tools/nixery/server/builder/archive.go
index 6a2bd8e4b0f0..63ea9c73814d 100644
--- a/tools/nixery/server/builder/archive.go
+++ b/tools/nixery/server/builder/archive.go
@@ -14,7 +14,6 @@ import (
     "path/filepath"
 
     "github.com/google/nixery/server/layers"
-    log "github.com/sirupsen/logrus"
 )
 
 // Create a new tarball from each of the paths in the list and write the tarball
@@ -33,7 +32,6 @@ func tarStorePaths(l *layers.Layer, w io.Writer) error {
         return err
     }
 
-    log.Printf("Created layer for '%s'\n", l.Hash())
     return nil
 }
diff --git a/tools/nixery/server/builder/builder.go b/tools/nixery/server/builder/builder.go
index b675da0f7763..14696f58a80f 100644
--- a/tools/nixery/server/builder/builder.go
+++ b/tools/nixery/server/builder/builder.go
@@ -143,14 +143,17 @@ func convenienceNames(packages []string) []string {
 
 // logNix logs each output line from Nix. It runs in a goroutine per
 // output channel that should be live-logged.
-func logNix(name string, r io.ReadCloser) {
+func logNix(image, cmd string, r io.ReadCloser) {
     scanner := bufio.NewScanner(r)
     for scanner.Scan() {
-        log.Printf("\x1b[31m[nix - %s]\x1b[39m %s\n", name, scanner.Text())
+        log.WithFields(log.Fields{
+            "image": image,
+            "cmd":   cmd,
+        }).Info("[nix] " + scanner.Text())
     }
 }
 
-func callNix(program string, name string, args []string) ([]byte, error) {
+func callNix(program, image string, args []string) ([]byte, error) {
     cmd := exec.Command(program, args...)
 
     outpipe, err := cmd.StdoutPipe()
@@ -162,24 +165,45 @@ func callNix(program string, name string, args []string) ([]byte, error) {
     if err != nil {
         return nil, err
     }
-    go logNix(name, errpipe)
+    go logNix(program, image, errpipe)
 
     if err = cmd.Start(); err != nil {
-        log.Printf("Error starting %s: %s\n", program, err)
+        log.WithFields(log.Fields{
+            "image": image,
+            "cmd":   program,
+            "error": err,
+        }).Error("error starting command")
+
         return nil, err
     }
-    log.Printf("Invoked Nix build (%s) for '%s'\n", program, name)
+
+    log.WithFields(log.Fields{
+        "cmd":   program,
+        "image": image,
+    }).Info("invoked Nix build")
 
     stdout, _ := ioutil.ReadAll(outpipe)
 
     if err = cmd.Wait(); err != nil {
-        log.Printf("%s execution error: %s\nstdout: %s\n", program, err, stdout)
+        log.WithFields(log.Fields{
+            "image":  image,
+            "cmd":    program,
+            "error":  err,
+            "stdout": stdout,
+        }).Info("Nix execution failed")
+
         return nil, err
     }
 
     resultFile := strings.TrimSpace(string(stdout))
     buildOutput, err := ioutil.ReadFile(resultFile)
     if err != nil {
+        log.WithFields(log.Fields{
+            "image": image,
+            "file":  resultFile,
+            "error": err,
+        }).Info("failed to read Nix result file")
+
         return nil, err
     }
@@ -209,10 +233,14 @@ func prepareImage(s *State, image *Image) (*ImageResult, error) {
 
     output, err := callNix("nixery-build-image", image.Name, args)
     if err != nil {
-        log.Printf("failed to call nixery-build-image: %s\n", err)
+        // granular error logging is performed in callNix already
         return nil, err
     }
-    log.Printf("Finished image preparation for '%s' via Nix\n", image.Name)
+
+    log.WithFields(log.Fields{
+        "image": image.Name,
+        "tag":   image.Tag,
+    }).Info("finished image preparation via Nix")
 
     var result ImageResult
     err = json.Unmarshal(output, &result)
@@ -243,16 +271,27 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
         if entry, cached := layerFromCache(ctx, s, l.Hash()); cached {
             entries = append(entries, *entry)
         } else {
+            lh := l.Hash()
             lw := func(w io.Writer) error {
                 return tarStorePaths(&l, w)
             }
 
-            entry, err := uploadHashLayer(ctx, s, l.Hash(), lw)
+            entry, err := uploadHashLayer(ctx, s, lh, lw)
             if err != nil {
                 return nil, err
             }
             entry.MergeRating = l.MergeRating
 
+            var pkgs []string
+            for _, p := range l.Contents {
+                pkgs = append(pkgs, layers.PackageFromPath(p))
+            }
+
+            log.WithFields(log.Fields{
+                "layer":    lh,
+                "packages": pkgs,
+            }).Info("created image layer")
+
             go cacheLayer(ctx, s, l.Hash(), *entry)
             entries = append(entries, *entry)
         }
@@ -264,7 +303,13 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
     entry, err := uploadHashLayer(ctx, s, slkey, func(w io.Writer) error {
         f, err := os.Open(result.SymlinkLayer.Path)
         if err != nil {
-            log.Printf("failed to upload symlink layer '%s': %s\n", slkey, err)
+            log.WithFields(log.Fields{
+                "image": image.Name,
+                "tag":   image.Tag,
+                "error": err,
+                "layer": slkey,
+            }).Error("failed to upload symlink layer")
+
             return err
         }
         defer f.Close()
@@ -319,7 +364,12 @@ func renameObject(ctx context.Context, s *State, old, new string) error {
     // renaming/moving them, hence a deletion call afterwards is
     // required.
     if err = s.Bucket.Object(old).Delete(ctx); err != nil {
-        log.Printf("failed to delete renamed object '%s': %s\n", old, err)
+        log.WithFields(log.Fields{
+            "new":   new,
+            "old":   old,
+            "error": err,
+        }).Warn("failed to delete renamed object")
+
         // this error should not break renaming and is not returned
     }
@@ -371,12 +421,19 @@ func uploadHashLayer(ctx context.Context, s *State, key string, lw layerWriter)
 
     err := lw(multi)
     if err != nil {
-        log.Printf("failed to create and upload layer '%s': %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to create and upload layer")
+
         return nil, err
     }
 
     if err = sw.Close(); err != nil {
-        log.Printf("failed to upload layer '%s' to staging: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to upload layer to staging")
     }
 
     sha256sum := fmt.Sprintf("%x", shasum.Sum([]byte{}))
@@ -385,12 +442,21 @@ func uploadHashLayer(ctx context.Context, s *State, key string, lw layerWriter)
     // remains is to move it to the correct location and cache it.
     err = renameObject(ctx, s, "staging/"+key, "layers/"+sha256sum)
     if err != nil {
-        log.Printf("failed to move layer '%s' from staging: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to move layer from staging")
+
         return nil, err
     }
 
     size := counter.count
-    log.Printf("Uploaded layer sha256:%s (%v bytes written)", sha256sum, size)
+
+    log.WithFields(log.Fields{
+        "layer":  key,
+        "sha256": sha256sum,
+        "size":   size,
+    }).Info("uploaded layer")
 
     entry := manifest.Entry{
         Digest: "sha256:" + sha256sum,
@@ -436,7 +502,12 @@ func BuildImage(ctx context.Context, s *State, image *Image) (*BuildResult, erro
     }
 
     if _, err = uploadHashLayer(ctx, s, c.SHA256, lw); err != nil {
-        log.Printf("failed to upload config for %s: %s\n", image.Name, err)
+        log.WithFields(log.Fields{
+            "image": image.Name,
+            "tag":   image.Tag,
+            "error": err,
+        }).Error("failed to upload config")
+
         return nil, err
     }
diff --git a/tools/nixery/server/builder/cache.go b/tools/nixery/server/builder/cache.go
index 5b6bf078b228..916de3af168c 100644
--- a/tools/nixery/server/builder/cache.go
+++ b/tools/nixery/server/builder/cache.go
@@ -60,15 +60,25 @@ func (c *LocalCache) manifestFromLocalCache(key string) (json.RawMessage, bool)
     f, err := os.Open(c.mdir + key)
     if err != nil {
-        // TODO(tazjin): Once log levels are available, this
-        // might warrant a debug log.
+        // This is a debug log statement because failure to
+        // read the manifest key is currently expected if it
+        // is not cached.
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Debug("failed to read manifest from local cache")
+
         return nil, false
     }
     defer f.Close()
 
     m, err := ioutil.ReadAll(f)
     if err != nil {
-        log.Printf("Failed to read manifest '%s' from local cache: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Error("failed to read manifest from local cache")
+
         return nil, false
     }
@@ -86,7 +96,10 @@ func (c *LocalCache) localCacheManifest(key string, m json.RawMessage) {
     err := ioutil.WriteFile(c.mdir+key, []byte(m), 0644)
     if err != nil {
-        log.Printf("Failed to locally cache manifest for '%s': %s\n", key, err)
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Error("failed to locally cache manifest")
     }
 }
@@ -123,18 +136,29 @@ func manifestFromCache(ctx context.Context, s *State, key string) (json.RawMessa
     r, err := obj.NewReader(ctx)
     if err != nil {
-        log.Printf("Failed to retrieve manifest '%s' from cache: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Error("failed to retrieve manifest from bucket cache")
+
         return nil, false
     }
     defer r.Close()
 
     m, err := ioutil.ReadAll(r)
     if err != nil {
-        log.Printf("Failed to read cached manifest for '%s': %s\n", key, err)
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Error("failed to read cached manifest from bucket")
+
+        return nil, false
     }
 
     go s.Cache.localCacheManifest(key, m)
-    log.Printf("Retrieved manifest for sha1:%s from GCS\n", key)
+
+    log.WithFields(log.Fields{
+        "manifest": key,
+    }).Info("retrieved manifest from GCS")
 
     return json.RawMessage(m), true
 }
@@ -149,16 +173,27 @@ func cacheManifest(ctx context.Context, s *State, key string, m json.RawMessage)
     size, err := io.Copy(w, r)
     if err != nil {
-        log.Printf("failed to cache manifest sha1:%s: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Error("failed to cache manifest to GCS")
+
         return
     }
 
     if err = w.Close(); err != nil {
-        log.Printf("failed to cache manifest sha1:%s: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "manifest": key,
+            "error":    err,
+        }).Error("failed to cache manifest to GCS")
+
         return
     }
 
-    log.Printf("Cached manifest sha1:%s (%v bytes written)\n", key, size)
+    log.WithFields(log.Fields{
+        "manifest": key,
+        "size":     size,
+    }).Info("cached manifest to GCS")
 }
 
 // Retrieve a layer build from the cache, first checking the local
@@ -176,7 +211,11 @@ func layerFromCache(ctx context.Context, s *State, key string) (*manifest.Entry,
     r, err := obj.NewReader(ctx)
     if err != nil {
-        log.Printf("Failed to retrieve layer build '%s' from cache: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to retrieve cached layer from GCS")
+
         return nil, false
     }
     defer r.Close()
@@ -184,14 +223,22 @@ func layerFromCache(ctx context.Context, s *State, key string) (*manifest.Entry,
     jb := bytes.NewBuffer([]byte{})
     _, err = io.Copy(jb, r)
     if err != nil {
-        log.Printf("Failed to read layer build '%s' from cache: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to read cached layer from GCS")
+
         return nil, false
     }
 
     var entry manifest.Entry
     err = json.Unmarshal(jb.Bytes(), &entry)
     if err != nil {
-        log.Printf("Failed to unmarshal layer build '%s' from cache: %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to unmarshal cached layer")
+
         return nil, false
     }
@@ -210,12 +257,20 @@ func cacheLayer(ctx context.Context, s *State, key string, entry manifest.Entry)
     _, err := io.Copy(w, bytes.NewReader(j))
     if err != nil {
-        log.Printf("failed to cache build '%s': %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to cache layer")
+
         return
     }
 
     if err = w.Close(); err != nil {
-        log.Printf("failed to cache build '%s': %s\n", key, err)
+        log.WithFields(log.Fields{
+            "layer": key,
+            "error": err,
+        }).Error("failed to cache layer")
+
         return
     }
 }
diff --git a/tools/nixery/server/config/config.go b/tools/nixery/server/config/config.go
index abc067855a88..9447975aa9a2 100644
--- a/tools/nixery/server/config/config.go
+++ b/tools/nixery/server/config/config.go
@@ -32,14 +32,20 @@ func signingOptsFromEnv() *storage.SignedURLOptions {
     id := os.Getenv("GCS_SIGNING_ACCOUNT")
 
     if path == "" || id == "" {
-        log.Println("GCS URL signing disabled")
+        log.Info("GCS URL signing disabled")
         return nil
     }
 
-    log.Printf("GCS URL signing enabled with account %q\n", id)
+    log.WithFields(log.Fields{
+        "account": id,
+    }).Info("GCS URL signing enabled")
+
     k, err := ioutil.ReadFile(path)
     if err != nil {
-        log.Fatalf("Failed to read GCS signing key: %s\n", err)
+        log.WithFields(log.Fields{
+            "file":  path,
+            "error": err,
+        }).Fatal("failed to read GCS signing key")
     }
 
     return &storage.SignedURLOptions{
@@ -52,7 +58,10 @@ func signingOptsFromEnv() *storage.SignedURLOptions {
 func getConfig(key, desc, def string) string {
     value := os.Getenv(key)
     if value == "" && def == "" {
-        log.Fatalln(desc + " must be specified")
+        log.WithFields(log.Fields{
+            "option":      key,
+            "description": desc,
+        }).Fatal("missing required configuration envvar")
     } else if value == "" {
         return def
     }
diff --git a/tools/nixery/server/config/pkgsource.go b/tools/nixery/server/config/pkgsource.go
index 98719ecceabb..3a817f3d76d3 100644
--- a/tools/nixery/server/config/pkgsource.go
+++ b/tools/nixery/server/config/pkgsource.go
@@ -132,21 +132,30 @@ func (p *PkgsPath) CacheKey(pkgs []string, tag string) string {
 // specified, the Nix code will default to a recent NixOS channel.
 func pkgSourceFromEnv() (PkgSource, error) {
     if channel := os.Getenv("NIXERY_CHANNEL"); channel != "" {
-        log.Printf("Using Nix package set from Nix channel %q\n", channel)
+        log.WithFields(log.Fields{
+            "channel": channel,
+        }).Info("using Nix package set from Nix channel or commit")
+
         return &NixChannel{
             channel: channel,
         }, nil
     }
 
     if git := os.Getenv("NIXERY_PKGS_REPO"); git != "" {
-        log.Printf("Using Nix package set from git repository at %q\n", git)
+        log.WithFields(log.Fields{
+            "repo": git,
+        }).Info("using Nix package set from git repository")
+
         return &GitSource{
             repository: git,
         }, nil
     }
 
     if path := os.Getenv("NIXERY_PKGS_PATH"); path != "" {
-        log.Printf("Using Nix package set from path %q\n", path)
+        log.WithFields(log.Fields{
+            "path": path,
+        }).Info("using Nix package set at local path")
+
         return &PkgsPath{
             path: path,
         }, nil
diff --git a/tools/nixery/server/layers/grouping.go b/tools/nixery/server/layers/grouping.go
index 95198c90d4b3..1fbbf33db3d7 100644
--- a/tools/nixery/server/layers/grouping.go
+++ b/tools/nixery/server/layers/grouping.go
@@ -172,8 +172,14 @@ func (c *closure) ID() int64 {
 
 var nixRegexp = regexp.MustCompile(`^/nix/store/[a-z0-9]+-`)
 
+// PackageFromPath returns the name of a Nix package based on its
+// output store path.
+func PackageFromPath(path string) string {
+    return nixRegexp.ReplaceAllString(path, "")
+}
+
 func (c *closure) DOTID() string {
-    return nixRegexp.ReplaceAllString(c.Path, "")
+    return PackageFromPath(c.Path)
 }
 
 // bigOrPopular checks whether this closure should be considered for
@@ -321,7 +327,10 @@ func dominate(budget int, graph *simple.DirectedGraph) []Layer {
     })
 
     if len(layers) > budget {
-        log.Printf("Ideal image has %v layers, but budget is %v\n", len(layers), budget)
+        log.WithFields(log.Fields{
+            "layers": len(layers),
+            "budget": budget,
+        }).Info("ideal image exceeds layer budget")
     }
 
     for len(layers) > budget {
diff --git a/tools/nixery/server/main.go b/tools/nixery/server/main.go
index 06de6a96a143..babf50790f64 100644
--- a/tools/nixery/server/main.go
+++ b/tools/nixery/server/main.go
@@ -68,7 +68,9 @@ var (
 // The Docker client is known to follow redirects, but this might not be true
 // for all other registry clients.
 func constructLayerUrl(cfg *config.Config, digest string) (string, error) {
-    log.Printf("Redirecting layer '%s' request to bucket '%s'\n", digest, cfg.Bucket)
+    log.WithFields(log.Fields{
+        "layer": digest,
+    }).Info("redirecting layer request to bucket")
     object := "layers/" + digest
 
     if cfg.Signing != nil {
@@ -90,13 +92,18 @@ func constructLayerUrl(cfg *config.Config, digest string) (string, error) {
 func prepareBucket(ctx context.Context, cfg *config.Config) *storage.BucketHandle {
     client, err := storage.NewClient(ctx)
     if err != nil {
-        log.Fatalln("Failed to set up Cloud Storage client:", err)
+        log.WithFields(log.Fields{
+            "error": err,
+        }).Fatal("failed to set up Cloud Storage client")
     }
 
     bkt := client.Bucket(cfg.Bucket)
 
     if _, err := bkt.Attrs(ctx); err != nil {
-        log.Fatalln("Could not access configured bucket", err)
+        log.WithFields(log.Fields{
+            "error":  err,
+            "bucket": cfg.Bucket,
+        }).Fatal("could not access configured bucket")
     }
 
     return bkt
@@ -169,13 +176,24 @@ func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
     if len(manifestMatches) == 3 {
         imageName := manifestMatches[1]
         imageTag := manifestMatches[2]
-        log.Printf("Requesting manifest for image %q at tag %q", imageName, imageTag)
+
+        log.WithFields(log.Fields{
+            "image": imageName,
+            "tag":   imageTag,
+        }).Info("requesting image manifest")
+
         image := builder.ImageFromName(imageName, imageTag)
         buildResult, err := builder.BuildImage(h.ctx, h.state, &image)
         if err != nil {
             writeError(w, 500, "UNKNOWN", "image build failure")
-            log.Println("Failed to build image manifest", err)
+
+            log.WithFields(log.Fields{
+                "image": imageName,
+                "tag":   imageTag,
+                "error": err,
+            }).Error("failed to build image manifest")
+
             return
         }
@@ -184,7 +202,13 @@ func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         if buildResult.Error == "not_found" {
             s := fmt.Sprintf("Could not find Nix packages: %v", buildResult.Pkgs)
             writeError(w, 404, "MANIFEST_UNKNOWN", s)
-            log.Println(s)
+
+            log.WithFields(log.Fields{
+                "image":    imageName,
+                "tag":      imageTag,
+                "packages": buildResult.Pkgs,
+            }).Error("could not find Nix packages")
+
             return
         }
@@ -205,7 +229,11 @@ func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         url, err := constructLayerUrl(&h.state.Cfg, digest)
         if err != nil {
-            log.Printf("Failed to sign GCS URL: %s\n", err)
+            log.WithFields(log.Fields{
+                "layer": digest,
+                "error": err,
+            }).Error("failed to sign GCS URL")
+
             writeError(w, 500, "UNKNOWN", "could not serve layer")
             return
         }
@@ -215,28 +243,38 @@ func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    log.Printf("Unsupported registry route: %s\n", r.RequestURI)
+    log.WithFields(log.Fields{
+        "uri": r.RequestURI,
+    }).Info("unsupported registry route")
+
     w.WriteHeader(404)
 }
 
 func main() {
     cfg, err := config.FromEnv()
     if err != nil {
-        log.Fatalln("Failed to load configuration", err)
+        log.WithFields(log.Fields{
+            "error": err,
+        }).Fatal("failed to load configuration")
     }
 
     ctx := context.Background()
     bucket := prepareBucket(ctx, &cfg)
     cache, err := builder.NewCache()
     if err != nil {
-        log.Fatalln("Failed to instantiate build cache", err)
+        log.WithFields(log.Fields{
+            "error": err,
+        }).Fatal("failed to instantiate build cache")
     }
 
     var pop layers.Popularity
     if cfg.PopUrl != "" {
         pop, err = downloadPopularity(cfg.PopUrl)
         if err != nil {
-            log.Fatalln("Failed to fetch popularity information", err)
+            log.WithFields(log.Fields{
+                "error":  err,
+                "popURL": cfg.PopUrl,
+            }).Fatal("failed to fetch popularity information")
         }
     }