Skip to content

Commit

Permalink
align logging of spaceid and nodeid
Browse files Browse the repository at this point in the history
Signed-off-by: Jörn Friedrich Dreyer <jfd@butonic.de>
  • Loading branch information
butonic committed Apr 11, 2024
1 parent 767b090 commit e40115d
Show file tree
Hide file tree
Showing 13 changed files with 57 additions and 42 deletions.
5 changes: 5 additions & 0 deletions changelog/unreleased/align-logs.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
Bugfix: consistently use spaceid and nodeid in logs

Sometimes we tried to log a node which led to a JSON recursion error because it contains a reference to the space root, which references itself. We now always log `spaceid` and `nodeid`.

https://github.com/cs3org/reva/pull/4623
Original file line number Diff line number Diff line change
Expand Up @@ -1147,7 +1147,7 @@ func (h *Handler) listSharesWithOthers(w http.ResponseWriter, r *http.Request) {
s := r.URL.Query().Get("space")
spaceRef := r.URL.Query().Get("space_ref")
ctx := r.Context()
sublog := appctx.GetLogger(ctx).With().Str("path", p).Str("space", s).Str("space_ref", spaceRef).Logger()
sublog := appctx.GetLogger(ctx).With().Str("path", p).Str("spaceid", s).Str("space_ref", spaceRef).Logger()
if p != "" || s != "" || spaceRef != "" {
ref, err := h.extractReference(r)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion pkg/rhttp/datatx/manager/spaces/spaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
var spaceID string
spaceID, r.URL.Path = router.ShiftPath(r.URL.Path)

sublog := appctx.GetLogger(ctx).With().Str("datatx", "spaces").Str("space", spaceID).Logger()
sublog := appctx.GetLogger(ctx).With().Str("datatx", "spaces").Str("spaceid", spaceID).Logger()

switch r.Method {
case "GET", "HEAD":
Expand Down
3 changes: 2 additions & 1 deletion pkg/storage/fs/posix/lookup/lookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,8 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node, hasPermission node.Per
appctx.GetLogger(ctx).
Error().Err(err).
Str("path", p).
Interface("node", n).
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Msg("Path()")
return
}
Expand Down
41 changes: 24 additions & 17 deletions pkg/storage/utils/decomposedfs/decomposedfs.go
Original file line number Diff line number Diff line change
Expand Up @@ -246,21 +246,23 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
for event := range ch {
switch ev := event.Event.(type) {
case events.PostprocessingFinished:
sublog := log.With().Str("event", "PostprocessingFinished").Str("uploadid", ev.UploadID).Logger()
session, err := fs.sessionStore.Get(ctx, ev.UploadID)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to get upload")
sublog.Error().Err(err).Msg("Failed to get upload")
continue // NOTE: since we can't get the upload, we can't delete the blob
}

ctx = session.Context(ctx)

n, err := session.Node(ctx)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read node")
sublog.Error().Err(err).Msg("could not read node")
continue
}
sublog = log.With().Str("spaceid", session.SpaceID()).Str("nodeid", session.NodeID()).Logger()
if !n.Exists {
log.Debug().Str("uploadID", ev.UploadID).Str("nodeID", session.NodeID()).Msg("node no longer exists")
sublog.Debug().Msg("node no longer exists")
fs.sessionStore.Cleanup(ctx, session, false, false, false)
continue
}
Expand All @@ -274,7 +276,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {

switch ev.Outcome {
default:
log.Error().Str("outcome", string(ev.Outcome)).Str("uploadID", ev.UploadID).Msg("unknown postprocessing outcome - aborting")
sublog.Error().Str("outcome", string(ev.Outcome)).Msg("unknown postprocessing outcome - aborting")
fallthrough
case events.PPOutcomeAbort:
failed = true
Expand All @@ -283,7 +285,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
metrics.UploadSessionsAborted.Inc()
case events.PPOutcomeContinue:
if err := session.Finalize(); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not finalize upload")
sublog.Error().Err(err).Msg("could not finalize upload")
failed = true
revertNodeMetadata = false
keepUpload = true
Expand All @@ -301,7 +303,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
getParent := func() *node.Node {
p, err := n.Parent(ctx)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read parent")
sublog.Error().Err(err).Msg("could not read parent")
return nil
}
return p
Expand All @@ -312,19 +314,19 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
// if no other upload session is in progress (processing id != session id) or has finished (processing id == "")
latestSession, err := n.ProcessingID(ctx)
if err != nil {
log.Error().Err(err).Str("node", n.ID).Str("uploadID", ev.UploadID).Msg("reading node for session failed")
sublog.Error().Err(err).Msg("reading node for session failed")
}
if latestSession == session.ID() {
// propagate reverted sizeDiff after failed postprocessing
if err := fs.tp.Propagate(ctx, n, -session.SizeDiff()); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not propagate tree size change")
sublog.Error().Err(err).Msg("could not propagate tree size change")
}
}
} else if p := getParent(); p != nil {
// update parent tmtime to propagate etag change after successful postprocessing
_ = p.SetTMTime(ctx, &now)
if err := fs.tp.Propagate(ctx, p, 0); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not propagate etag change")
sublog.Error().Err(err).Msg("could not propagate etag change")
}
}

Expand All @@ -350,22 +352,24 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
SpaceOwner: n.SpaceOwnerOrManager(ctx),
},
); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to publish UploadReady event")
sublog.Error().Err(err).Msg("Failed to publish UploadReady event")
}
case events.RestartPostprocessing:
sublog := log.With().Str("event", "RestartPostprocessing").Str("uploadid", ev.UploadID).Logger()
session, err := fs.sessionStore.Get(ctx, ev.UploadID)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to get upload")
sublog.Error().Err(err).Msg("Failed to get upload")
continue
}
n, err := session.Node(ctx)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read node")
sublog.Error().Err(err).Msg("could not read node")
continue
}
sublog = log.With().Str("spaceid", session.SpaceID()).Str("nodeid", session.NodeID()).Logger()
s, err := session.URL(ctx)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not create url")
sublog.Error().Err(err).Msg("could not create url")
continue
}

Expand All @@ -381,9 +385,10 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
Filename: session.Filename(),
Filesize: uint64(session.Size()),
}); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to publish BytesReceived event")
sublog.Error().Err(err).Msg("Failed to publish BytesReceived event")
}
case events.PostprocessingStepFinished:
sublog := log.With().Str("event", "PostprocessingStepFinished").Str("uploadid", ev.UploadID).Logger()
if ev.FinishedStep != events.PPStepAntivirus {
// atm we are only interested in antivirus results
continue
Expand All @@ -395,6 +400,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
// Should we handle this here?
continue
}
sublog = log.With().Str("scan_description", res.Description).Bool("infected", res.Infected).Logger()

var n *node.Node
switch ev.UploadID {
Expand Down Expand Up @@ -480,19 +486,20 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
// uploadid is not empty -> this is an async upload
session, err := fs.sessionStore.Get(ctx, ev.UploadID)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to get upload")
sublog.Error().Err(err).Msg("Failed to get upload")
continue
}

n, err = session.Node(ctx)
if err != nil {
log.Error().Err(err).Interface("uploadID", ev.UploadID).Msg("Failed to get node after scan")
sublog.Error().Err(err).Msg("Failed to get node after scan")
continue
}
sublog = log.With().Str("spaceid", session.SpaceID()).Str("nodeid", session.NodeID()).Logger()
}

if err := n.SetScanData(ctx, res.Description, res.Scandate); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Interface("resourceID", res.ResourceID).Msg("Failed to set scan results")
sublog.Error().Err(err).Msg("Failed to set scan results")
continue
}

Expand Down
3 changes: 2 additions & 1 deletion pkg/storage/utils/decomposedfs/lookup/lookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,8 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node, hasPermission node.Per
appctx.GetLogger(ctx).
Error().Err(err).
Str("path", p).
Interface("node", n).
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Msg("Path()")
return
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/storage/utils/decomposedfs/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.
if err != nil {
return errors.Wrap(err, "Decomposedfs: error resolving ref")
}
sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger()
sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()

if !n.Exists {
err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
Expand Down Expand Up @@ -135,7 +135,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide
if err != nil {
return errors.Wrap(err, "Decomposedfs: error resolving ref")
}
sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger()
sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()

if !n.Exists {
err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
Expand Down
19 changes: 10 additions & 9 deletions pkg/storage/utils/decomposedfs/node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -513,7 +513,7 @@ func (n *Node) readOwner(ctx context.Context) (*userpb.UserId, error) {
func (n *Node) PermissionSet(ctx context.Context) (provider.ResourcePermissions, bool) {
u, ok := ctxpkg.ContextGetUser(ctx)
if !ok {
appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions")
appctx.GetLogger(ctx).Debug().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Msg("no user in context, returning default permissions")
return NoPermissions(), false
}
if utils.UserEqual(u.Id, n.SpaceRoot.Owner()) {
Expand Down Expand Up @@ -582,7 +582,7 @@ func (n *Node) SetMtime(ctx context.Context, t *time.Time) (err error) {

// SetEtag sets the temporary etag of a node if it differs from the current etag
func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger()
sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()
var tmTime time.Time
if tmTime, err = n.GetTMTime(ctx); err != nil {
return
Expand Down Expand Up @@ -1014,7 +1014,7 @@ func isGrantExpired(g *provider.Grant) bool {
func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap provider.ResourcePermissions, accessDenied bool, err error) {
// check if the current user is the owner
if utils.UserEqual(u.Id, n.Owner()) {
appctx.GetLogger(ctx).Debug().Str("node", n.ID).Msg("user is owner, returning owner permissions")
appctx.GetLogger(ctx).Debug().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Msg("user is owner, returning owner permissions")
return OwnerPermissions(), false, nil
}

Expand All @@ -1032,7 +1032,7 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap prov
// we read all grantees from the node
var grantees []string
if grantees, err = n.ListGrantees(ctx); err != nil {
appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing grantees")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Msg("error listing grantees")
return NoPermissions(), true, err
}

Expand Down Expand Up @@ -1073,15 +1073,15 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap prov
}
AddPermissions(&ap, g.GetPermissions())
case metadata.IsAttrUnset(err):
appctx.GetLogger(ctx).Error().Interface("node", n).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
appctx.GetLogger(ctx).Error().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
// continue with next segment
default:
appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Str("grant", grantees[i]).Msg("error reading permissions")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Msg("error reading permissions")
// continue with next segment
}
}

appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning aggregated permissions")
appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("user", u).Msg("returning aggregated permissions")
return ap, false, nil
}

Expand Down Expand Up @@ -1135,7 +1135,7 @@ func (n *Node) IsDenied(ctx context.Context) bool {
func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) {
attrs, err := n.Xattrs(ctx)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Msg("error listing attributes")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Msg("error listing attributes")
return nil, err
}
for name := range attrs {
Expand Down Expand Up @@ -1202,7 +1202,8 @@ func (n *Node) ListGrants(ctx context.Context) ([]*provider.Grant, error) {
appctx.GetLogger(ctx).
Error().
Err(err).
Str("node", n.ID).
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Str("grantee", g).
Msg("error reading grant")
continue
Expand Down
8 changes: 4 additions & 4 deletions pkg/storage/utils/decomposedfs/recycle.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
}
spaceID := ref.ResourceId.OpaqueId

sublog := appctx.GetLogger(ctx).With().Str("space", spaceID).Str("key", key).Str("relative_path", relativePath).Logger()
sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("key", key).Str("relative_path", relativePath).Logger()

// check permissions
trashnode, err := fs.lu.NodeFromSpaceID(ctx, spaceID)
Expand Down Expand Up @@ -98,7 +98,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
if attrBytes, ok := attrs[prefixes.TrashOriginAttr]; ok {
origin = string(attrBytes)
} else {
sublog.Error().Err(err).Str("space", spaceID).Msg("could not read origin path, skipping")
sublog.Error().Err(err).Str("spaceid", spaceID).Msg("could not read origin path, skipping")
return nil, err
}

Expand Down Expand Up @@ -291,14 +291,14 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p
// TODO nanos
}
} else {
log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not parse time format, ignoring")
log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("spaceid", spaceID).Str("nodeid", nodeID).Str("dtime", timeSuffix).Msg("could not parse time format, ignoring")
}

// lookup origin path in extended attributes
if attr, ok := attrs[prefixes.TrashOriginAttr]; ok {
item.Ref = &provider.Reference{Path: string(attr)}
} else {
log.Error().Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path")
log.Error().Str("trashRoot", trashRoot).Str("item", itemPath).Str("spaceid", spaceID).Str("nodeid", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path")
}
select {
case results <- item:
Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/utils/decomposedfs/spaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -808,7 +808,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
}
}

sublog := appctx.GetLogger(ctx).With().Str("space", n.SpaceRoot.ID).Logger()
sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Logger()

var err error
// TODO apply more filters
Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/utils/decomposedfs/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere
}
}

log.Debug().Interface("session", session).Interface("node", n).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename")
log.Debug().Str("uploadid", session.ID()).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename")

_, err = node.CheckQuota(ctx, n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(session.Size()))
if err != nil {
Expand Down
4 changes: 2 additions & 2 deletions pkg/storage/utils/decomposedfs/upload/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -216,12 +216,12 @@ func (store OcisStore) CreateNodeForUpload(session *OcisSession, initAttrs node.

f, err = store.updateExistingNode(ctx, session, n, session.SpaceID(), uint64(session.Size()))
if f != nil {
appctx.GetLogger(ctx).Info().Str("lockfile", f.Name()).Interface("err", err).Msg("got lock file from updateExistingNode")
appctx.GetLogger(ctx).Debug().Str("lockfile", f.Name()).Interface("err", err).Msg("got lock file from updateExistingNode")
}
} else {
f, err = store.initNewNode(ctx, session, n, uint64(session.Size()))
if f != nil {
appctx.GetLogger(ctx).Info().Str("lockfile", f.Name()).Interface("err", err).Msg("got lock file from initNewNode")
appctx.GetLogger(ctx).Debug().Str("lockfile", f.Name()).Interface("err", err).Msg("got lock file from initNewNode")
}
}
defer func() {
Expand Down
4 changes: 2 additions & 2 deletions pkg/storage/utils/decomposedfs/upload/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ func (session *OcisSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool
if revertNodeMetadata {
n, err := session.Node(ctx)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Str("sessionid", session.ID()).Msg("reading node for session failed")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("uploadid", session.ID()).Msg("reading node for session failed")
}
if session.NodeExists() {
p := session.info.MetaData["versionsPath"]
Expand All @@ -340,7 +340,7 @@ func (session *OcisSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool
// if no other upload session is in progress (processing id != session id) or has finished (processing id == "")
latestSession, err := n.ProcessingID(ctx)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Str("sessionid", session.ID()).Msg("reading processingid for session failed")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("uploadid", session.ID()).Msg("reading processingid for session failed")
}
if latestSession == session.ID() {
// actually delete the node
Expand Down

0 comments on commit e40115d

Please sign in to comment.