diff options
Diffstat (limited to 'tvix/eval')
-rw-r--r-- | tvix/eval/docs/known-optimisation-potential.md | 16
-rw-r--r-- | tvix/eval/src/value/thunk.rs | 4
-rw-r--r-- | tvix/eval/src/vm.rs | 32
3 files changed, 38 insertions, 14 deletions
diff --git a/tvix/eval/docs/known-optimisation-potential.md b/tvix/eval/docs/known-optimisation-potential.md index e7271f4b3527..da9cf265ea5b 100644 --- a/tvix/eval/docs/known-optimisation-potential.md +++ b/tvix/eval/docs/known-optimisation-potential.md @@ -65,3 +65,19 @@ optimisations, but note the most important ones here. The same thing goes for resolving `with builtins;`, which should definitely resolve statically. + +* Avoid nested `VM::run` calls [hard] + + Currently when encountering Nix-native callables (thunks, closures) + the VM's run loop will nest and return the value of the nested call + frame one level up. This makes the Rust call stack almost mirror the + Nix call stack, which is usually undesirable. + + It is possible to detect situations where this is avoidable and + instead set up the VM in such a way that it continues and produces + the desired result in the same run loop, but this is kind of tricky + to get right - especially while other parts are still in flux. + + For details consult the commit with Gerrit change ID + `I96828ab6a628136e0bac1bf03555faa4e6b74ece`, in which the initial + attempt at doing this was reverted. 
diff --git a/tvix/eval/src/value/thunk.rs b/tvix/eval/src/value/thunk.rs index 4fd41689c70c..59fe55cec945 100644 --- a/tvix/eval/src/value/thunk.rs +++ b/tvix/eval/src/value/thunk.rs @@ -84,9 +84,9 @@ impl Thunk { if let ThunkRepr::Suspended { lambda, upvalues } = std::mem::replace(&mut *thunk_mut, ThunkRepr::Blackhole) { - vm.call(lambda, upvalues, 0); *thunk_mut = ThunkRepr::Evaluated( - vm.run().map_err(|e| ErrorKind::ThunkForce(Box::new(e)))?, + vm.call(lambda, upvalues, 0) + .map_err(|e| ErrorKind::ThunkForce(Box::new(e)))?, ); } } diff --git a/tvix/eval/src/vm.rs b/tvix/eval/src/vm.rs index 8d616b8d73b8..fecaae37aa5b 100644 --- a/tvix/eval/src/vm.rs +++ b/tvix/eval/src/vm.rs @@ -161,7 +161,14 @@ impl VM { } } - pub fn call(&mut self, lambda: Rc<Lambda>, upvalues: Vec<Value>, arg_count: usize) { + /// Execute the given lambda in this VM's context, returning its + /// value after its stack frame completes. + pub fn call( + &mut self, + lambda: Rc<Lambda>, + upvalues: Vec<Value>, + arg_count: usize, + ) -> EvalResult<Value> { let frame = CallFrame { lambda, upvalues, @@ -170,22 +177,22 @@ impl VM { }; self.frames.push(frame); + self.run() } - pub fn run(&mut self) -> EvalResult<Value> { + /// Run the VM's current stack frame to completion and return the + /// value. + fn run(&mut self) -> EvalResult<Value> { #[cfg(feature = "disassembler")] let mut tracer = Tracer::new(); loop { + // Break the loop if this call frame has already run to + // completion, pop it off, and return the value to the + // caller. if self.frame().ip == self.chunk().code.len() { - // If this is the end of the top-level function, - // return, otherwise pop the call frame. 
- if self.frames.len() == 1 { - return Ok(self.pop()); - } - self.frames.pop(); - continue; + return Ok(self.pop()); } let op = self.inc_ip(); @@ -413,7 +420,9 @@ impl VM { let callable = self.pop(); match callable { Value::Closure(closure) => { - self.call(closure.lambda(), closure.upvalues().to_vec(), 1) + let result = + self.call(closure.lambda(), closure.upvalues().to_vec(), 1)?; + self.push(result) } Value::Builtin(builtin) => { @@ -684,8 +693,7 @@ pub fn run_lambda(lambda: Lambda) -> EvalResult<Value> { with_stack: vec![], }; - vm.call(Rc::new(lambda), vec![], 0); - let value = vm.run()?; + let value = vm.call(Rc::new(lambda), vec![], 0)?; vm.force_for_output(&value)?; Ok(value) } |