Skip to main content

miden_processor/fast/
mod.rs

1#[cfg(test)]
2use alloc::rc::Rc;
3use alloc::{boxed::Box, sync::Arc, vec::Vec};
4#[cfg(test)]
5use core::cell::Cell;
6use core::{cmp::min, ops::ControlFlow};
7
8use miden_air::{Felt, trace::RowIndex};
9use miden_core::{
10    EMPTY_WORD, WORD_SIZE, Word, ZERO,
11    mast::{MastForest, MastNodeExt, MastNodeId},
12    operations::Decorator,
13    precompile::PrecompileTranscript,
14    program::{MIN_STACK_DEPTH, Program, StackInputs, StackOutputs},
15    utils::range,
16};
17
18use crate::{
19    AdviceInputs, AdviceProvider, BaseHost, ContextId, ExecutionError, ExecutionOptions,
20    ProcessorState,
21    continuation_stack::{Continuation, ContinuationStack},
22    errors::MapExecErrNoCtx,
23    tracer::{OperationHelperRegisters, Tracer},
24};
25
// Submodules implementing the fast processor: basic-block execution, call/dyncall handling,
// the public execution API, external-node resolution, memory, operation dispatch, and stepping.
mod basic_block;
mod call_and_dyn;
mod execution_api;
mod external;
mod memory;
mod operation;
mod step;

// Re-exports forming the public surface of this module.
pub use basic_block::SystemEventError;
pub use memory::Memory;
pub use step::{BreakReason, ResumeContext};

#[cfg(test)]
mod tests;
40
// CONSTANTS
// ================================================================================================

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs need it, so we're forced to push it up (though this should be double-checked).
/// At this high a value, we're starting to see some performance degradation on benchmarks. For
/// example, the blake3 benchmark went from 285 MHz to 250 MHz (~10% degradation). Perhaps a better
/// solution would be to make this value much smaller (~1000), and then fallback to a `Vec` if the
/// stack overflows.
///
/// The buffer is allocated once per processor (see `new_with_options()`) and never grows.
const STACK_BUFFER_SIZE: usize = 6850;
53
/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits the limit, it's much more likely to hit
/// the upper bound than the lower bound, since hitting the lower bound only occurs when you drop
/// 0's that were generated automatically to keep the stack depth at 16. In practice, if this
/// occurs, it is most likely a bug.
///
/// When the lower bound is reached, the stack is re-centered back to this index (see
/// `decrement_stack_size()` / `reset_stack_in_buffer()`).
const INITIAL_STACK_TOP_IDX: usize = 250;
61
62// FAST PROCESSOR
63// ================================================================================================
64
/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///     - This was observed to increase performance by at least 2x compared to using a `Vec` with
///       `push()` & `pop()`.
///     - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///       respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom.
///     - For example, if the stack top is 10 elements away from the top boundary, and the stack
///       bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///       operations that modify the stack depth with no bounds check.
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. This
///   will be restored when returning from the call or syscall.
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is incremented
///   by 1 for every row that `Process` adds to the main trace.
///     - It is important to do so because the clock cycle is used to determine the context ID for
///       new execution contexts when using `call` or `dyncall`.
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    ///
    /// Invariant (maintained by `decrement_stack_size()`): `stack_top_idx - stack_bot_idx` never
    /// drops below `MIN_STACK_DEPTH`.
    stack: Box<[Felt; STACK_BUFFER_SIZE]>,
    /// The index of the top of the stack (one past the last occupied slot).
    stack_top_idx: usize,
    /// The index of the bottom of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    clk: RowIndex,

    /// The current context ID.
    ctx: ContextId,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    caller_hash: Word,

    /// The advice provider to be used during execution.
    advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    memory: Memory,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Options for execution, including but not limited to whether debug or tracing is enabled,
    /// the size of core trace fragments during execution, etc.
    options: ExecutionOptions,

    /// Transcript used to record commitments via `log_precompile` instruction (implemented via
    /// Poseidon2 sponge).
    pc_transcript: PrecompileTranscript,

    /// Tracks decorator retrieval calls for testing.
    #[cfg(test)]
    pub decorator_retrieval_count: Rc<Cell<usize>>,
}
138
139impl FastProcessor {
140    /// Packages the processor state after successful execution into a public result type.
141    #[inline(always)]
142    fn into_execution_output(self, stack: StackOutputs) -> ExecutionOutput {
143        ExecutionOutput {
144            stack,
145            advice: self.advice,
146            memory: self.memory,
147            final_precompile_transcript: self.pc_transcript,
148        }
149    }
150
151    /// Converts the terminal result of a full execution run into [`ExecutionOutput`].
152    #[inline(always)]
153    fn execution_result_from_flow(
154        flow: ControlFlow<BreakReason, StackOutputs>,
155        processor: Self,
156    ) -> Result<ExecutionOutput, ExecutionError> {
157        match flow {
158            ControlFlow::Continue(stack_outputs) => {
159                Ok(processor.into_execution_output(stack_outputs))
160            },
161            ControlFlow::Break(break_reason) => match break_reason {
162                BreakReason::Err(err) => Err(err),
163                BreakReason::Stopped(_) => {
164                    unreachable!("Execution never stops prematurely with NeverStopper")
165                },
166            },
167        }
168    }
169
170    /// Converts a testing-only execution result into stack outputs.
171    #[cfg(any(test, feature = "testing"))]
172    #[inline(always)]
173    fn stack_result_from_flow(
174        flow: ControlFlow<BreakReason, StackOutputs>,
175    ) -> Result<StackOutputs, ExecutionError> {
176        match flow {
177            ControlFlow::Continue(stack_outputs) => Ok(stack_outputs),
178            ControlFlow::Break(break_reason) => match break_reason {
179                BreakReason::Err(err) => Err(err),
180                BreakReason::Stopped(_) => {
181                    unreachable!("Execution never stops prematurely with NeverStopper")
182                },
183            },
184        }
185    }
186
187    // CONSTRUCTORS
188    // ----------------------------------------------------------------------------------------------
189
190    /// Creates a new `FastProcessor` instance with the given stack inputs.
191    ///
192    /// By default, advice inputs are empty and execution options use their defaults
193    /// (debugging and tracing disabled).
194    ///
195    /// # Example
196    /// ```ignore
197    /// use miden_processor::FastProcessor;
198    ///
199    /// let processor = FastProcessor::new(stack_inputs)
200    ///     .with_advice(advice_inputs)
201    ///     .with_debugging(true)
202    ///     .with_tracing(true);
203    /// ```
204    pub fn new(stack_inputs: StackInputs) -> Self {
205        Self::new_with_options(stack_inputs, AdviceInputs::default(), ExecutionOptions::default())
206    }
207
208    /// Sets the advice inputs for the processor.
209    pub fn with_advice(mut self, advice_inputs: AdviceInputs) -> Self {
210        self.advice = advice_inputs.into();
211        self
212    }
213
214    /// Sets the execution options for the processor.
215    ///
216    /// This will override any previously set debugging or tracing settings.
217    pub fn with_options(mut self, options: ExecutionOptions) -> Self {
218        self.options = options;
219        self
220    }
221
222    /// Enables or disables debugging mode.
223    ///
224    /// When debugging is enabled, debug decorators will be executed during program execution.
225    pub fn with_debugging(mut self, enabled: bool) -> Self {
226        self.options = self.options.with_debugging(enabled);
227        self
228    }
229
230    /// Enables or disables tracing mode.
231    ///
232    /// When tracing is enabled, trace decorators will be executed during program execution.
233    pub fn with_tracing(mut self, enabled: bool) -> Self {
234        self.options = self.options.with_tracing(enabled);
235        self
236    }
237
238    /// Constructor for creating a `FastProcessor` with all options specified at once.
239    ///
240    /// For a more fluent API, consider using `FastProcessor::new()` with builder methods.
241    pub fn new_with_options(
242        stack_inputs: StackInputs,
243        advice_inputs: AdviceInputs,
244        options: ExecutionOptions,
245    ) -> Self {
246        let stack_top_idx = INITIAL_STACK_TOP_IDX;
247        let stack = {
248            // Note: we use `Vec::into_boxed_slice()` here, since `Box::new([T; N])` first allocates
249            // the array on the stack, and then moves it to the heap. This might cause a
250            // stack overflow on some systems.
251            let mut stack: Box<[Felt; STACK_BUFFER_SIZE]> =
252                vec![ZERO; STACK_BUFFER_SIZE].into_boxed_slice().try_into().unwrap();
253
254            // Copy inputs in reverse order so first element ends up at top of stack
255            for (i, &input) in stack_inputs.iter().enumerate() {
256                stack[stack_top_idx - 1 - i] = input;
257            }
258            stack
259        };
260
261        Self {
262            advice: advice_inputs.into(),
263            stack,
264            stack_top_idx,
265            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
266            clk: 0_u32.into(),
267            ctx: 0_u32.into(),
268            caller_hash: EMPTY_WORD,
269            memory: Memory::new(),
270            call_stack: Vec::new(),
271            options,
272            pc_transcript: PrecompileTranscript::new(),
273            #[cfg(test)]
274            decorator_retrieval_count: Rc::new(Cell::new(0)),
275        }
276    }
277
278    /// Returns the resume context to be used with the first call to `step_sync()`.
279    pub fn get_initial_resume_context(
280        &mut self,
281        program: &Program,
282    ) -> Result<ResumeContext, ExecutionError> {
283        self.advice
284            .extend_map(program.mast_forest().advice_map())
285            .map_exec_err_no_ctx()?;
286
287        Ok(ResumeContext {
288            current_forest: program.mast_forest().clone(),
289            continuation_stack: ContinuationStack::new(program),
290            kernel: program.kernel().clone(),
291        })
292    }
293
294    // ACCESSORS
295    // -------------------------------------------------------------------------------------------
296
    /// Returns whether the processor is executing in debug mode.
    ///
    /// Debug mode is controlled by [`ExecutionOptions`] (see `with_debugging()`).
    #[inline(always)]
    pub fn in_debug_mode(&self) -> bool {
        self.options.enable_debugging()
    }
302
303    /// Returns true if decorators should be executed.
304    ///
305    /// This corresponds to either being in debug mode (for debug decorators) or having tracing
306    /// enabled (for trace decorators).
307    #[inline(always)]
308    fn should_execute_decorators(&self) -> bool {
309        self.in_debug_mode() || self.options.enable_tracing()
310    }
311
312    #[cfg(test)]
313    #[inline(always)]
314    fn record_decorator_retrieval(&self) {
315        self.decorator_retrieval_count.set(self.decorator_retrieval_count.get() + 1);
316    }
317
318    /// Returns the size of the stack.
319    #[inline(always)]
320    fn stack_size(&self) -> usize {
321        self.stack_top_idx - self.stack_bot_idx
322    }
323
324    /// Returns the stack, such that the top of the stack is at the last index of the returned
325    /// slice.
326    pub fn stack(&self) -> &[Felt] {
327        &self.stack[self.stack_bot_idx..self.stack_top_idx]
328    }
329
330    /// Returns the top 16 elements of the stack.
331    pub fn stack_top(&self) -> &[Felt] {
332        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
333    }
334
335    /// Returns a mutable reference to the top 16 elements of the stack.
336    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
337        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
338    }
339
340    /// Returns the element on the stack at index `idx`.
341    ///
342    /// This method is only meant to be used to access the stack top by operation handlers, and
343    /// system event handlers.
344    ///
345    /// # Preconditions
346    /// - `idx` must be less than or equal to 15.
347    #[inline(always)]
348    pub fn stack_get(&self, idx: usize) -> Felt {
349        self.stack[self.stack_top_idx - idx - 1]
350    }
351
352    /// Same as [`Self::stack_get()`], but returns [`ZERO`] if `idx` falls below index 0 in the
353    /// stack buffer.
354    ///
355    /// Use this instead of `stack_get()` when `idx` may exceed 15.
356    #[inline(always)]
357    pub fn stack_get_safe(&self, idx: usize) -> Felt {
358        if idx < self.stack_top_idx {
359            self.stack[self.stack_top_idx - idx - 1]
360        } else {
361            ZERO
362        }
363    }
364
365    /// Mutable variant of `stack_get()`.
366    ///
367    /// This method is only meant to be used to access the stack top by operation handlers, and
368    /// system event handlers.
369    ///
370    /// # Preconditions
371    /// - `idx` must be less than or equal to 15.
372    #[inline(always)]
373    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
374        &mut self.stack[self.stack_top_idx - idx - 1]
375    }
376
377    /// Returns the word on the stack starting at index `start_idx` in "stack order".
378    ///
379    /// For `start_idx=0` the top element of the stack will be at position 0 in the word.
380    ///
381    /// For example, if the stack looks like this:
382    ///
383    /// top                                                       bottom
384    /// v                                                           v
385    /// a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
386    ///
387    /// Then
388    /// - `stack_get_word(0)` returns `[a, b, c, d]`,
389    /// - `stack_get_word(1)` returns `[b, c, d, e]`,
390    /// - etc.
391    ///
392    /// This method is only meant to be used to access the stack top by operation handlers, and
393    /// system event handlers.
394    ///
395    /// # Preconditions
396    /// - `start_idx` must be less than or equal to 12.
397    #[inline(always)]
398    pub fn stack_get_word(&self, start_idx: usize) -> Word {
399        // Ensure we have enough elements to form a complete word
400        debug_assert!(
401            start_idx + WORD_SIZE <= self.stack_depth() as usize,
402            "Not enough elements on stack to read word starting at index {start_idx}"
403        );
404
405        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
406        let mut result: [Felt; WORD_SIZE] =
407            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
408        // Reverse so top of stack (idx 0) goes to word[0]
409        result.reverse();
410        result.into()
411    }
412
    /// Same as [`Self::stack_get_word()`], but returns [`ZERO`] for any element that falls below
    /// index 0 in the stack buffer.
    ///
    /// Use this instead of `stack_get_word()` when `start_idx + WORD_SIZE` may exceed
    /// `stack_top_idx`.
    #[inline(always)]
    pub fn stack_get_word_safe(&self, start_idx: usize) -> Word {
        // Clamp the requested word's buffer range to [0, stack_top_idx); the saturating subs
        // ensure both bounds stop at buffer index 0 instead of underflowing.
        let buf_end = self.stack_top_idx.saturating_sub(start_idx);
        let buf_start = self.stack_top_idx.saturating_sub(start_idx.saturating_add(WORD_SIZE));
        let num_elements_to_read_from_buf = buf_end - buf_start;

        let mut result = [ZERO; WORD_SIZE];
        if num_elements_to_read_from_buf == WORD_SIZE {
            // Fast path: the entire word lies within the buffer.
            result.copy_from_slice(&self.stack[range(buf_start, WORD_SIZE)]);
        } else if num_elements_to_read_from_buf > 0 {
            // Partial read: place the available elements at the tail of `result` so that, after
            // the reverse below, the out-of-buffer (deepest) positions remain ZERO.
            let offset = WORD_SIZE - num_elements_to_read_from_buf;
            result[offset..]
                .copy_from_slice(&self.stack[range(buf_start, num_elements_to_read_from_buf)]);
        }
        // The buffer stores the stack in reverse order; flip so the stack top lands in word[0].
        result.reverse();

        result.into()
    }
436
437    /// Returns the number of elements on the stack in the current context.
438    #[inline(always)]
439    pub fn stack_depth(&self) -> u32 {
440        (self.stack_top_idx - self.stack_bot_idx) as u32
441    }
442
    /// Returns a read-only reference to the processor's memory.
    pub fn memory(&self) -> &Memory {
        &self.memory
    }
447
    /// Returns a read-only reference to the execution options.
    pub fn execution_options(&self) -> &ExecutionOptions {
        &self.options
    }
452
    /// Returns a narrowed interface for reading and updating the processor state.
    ///
    /// The returned view borrows `self` for its lifetime.
    #[inline(always)]
    pub fn state(&self) -> ProcessorState<'_> {
        ProcessorState { processor: self }
    }
458
459    // MUTATORS
460    // -------------------------------------------------------------------------------------------
461
462    /// Writes an element to the stack at the given index.
463    #[inline(always)]
464    pub fn stack_write(&mut self, idx: usize, element: Felt) {
465        self.stack[self.stack_top_idx - idx - 1] = element
466    }
467
468    /// Writes a word to the stack starting at the given index.
469    ///
470    /// `word[0]` goes to stack position start_idx (top), `word[1]` to start_idx+1, etc.
471    #[inline(always)]
472    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
473        debug_assert!(start_idx <= MIN_STACK_DEPTH - WORD_SIZE);
474
475        let word_start_idx = self.stack_top_idx - start_idx - 4;
476        let mut source: [Felt; WORD_SIZE] = (*word).into();
477        // Reverse so word[0] ends up at the top of stack (highest internal index)
478        source.reverse();
479        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
480    }
481
482    /// Swaps the elements at the given indices on the stack.
483    #[inline(always)]
484    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
485        let a = self.stack_get(idx1);
486        let b = self.stack_get(idx2);
487        self.stack_write(idx1, b);
488        self.stack_write(idx2, a);
489    }
490
491    // DECORATOR EXECUTORS
492    // --------------------------------------------------------------------------------------------
493
494    /// Executes the decorators that should be executed before entering a node.
495    fn execute_before_enter_decorators(
496        &self,
497        node_id: MastNodeId,
498        current_forest: &MastForest,
499        host: &mut impl BaseHost,
500    ) -> ControlFlow<BreakReason> {
501        if !self.should_execute_decorators() {
502            return ControlFlow::Continue(());
503        }
504
505        #[cfg(test)]
506        self.record_decorator_retrieval();
507
508        let node = current_forest
509            .get_node_by_id(node_id)
510            .expect("internal error: node id {node_id} not found in current forest");
511
512        for &decorator_id in node.before_enter(current_forest) {
513            self.execute_decorator(&current_forest[decorator_id], host)?;
514        }
515
516        ControlFlow::Continue(())
517    }
518
519    /// Executes the decorators that should be executed after exiting a node.
520    fn execute_after_exit_decorators(
521        &self,
522        node_id: MastNodeId,
523        current_forest: &MastForest,
524        host: &mut impl BaseHost,
525    ) -> ControlFlow<BreakReason> {
526        if !self.in_debug_mode() {
527            return ControlFlow::Continue(());
528        }
529
530        #[cfg(test)]
531        self.record_decorator_retrieval();
532
533        let node = current_forest
534            .get_node_by_id(node_id)
535            .expect("internal error: node id {node_id} not found in current forest");
536
537        for &decorator_id in node.after_exit(current_forest) {
538            self.execute_decorator(&current_forest[decorator_id], host)?;
539        }
540
541        ControlFlow::Continue(())
542    }
543
544    /// Executes the specified decorator
545    fn execute_decorator(
546        &self,
547        decorator: &Decorator,
548        host: &mut impl BaseHost,
549    ) -> ControlFlow<BreakReason> {
550        match decorator {
551            Decorator::Debug(options) => {
552                if self.in_debug_mode() {
553                    let processor_state = self.state();
554                    if let Err(err) = host.on_debug(&processor_state, options) {
555                        return ControlFlow::Break(BreakReason::Err(
556                            crate::errors::HostError::DebugHandlerError { err }.into(),
557                        ));
558                    }
559                }
560            },
561            Decorator::Trace(id) => {
562                if self.options.enable_tracing() {
563                    let processor_state = self.state();
564                    if let Err(err) = host.on_trace(&processor_state, *id) {
565                        return ControlFlow::Break(BreakReason::Err(
566                            crate::errors::HostError::TraceHandlerError { trace_id: *id, err }
567                                .into(),
568                        ));
569                    }
570                }
571            },
572        };
573        ControlFlow::Continue(())
574    }
575
    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    ///
    /// Note: no bounds check is performed here; per the struct-level docs, callers rely on the
    /// precomputed per-operation bounds analysis to keep `stack_top_idx` within the buffer.
    #[inline(always)]
    fn increment_stack_size(&mut self) {
        self.stack_top_idx += 1;
    }
583
    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would become less
    /// than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (which
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        // Drag the bottom index down alongside the top when necessary, so the tracked depth
        // never drops below MIN_STACK_DEPTH (16) elements.
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);
    }
600
    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
    /// - `new_stack_top_idx` must be at least `MIN_STACK_DEPTH`.
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy stack to its new position
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out stack below the new `new_stack_bot_idx`, since this is where overflow values
        // come from, and are guaranteed to be ZERO. We don't need to zero out above
        // `stack_top_idx`, since values there are never read before being written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices (only after the copy above, which reads the old indices).
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }
625}
626
627// EXECUTION OUTPUT
628// ===============================================================================================
629
/// The output of a program execution, containing the state of the stack, advice provider,
/// memory, and final precompile transcript at the end of execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    /// The state of the operand stack at the end of execution.
    pub stack: StackOutputs,
    /// The advice provider in its final state.
    pub advice: AdviceProvider,
    /// The memory state at the end of execution.
    pub memory: Memory,
    /// The precompile transcript accumulated during execution.
    pub final_precompile_transcript: PrecompileTranscript,
}
639
640// EXECUTION CONTEXT INFO
641// ===============================================================================================
642
/// Information about the execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    /// The context ID to restore when returning from the call.
    ctx: ContextId,
    /// The caller's function hash to restore when returning from the call.
    fn_hash: Word,
}
655
656// NOOP TRACER
657// ================================================================================================
658
/// A [Tracer] that does nothing.
///
/// Used for plain (non-traced) execution where per-cycle callbacks should have zero cost.
pub struct NoopTracer;
661
impl Tracer for NoopTracer {
    type Processor = FastProcessor;

    /// No-op: this tracer ignores the start of every clock cycle.
    #[inline(always)]
    fn start_clock_cycle(
        &mut self,
        _processor: &FastProcessor,
        _continuation: Continuation,
        _continuation_stack: &ContinuationStack,
        _current_forest: &Arc<MastForest>,
    ) {
        // do nothing
    }

    /// No-op: this tracer ignores the end of every clock cycle.
    #[inline(always)]
    fn finalize_clock_cycle(
        &mut self,
        _processor: &FastProcessor,
        _op_helper_registers: OperationHelperRegisters,
        _current_forest: &Arc<MastForest>,
    ) {
        // do nothing
    }
}
685}