// bevy_render/render_resource/pipeline_cache.rs

1use crate::{
2    render_resource::*,
3    renderer::{RenderAdapter, RenderDevice, WgpuWrapper},
4    Extract,
5};
6use alloc::{borrow::Cow, sync::Arc};
7use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
8use bevy_ecs::{
9    message::MessageReader,
10    resource::Resource,
11    system::{Res, ResMut},
12};
13use bevy_platform::collections::{HashMap, HashSet};
14use bevy_shader::{
15    CachedPipelineId, PipelineCacheError, Shader, ShaderCache, ShaderCacheSource, ShaderDefVal,
16    ValidateShader,
17};
18use bevy_tasks::Task;
19use bevy_utils::default;
20use core::{future::Future, hash::Hash, mem};
21use std::sync::{Mutex, PoisonError};
22use tracing::error;
23use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout};
24
/// A descriptor for a [`Pipeline`].
///
/// Used to store a heterogeneous collection of render and compute pipeline descriptors together.
#[derive(Debug)]
pub enum PipelineDescriptor {
    /// A descriptor for a render (vertex/fragment) pipeline.
    RenderPipelineDescriptor(Box<RenderPipelineDescriptor>),
    /// A descriptor for a compute pipeline.
    ComputePipelineDescriptor(Box<ComputePipelineDescriptor>),
}
33
/// A pipeline defining the data layout and shader logic for a specific GPU task.
///
/// Used to store a heterogeneous collection of render and compute pipelines together.
#[derive(Debug)]
pub enum Pipeline {
    /// A created render (vertex/fragment) pipeline GPU object.
    RenderPipeline(RenderPipeline),
    /// A created compute pipeline GPU object.
    ComputePipeline(ComputePipeline),
}
42
/// Index of a cached render pipeline in a [`PipelineCache`].
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct CachedRenderPipelineId(CachedPipelineId);

impl CachedRenderPipelineId {
    /// An invalid cached render pipeline index, often used to initialize a variable.
    pub const INVALID: Self = CachedRenderPipelineId(usize::MAX);

    /// Returns the underlying index into the [`PipelineCache`]'s pipeline list.
    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}
56
57/// Index of a cached compute pipeline in a [`PipelineCache`].
58#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
59pub struct CachedComputePipelineId(CachedPipelineId);
60
61impl CachedComputePipelineId {
62    /// An invalid cached compute pipeline index, often used to initialize a variable.
63    pub const INVALID: Self = CachedComputePipelineId(usize::MAX);
64
65    #[inline]
66    pub fn id(&self) -> usize {
67        self.0
68    }
69}
70
/// A pipeline descriptor paired with the current state of its GPU object in a [`PipelineCache`].
pub struct CachedPipeline {
    /// The descriptor this pipeline was (or will be) created from.
    pub descriptor: PipelineDescriptor,
    /// The current creation state of the pipeline GPU object.
    pub state: CachedPipelineState,
}
75
/// State of a cached pipeline inserted into a [`PipelineCache`].
#[cfg_attr(
    not(target_arch = "wasm32"),
    expect(
        clippy::large_enum_variant,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )
)]
#[derive(Debug)]
pub enum CachedPipelineState {
    /// The pipeline GPU object is queued for creation.
    Queued,
    /// The pipeline GPU object is being created (holds the in-flight creation task).
    Creating(Task<Result<Pipeline, PipelineCacheError>>),
    /// The pipeline GPU object was created successfully and is available (allocated on the GPU).
    Ok(Pipeline),
    /// An error occurred while trying to create the pipeline GPU object.
    Err(PipelineCacheError),
}
95
96impl CachedPipelineState {
97    /// Convenience method to "unwrap" a pipeline state into its underlying GPU object.
98    ///
99    /// # Returns
100    ///
101    /// The method returns the allocated pipeline GPU object.
102    ///
103    /// # Panics
104    ///
105    /// This method panics if the pipeline GPU object is not available, either because it is
106    /// pending creation or because an error occurred while attempting to create GPU object.
107    pub fn unwrap(&self) -> &Pipeline {
108        match self {
109            CachedPipelineState::Ok(pipeline) => pipeline,
110            CachedPipelineState::Queued => {
111                panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
112            }
113            CachedPipelineState::Creating(..) => {
114                panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
115            }
116            CachedPipelineState::Err(err) => panic!("{}", err),
117        }
118    }
119}
120
// Key for a cached pipeline layout: the ids of its bind group layouts plus its
// push constant ranges.
type LayoutCacheKey = (Vec<BindGroupLayoutId>, Vec<PushConstantRange>);
/// Deduplicates [`PipelineLayout`]s so each unique combination of bind group
/// layouts and push constant ranges is created on the GPU only once.
#[derive(Default)]
struct LayoutCache {
    layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>,
}
126
127impl LayoutCache {
128    fn get(
129        &mut self,
130        render_device: &RenderDevice,
131        bind_group_layouts: &[BindGroupLayout],
132        push_constant_ranges: Vec<PushConstantRange>,
133    ) -> Arc<WgpuWrapper<PipelineLayout>> {
134        let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect();
135        self.layouts
136            .entry((bind_group_ids, push_constant_ranges))
137            .or_insert_with_key(|(_, push_constant_ranges)| {
138                let bind_group_layouts = bind_group_layouts
139                    .iter()
140                    .map(BindGroupLayout::value)
141                    .collect::<Vec<_>>();
142                Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout(
143                    &PipelineLayoutDescriptor {
144                        bind_group_layouts: &bind_group_layouts,
145                        push_constant_ranges,
146                        ..default()
147                    },
148                )))
149            })
150            .clone()
151    }
152}
153
/// Creates a wgpu [`ShaderModule`] on `render_device` from compiled shader source.
///
/// With [`ValidateShader::Enabled`] the module is created via the validating path;
/// with [`ValidateShader::Disabled`] validation is skipped (see the `SAFETY` note below).
///
/// # Errors
///
/// Returns [`PipelineCacheError::CreateShaderModule`] when wgpu reports a validation
/// error while creating the module. On wasm the error future may not resolve
/// immediately, in which case the error is left for wgpu to surface later.
#[expect(
    clippy::result_large_err,
    reason = "See https://github.com/bevyengine/bevy/issues/19220"
)]
fn load_module(
    render_device: &RenderDevice,
    shader_source: ShaderCacheSource,
    validate_shader: &ValidateShader,
) -> Result<WgpuWrapper<ShaderModule>, PipelineCacheError> {
    // Convert the cache's source representation into a wgpu `ShaderSource`.
    let shader_source = match shader_source {
        #[cfg(feature = "shader_format_spirv")]
        ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data),
        #[cfg(not(feature = "shader_format_spirv"))]
        ShaderCacheSource::SpirV(_) => {
            unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders")
        }
        ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)),
        #[cfg(not(feature = "decoupled_naga"))]
        ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)),
    };
    let module_descriptor = ShaderModuleDescriptor {
        label: None,
        source: shader_source,
    };

    // Capture validation errors raised during module creation instead of letting
    // them reach wgpu's global error handler.
    render_device
        .wgpu_device()
        .push_error_scope(wgpu::ErrorFilter::Validation);

    let shader_module = WgpuWrapper::new(match validate_shader {
        ValidateShader::Enabled => {
            render_device.create_and_validate_shader_module(module_descriptor)
        }
        // SAFETY: we are interfacing with shader code, which may contain undefined behavior,
        // such as indexing out of bounds.
        // The checks required are prohibitively expensive and a poor default for game engines.
        ValidateShader::Disabled => unsafe {
            render_device.create_shader_module(module_descriptor)
        },
    });

    let error = render_device.wgpu_device().pop_error_scope();

    // `now_or_never` will return Some if the future is ready and None otherwise.
    // On native platforms, wgpu will yield the error immediately while on wasm it may take longer since the browser APIs are asynchronous.
    // So to keep the complexity of the ShaderCache low, we will only catch this error early on native platforms,
    // and on wasm the error will be handled by wgpu and crash the application.
    if let Some(Some(wgpu::Error::Validation { description, .. })) =
        bevy_tasks::futures::now_or_never(error)
    {
        return Err(PipelineCacheError::CreateShaderModule(description));
    }

    Ok(shader_module)
}
209
/// Deduplicates [`BindGroupLayout`]s by descriptor so each unique layout is
/// created on the GPU only once.
#[derive(Default)]
struct BindGroupLayoutCache {
    bgls: HashMap<BindGroupLayoutDescriptor, BindGroupLayout>,
}
214
215impl BindGroupLayoutCache {
216    fn get(
217        &mut self,
218        render_device: &RenderDevice,
219        descriptor: BindGroupLayoutDescriptor,
220    ) -> BindGroupLayout {
221        self.bgls
222            .entry(descriptor)
223            .or_insert_with_key(|descriptor| {
224                render_device
225                    .create_bind_group_layout(descriptor.label.as_ref(), &descriptor.entries)
226            })
227            .clone()
228    }
229}
230
/// Cache for render and compute pipelines.
///
/// The cache stores existing render and compute pipelines allocated on the GPU, as well as
/// pending creation. Pipelines inserted into the cache are identified by a unique ID, which
/// can be used to retrieve the actual GPU object once it's ready. The creation of the GPU
/// pipeline object is deferred to the [`RenderSystems::Render`] step, just before the render
/// graph starts being processed, as this requires access to the GPU.
///
/// Note that the cache does not perform automatic deduplication of identical pipelines. It is
/// up to the user not to insert the same pipeline twice to avoid wasting GPU resources.
///
/// [`RenderSystems::Render`]: crate::RenderSystems::Render
#[derive(Resource)]
pub struct PipelineCache {
    // Deduplicated pipeline layouts, shared (via `Arc`) with creation tasks.
    layout_cache: Arc<Mutex<LayoutCache>>,
    // Deduplicated bind group layouts.
    bindgroup_layout_cache: Arc<Mutex<BindGroupLayoutCache>>,
    // Compiled shader modules, shared (via `Arc`) with creation tasks.
    shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>,
    device: RenderDevice,
    // All processed pipelines; a pipeline's cached id is its index here.
    pipelines: Vec<CachedPipeline>,
    // Ids of pipelines that still need work in `process_queue`.
    waiting_pipelines: HashSet<CachedPipelineId>,
    // Pipelines queued since the last `process_queue`; behind a `Mutex` so the
    // `queue_*_pipeline` methods can take `&self`.
    new_pipelines: Mutex<Vec<CachedPipeline>>,
    // Shader defs appended to every shader extracted into this cache.
    global_shader_defs: Vec<ShaderDefVal>,
    /// If `true`, disables asynchronous pipeline compilation.
    /// This has no effect on macOS, wasm, or without the `multi_threaded` feature.
    synchronous_pipeline_compilation: bool,
}
257
258impl PipelineCache {
    /// Returns an iterator over the pipelines in the pipeline cache, in id order.
    ///
    /// Pipelines queued since the last [`PipelineCache::process_queue`] are not included.
    pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> {
        self.pipelines.iter()
    }
263
    /// Returns an iterator of the IDs of all currently waiting pipelines.
    pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ {
        self.waiting_pipelines.iter().copied()
    }
268
    /// Create a new pipeline cache associated with the given render device.
    pub fn new(
        device: RenderDevice,
        render_adapter: RenderAdapter,
        synchronous_pipeline_compilation: bool,
    ) -> Self {
        // Shader defs injected into every shader extracted into this cache
        // (see `extract_shaders`).
        let mut global_shader_defs = Vec::new();
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        {
            global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        }

        // NOTE(review): `target_abi = "sim"` appears to target Apple simulator
        // builds — confirm against the supported platforms list.
        if cfg!(target_abi = "sim") {
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
        }

        // Expose the device's storage buffer binding limit to shaders.
        global_shader_defs.push(ShaderDefVal::UInt(
            String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"),
            device.limits().max_storage_buffers_per_shader_stage,
        ));

        Self {
            shader_cache: Arc::new(Mutex::new(ShaderCache::new(
                device.features(),
                render_adapter.get_downlevel_capabilities().flags,
                load_module,
            ))),
            device,
            layout_cache: default(),
            bindgroup_layout_cache: default(),
            waiting_pipelines: default(),
            new_pipelines: default(),
            pipelines: default(),
            global_shader_defs,
            synchronous_pipeline_compilation,
        }
    }
308
309    /// Get the state of a cached render pipeline.
310    ///
311    /// See [`PipelineCache::queue_render_pipeline()`].
312    #[inline]
313    pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState {
314        // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines`
315        self.pipelines
316            .get(id.0)
317            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
318    }
319
320    /// Get the state of a cached compute pipeline.
321    ///
322    /// See [`PipelineCache::queue_compute_pipeline()`].
323    #[inline]
324    pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState {
325        // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines`
326        self.pipelines
327            .get(id.0)
328            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
329    }
330
331    /// Get the render pipeline descriptor a cached render pipeline was inserted from.
332    ///
333    /// See [`PipelineCache::queue_render_pipeline()`].
334    ///
335    /// **Note**: Be careful calling this method. It will panic if called with a pipeline that
336    /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`].
337    #[inline]
338    pub fn get_render_pipeline_descriptor(
339        &self,
340        id: CachedRenderPipelineId,
341    ) -> &RenderPipelineDescriptor {
342        match &self.pipelines[id.0].descriptor {
343            PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor,
344            PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(),
345        }
346    }
347
348    /// Get the compute pipeline descriptor a cached render pipeline was inserted from.
349    ///
350    /// See [`PipelineCache::queue_compute_pipeline()`].
351    ///
352    /// **Note**: Be careful calling this method. It will panic if called with a pipeline that
353    /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`].
354    #[inline]
355    pub fn get_compute_pipeline_descriptor(
356        &self,
357        id: CachedComputePipelineId,
358    ) -> &ComputePipelineDescriptor {
359        match &self.pipelines[id.0].descriptor {
360            PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(),
361            PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor,
362        }
363    }
364
365    /// Try to retrieve a render pipeline GPU object from a cached ID.
366    ///
367    /// # Returns
368    ///
369    /// This method returns a successfully created render pipeline if any, or `None` if the pipeline
370    /// was not created yet or if there was an error during creation. You can check the actual creation
371    /// state with [`PipelineCache::get_render_pipeline_state()`].
372    #[inline]
373    pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> {
374        if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) =
375            &self.pipelines.get(id.0)?.state
376        {
377            Some(pipeline)
378        } else {
379            None
380        }
381    }
382
383    /// Wait for a render pipeline to finish compiling.
384    #[inline]
385    pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) {
386        if self.pipelines.len() <= id.0 {
387            self.process_queue();
388        }
389
390        let state = &mut self.pipelines[id.0].state;
391        if let CachedPipelineState::Creating(task) = state {
392            *state = match bevy_tasks::block_on(task) {
393                Ok(p) => CachedPipelineState::Ok(p),
394                Err(e) => CachedPipelineState::Err(e),
395            };
396        }
397    }
398
399    /// Try to retrieve a compute pipeline GPU object from a cached ID.
400    ///
401    /// # Returns
402    ///
403    /// This method returns a successfully created compute pipeline if any, or `None` if the pipeline
404    /// was not created yet or if there was an error during creation. You can check the actual creation
405    /// state with [`PipelineCache::get_compute_pipeline_state()`].
406    #[inline]
407    pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> {
408        if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) =
409            &self.pipelines.get(id.0)?.state
410        {
411            Some(pipeline)
412        } else {
413            None
414        }
415    }
416
417    /// Insert a render pipeline into the cache, and queue its creation.
418    ///
419    /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with
420    /// an already cached pipeline.
421    ///
422    /// # Returns
423    ///
424    /// This method returns the unique render shader ID of the cached pipeline, which can be used to query
425    /// the caching state with [`get_render_pipeline_state()`] and to retrieve the created GPU pipeline once
426    /// it's ready with [`get_render_pipeline()`].
427    ///
428    /// [`get_render_pipeline_state()`]: PipelineCache::get_render_pipeline_state
429    /// [`get_render_pipeline()`]: PipelineCache::get_render_pipeline
430    pub fn queue_render_pipeline(
431        &self,
432        descriptor: RenderPipelineDescriptor,
433    ) -> CachedRenderPipelineId {
434        let mut new_pipelines = self
435            .new_pipelines
436            .lock()
437            .unwrap_or_else(PoisonError::into_inner);
438        let id = CachedRenderPipelineId(self.pipelines.len() + new_pipelines.len());
439        new_pipelines.push(CachedPipeline {
440            descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)),
441            state: CachedPipelineState::Queued,
442        });
443        id
444    }
445
446    /// Insert a compute pipeline into the cache, and queue its creation.
447    ///
448    /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with
449    /// an already cached pipeline.
450    ///
451    /// # Returns
452    ///
453    /// This method returns the unique compute shader ID of the cached pipeline, which can be used to query
454    /// the caching state with [`get_compute_pipeline_state()`] and to retrieve the created GPU pipeline once
455    /// it's ready with [`get_compute_pipeline()`].
456    ///
457    /// [`get_compute_pipeline_state()`]: PipelineCache::get_compute_pipeline_state
458    /// [`get_compute_pipeline()`]: PipelineCache::get_compute_pipeline
459    pub fn queue_compute_pipeline(
460        &self,
461        descriptor: ComputePipelineDescriptor,
462    ) -> CachedComputePipelineId {
463        let mut new_pipelines = self
464            .new_pipelines
465            .lock()
466            .unwrap_or_else(PoisonError::into_inner);
467        let id = CachedComputePipelineId(self.pipelines.len() + new_pipelines.len());
468        new_pipelines.push(CachedPipeline {
469            descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)),
470            state: CachedPipelineState::Queued,
471        });
472        id
473    }
474
475    pub fn get_bind_group_layout(
476        &self,
477        bind_group_layout_descriptor: &BindGroupLayoutDescriptor,
478    ) -> BindGroupLayout {
479        self.bindgroup_layout_cache
480            .lock()
481            .unwrap()
482            .get(&self.device, bind_group_layout_descriptor.clone())
483    }
484
485    /// Inserts a [`Shader`] into this cache with the provided [`AssetId`].
486    pub fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) {
487        let mut shader_cache = self.shader_cache.lock().unwrap();
488        let pipelines_to_queue = shader_cache.set_shader(id, shader);
489        for cached_pipeline in pipelines_to_queue {
490            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
491            self.waiting_pipelines.insert(cached_pipeline);
492        }
493    }
494
495    /// Removes a [`Shader`] from this cache if it exists.
496    pub fn remove_shader(&mut self, shader: AssetId<Shader>) {
497        let mut shader_cache = self.shader_cache.lock().unwrap();
498        let pipelines_to_queue = shader_cache.remove(shader);
499        for cached_pipeline in pipelines_to_queue {
500            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
501            self.waiting_pipelines.insert(cached_pipeline);
502        }
503    }
504
    /// Kicks off creation of the GPU render pipeline for `id` from `descriptor`.
    ///
    /// Bind group layouts are resolved synchronously; shader compilation and
    /// pipeline creation run in the task returned by `create_pipeline_task`,
    /// which may execute asynchronously depending on platform/configuration.
    fn start_create_render_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedPipelineState {
        // Clone the shared handles so the `async move` block below can own them.
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        // Resolve (and cache) the bind group layouts before spawning the task.
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                // Compile (or fetch the cached) vertex shader module.
                let vertex_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.vertex.shader.id(),
                    &descriptor.vertex.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                // The fragment stage is optional (e.g. depth-only pipelines).
                let fragment_module = match &descriptor.fragment {
                    Some(fragment) => {
                        match shader_cache.get(
                            &device,
                            id,
                            fragment.shader.id(),
                            &fragment.shader_defs,
                        ) {
                            Ok(module) => Some(module),
                            Err(err) => return Err(err),
                        }
                    }
                    None => None,
                };

                // No layouts and no push constants: pass `None` instead of an
                // explicit (empty) pipeline layout.
                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &bind_group_layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                // Release the cache locks before the (potentially slow) GPU call.
                drop((shader_cache, layout_cache));

                let vertex_buffer_layouts = descriptor
                    .vertex
                    .buffers
                    .iter()
                    .map(|layout| RawVertexBufferLayout {
                        array_stride: layout.array_stride,
                        attributes: &layout.attributes,
                        step_mode: layout.step_mode,
                    })
                    .collect::<Vec<_>>();

                // `unwrap` is safe here: `fragment_module` is `Some` whenever
                // `descriptor.fragment` is `Some` (see above).
                let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
                    (
                        fragment_module.unwrap(),
                        fragment.entry_point.as_deref(),
                        fragment.targets.as_slice(),
                    )
                });

                // TODO: Expose the rest of this somehow
                let compilation_options = PipelineCompilationOptions {
                    constants: &[],
                    zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory,
                };

                let descriptor = RawRenderPipelineDescriptor {
                    multiview: None,
                    depth_stencil: descriptor.depth_stencil.clone(),
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    multisample: descriptor.multisample,
                    primitive: descriptor.primitive,
                    vertex: RawVertexState {
                        buffers: &vertex_buffer_layouts,
                        entry_point: descriptor.vertex.entry_point.as_deref(),
                        module: &vertex_module,
                        // TODO: Should this be the same as the fragment compilation options?
                        compilation_options: compilation_options.clone(),
                    },
                    fragment: fragment_data
                        .as_ref()
                        .map(|(module, entry_point, targets)| RawFragmentState {
                            entry_point: entry_point.as_deref(),
                            module,
                            targets,
                            // TODO: Should this be the same as the vertex compilation options?
                            compilation_options,
                        }),
                    cache: None,
                };

                Ok(Pipeline::RenderPipeline(
                    device.create_render_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }
623
    /// Kicks off creation of the GPU compute pipeline for `id` from `descriptor`.
    ///
    /// Bind group layouts are resolved synchronously; shader compilation and
    /// pipeline creation run in the task returned by `create_pipeline_task`,
    /// which may execute asynchronously depending on platform/configuration.
    fn start_create_compute_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedPipelineState {
        // Clone the shared handles so the `async move` block below can own them.
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        // Resolve (and cache) the bind group layouts before spawning the task.
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                // Compile (or fetch the cached) compute shader module.
                let compute_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.shader.id(),
                    &descriptor.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                // No layouts and no push constants: pass `None` instead of an
                // explicit (empty) pipeline layout.
                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &bind_group_layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                // Release the cache locks before the (potentially slow) GPU call.
                drop((shader_cache, layout_cache));

                let descriptor = RawComputePipelineDescriptor {
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    module: &compute_module,
                    entry_point: descriptor.entry_point.as_deref(),
                    // TODO: Expose the rest of this somehow
                    compilation_options: PipelineCompilationOptions {
                        constants: &[],
                        zero_initialize_workgroup_memory: descriptor
                            .zero_initialize_workgroup_memory,
                    },
                    cache: None,
                };

                Ok(Pipeline::ComputePipeline(
                    device.create_compute_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }
690
    /// Process the pipeline queue and create all pending pipelines if possible.
    ///
    /// This is generally called automatically during the [`RenderSystems::Render`] step, but can
    /// be called manually to force creation at a different time.
    ///
    /// [`RenderSystems::Render`]: crate::RenderSystems::Render
    pub fn process_queue(&mut self) {
        // Both collections are taken out of `self` so that `process_pipeline`
        // can borrow `self` mutably while we iterate over them.
        let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines);
        let mut pipelines = mem::take(&mut self.pipelines);

        {
            let mut new_pipelines = self
                .new_pipelines
                .lock()
                .unwrap_or_else(PoisonError::into_inner);
            // Move newly queued pipelines into the main list; a pipeline's id is
            // its index in `pipelines`.
            for new_pipeline in new_pipelines.drain(..) {
                let id = pipelines.len();
                pipelines.push(new_pipeline);
                waiting_pipelines.insert(id);
            }
        }

        for id in waiting_pipelines {
            self.process_pipeline(&mut pipelines[id], id);
        }

        self.pipelines = pipelines;
    }
719
    /// Advances a single cached pipeline one step through its state machine:
    /// `Queued` pipelines begin (possibly asynchronous) creation, `Creating`
    /// pipelines have their task polled, retryable errors are re-queued, and
    /// fatal errors are logged.
    ///
    /// Unless the pipeline reached `Ok` or a fatal `Err` (the early `return`s
    /// below), its id is re-inserted into `waiting_pipelines` so it is
    /// processed again on the next queue run.
    fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) {
        match &mut cached_pipeline.state {
            CachedPipelineState::Queued => {
                cached_pipeline.state = match &cached_pipeline.descriptor {
                    PipelineDescriptor::RenderPipelineDescriptor(descriptor) => {
                        self.start_create_render_pipeline(id, *descriptor.clone())
                    }
                    PipelineDescriptor::ComputePipelineDescriptor(descriptor) => {
                        self.start_create_compute_pipeline(id, *descriptor.clone())
                    }
                };
            }

            CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) {
                Some(Ok(pipeline)) => {
                    cached_pipeline.state = CachedPipelineState::Ok(pipeline);
                    return;
                }
                Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err),
                // Task not finished yet; keep waiting.
                _ => (),
            },

            CachedPipelineState::Err(err) => match err {
                // Retry
                PipelineCacheError::ShaderNotLoaded(_)
                | PipelineCacheError::ShaderImportNotYetAvailable => {
                    cached_pipeline.state = CachedPipelineState::Queued;
                }

                // Shader could not be processed ... retrying won't help
                PipelineCacheError::ProcessShaderError(err) => {
                    let error_detail =
                        err.emit_to_string(&self.shader_cache.lock().unwrap().composer);
                    // Opt-in verbose context: any value other than "", "0" or
                    // "false" enables it.
                    if std::env::var("VERBOSE_SHADER_ERROR")
                        .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false"))
                    {
                        error!("{}", pipeline_error_context(cached_pipeline));
                    }
                    error!("failed to process shader error:\n{}", error_detail);
                    return;
                }
                PipelineCacheError::CreateShaderModule(description) => {
                    error!("failed to create shader module: {}", description);
                    return;
                }
            },

            CachedPipelineState::Ok(_) => return,
        }

        // Retry
        self.waiting_pipelines.insert(id);
    }
773
    /// System that drives pipeline compilation each frame by draining the
    /// cache's queue of pending pipelines (see [`Self::process_queue`]).
    pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) {
        cache.process_queue();
    }
777
778    pub(crate) fn extract_shaders(
779        mut cache: ResMut<Self>,
780        shaders: Extract<Res<Assets<Shader>>>,
781        mut events: Extract<MessageReader<AssetEvent<Shader>>>,
782    ) {
783        for event in events.read() {
784            #[expect(
785                clippy::match_same_arms,
786                reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon."
787            )]
788            match event {
789                // PERF: Instead of blocking waiting for the shader cache lock, try again next frame if the lock is currently held
790                AssetEvent::Added { id } | AssetEvent::Modified { id } => {
791                    if let Some(shader) = shaders.get(*id) {
792                        let mut shader = shader.clone();
793                        shader.shader_defs.extend(cache.global_shader_defs.clone());
794
795                        cache.set_shader(*id, shader);
796                    }
797                }
798                AssetEvent::Removed { id } => cache.remove_shader(*id),
799                AssetEvent::Unused { .. } => {}
800                AssetEvent::LoadedWithDependencies { .. } => {
801                    // TODO: handle this
802                }
803            }
804        }
805    }
806}
807
808fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String {
809    fn format(
810        shader: &Handle<Shader>,
811        entry: &Option<Cow<'static, str>>,
812        shader_defs: &[ShaderDefVal],
813    ) -> String {
814        let source = match shader.path() {
815            Some(path) => path.path().to_string_lossy().to_string(),
816            None => String::new(),
817        };
818        let entry = match entry {
819            Some(entry) => entry.to_string(),
820            None => String::new(),
821        };
822        let shader_defs = shader_defs
823            .iter()
824            .flat_map(|def| match def {
825                ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()),
826                ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")),
827                ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")),
828                _ => None,
829            })
830            .collect::<Vec<_>>()
831            .join(", ");
832        format!("{source}:{entry}\nshader defs: {shader_defs}")
833    }
834    match &cached_pipeline.descriptor {
835        PipelineDescriptor::RenderPipelineDescriptor(desc) => {
836            let vert = &desc.vertex;
837            let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs);
838            let Some(frag) = desc.fragment.as_ref() else {
839                return vert_str;
840            };
841            let frag_str = format(&frag.shader, &frag.entry_point, &frag.shader_defs);
842            format!("vertex {vert_str}\nfragment {frag_str}")
843        }
844        PipelineDescriptor::ComputePipelineDescriptor(desc) => {
845            format(&desc.shader, &desc.entry_point, &desc.shader_defs)
846        }
847    }
848}
849
#[cfg(all(
    not(target_arch = "wasm32"),
    not(target_os = "macos"),
    feature = "multi_threaded"
))]
/// Starts creating a pipeline: synchronously (blocking until done) when
/// `sync` is set, otherwise as a task on the async compute pool.
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    sync: bool,
) -> CachedPipelineState {
    if sync {
        // Block right here and resolve to a final state.
        bevy_tasks::block_on(task)
            .map_or_else(CachedPipelineState::Err, CachedPipelineState::Ok)
    } else {
        // Hand the future to the async compute pool and poll it later.
        CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task))
    }
}
868
869#[cfg(any(
870    target_arch = "wasm32",
871    target_os = "macos",
872    not(feature = "multi_threaded")
873))]
874fn create_pipeline_task(
875    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
876    _sync: bool,
877) -> CachedPipelineState {
878    match bevy_tasks::block_on(task) {
879        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
880        Err(err) => CachedPipelineState::Err(err),
881    }
882}