1use crate::{
2 render_resource::*,
3 renderer::{RenderAdapter, RenderDevice, WgpuWrapper},
4 Extract,
5};
6use alloc::{borrow::Cow, sync::Arc};
7use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
8use bevy_ecs::{
9 message::MessageReader,
10 resource::Resource,
11 system::{Res, ResMut},
12};
13use bevy_platform::collections::{HashMap, HashSet};
14use bevy_shader::{
15 CachedPipelineId, PipelineCacheError, Shader, ShaderCache, ShaderCacheSource, ShaderDefVal,
16 ValidateShader,
17};
18use bevy_tasks::Task;
19use bevy_utils::default;
20use core::{future::Future, hash::Hash, mem};
21use std::sync::{Mutex, PoisonError};
22use tracing::error;
23use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout};
24
/// The descriptor for a queued pipeline: either a render or a compute pipeline.
///
/// The inner descriptors are boxed to keep this enum (and [`CachedPipeline`])
/// small.
#[derive(Debug)]
pub enum PipelineDescriptor {
    RenderPipelineDescriptor(Box<RenderPipelineDescriptor>),
    ComputePipelineDescriptor(Box<ComputePipelineDescriptor>),
}
33
/// A created GPU pipeline object: either a render or a compute pipeline.
#[derive(Debug)]
pub enum Pipeline {
    RenderPipeline(RenderPipeline),
    ComputePipeline(ComputePipeline),
}
42
/// Index of a cached render pipeline inside a [`PipelineCache`].
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct CachedRenderPipelineId(CachedPipelineId);

impl CachedRenderPipelineId {
    /// A placeholder id (`usize::MAX`) that refers to no queued pipeline.
    pub const INVALID: Self = CachedRenderPipelineId(usize::MAX);

    /// Returns the underlying index into the [`PipelineCache`]'s pipeline list.
    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}
56
/// Index of a cached compute pipeline inside a [`PipelineCache`].
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct CachedComputePipelineId(CachedPipelineId);

impl CachedComputePipelineId {
    /// A placeholder id (`usize::MAX`) that refers to no queued pipeline.
    pub const INVALID: Self = CachedComputePipelineId(usize::MAX);

    /// Returns the underlying index into the [`PipelineCache`]'s pipeline list.
    #[inline]
    pub fn id(&self) -> usize {
        self.0
    }
}
70
/// A pipeline tracked by a [`PipelineCache`]: the descriptor it was queued
/// with, plus the current state of its GPU pipeline object.
pub struct CachedPipeline {
    /// The descriptor this pipeline was queued with.
    pub descriptor: PipelineDescriptor,
    /// The current creation state of the GPU pipeline object.
    pub state: CachedPipelineState,
}
75
/// The state of a cached pipeline as it moves through creation.
#[cfg_attr(
    not(target_arch = "wasm32"),
    expect(
        clippy::large_enum_variant,
        reason = "See https://github.com/bevyengine/bevy/issues/19220"
    )
)]
#[derive(Debug)]
pub enum CachedPipelineState {
    /// The GPU pipeline object is queued for creation.
    Queued,
    /// The GPU pipeline object is being created asynchronously in a task.
    Creating(Task<Result<Pipeline, PipelineCacheError>>),
    /// The GPU pipeline object was created successfully and is ready for use.
    Ok(Pipeline),
    /// An error occurred while trying to create the GPU pipeline object.
    Err(PipelineCacheError),
}
95
96impl CachedPipelineState {
97 pub fn unwrap(&self) -> &Pipeline {
108 match self {
109 CachedPipelineState::Ok(pipeline) => pipeline,
110 CachedPipelineState::Queued => {
111 panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
112 }
113 CachedPipelineState::Creating(..) => {
114 panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
115 }
116 CachedPipelineState::Err(err) => panic!("{}", err),
117 }
118 }
119}
120
/// Key for a deduplicated pipeline layout: the ids of its bind group layouts
/// plus its push constant ranges.
type LayoutCacheKey = (Vec<BindGroupLayoutId>, Vec<PushConstantRange>);
/// A cache of [`PipelineLayout`]s, deduplicated by [`LayoutCacheKey`].
#[derive(Default)]
struct LayoutCache {
    layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>,
}
126
impl LayoutCache {
    /// Returns the pipeline layout for the given bind group layouts and push
    /// constant ranges, creating it on the device and caching it on first use.
    fn get(
        &mut self,
        render_device: &RenderDevice,
        bind_group_layouts: &[BindGroupLayout],
        push_constant_ranges: Vec<PushConstantRange>,
    ) -> Arc<WgpuWrapper<PipelineLayout>> {
        // Key on the bind group layout *ids* so the key is cheap to hash/compare.
        let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect();
        self.layouts
            .entry((bind_group_ids, push_constant_ranges))
            // `or_insert_with_key` lets the closure borrow the push constant
            // ranges back out of the key instead of cloning them.
            .or_insert_with_key(|(_, push_constant_ranges)| {
                let bind_group_layouts = bind_group_layouts
                    .iter()
                    .map(BindGroupLayout::value)
                    .collect::<Vec<_>>();
                Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout(
                    &PipelineLayoutDescriptor {
                        bind_group_layouts: &bind_group_layouts,
                        push_constant_ranges,
                        ..default()
                    },
                )))
            })
            .clone()
    }
}
153
/// Compiles a [`ShaderCacheSource`] into a [`ShaderModule`] on the given device.
///
/// A wgpu validation error scope is pushed around module creation so that
/// shader compilation failures can be captured and returned as
/// [`PipelineCacheError::CreateShaderModule`] rather than surfacing through
/// wgpu's uncaptured-error handler.
#[expect(
    clippy::result_large_err,
    reason = "See https://github.com/bevyengine/bevy/issues/19220"
)]
fn load_module(
    render_device: &RenderDevice,
    shader_source: ShaderCacheSource,
    validate_shader: &ValidateShader,
) -> Result<WgpuWrapper<ShaderModule>, PipelineCacheError> {
    let shader_source = match shader_source {
        #[cfg(feature = "shader_format_spirv")]
        ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data),
        #[cfg(not(feature = "shader_format_spirv"))]
        ShaderCacheSource::SpirV(_) => {
            unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders")
        }
        ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)),
        #[cfg(not(feature = "decoupled_naga"))]
        ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)),
    };
    let module_descriptor = ShaderModuleDescriptor {
        label: None,
        source: shader_source,
    };

    // Capture validation errors from module creation in a dedicated scope.
    render_device
        .wgpu_device()
        .push_error_scope(wgpu::ErrorFilter::Validation);

    let shader_module = WgpuWrapper::new(match validate_shader {
        ValidateShader::Enabled => {
            render_device.create_and_validate_shader_module(module_descriptor)
        }
        // Validation is deliberately skipped here; the caller opted in via
        // `ValidateShader::Disabled`.
        ValidateShader::Disabled => unsafe {
            render_device.create_shader_module(module_descriptor)
        },
    });

    let error = render_device.wgpu_device().pop_error_scope();

    // `pop_error_scope` returns a future; poll it once without blocking.
    // NOTE(review): if the future is not immediately ready (backend-dependent),
    // a validation error would not be surfaced here — confirm against the
    // wgpu backends in use.
    if let Some(Some(wgpu::Error::Validation { description, .. })) =
        bevy_tasks::futures::now_or_never(error)
    {
        return Err(PipelineCacheError::CreateShaderModule(description));
    }

    Ok(shader_module)
}
209
/// A cache of [`BindGroupLayout`]s, deduplicated by their descriptor.
#[derive(Default)]
struct BindGroupLayoutCache {
    bgls: HashMap<BindGroupLayoutDescriptor, BindGroupLayout>,
}
214
215impl BindGroupLayoutCache {
216 fn get(
217 &mut self,
218 render_device: &RenderDevice,
219 descriptor: BindGroupLayoutDescriptor,
220 ) -> BindGroupLayout {
221 self.bgls
222 .entry(descriptor)
223 .or_insert_with_key(|descriptor| {
224 render_device
225 .create_bind_group_layout(descriptor.label.as_ref(), &descriptor.entries)
226 })
227 .clone()
228 }
229}
230
/// Cache for render and compute pipelines.
///
/// Pipelines can be queued from any thread (queueing takes `&self`), and are
/// actually created — possibly asynchronously — when
/// [`PipelineCache::process_queue`] runs.
#[derive(Resource)]
pub struct PipelineCache {
    // The layout/shader caches are behind `Arc<Mutex<..>>` so they can be
    // shared with async pipeline-creation tasks.
    layout_cache: Arc<Mutex<LayoutCache>>,
    bindgroup_layout_cache: Arc<Mutex<BindGroupLayoutCache>>,
    shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>,
    device: RenderDevice,
    /// All pipelines ever queued; cached pipeline ids index into this `Vec`.
    pipelines: Vec<CachedPipeline>,
    /// Ids of pipelines that still need work on the next `process_queue` run.
    waiting_pipelines: HashSet<CachedPipelineId>,
    /// Pipelines queued since the last `process_queue`, behind a `Mutex` so
    /// the `queue_*_pipeline` methods can take `&self`.
    new_pipelines: Mutex<Vec<CachedPipeline>>,
    /// Shader defs injected into every shader extracted into this cache.
    global_shader_defs: Vec<ShaderDefVal>,
    /// When `true`, pipelines are compiled blocking instead of in async tasks.
    synchronous_pipeline_compilation: bool,
}
257
impl PipelineCache {
    /// Returns an iterator over all pipelines in the cache, in id order.
    pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> {
        self.pipelines.iter()
    }

    /// Returns an iterator over the ids of pipelines that still need
    /// processing (queued or currently compiling).
    pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ {
        self.waiting_pipelines.iter().copied()
    }

    /// Creates a new pipeline cache associated with the given render device
    /// and adapter.
    pub fn new(
        device: RenderDevice,
        render_adapter: RenderAdapter,
        synchronous_pipeline_compilation: bool,
    ) -> Self {
        // Shader defs injected into every shader processed through this cache.
        let mut global_shader_defs = Vec::new();
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        {
            global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        }

        // Apple simulator targets (`target_abi = "sim"`) also get the cube
        // array restriction.
        if cfg!(target_abi = "sim") {
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
        }

        global_shader_defs.push(ShaderDefVal::UInt(
            String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"),
            device.limits().max_storage_buffers_per_shader_stage,
        ));

        Self {
            shader_cache: Arc::new(Mutex::new(ShaderCache::new(
                device.features(),
                render_adapter.get_downlevel_capabilities().flags,
                load_module,
            ))),
            device,
            layout_cache: default(),
            bindgroup_layout_cache: default(),
            waiting_pipelines: default(),
            new_pipelines: default(),
            pipelines: default(),
            global_shader_defs,
            synchronous_pipeline_compilation,
        }
    }

    /// Gets the state of a cached render pipeline.
    ///
    /// Out-of-range ids (including [`CachedRenderPipelineId::INVALID`]) report
    /// [`CachedPipelineState::Queued`].
    #[inline]
    pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState {
        self.pipelines
            .get(id.0)
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    /// Gets the state of a cached compute pipeline.
    ///
    /// Out-of-range ids (including [`CachedComputePipelineId::INVALID`]) report
    /// [`CachedPipelineState::Queued`].
    #[inline]
    pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState {
        self.pipelines
            .get(id.0)
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    /// Gets the descriptor a cached render pipeline was queued with.
    ///
    /// # Panics
    ///
    /// Panics if the id is out of range or refers to a compute pipeline.
    #[inline]
    pub fn get_render_pipeline_descriptor(
        &self,
        id: CachedRenderPipelineId,
    ) -> &RenderPipelineDescriptor {
        match &self.pipelines[id.0].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor,
            PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(),
        }
    }

    /// Gets the descriptor a cached compute pipeline was queued with.
    ///
    /// # Panics
    ///
    /// Panics if the id is out of range or refers to a render pipeline.
    #[inline]
    pub fn get_compute_pipeline_descriptor(
        &self,
        id: CachedComputePipelineId,
    ) -> &ComputePipelineDescriptor {
        match &self.pipelines[id.0].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(),
            PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor,
        }
    }

    /// Returns the created render pipeline for `id`, or `None` if it is not
    /// (yet) in the [`CachedPipelineState::Ok`] state.
    #[inline]
    pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> {
        if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) =
            &self.pipelines.get(id.0)?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

    /// Blocks until the render pipeline for `id` has finished compiling.
    ///
    /// If the pipeline has not been added to the internal list yet, the queue
    /// is processed first so the entry exists.
    #[inline]
    pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) {
        if self.pipelines.len() <= id.0 {
            self.process_queue();
        }

        // NOTE: still panics if `id` was never queued (index out of bounds).
        let state = &mut self.pipelines[id.0].state;
        if let CachedPipelineState::Creating(task) = state {
            *state = match bevy_tasks::block_on(task) {
                Ok(p) => CachedPipelineState::Ok(p),
                Err(e) => CachedPipelineState::Err(e),
            };
        }
    }

    /// Returns the created compute pipeline for `id`, or `None` if it is not
    /// (yet) in the [`CachedPipelineState::Ok`] state.
    #[inline]
    pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> {
        if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) =
            &self.pipelines.get(id.0)?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

    /// Queues a render pipeline for creation and returns its id.
    ///
    /// Takes `&self`: new pipelines go into a mutex-guarded staging list and
    /// are moved into the main list by [`PipelineCache::process_queue`].
    pub fn queue_render_pipeline(
        &self,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedRenderPipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        // The final id is its eventual index in `self.pipelines`: the current
        // length plus the number of staged-but-not-yet-moved pipelines.
        let id = CachedRenderPipelineId(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    /// Queues a compute pipeline for creation and returns its id.
    ///
    /// Takes `&self`: new pipelines go into a mutex-guarded staging list and
    /// are moved into the main list by [`PipelineCache::process_queue`].
    pub fn queue_compute_pipeline(
        &self,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedComputePipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        // See `queue_render_pipeline` for how the id is derived.
        let id = CachedComputePipelineId(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    /// Returns the deduplicated bind group layout for the given descriptor,
    /// creating it on first use.
    pub fn get_bind_group_layout(
        &self,
        bind_group_layout_descriptor: &BindGroupLayoutDescriptor,
    ) -> BindGroupLayout {
        self.bindgroup_layout_cache
            .lock()
            .unwrap()
            .get(&self.device, bind_group_layout_descriptor.clone())
    }

    /// Inserts (or replaces) a shader in the shader cache, and re-queues every
    /// pipeline that depends on it.
    pub fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.set_shader(id, shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    /// Removes a shader from the shader cache, and re-queues every pipeline
    /// that depended on it.
    pub fn remove_shader(&mut self, shader: AssetId<Shader>) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.remove(shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    /// Begins creating the render pipeline for `descriptor`, returning either
    /// a `Creating` state (async task) or an immediate `Ok`/`Err` when
    /// compiling synchronously.
    fn start_create_render_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedPipelineState {
        // Clone the shared handles so the async task can own them.
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        // Bind group layouts are resolved up front, on the calling thread.
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                // Compile the vertex stage; shader errors abort pipeline creation.
                let vertex_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.vertex.shader.id(),
                    &descriptor.vertex.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                // The fragment stage is optional.
                let fragment_module = match &descriptor.fragment {
                    Some(fragment) => {
                        match shader_cache.get(
                            &device,
                            id,
                            fragment.shader.id(),
                            &fragment.shader_defs,
                        ) {
                            Ok(module) => Some(module),
                            Err(err) => return Err(err),
                        }
                    }
                    None => None,
                };

                // An empty layout/push-constant set means "let wgpu infer".
                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &bind_group_layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                // Release the shared caches before the (potentially slow)
                // driver-side pipeline creation below.
                drop((shader_cache, layout_cache));

                let vertex_buffer_layouts = descriptor
                    .vertex
                    .buffers
                    .iter()
                    .map(|layout| RawVertexBufferLayout {
                        array_stride: layout.array_stride,
                        attributes: &layout.attributes,
                        step_mode: layout.step_mode,
                    })
                    .collect::<Vec<_>>();

                let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
                    (
                        fragment_module.unwrap(),
                        fragment.entry_point.as_deref(),
                        fragment.targets.as_slice(),
                    )
                });

                let compilation_options = PipelineCompilationOptions {
                    constants: &[],
                    zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory,
                };

                // Translate our descriptor into wgpu's raw descriptor.
                let descriptor = RawRenderPipelineDescriptor {
                    multiview: None,
                    depth_stencil: descriptor.depth_stencil.clone(),
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    multisample: descriptor.multisample,
                    primitive: descriptor.primitive,
                    vertex: RawVertexState {
                        buffers: &vertex_buffer_layouts,
                        entry_point: descriptor.vertex.entry_point.as_deref(),
                        module: &vertex_module,
                        compilation_options: compilation_options.clone(),
                    },
                    fragment: fragment_data
                        .as_ref()
                        .map(|(module, entry_point, targets)| RawFragmentState {
                            entry_point: entry_point.as_deref(),
                            module,
                            targets,
                            compilation_options,
                        }),
                    cache: None,
                };

                Ok(Pipeline::RenderPipeline(
                    device.create_render_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    /// Begins creating the compute pipeline for `descriptor`; see
    /// `start_create_render_pipeline` for the overall flow.
    fn start_create_compute_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedPipelineState {
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        // Bind group layouts are resolved up front, on the calling thread.
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let compute_module = match shader_cache.get(
                    &device,
                    id,
                    descriptor.shader.id(),
                    &descriptor.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                // An empty layout/push-constant set means "let wgpu infer".
                let layout =
                    if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() {
                        None
                    } else {
                        Some(layout_cache.get(
                            &device,
                            &bind_group_layout,
                            descriptor.push_constant_ranges.to_vec(),
                        ))
                    };

                // Release the shared caches before driver-side creation.
                drop((shader_cache, layout_cache));

                let descriptor = RawComputePipelineDescriptor {
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    module: &compute_module,
                    entry_point: descriptor.entry_point.as_deref(),
                    compilation_options: PipelineCompilationOptions {
                        constants: &[],
                        zero_initialize_workgroup_memory: descriptor
                            .zero_initialize_workgroup_memory,
                    },
                    cache: None,
                };

                Ok(Pipeline::ComputePipeline(
                    device.create_compute_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    /// Processes the pipeline queue: moves newly queued pipelines into the
    /// main list, then advances every waiting pipeline one step.
    pub fn process_queue(&mut self) {
        // Take ownership so `process_pipeline` can borrow `self` mutably.
        let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines);
        let mut pipelines = mem::take(&mut self.pipelines);

        {
            let mut new_pipelines = self
                .new_pipelines
                .lock()
                .unwrap_or_else(PoisonError::into_inner);
            for new_pipeline in new_pipelines.drain(..) {
                // Matches the id handed out by `queue_*_pipeline`.
                let id = pipelines.len();
                pipelines.push(new_pipeline);
                waiting_pipelines.insert(id);
            }
        }

        for id in waiting_pipelines {
            self.process_pipeline(&mut pipelines[id], id);
        }

        self.pipelines = pipelines;
    }

    /// Advances a single pipeline one step: starts creation for `Queued`,
    /// polls `Creating` tasks, and retries or logs `Err` states. Pipelines
    /// that are not yet terminal are re-inserted into `waiting_pipelines`.
    fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) {
        match &mut cached_pipeline.state {
            CachedPipelineState::Queued => {
                cached_pipeline.state = match &cached_pipeline.descriptor {
                    PipelineDescriptor::RenderPipelineDescriptor(descriptor) => {
                        self.start_create_render_pipeline(id, *descriptor.clone())
                    }
                    PipelineDescriptor::ComputePipelineDescriptor(descriptor) => {
                        self.start_create_compute_pipeline(id, *descriptor.clone())
                    }
                };
            }

            // Poll the creation task without blocking.
            CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) {
                Some(Ok(pipeline)) => {
                    cached_pipeline.state = CachedPipelineState::Ok(pipeline);
                    return;
                }
                Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err),
                _ => (),
            },

            CachedPipelineState::Err(err) => match err {
                // Retryable: the shader (or one of its imports) may simply not
                // have loaded yet, so go back to `Queued`.
                PipelineCacheError::ShaderNotLoaded(_)
                | PipelineCacheError::ShaderImportNotYetAvailable => {
                    cached_pipeline.state = CachedPipelineState::Queued;
                }

                // Shader processing failed: log it (terminal state).
                PipelineCacheError::ProcessShaderError(err) => {
                    let error_detail =
                        err.emit_to_string(&self.shader_cache.lock().unwrap().composer);
                    // Opt-in extra context via the VERBOSE_SHADER_ERROR env var
                    // (any value other than empty/"0"/"false" enables it).
                    if std::env::var("VERBOSE_SHADER_ERROR")
                        .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false"))
                    {
                        error!("{}", pipeline_error_context(cached_pipeline));
                    }
                    error!("failed to process shader error:\n{}", error_detail);
                    return;
                }
                // Module creation failed: log it (terminal state).
                PipelineCacheError::CreateShaderModule(description) => {
                    error!("failed to create shader module: {}", description);
                    return;
                }
            },

            CachedPipelineState::Ok(_) => return,
        }

        // Not terminal yet: process again on the next `process_queue` run.
        self.waiting_pipelines.insert(id);
    }

    /// System that processes the pipeline queue each time it runs.
    pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) {
        cache.process_queue();
    }

    /// Extract system: mirrors shader asset events into the cache, applying
    /// the cache's global shader defs to every added/modified shader.
    pub(crate) fn extract_shaders(
        mut cache: ResMut<Self>,
        shaders: Extract<Res<Assets<Shader>>>,
        mut events: Extract<MessageReader<AssetEvent<Shader>>>,
    ) {
        for event in events.read() {
            #[expect(
                clippy::match_same_arms,
                reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon."
            )]
            match event {
                AssetEvent::Added { id } | AssetEvent::Modified { id } => {
                    if let Some(shader) = shaders.get(*id) {
                        // Inject the global defs before handing the shader to
                        // the cache.
                        let mut shader = shader.clone();
                        shader.shader_defs.extend(cache.global_shader_defs.clone());

                        cache.set_shader(*id, shader);
                    }
                }
                AssetEvent::Removed { id } => cache.remove_shader(*id),
                AssetEvent::Unused { .. } => {}
                AssetEvent::LoadedWithDependencies { .. } => {
                    // TODO: handle this event (currently a deliberate no-op).
                }
            }
        }
    }
}
807
808fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String {
809 fn format(
810 shader: &Handle<Shader>,
811 entry: &Option<Cow<'static, str>>,
812 shader_defs: &[ShaderDefVal],
813 ) -> String {
814 let source = match shader.path() {
815 Some(path) => path.path().to_string_lossy().to_string(),
816 None => String::new(),
817 };
818 let entry = match entry {
819 Some(entry) => entry.to_string(),
820 None => String::new(),
821 };
822 let shader_defs = shader_defs
823 .iter()
824 .flat_map(|def| match def {
825 ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()),
826 ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")),
827 ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")),
828 _ => None,
829 })
830 .collect::<Vec<_>>()
831 .join(", ");
832 format!("{source}:{entry}\nshader defs: {shader_defs}")
833 }
834 match &cached_pipeline.descriptor {
835 PipelineDescriptor::RenderPipelineDescriptor(desc) => {
836 let vert = &desc.vertex;
837 let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs);
838 let Some(frag) = desc.fragment.as_ref() else {
839 return vert_str;
840 };
841 let frag_str = format(&frag.shader, &frag.entry_point, &frag.shader_defs);
842 format!("vertex {vert_str}\nfragment {frag_str}")
843 }
844 PipelineDescriptor::ComputePipelineDescriptor(desc) => {
845 format(&desc.shader, &desc.entry_point, &desc.shader_defs)
846 }
847 }
848}
849
/// Runs a pipeline-creation future, either asynchronously on the
/// `AsyncComputeTaskPool` (returning [`CachedPipelineState::Creating`]) or,
/// when `sync` is true, blocking until it resolves.
///
/// This is the native multi-threaded variant; see the sibling definition for
/// wasm/macOS/single-threaded builds.
#[cfg(all(
    not(target_arch = "wasm32"),
    not(target_os = "macos"),
    feature = "multi_threaded"
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    sync: bool,
) -> CachedPipelineState {
    if !sync {
        return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
    }

    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}
868
/// Fallback variant for wasm, macOS, and single-threaded builds: pipeline
/// creation is always performed blocking, so `_sync` is ignored.
#[cfg(any(
    target_arch = "wasm32",
    target_os = "macos",
    not(feature = "multi_threaded")
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, PipelineCacheError>> + Send + 'static,
    _sync: bool,
) -> CachedPipelineState {
    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}