Divide Framework 0.1
A free and open-source 3D Framework under heavy development
Loading...
Searching...
No Matches
ShaderProgram.cpp
Go to the documentation of this file.
1
2
5
7
12
16
19
23
extern "C"
{
    #include <cppdef.h>
    #include <fpp.h>
}

#include <cstdarg> // va_copy / va_end (Preprocessor::Callback::Error)
#include <cstring> // memcpy (Preprocessor::Callback::Scratch)

#include <vulkan/vulkan.hpp>

32namespace Divide
33{
34
35 namespace TypeUtil
36 {
37 const char* DescriptorSetUsageToString( const DescriptorSetUsage setUsage ) noexcept
38 {
39 return Names::descriptorSetUsage[to_base( setUsage )];
40 }
41
        // NOTE(review): the enclosing function's signature (original line 42 — the
        // name-to-enum counterpart of DescriptorSetUsageToString above) and its
        // fall-through return (original line 52) were stripped by the documentation
        // capture; confirm against the upstream file before editing.
        {
            // Linear, case-sensitive scan of the usage-name table.
            for ( U8 i = 0; i < to_U8( DescriptorSetUsage::COUNT ); ++i )
            {
                if ( strcmp( name.c_str(), Names::descriptorSetUsage[i] ) == 0 )
                {
                    // Table index i maps directly onto the enum value.
                    return static_cast<DescriptorSetUsage>(i);
                }
            }

        }
54 };
55
57
62
67
71 eastl::fixed_vector<ShaderProgram*, U16_MAX, false> ShaderProgram::s_usedShaderPrograms;
74
76 std::atomic_int ShaderProgram::s_shaderCount;
77
79
81 []( const std::string_view atomName, const FileUpdateEvent evt )
82 {
83 ShaderProgram::OnAtomChange( atomName, evt );
84 }
85 );
86
87 namespace Preprocessor
88 {
        /// Per-thread state handed to the fpp callbacks while preprocessing a
        /// single shader source (see Callback:: below and PreProcessMacros()).
        struct WorkData
        {
            string _input;              // Source text being preprocessed ('\r' stripped by PreProcessMacros)
            std::string_view _fileName; // Name used in error reports; a view into the caller's storage

            std::array<char, 16 << 10> _scratch{}; // Arena for strings fpp keeps raw pointers to (input name)
            string _depends = "";                  // Accumulated dependency list: one "\n<file>" per include
            string _default = "";
            string _output = "";                   // Preprocessed result, built one character at a time

            // NOTE(review): the cursor members used by the callbacks (_fGetsPos,
            // _scratchPos — original lines 99-100) were stripped in this capture.
            bool _firstError = true; // Ensures the per-file error header prints only once
        };
103
104 constexpr U8 g_maxTagCount = 64;
105
106 NO_DESTROY thread_local static WorkData g_workData;
107 NO_DESTROY thread_local static fppTag g_tags[g_maxTagCount]{};
108 NO_DESTROY thread_local static fppTag* g_tagHead = g_tags;
109
110 namespace Callback
111 {
112 FORCE_INLINE void AddDependency( const char* file, void* userData )
113 {
114 static_cast<WorkData*>(userData)->_depends.append(Util::StringFormat("\n{}", file));
115 }
116
117 static char* Input( char* buffer, const int size, void* userData ) noexcept
118 {
119 WorkData* work = static_cast<WorkData*>(userData);
120 int i = 0;
121 for ( char ch = work->_input[work->_fGetsPos];
122 work->_fGetsPos < work->_input.size() && i < size - 1; ch = work->_input[++work->_fGetsPos] )
123 {
124 buffer[i++] = ch;
125
126 if ( ch == '\n' || i == size )
127 {
128 buffer[i] = '\0';
129 work->_fGetsPos++;
130 return buffer;
131 }
132 }
133
134 return nullptr;
135 }
136
137 FORCE_INLINE void Output( const int ch, void* userData )
138 {
139 static_cast<WorkData*>(userData)->_output += static_cast<char>(ch);
140 }
141
142 static char* Scratch( const std::string_view fileName )
143 {
144 char* result = &g_workData._scratch[g_workData._scratchPos];
145 strncpy( result, fileName.data(),fileName.size() );
146 g_workData._scratchPos += to_U32( fileName.size() ) + 1;
147
148 return result;
149 }
150
151 static void Error( void* userData, const char* format, va_list args )
152 {
153 WorkData* work = static_cast<WorkData*>(userData);
154
155 string message;
156 const size_t length = to_size(vsnprintf( nullptr, 0, format, args ) + 1);
157 message.resize( length );
158 vsnprintf( message.data(), length, format, args );
159
160 if ( work->_firstError )
161 {
162 work->_firstError = false;
163 Console::errorfn( "------------------------------------------" );
164 Console::errorfn( LOCALE_STR( "ERROR_GLSL_PARSE_ERROR_NAME_SHORT" ), work->_fileName );
165 }
166
167 if ( !message.empty() )
168 {
169 Console::errorfn( LOCALE_STR( "ERROR_GLSL_PARSE_ERROR_MSG" ), message );
170 }
171 else
172 {
173 Console::errorfn( "------------------------------------------\n" );
174 }
175 }
176 }
177
        /// Per-thread, one-time setup of the fpp tag table. Fills the thread-local
        /// g_tags array with the static flags and callbacks; PreProcessMacros()
        /// later appends the per-invocation tags (input name + END) after g_tagHead.
        static void OnThreadCreated()
        {
            // Append a tag carrying an arbitrary payload pointer.
            const auto setTag = []( const int tag, void* value )
            {
                g_tagHead->tag = tag;
                g_tagHead->data = value;
                ++g_tagHead;
            };

            // Append a boolean tag. A single shared static `true` serves as the
            // non-null payload — presumably fpp only distinguishes null/non-null
            // for these flags (confirm against fpp.h).
            const auto setFlag = []( const int tag, bool flag )
            {
                static bool data = true;

                g_tagHead->tag = tag;
                g_tagHead->data = (flag ? &data : nullptr);
                ++g_tagHead;
            };

            setFlag( FPPTAG_KEEPCOMMENTS, true );
            setFlag( FPPTAG_IGNORE_NONFATAL, false );
            setFlag( FPPTAG_IGNORE_CPLUSPLUS, false );
            setFlag( FPPTAG_LINE, false );
            setFlag( FPPTAG_WARNILLEGALCPP, false );
            setFlag( FPPTAG_OUTPUTLINE, false );
            setFlag( FPPTAG_IGNOREVERSION, false );
            setFlag( FPPTAG_OUTPUTINCLUDES, false );
            setFlag( FPPTAG_OUTPUTBALANCE, true );
            setFlag( FPPTAG_OUTPUTSPACE, true );
            setFlag( FPPTAG_NESTED_COMMENTS, true );
            setFlag( FPPTAG_WARN_NESTED_COMMENTS, false );
            setFlag( FPPTAG_WARNMISSINCLUDE, false );
            setFlag( FPPTAG_RIGHTCONCAT, true );
            setFlag( FPPTAG_DISPLAYFUNCTIONS, false );
            setFlag( FPPTAG_WEBMODE, false );

            // Wire up the I/O + diagnostics callbacks defined above.
            setTag( FPPTAG_DEPENDS, (void*)Callback::AddDependency );
            setTag( FPPTAG_INPUT, (void*)Callback::Input );
            setTag( FPPTAG_OUTPUT, (void*)Callback::Output );
            setTag( FPPTAG_ERROR, (void*)Callback::Error );

            // All callbacks receive the thread-local work state as userData.
            setTag( FPPTAG_USERDATA, &g_workData );

        }
221
        /// Runs `sourceInOut` through the fpp C preprocessor in place.
        /// Returns false on empty input or preprocessor failure; on failure the
        /// source string is left untouched.
        static bool PreProcessMacros( const std::string_view fileName, string& sourceInOut )
        {
            if ( sourceInOut.empty() )
            {
                return false;
            }

            // Reset the per-thread work state for this invocation.
            g_workData = {};
            g_workData._input = sourceInOut;
            g_workData._fileName = fileName;
            std::erase( g_workData._input, '\r' ); // normalise to LF-only line endings

            // Append the per-call tags directly after the static table that
            // OnThreadCreated() built; these two slots are rewritten on every call.
            fppTag* tagptr = g_tagHead;

            tagptr->tag = FPPTAG_INPUT_NAME;
            tagptr->data = (void*)Callback::Scratch( fileName );
            ++tagptr;

            tagptr->tag = FPPTAG_END;
            tagptr->data = nullptr;
            ++tagptr;

            bool ret = false;
            if ( fppPreProcess( g_tags ) == 0 )
            {
                // Success: replace the caller's source with the preprocessed text.
                ret = true;
                sourceInOut = g_workData._output;
            }

            return ret;
        }
253
254 } //Preprocessor
255
256 namespace
257 {
259 bool s_useShaderCache = true;
260 bool s_targetVulkan = false;
261
266
268 {
269 s_textureSlot = 0u;
270 s_imageSlot = 0u;
271 s_bufferSlot = 0u;
272 }
273
275 {
276 return (s_targetVulkan ? Paths::Shaders::g_cacheLocationVK : Paths::Shaders::g_cacheLocationGL);
277 }
278
280 {
281 return Paths::Shaders::g_cacheLocation / Paths::g_buildTypeLocation;
282 }
283
285 {
287 }
288
290 {
291 return ShaderParentCacheLocation() / Paths::Shaders::g_cacheLocationSpv;
292 }
293
295 {
296 return ShaderParentCacheLocation() / Paths::Shaders::g_cacheLocationRefl;
297 }
298
300 {
301 return ShaderParentCacheLocation() / Paths::Shaders::g_cacheLocationText;
302 }
303
304 [[nodiscard]] ResourcePath SpvTargetName( const Str<256>& fileName )
305 {
306 return ResourcePath{ fileName + "." + Paths::Shaders::g_SPIRVExt.c_str() };
307 }
308
309 [[nodiscard]] ResourcePath ReflTargetName( const Str<256>& fileName )
310 {
311 return ResourcePath { fileName + "." + Paths::Shaders::g_ReflectionExt.c_str() };
312 }
313
        /// Returns true when the cached artefact `fileName` of the given cache type
        /// is usable: caching enabled, and the cache file is newer than both its
        /// source shader and the newest known shader atom.
        [[nodiscard]] bool ValidateCacheLocked( const ShaderProgram::LoadData::ShaderCacheType type, const Str<256>& sourceFileName, const Str<256>& fileName )
        {
            if ( !s_useShaderCache )
            {
                return false;
            }

            //"There are only two hard things in Computer Science: cache invalidation and naming things" - Phil Karlton
            //"There are two hard things in computer science: cache invalidation, naming things, and off-by-one errors." - Leon Bambrick

            // Get our source file's "last written" timestamp. Every cache file that's older than this is automatically out-of-date
            U64 lastWriteTime = 0u, lastWriteTimeCache = 0u;
            const ResourcePath sourceShaderFullPath = Paths::Shaders::GLSL::g_GLSLShaderLoc / sourceFileName;
            if ( fileLastWriteTime( sourceShaderFullPath, lastWriteTime ) != FileError::NONE )
            {
                return false;
            }

            ResourcePath filePath;
            switch ( type )
            {
                // NOTE(review): the remaining cache-type cases (original lines 335,
                // 337-338 — presumably SPIRV/reflection) were stripped in this capture.
                case ShaderProgram::LoadData::ShaderCacheType::GLSL: filePath = TxtCacheLocation() / fileName; break;
            }

            // Out-of-date if the cache file is missing, older than the source, or
            // older than the most recently touched shader atom.
            if ( fileLastWriteTime( filePath, lastWriteTimeCache ) != FileError::NONE ||
                 lastWriteTimeCache < lastWriteTime ||
                 lastWriteTimeCache < s_newestShaderAtomWriteTime )
            {
                return false;
            }

            return true;
        }
350
        /// Deletes a single cached artefact of the given type. A missing file is
        /// not considered a failure.
        [[nodiscard]] bool DeleteCacheLocked( const ShaderProgram::LoadData::ShaderCacheType type, const Str<256>& fileName )
        {
            FileError err = FileError::NONE;
            switch ( type )
            {
                // NOTE(review): the other cache-type cases (original lines 356, 358 —
                // presumably SPIRV/reflection) were stripped in this capture.
                case ShaderProgram::LoadData::ShaderCacheType::GLSL: err = deleteFile( TxtCacheLocation(), fileName.c_str() ); break;
                case ShaderProgram::LoadData::ShaderCacheType::COUNT: err = FileError::FILE_EMPTY; return false;
            }

            return err == FileError::NONE || err == FileError::FILE_NOT_FOUND;
        }
364
        // NOTE(review): several lines of this function (originals 366-368 and
        // 371-373 — presumably the opening brace, a "delete all cache types"
        // condition, and its per-type loop) were stripped in this capture, so the
        // braces below appear unbalanced. Confirm against upstream before editing.
        [[nodiscard]] bool DeleteCache( const ShaderProgram::LoadData::ShaderCacheType type, const Str<256>& fileName )
        {
            bool ret = false;
            return ret;

        }

        return DeleteCacheLocked( type, fileName );
        }
379 }
380 };
381
    /// Initialises GLSW (the shader-string library) for the current context and
    /// registers every engine-wide directive token — version pragma, cross-API
    /// defines, engine constants, varyings and per-stage interface blocks — that
    /// is prepended to GLSL shaders at load time. Returns true when GLSW is ready.
    static bool InitGLSW( const RenderAPI renderingAPI, const Configuration& config)
    {
        // Registers `entry` for one stage, or for all stages when type == COUNT.
        const auto AppendToShaderHeader = []( const ShaderType type, const string& entry )
        {
            glswAddDirectiveToken( type != ShaderType::COUNT ? Names::shaderTypes[to_U8( type )] : "", entry.c_str() );
        };

        // Emits the DESCRIPTOR_SET_RESOURCE* macros. OpenGL flattens (set,binding)
        // pairs into plain binding slots via CONCATENATE; Vulkan keeps both.
        const auto AppendResourceBindingSlots = [&AppendToShaderHeader]( const bool targetOpenGL )
        {

            if ( targetOpenGL )
            {
                const auto ApplyDescriptorSetDefines = [&AppendToShaderHeader]( const DescriptorSetUsage setUsage )
                {
                    for ( U8 i = 0u; i < MAX_BINDINGS_PER_DESCRIPTOR_SET; ++i )
                    {
                        const U8 glSlot = ShaderProgram::GetGLBindingForDescriptorSlot( setUsage, i );
                        AppendToShaderHeader( ShaderType::COUNT, Util::StringFormat( "#define {}_{} {}",
                                              // NOTE(review): the first format argument (original line 400 —
                                              // presumably the set-usage name) was stripped in this capture;
                                              // confirm against upstream.
                                              i,
                                              glSlot ).c_str() );
                    }
                };
                ApplyDescriptorSetDefines( DescriptorSetUsage::PER_DRAW );
                ApplyDescriptorSetDefines( DescriptorSetUsage::PER_BATCH );
                ApplyDescriptorSetDefines( DescriptorSetUsage::PER_PASS );
                ApplyDescriptorSetDefines( DescriptorSetUsage::PER_FRAME );

                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE(SET, BINDING) layout(binding = CONCATENATE(SET, BINDING))" );
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE_OFFSET(SET, BINDING, OFFSET) layout(binding = CONCATENATE(SET, BINDING), offset = OFFSET)" );
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE_LAYOUT(SET, BINDING, LAYOUT) layout(binding = CONCATENATE(SET, BINDING), LAYOUT)" );
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE_OFFSET_LAYOUT(SET, BINDING, OFFSET, LAYOUT) layout(binding = CONCATENATE(SET, BINDING), offset = OFFSET, LAYOUT)" );
            }
            else
            {
                for ( U8 i = 0u; i < to_base( DescriptorSetUsage::COUNT ); ++i )
                {
                    AppendToShaderHeader( ShaderType::COUNT, Util::StringFormat( "#define {} {}", TypeUtil::DescriptorSetUsageToString( static_cast<DescriptorSetUsage>(i) ), i ).c_str() );
                }
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE(SET, BINDING) layout(set = SET, binding = BINDING)" );
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE_OFFSET(SET, BINDING, OFFSET) layout(set = SET, binding = BINDING, offset = OFFSET)" );
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE_LAYOUT(SET, BINDING, LAYOUT) layout(set = SET, binding = BINDING, LAYOUT)" );
                AppendToShaderHeader( ShaderType::COUNT, "#define DESCRIPTOR_SET_RESOURCE_OFFSET_LAYOUT(SET, BINDING, OFFSET, LAYOUT) layout(set = SET, binding = BINDING, offset = OFFSET, LAYOUT)" );
            }
        };

        // Varyings shared by every stage interface block declared further down.
        constexpr std::pair<const char*, const char*> shaderVaryings[] =
        {
            { "vec4" , "_vertexW"},
            { "vec4" , "_vertexWV"},
            { "vec3" , "_normalWV"},
            { "vec2" , "_texCoord"},
            { "flat uvec4" , "_indirectionIDs"},
            { "flat uint" , "_LoDLevel"},
        };

        constexpr const char* crossTypeGLSLHLSL = "#define float2 vec2\n"
            "#define float3 vec3\n"
            "#define float4 vec4\n"
            "#define int2 ivec2\n"
            "#define int3 ivec3\n"
            "#define int4 ivec4\n"
            "#define float2x2 mat2\n"
            "#define float3x3 mat3\n"
            "#define float4x4 mat4\n"
            "#define lerp mix";

        // Builds the GLSL PassData() helper that copies each varying from _in to
        // _out (per-invocation indexed for the tessellation-control stage).
        const auto getPassData = [&]( const ShaderType type ) -> string
        {
            string baseString = " _out.{} = _in[index].{};";
            if ( type == ShaderType::TESSELLATION_CTRL )
            {
                baseString = " _out[gl_InvocationID].{} = _in[index].{};";
            }

            string passData( "void PassData(in int index) {" );
            passData.append( "\n" );
            for ( const auto& [varType, name] : shaderVaryings )
            {
                passData.append( Util::StringFormat( baseString.c_str(), name, name ) );
                passData.append( "\n" );
            }

            passData.append( "#if defined(HAS_VELOCITY)\n" );
            passData.append( Util::StringFormat( baseString.c_str(), "_prevVertexWVP", "_prevVertexWVP" ) );
            passData.append( "\n#endif //HAS_VELOCITY\n" );

            passData.append( "#if defined(ENABLE_TBN)\n" );
            passData.append( Util::StringFormat( baseString.c_str(), "_tbnWV", "_tbnWV" ) );
            passData.append( "\n#endif //ENABLE_TBN\n" );

            passData.append( "}\n" );

            return passData;
        };

        // Declares the shared varying list (plus conditional extras) for a stage's
        // interface block.
        const auto addVaryings = [&]( const ShaderType type )
        {
            for ( const auto& [varType, name] : shaderVaryings )
            {
                AppendToShaderHeader( type, Util::StringFormat( " {} {};", varType, name ) );
            }
            AppendToShaderHeader( type, "#if defined(HAS_VELOCITY)" );
            AppendToShaderHeader( type, " vec4 _prevVertexWVP;" );
            AppendToShaderHeader( type, "#endif //HAS_VELOCITY" );

            AppendToShaderHeader( type, "#if defined(ENABLE_TBN)" );
            AppendToShaderHeader( type, " mat3 _tbnWV;" );
            AppendToShaderHeader( type, "#endif //ENABLE_TBN" );
        };

        // Initialize GLSW
        I32 glswState = glswGetCurrentContext() ? 1 : -1;

        if (glswState == -1)
        {
            glswState = glswInit();
            DIVIDE_ASSERT( glswState == 1 );
        }

        const U16 reflectionProbeRes = to_U16( nextPOW2( CLAMPED( to_U32( config.rendering.reflectionProbeResolution ), 16u, 4096u ) - 1u ) );

        static_assert(Config::MAX_BONE_COUNT_PER_NODE <= 1024, "ShaderProgram error: too many bones per vert. Can't fit inside UBO");

        // Add our engine specific defines and various code pieces to every GLSL shader
        // Add version as the first shader statement, followed by copyright notice
        AppendToShaderHeader( ShaderType::COUNT, renderingAPI == RenderAPI::OpenGL ? "#version 460 core" : "#version 450" );
        AppendToShaderHeader( ShaderType::COUNT, "//_PROGRAM_NAME_\\" );
        AppendToShaderHeader( ShaderType::COUNT, "/*Copyright 2009-2022 DIVIDE-Studio*/" );

        if ( renderingAPI == RenderAPI::OpenGL )
        {
            //AppendToShaderHeader(ShaderType::COUNT, "#extension GL_ARB_gpu_shader5 : require");
            AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_OPENGL" );
            AppendToShaderHeader( ShaderType::COUNT, "#define dvd_VertexIndex gl_VertexID" );
            AppendToShaderHeader( ShaderType::COUNT, "#define dvd_InstanceIndex gl_InstanceID" );
            AppendToShaderHeader( ShaderType::COUNT, "#define DVD_GL_BASE_INSTANCE gl_BaseInstance" );
            AppendToShaderHeader( ShaderType::COUNT, "#define DVD_GL_BASE_VERTEX gl_BaseVertex" );
            AppendToShaderHeader( ShaderType::COUNT, "#define DVD_GL_DRAW_ID gl_DrawID" );
        }
        else
        {
            AppendToShaderHeader( ShaderType::COUNT, "#extension GL_ARB_shader_draw_parameters : require" );
            AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_VULKAN" );
            AppendToShaderHeader( ShaderType::COUNT, "#define dvd_VertexIndex gl_VertexIndex" );
            AppendToShaderHeader( ShaderType::COUNT, "#define dvd_InstanceIndex gl_InstanceIndex" );
            AppendToShaderHeader( ShaderType::COUNT, "#define DVD_GL_BASE_INSTANCE gl_BaseInstanceARB" );
            AppendToShaderHeader( ShaderType::COUNT, "#define DVD_GL_BASE_VERTEX gl_BaseVertexARB" );
            AppendToShaderHeader( ShaderType::COUNT, "#define DVD_GL_DRAW_ID gl_DrawIDARB" );
        }

        AppendToShaderHeader( ShaderType::COUNT, crossTypeGLSLHLSL );

        // Add current build environment information to the shaders
        if constexpr ( Config::Build::IS_DEBUG_BUILD )
        {
            AppendToShaderHeader( ShaderType::COUNT, "#define _DEBUG" );
        }
        else if constexpr ( Config::Build::IS_PROFILE_BUILD )
        {
            AppendToShaderHeader( ShaderType::COUNT, "#define _PROFILE" );
        }
        else
        {
            AppendToShaderHeader( ShaderType::COUNT, "#define _RELEASE" );
        }
        AppendToShaderHeader( ShaderType::COUNT, "#define CONCATENATE_IMPL(s1, s2) s1 ## _ ## s2" );
        AppendToShaderHeader( ShaderType::COUNT, "#define CONCATENATE(s1, s2) CONCATENATE_IMPL(s1, s2)" );

        // Shader stage level reflection system. A shader stage must know what stage it's used for
        AppendToShaderHeader( ShaderType::VERTEX, "#define VERT_SHADER" );
        AppendToShaderHeader( ShaderType::FRAGMENT, "#define FRAG_SHADER" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "#define GEOM_SHADER" );
        AppendToShaderHeader( ShaderType::COMPUTE, "#define COMPUTE_SHADER" );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "#define TESS_EVAL_SHADER" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#define TESS_CTRL_SHADER" );

        // This line gets replaced in every shader at load with the custom list of defines specified by the material
        AppendToShaderHeader( ShaderType::COUNT, "_CUSTOM_DEFINES__" );

        constexpr float Z_TEST_SIGMA = 0.00001f;// 1.f / U8_MAX;
        // ToDo: Automate adding of buffer bindings by using, for example, a TypeUtil::bufferBindingToString -Ionut
        AppendToShaderHeader( ShaderType::COUNT, "#define ALPHA_DISCARD_THRESHOLD " + Util::to_string( Config::ALPHA_DISCARD_THRESHOLD ) + "f" );
        AppendToShaderHeader( ShaderType::COUNT, "#define Z_TEST_SIGMA " + Util::to_string( Z_TEST_SIGMA ) + "f" );
        AppendToShaderHeader( ShaderType::COUNT, "#define INV_Z_TEST_SIGMA " + Util::to_string( 1.f - Z_TEST_SIGMA ) + "f" );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_CSM_SPLITS_PER_LIGHT " + Util::to_string( Config::Lighting::MAX_CSM_SPLITS_PER_LIGHT ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_SHADOW_CASTING_LIGHTS " + Util::to_string( Config::Lighting::MAX_SHADOW_CASTING_LIGHTS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_SHADOW_CASTING_DIR_LIGHTS " + Util::to_string( Config::Lighting::MAX_SHADOW_CASTING_DIRECTIONAL_LIGHTS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_SHADOW_CASTING_POINT_LIGHTS " + Util::to_string( Config::Lighting::MAX_SHADOW_CASTING_POINT_LIGHTS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_SHADOW_CASTING_SPOT_LIGHTS " + Util::to_string( Config::Lighting::MAX_SHADOW_CASTING_SPOT_LIGHTS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define WORLD_AO_LAYER_INDEX " + Util::to_string( ShadowMap::WORLD_AO_LAYER_INDEX ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_LIGHTS " + Util::to_string( Config::Lighting::MAX_ACTIVE_LIGHTS_PER_FRAME ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_VISIBLE_NODES " + Util::to_string( Config::MAX_VISIBLE_NODES ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_CONCURRENT_MATERIALS " + Util::to_string( Config::MAX_CONCURRENT_MATERIALS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_CLIP_PLANES " + Util::to_string( Config::MAX_CLIP_DISTANCES ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_CULL_DISTANCES " + Util::to_string( Config::MAX_CULL_DISTANCES ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_ACCUMULATION " + Util::to_string( to_base( GFXDevice::ScreenTargets::ACCUMULATION ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_ALBEDO " + Util::to_string( to_base( GFXDevice::ScreenTargets::ALBEDO ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_VELOCITY " + Util::to_string( to_base( GFXDevice::ScreenTargets::VELOCITY ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_NORMALS " + Util::to_string( to_base( GFXDevice::ScreenTargets::NORMALS ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_REVEALAGE " + Util::to_string( to_base( GFXDevice::ScreenTargets::REVEALAGE ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define TARGET_MODULATE " + Util::to_string( to_base( GFXDevice::ScreenTargets::MODULATE ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define CLUSTERS_X_THREADS " + Util::to_string( Config::Lighting::ClusteredForward::CLUSTERS_X_THREADS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define CLUSTERS_Y_THREADS " + Util::to_string( Config::Lighting::ClusteredForward::CLUSTERS_Y_THREADS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define CLUSTERS_Z_THREADS " + Util::to_string( Config::Lighting::ClusteredForward::CLUSTERS_Z_THREADS ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define CLUSTERS_X " + Util::to_string( Config::Lighting::ClusteredForward::CLUSTERS_X ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define CLUSTERS_Y " + Util::to_string( Config::Lighting::ClusteredForward::CLUSTERS_Y ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define CLUSTERS_Z " + Util::to_string( Config::Lighting::ClusteredForward::CLUSTERS_Z ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define SKY_LIGHT_LAYER_IDX " + Util::to_string( SceneEnvironmentProbePool::SkyProbeLayerIndex() ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MAX_LIGHTS_PER_CLUSTER " + Util::to_string( config.rendering.numLightsPerCluster ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define REFLECTION_PROBE_RESOLUTION " + Util::to_string( reflectionProbeRes ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define REFLECTION_PROBE_MIP_COUNT " + Util::to_string( to_U32( std::log2( reflectionProbeRes ) ) ) );

        AppendResourceBindingSlots( renderingAPI == RenderAPI::OpenGL );

        for ( U8 i = 0u; i < to_base( TextureOperation::COUNT ); ++i )
        {
            AppendToShaderHeader( ShaderType::COUNT, Util::StringFormat( "#define TEX_{} {}", TypeUtil::TextureOperationToString( static_cast<TextureOperation>(i) ), i ).c_str() );
        }
        AppendToShaderHeader( ShaderType::COUNT, Util::StringFormat( "#define WORLD_X_AXIS vec3({:1.1f},{:1.1f},{:1.1f})", WORLD_X_AXIS.x, WORLD_X_AXIS.y, WORLD_X_AXIS.z ) );
        AppendToShaderHeader( ShaderType::COUNT, Util::StringFormat( "#define WORLD_Y_AXIS vec3({:1.1f},{:1.1f},{:1.1f})", WORLD_Y_AXIS.x, WORLD_Y_AXIS.y, WORLD_Y_AXIS.z ) );
        AppendToShaderHeader( ShaderType::COUNT, Util::StringFormat( "#define WORLD_Z_AXIS vec3({:1.1f},{:1.1f},{:1.1f})", WORLD_Z_AXIS.x, WORLD_Z_AXIS.y, WORLD_Z_AXIS.z ) );


        AppendToShaderHeader( ShaderType::COUNT, "#define M_EPSILON 1e-5f" );
        AppendToShaderHeader( ShaderType::COUNT, "#define M_PI 3.14159265358979323846" );
        AppendToShaderHeader( ShaderType::COUNT, "#define M_PI_DIV_2 1.57079632679489661923" );
        AppendToShaderHeader( ShaderType::COUNT, "#define INV_M_PI 0.31830988618379067153" );
        AppendToShaderHeader( ShaderType::COUNT, "#define TWO_M_PI 6.28318530717958647692" );
        AppendToShaderHeader( ShaderType::COUNT, "#define EULER_CONST 2.71828182845904523536" );

        // Memory-access qualifiers: compute gets real readonly/writeonly variants,
        // every other stage treats COMP_ONLY_* buffers as read-only.
        AppendToShaderHeader( ShaderType::COUNT, "#define ACCESS_RW" );
        AppendToShaderHeader( ShaderType::COUNT, "#define ACCESS_R readonly" );
        AppendToShaderHeader( ShaderType::COUNT, "#define ACCESS_W writeonly" );

        AppendToShaderHeader( ShaderType::VERTEX, "#define COMP_ONLY_W readonly" );
        AppendToShaderHeader( ShaderType::VERTEX, "#define COMP_ONLY_R" );
        AppendToShaderHeader( ShaderType::VERTEX, "#define COMP_ONLY_RW readonly" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#define COMP_ONLY_W readonly" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#define COMP_ONLY_R" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#define COMP_ONLY_RW readonly" );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "#define COMP_ONLY_W readonly" );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "#define COMP_ONLY_R" );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "#define COMP_ONLY_RW readonly" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "#define COMP_ONLY_W readonly" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "#define COMP_ONLY_R" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "#define COMP_ONLY_RW readonly" );
        AppendToShaderHeader( ShaderType::FRAGMENT, "#define COMP_ONLY_W readonly" );
        AppendToShaderHeader( ShaderType::FRAGMENT, "#define COMP_ONLY_R" );
        AppendToShaderHeader( ShaderType::FRAGMENT, "#define COMP_ONLY_RW readonly" );

        AppendToShaderHeader( ShaderType::COMPUTE, "#define COMP_ONLY_W ACCESS_W" );
        AppendToShaderHeader( ShaderType::COMPUTE, "#define COMP_ONLY_R ACCESS_R" );
        AppendToShaderHeader( ShaderType::COMPUTE, "#define COMP_ONLY_RW ACCESS_RW" );

        AppendToShaderHeader( ShaderType::COUNT, "#define AND(a, b) (a * b)" );
        AppendToShaderHeader( ShaderType::COUNT, "#define OR(a, b) min(a + b, 1.f)" );

        AppendToShaderHeader( ShaderType::COUNT, "#define XOR(a, b) ((a + b) % 2)" );
        AppendToShaderHeader( ShaderType::COUNT, "#define NOT(X) (1.f - X)" );
        AppendToShaderHeader( ShaderType::COUNT, "#define Squared(X) (X * X)" );
        AppendToShaderHeader( ShaderType::COUNT, "#define Round(X) floor((X) + .5f)" );
        AppendToShaderHeader( ShaderType::COUNT, "#define Saturate(X) clamp(X, 0, 1)" );
        AppendToShaderHeader( ShaderType::COUNT, "#define Mad(a, b, c) (a * b + c)" );

        AppendToShaderHeader( ShaderType::COUNT, "#define GLOBAL_WATER_BODIES_COUNT " + Util::to_string( GLOBAL_WATER_BODIES_COUNT ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define GLOBAL_PROBE_COUNT " + Util::to_string( GLOBAL_PROBE_COUNT ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define MATERIAL_TEXTURE_COUNT 16" );

        // Vertex attribute locations must match the engine-side AttribLocation enum.
        AppendToShaderHeader( ShaderType::VERTEX, "#define MAX_BONE_COUNT_PER_NODE " + Util::to_string( Config::MAX_BONE_COUNT_PER_NODE ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_POSITION " + Util::to_string( to_base( AttribLocation::POSITION ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_TEXCOORD " + Util::to_string( to_base( AttribLocation::TEXCOORD ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_NORMAL " + Util::to_string( to_base( AttribLocation::NORMAL ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_TANGENT " + Util::to_string( to_base( AttribLocation::TANGENT ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_COLOR " + Util::to_string( to_base( AttribLocation::COLOR ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_BONE_WEIGHT " + Util::to_string( to_base( AttribLocation::BONE_WEIGHT ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_BONE_INDICE " + Util::to_string( to_base( AttribLocation::BONE_INDICE ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_WIDTH " + Util::to_string( to_base( AttribLocation::WIDTH ) ) );
        AppendToShaderHeader( ShaderType::VERTEX, "#define ATTRIB_GENERIC " + Util::to_string( to_base( AttribLocation::GENERIC ) ) );
        AppendToShaderHeader( ShaderType::COUNT, "#define ATTRIB_FREE_START 12" );
        AppendToShaderHeader( ShaderType::FRAGMENT, "#define MAX_SHININESS " + Util::to_string( Material::MAX_SHININESS ) );

        const string interfaceLocationString = "layout(location = 0) ";

        for ( U8 i = 0u; i < to_U8( ShadingMode::COUNT ) + 1u; ++i )
        {
            const ShadingMode mode = static_cast<ShadingMode>(i);
            AppendToShaderHeader( ShaderType::FRAGMENT, Util::StringFormat( "#define SHADING_{} {}", TypeUtil::ShadingModeToString( mode ), i ) );
        }

        AppendToShaderHeader( ShaderType::FRAGMENT, Util::StringFormat( "#define SHADING_COUNT {}", to_base( ShadingMode::COUNT ) ) );

        for ( U8 i = 0u; i < to_U8( MaterialDebugFlag::COUNT ) + 1u; ++i )
        {
            const MaterialDebugFlag flag = static_cast<MaterialDebugFlag>(i);
            AppendToShaderHeader( ShaderType::FRAGMENT, Util::StringFormat( "#define DEBUG_{} {}", TypeUtil::MaterialDebugFlagToString( flag ), i ) );
        }

        AppendToShaderHeader( ShaderType::COUNT, "#if defined(PRE_PASS) || defined(SHADOW_PASS)" );
        AppendToShaderHeader( ShaderType::COUNT, "# define DEPTH_PASS" );
        AppendToShaderHeader( ShaderType::COUNT, "#endif //PRE_PASS || SHADOW_PASS" );

        AppendToShaderHeader( ShaderType::COUNT, "#if defined(COMPUTE_TBN) && !defined(ENABLE_TBN)" );
        AppendToShaderHeader( ShaderType::COUNT, "# define ENABLE_TBN" );
        AppendToShaderHeader( ShaderType::COUNT, "#endif //COMPUTE_TBN && !ENABLE_TBN" );

        AppendToShaderHeader( ShaderType::GEOMETRY, "#if !defined(INPUT_PRIMITIVE_SIZE)" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "# define INPUT_PRIMITIVE_SIZE 1" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "#endif //!INPUT_PRIMITIVE_SIZE" );

        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#if !defined(TESSELLATION_OUTPUT_VERTICES)" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "# define TESSELLATION_OUTPUT_VERTICES 4" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#endif //!TESSELLATION_OUTPUT_VERTICES" );

        // Vertex shader output
        AppendToShaderHeader( ShaderType::VERTEX, interfaceLocationString + "out Data {" );
        addVaryings( ShaderType::VERTEX );
        AppendToShaderHeader( ShaderType::VERTEX, "} _out;\n" );

        // Tessellation Control shader input
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, interfaceLocationString + "in Data {" );
        addVaryings( ShaderType::TESSELLATION_CTRL );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "} _in[gl_MaxPatchVertices];\n" );

        // Tessellation Control shader output
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, interfaceLocationString + "out Data {" );
        addVaryings( ShaderType::TESSELLATION_CTRL );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "} _out[TESSELLATION_OUTPUT_VERTICES];\n" );

        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, getPassData( ShaderType::TESSELLATION_CTRL ) );

        // Tessellation Eval shader input
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, interfaceLocationString + "in Data {" );
        addVaryings( ShaderType::TESSELLATION_EVAL );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "} _in[gl_MaxPatchVertices];\n" );

        // Tessellation Eval shader output
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, interfaceLocationString + "out Data {" );
        addVaryings( ShaderType::TESSELLATION_EVAL );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "} _out;\n" );

        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, getPassData( ShaderType::TESSELLATION_EVAL ) );

        // Geometry shader input
        AppendToShaderHeader( ShaderType::GEOMETRY, interfaceLocationString + "in Data {" );
        addVaryings( ShaderType::GEOMETRY );
        AppendToShaderHeader( ShaderType::GEOMETRY, "} _in[INPUT_PRIMITIVE_SIZE];\n" );

        // Geometry shader output
        AppendToShaderHeader( ShaderType::GEOMETRY, interfaceLocationString + "out Data {" );
        addVaryings( ShaderType::GEOMETRY );
        AppendToShaderHeader( ShaderType::GEOMETRY, "} _out;\n" );

        AppendToShaderHeader( ShaderType::GEOMETRY, getPassData( ShaderType::GEOMETRY ) );

        // Fragment shader input
        AppendToShaderHeader( ShaderType::FRAGMENT, interfaceLocationString + "in Data {" );
        addVaryings( ShaderType::FRAGMENT );
        AppendToShaderHeader( ShaderType::FRAGMENT, "} _in;\n" );

        AppendToShaderHeader( ShaderType::VERTEX, "#define VAR _out" );
        AppendToShaderHeader( ShaderType::TESSELLATION_CTRL, "#define VAR _in[gl_InvocationID]" );
        AppendToShaderHeader( ShaderType::TESSELLATION_EVAL, "#define VAR _in[0]" );
        AppendToShaderHeader( ShaderType::GEOMETRY, "#define VAR _in" );
        AppendToShaderHeader( ShaderType::FRAGMENT, "#define VAR _in" );

        AppendToShaderHeader( ShaderType::COUNT, "//_CUSTOM_UNIFORMS_\\" );
        AppendToShaderHeader( ShaderType::COUNT, "//_PUSH_CONSTANTS_DEFINE_\\" );

        // Check initialization status for GLSL and glsl-optimizer
        return glswState == 1;
    }
754
    /// Convenience overload: wraps the raw literal in a string and delegates.
    ModuleDefine::ModuleDefine( const char* define, const bool addPrefix )
        : ModuleDefine( string{ define }, addPrefix )
    {
    }
759
// Stores the define text verbatim. When addPrefix is true, consumers prepend "#define "
// when assembling the shader header (see the header-building loop in loadAndParseGLSL).
760 ModuleDefine::ModuleDefine( const string& define, const bool addPrefix )
761 : _define( define )
762 , _addPrefix( addPrefix )
763 {
764 }
765
767 : _sourceFile( file ), _variant( variant ), _moduleType( type )
768 {
769 }
770
771 std::atomic_bool ShaderModule::s_modulesRemoved;
774
// Housekeeping tick for the shader-module cache. In "fast" mode it does nothing;
// otherwise, when a module was flagged as removed, it sweeps the name map and evicts
// modules that are unused and older than MAX_FRAME_LIFETIME frames.
775 void ShaderModule::Idle( const bool fast )
776 {
777 if ( fast )
778 {
779 NOP();
780 return;
781 }
782
// compare_exchange resets the "modules removed" flag so the sweep runs at most once per notification.
783 bool expected = true;
784 if ( s_modulesRemoved.compare_exchange_strong(expected, false) )
785 {
// NOTE(review): source line 786 is elided in this dump — presumably a lock guard over
// s_shaderNameMap; confirm against the repository source before editing this sweep.
787 for ( auto it = s_shaderNameMap.begin(); it != s_shaderNameMap.end(); )
788 {
789 ShaderModule* shaderModule = it->second.get();
// Evict only modules with no parent programs whose last use is past the lifetime window.
790 if ( !shaderModule->_inUse && shaderModule->_lastUsedFrame + MAX_FRAME_LIFETIME < GFXDevice::FrameCount() )
791 {
792 Console::warnfn(LOCALE_STR("SHADER_MODULE_EXPIRED"), shaderModule->_name.c_str());
793 it = s_shaderNameMap.erase(it);
794 }
795 else
796 {
797 ++it;
798 }
799 }
800 }
801 }
802
804 {
805 NOP();
806 }
807
809 {
811 s_shaderNameMap.clear();
812 }
813
// Public lookup of a cached shader module by name.
// NOTE(review): source line 816 is elided in this dump — presumably it acquires the
// module-map lock before delegating to the *Locked variant; confirm against the repo.
814 ShaderModule* ShaderModule::GetShader( const std::string_view name )
815 {
817 return GetShaderLocked( name );
818 }
819
820 ShaderModule* ShaderModule::GetShaderLocked( const std::string_view name )
821 {
822 // Try to find the shader
823 const ShaderMap::iterator it = s_shaderNameMap.find( _ID( name ) );
824 if ( it != std::end( s_shaderNameMap ) )
825 {
826 return it->second.get();
827 }
828
829 return nullptr;
830 }
831
// Constructs a module wrapper around a single shader stage. The name is hashed via _ID
// for the descriptor; NOTE(review): `generation` appears to version reloads of a
// same-named module — confirm with callers.
832 ShaderModule::ShaderModule( GFXDevice& context, const std::string_view name, const U32 generation )
833 : GUIDWrapper()
834 , GraphicsResource( context, Type::SHADER, getGUID(), _ID( name ) )
835 , _name( name )
836 , _generation( generation )
837 {
838 }
839
841 {
842 }
843
845 {
846 DIVIDE_ASSERT(parent != nullptr);
847
849 for ( ShaderProgram* it : _parents )
850 {
851 if ( it->getGUID() == parent->getGUID() )
852 {
853 return;
854 }
855 }
856
857 _parents.push_back(parent);
858 _inUse = true;
859 }
860
862 {
863 DIVIDE_ASSERT( parent != nullptr );
864
865 const I64 targetGUID = parent->getGUID();
868 [targetGUID]( ShaderProgram* it )
869 {
870 return it->getGUID() == targetGUID;
871 } ) )
872 {
873 if ( _parents.empty() )
874 {
875 _inUse = false;
876 _lastUsedFrame = GFXDevice::FrameCount();
877 s_modulesRemoved.store(true);
878 }
879 }
880 }
881
883 : CachedResource( descriptor, "ShaderProgram" )
884 , GraphicsResource( context.gfx(), Type::SHADER_PROGRAM, getGUID(), _ID( resourceName() ) )
885 , _highPriority( descriptor.flag() )
886 , _descriptor( descriptor._propertyDescriptor )
887 {
888 if ( assetName().empty() )
889 {
890 assetName( resourceName() );
891 }
892
893 if ( assetLocation().empty() )
894 {
895 assetLocation( Paths::g_shadersLocation );
896 }
897
898 DIVIDE_ASSERT ( !assetName().empty() );
899 _useShaderCache = _descriptor._useShaderCache;
900 s_shaderCount.fetch_add( 1, std::memory_order_relaxed );
901 }
902
904 {
905 Console::d_printfn( LOCALE_STR( "SHADER_PROGRAM_REMOVE" ), resourceName().c_str() );
906 s_shaderCount.fetch_sub( 1, std::memory_order_relaxed );
907 }
908
910 {
912
913 hashMap<U64, PerFileShaderData> loadDataByFile{};
914 if ( loadInternal( loadDataByFile, false ))
915 {
917 }
918
919 return false;
920 }
921
923 {
924 RegisterShaderProgram( this );
926 }
927
929 {
930 // Our GPU Arena will clean up the memory, but we should still destroy these
931 _uniformBlockBuffers.clear();
932 // Unregister the program from the manager
933 if ( !UnregisterShaderProgram( this ) )
934 {
936 }
937
938 return true;
939 }
940
// Re-runs the full source load for this program. Sets `skipped` to false only when a
// recompile was actually attempted; returns false on validation or load failure.
942 bool ShaderProgram::recompile( bool& skipped )
943 {
// NOTE(review): source lines 944 and 947 are elided in this dump; 947 is evidently the
// condition guarding the block below (the unguarded "return false" at the end would be
// unreachable otherwise) — confirm the exact predicate against the repository source.
945
946 skipped = true;
948 {
949 if ( validatePreBind( false ) != ShaderResult::OK )
950 {
951 return false;
952 }
953
954 skipped = false;
// overwrite=true: force re-parse / cache invalidation for every stage.
955 hashMap<U64, PerFileShaderData> loadDataByFile{};
956 return loadInternal( loadDataByFile, true );
957 }
958
959 return false;
960 }
961
// Base implementation performs no pre-bind validation and always reports success.
// NOTE(review): presumably overridden by API-specific program types that do the real
// compile/link checks — confirm against the class declaration.
962 ShaderResult ShaderProgram::validatePreBind( [[maybe_unused]] const bool rebind)
963 {
964 return ShaderResult::OK;
965 }
966
// Per-thread initialization hook: (re)initializes GLSW for the calling thread so the
// thread-local shader-source context is valid for the active render API.
// NOTE(review): source lines 969 and 973 are elided in this dump (likely a profiler
// scope and the failure-handling statement inside the if) — confirm against the repo.
967 void ShaderProgram::OnThreadCreated( const GFXDevice& gfx, [[maybe_unused]] const std::thread::id& threadID, [[maybe_unused]] const bool isMainRenderThread )
968 {
970
971 if ( !InitGLSW( gfx.renderAPI(), gfx.context().config() ) )
972 {
974 }
975 }
976
// Frame-idle work for the shader system: ticks the module cache sweep, ages entries in
// the failed-recompile queue back into the active queue, and processes at most one
// recompile per call, with exponential-ish back-off (capped at ~one minute of frames)
// for programs that keep failing.
// NOTE(review): source lines 979, 985, 993, 1001 and 1012 are elided in this dump —
// they evidently fetch/pop/push the queue `entry` objects referenced below; confirm the
// exact queue plumbing against the repository source before modifying this function.
977 void ShaderProgram::Idle( [[maybe_unused]] PlatformContext& platformContext, const bool fast )
978 {
980
981 ShaderModule::Idle( fast );
982
// Age the front of the failed queue; once its delay reaches zero, requeue it for recompile.
983 if ( !s_recompileFailedQueue.empty() )
984 {
986 if ( entry._queueDelay > 0u )
987 {
988 --entry._queueDelay;
989 }
990 else
991 {
992 s_recompileQueue.push( entry );
994 }
995 }
996
997 // If we don't have any shaders queued for recompilation, return early
998 if ( !s_recompileQueue.empty() )
999 {
1000 // Else, recompile the top program from the queue
1002 if ( !entry._program->recompile() )
1003 {
1004 Console::errorfn( LOCALE_STR( "ERROR_SHADER_RECOMPILE_FAILED" ), entry._program->resourceName().c_str() );
1005
1006 // We can delay a recomputation up to an interval of a minute
1007 if ( entry._queueDelayHighWaterMark < Config::TARGET_FRAME_RATE * 60)
1008 {
1009 entry._queueDelayHighWaterMark += 1u;
1010 entry._queueDelay = entry._queueDelayHighWaterMark;
1011 }
1013 }
1014
1015 s_recompileQueue.pop();
1016 }
1017 }
1018
1020 {
1022 }
1023
1025 {
1027 }
1028
// Queues every registered program whose name contains `name` (substring match) for
// recompilation. Returns true if at least one program matched; logs an error otherwise.
1030 bool ShaderProgram::RecompileShaderProgram( const std::string_view name )
1031 {
1032 bool state = false;
1033
// NOTE(review): source line 1034 is elided in this dump — presumably a lock guarding
// s_shaderPrograms during iteration; confirm against the repository source.
1035
1036 // Find the shader program
1037 for ( ShaderProgram* program : s_shaderPrograms )
1038 {
1039 DIVIDE_ASSERT( program != nullptr );
1040
1041 const string shaderName{ program->resourceName().c_str() };
// Check if the name matches any of the program's name components
// (the compare() clause is redundant with find() != npos, but kept as-is here).
1043 if ( shaderName.find( name ) != Str<256>::npos || shaderName.compare( name ) == 0 )
1044 {
1045 // We process every partial match. So add it to the recompilation queue
1046 s_recompileQueue.push( ShaderQueueEntry{ ._program = program } );
1047 // Mark as found
1048 state = true;
1049 }
1050 }
1051 // If no shaders were found, show an error
1052 if ( !state )
1053 {
1054 Console::errorfn( LOCALE_STR( "ERROR_RECOMPILE_NOT_FOUND" ), name );
1055 }
1056
1057 return state;
1058 }
1059
1061 {
1062 RefreshBindingSlots();
1063
1064 if constexpr ( !Config::Build::IS_SHIPPING_BUILD )
1065 {
1067 s_shaderFileWatcherID = watcher.getGUID();
1070
1071 const vector<ResourcePath> atomLocations = GetAllAtomLocations();
1072 for ( const ResourcePath& loc : atomLocations )
1073 {
1074 if ( createDirectory( loc ) != FileError::NONE )
1075 {
1077 }
1078 watcher().addWatch( loc.string().c_str(), &g_sFileWatcherListener );
1079 }
1080 }
1081
1082 shaderAtomLocationPrefix[to_base( ShaderType::FRAGMENT )] = Paths::Shaders::GLSL::g_fragAtomLoc;
1083 shaderAtomLocationPrefix[to_base( ShaderType::VERTEX )] = Paths::Shaders::GLSL::g_vertAtomLoc;
1084 shaderAtomLocationPrefix[to_base( ShaderType::GEOMETRY )] = Paths::Shaders::GLSL::g_geomAtomLoc;
1085 shaderAtomLocationPrefix[to_base( ShaderType::TESSELLATION_CTRL )] = Paths::Shaders::GLSL::g_tescAtomLoc;
1086 shaderAtomLocationPrefix[to_base( ShaderType::TESSELLATION_EVAL )] = Paths::Shaders::GLSL::g_teseAtomLoc;
1087 shaderAtomLocationPrefix[to_base( ShaderType::COMPUTE )] = Paths::Shaders::GLSL::g_compAtomLoc;
1088 shaderAtomLocationPrefix[to_base( ShaderType::COUNT )] = Paths::Shaders::GLSL::g_comnAtomLoc;
1089
1090 shaderAtomExtensionName[to_base( ShaderType::FRAGMENT )] = Paths::Shaders::GLSL::g_fragAtomExt;
1091 shaderAtomExtensionName[to_base( ShaderType::VERTEX )] = Paths::Shaders::GLSL::g_vertAtomExt;
1092 shaderAtomExtensionName[to_base( ShaderType::GEOMETRY )] = Paths::Shaders::GLSL::g_geomAtomExt;
1093 shaderAtomExtensionName[to_base( ShaderType::TESSELLATION_CTRL )] = Paths::Shaders::GLSL::g_tescAtomExt;
1094 shaderAtomExtensionName[to_base( ShaderType::TESSELLATION_EVAL )] = Paths::Shaders::GLSL::g_teseAtomExt;
1095 shaderAtomExtensionName[to_base( ShaderType::COMPUTE )] = Paths::Shaders::GLSL::g_compAtomExt;
1096 shaderAtomExtensionName[to_base( ShaderType::COUNT )] = "." + Paths::Shaders::GLSL::g_comnAtomExt;
1097
1098 for ( U8 i = 0u; i < to_base( ShaderType::COUNT ) + 1; ++i )
1099 {
1101 }
1102
1103 const Configuration& config = context.config();
1104 s_useShaderCache = config.debug.cache.enabled && config.debug.cache.shaders;
1105 s_targetVulkan = context.gfx().renderAPI() == RenderAPI::Vulkan;
1106
1107 FileList list{};
1108 if ( s_useShaderCache )
1109 {
1110 for ( U8 i = 0u; i < to_base( ShaderType::COUNT ) + 1; ++i )
1111 {
1112 const ResourcePath& atomLocation = shaderAtomLocationPrefix[i];
1113 if ( getAllFilesInDirectory( atomLocation, list, shaderAtomExtensionName[i].c_str() ) )
1114 {
1115 for ( const FileEntry& it : list )
1116 {
1117 s_newestShaderAtomWriteTime = std::max( s_newestShaderAtomWriteTime, it._lastWriteTime );
1118 }
1119 }
1120 list.resize( 0 );
1121 }
1122 }
1123
1124 return ErrorCode::NO_ERR;
1125 }
1126
1128 {
1130 if ( !InitGLSW( gfx.renderAPI(), gfx.context().config() ))
1131 {
1133 }
1134
1136 return ErrorCode::NO_ERR;
1137 }
1138
1140 {
1142
1143 while ( !s_recompileQueue.empty() )
1144 {
1145 s_recompileQueue.pop();
1146 }
1149
1152
1153 s_shaderCount = 0u;
1154 s_atoms.clear();
1155 s_atomIncludes.clear();
1156
1158
1159 for ( auto& bindings : s_bindingsPerSet )
1160 {
1161 bindings.fill( {} );
1162 }
1163
1164 return glswGetCurrentContext() == nullptr || glswShutdown() == 1;
1165 }
1166
1167
1169 {
1170 return s_bindingsPerSet[to_base( usage )][slot]._glBinding;
1171 }
1172
1173 std::pair<DescriptorSetUsage, U8> ShaderProgram::GetDescriptorSlotForGLBinding( const U8 binding, const DescriptorSetBindingType type ) noexcept
1174 {
1175 for ( U8 i = 0u; i < to_base( DescriptorSetUsage::COUNT ); ++i )
1176 {
1177 const BindingsPerSetArray& bindings = s_bindingsPerSet[i];
1178 for ( U8 j = 0u; j < bindings.size(); ++j )
1179 {
1180 if ( bindings[j]._glBinding == binding && bindings[j]._type == type )
1181 {
1182 return { static_cast<DescriptorSetUsage>(i), j };
1183 }
1184 }
1185 }
1186
1187 // If we didn't specify any images, we assume per-draw granularity
1188 return { DescriptorSetUsage::PER_DRAW, binding };
1189 }
1190
1192 {
1193 return s_bindingsPerSet;
1194 }
1195
1197 {
1199
1200 BindingsPerSet& bindingData = s_bindingsPerSet[to_base( usage )][slot];
1201 bindingData._type = type;
1202 bindingData._visibility = to_base(visibility);
1203
1204 switch ( type )
1205 {
1207 bindingData._glBinding = s_textureSlot++;
1208 break;
1210 bindingData._glBinding = s_imageSlot++;
1211 break;
1214 if ( usage == DescriptorSetUsage::PER_BATCH && slot == 0 )
1215 {
1216 bindingData._glBinding = k_commandBufferID;
1217 }
1218 else
1219 {
1220 bindingData._glBinding = s_bufferSlot++;
1221 }
1222 break;
1225 break;
1226 }
1227 }
1228
1230 {
1232
1233 U32 count = 0u;
1234 if ( usage == DescriptorSetUsage::PER_DRAW )
1235 {
1236 switch ( type )
1237 {
1238 case DescriptorSetBindingType::COMBINED_IMAGE_SAMPLER: count = to_base( TextureSlot::COUNT ) + 2u; /*{Reflection + Refraction}*/ break;
1239 case DescriptorSetBindingType::IMAGE: count = 2u; break;
1243 }
1244 }
1245 else
1246 {
1247 for ( const BindingsPerSet& binding : s_bindingsPerSet[to_base( usage )] )
1248 {
1249 if ( binding._type == type )
1250 {
1251 ++count;
1252 }
1253 }
1254 }
1255
1256 return count;
1257 }
1258
// Begin-of-frame hook; effectively a no-op here.
// NOTE(review): source line 1261 is elided in this dump (likely a profiler scope) —
// confirm against the repository source.
1259 void ShaderProgram::OnBeginFrame( [[maybe_unused]] GFXDevice& gfx )
1260 {
1262 }
1263
1265 {
1267
1268 size_t& totalUniformBufferSize = gfx.getPerformanceMetrics()._uniformBufferVRAMUsage;
1269 totalUniformBufferSize = 0u;
1270
1271 for ( ShaderProgram* program : s_usedShaderPrograms )
1272 {
1273 for ( UniformBlockUploader& block : program->_uniformBlockBuffers )
1274 {
1275 block.onFrameEnd();
1276 totalUniformBufferSize += block.totalBufferSize();
1277 }
1278 }
1279
1280 }
1281
1284 {
1285 DIVIDE_ASSERT( shaderProgram != nullptr );
1286
1288 for ( ShaderProgram*& program : s_shaderPrograms )
1289 {
1290 if ( program == nullptr )
1291 {
1292 program = shaderProgram;
1293 return;
1294 }
1295 }
1296 s_shaderPrograms.push_back(shaderProgram);
1297 }
1298
1301 {
1302 DIVIDE_ASSERT( shaderProgram != nullptr );
1303 const I64 guid = shaderProgram->getGUID();
1305 for ( ShaderProgram*& program : s_shaderPrograms )
1306 {
1307 if ( program != nullptr && program->getGUID() == guid)
1308 {
1309 program = nullptr;
1310 return true;
1311 }
1312 }
1313
1314 return false;
1315 }
1316
1318 {
1320 for ( ShaderProgram* program : s_shaderPrograms )
1321 {
1322 DIVIDE_ASSERT ( program != nullptr );
1323 s_recompileQueue.push( ShaderQueueEntry{ ._program = program } );
1324 }
1325 }
1326
1328 {
1329 NO_DESTROY static vector<ResourcePath> atomLocations;
1330 if ( atomLocations.empty() )
1331 {
1332 // General
1333 atomLocations.emplace_back( Paths::g_shadersLocation );
1334 // GLSL
1335 atomLocations.emplace_back( Paths::Shaders::GLSL::g_GLSLShaderLoc );
1336
1337 atomLocations.emplace_back( Paths::Shaders::GLSL::g_comnAtomLoc );
1338
1339 atomLocations.emplace_back( Paths::Shaders::GLSL::g_compAtomLoc );
1340
1341 atomLocations.emplace_back( Paths::Shaders::GLSL::g_fragAtomLoc );
1342
1343 atomLocations.emplace_back( Paths::Shaders::GLSL::g_geomAtomLoc );
1344
1345 atomLocations.emplace_back( Paths::Shaders::GLSL::g_tescAtomLoc );
1346
1347 atomLocations.emplace_back( Paths::Shaders::GLSL::g_teseAtomLoc );
1348
1349 atomLocations.emplace_back( Paths::Shaders::GLSL::g_vertAtomLoc );
1350 }
1351
1352 return atomLocations;
1353 }
1354
1355 const string& ShaderProgram::ShaderFileRead( const ResourcePath& filePath, const std::string_view atomName, const bool recurse, eastl::set<U64>& foundAtomIDsInOut, bool& wasParsed )
1356 {
1357 LockGuard<Mutex> w_lock( s_atomLock );
1358 return ShaderFileReadLocked( filePath, atomName, recurse, foundAtomIDsInOut, wasParsed );
1359 }
1360
1361 void ShaderProgram::PreprocessIncludes( const std::string_view name,
1362 string& sourceInOut,
1363 const I32 level,
1364 eastl::set<U64>& foundAtomIDsInOut,
1365 const bool lock )
1366 {
1367 if ( level > s_maxHeaderRecursionLevel )
1368 {
1369 Console::errorfn( LOCALE_STR( "ERROR_GLSL_INCLUD_LIMIT" ) );
1370 }
1371
1372 size_t lineNumber = 1;
1373
1374 string line;
1375 string output, includeString;
1376 output.reserve(sourceInOut.size());
1377
1378 istringstream input( sourceInOut );
1379
1380 while ( Util::GetLine( input, line ) )
1381 {
1382 const std::string_view directive = !line.empty() ? std::string_view{ line }.substr( 1 ) : "";
1383
1384 bool isInclude = Util::BeginsWith( line, "#", true ) &&
1385 !Util::BeginsWith( directive, "version", true ) &&
1386 !Util::BeginsWith( directive, "extension", true ) &&
1387 !Util::BeginsWith( directive, "define", true ) &&
1388 !Util::BeginsWith( directive, "if", true ) &&
1389 !Util::BeginsWith( directive, "else", true ) &&
1390 !Util::BeginsWith( directive, "elif", true ) &&
1391 !Util::BeginsWith( directive, "endif", true ) &&
1392 !Util::BeginsWith( directive, "pragma", true );
1393
1394 bool skip = false;
1395 if ( isInclude )
1396 {
1397 if ( auto m = ctre::match<Paths::g_includePattern>( line ) )
1398 {
1399 skip = true;
1400
1401 const auto includeFile = Util::Trim( m.get<1>().str() );
1402
1403 foundAtomIDsInOut.insert( _ID( includeFile.c_str() ) );
1404
1405 ShaderType typeIndex = ShaderType::COUNT;
1406 bool found = false;
1407 // switch will throw warnings due to promotion to int
1408 const U64 extHash = _ID( Util::GetTrailingCharacters( includeFile, 4 ).c_str() );
1409 for ( U8 i = 0; i < to_base( ShaderType::COUNT ) + 1; ++i )
1410 {
1411 if ( extHash == shaderAtomExtensionHash[i] )
1412 {
1413 typeIndex = static_cast<ShaderType>(i);
1414 found = true;
1415 break;
1416 }
1417 }
1418
1419 DIVIDE_ASSERT( found, "Invalid shader include type" );
1420 bool wasParsed = false;
1421 if ( lock )
1422 {
1423 includeString = ShaderFileRead( shaderAtomLocationPrefix[to_U32( typeIndex )], includeFile, true, foundAtomIDsInOut, wasParsed ).c_str();
1424 }
1425 else
1426 {
1427 includeString = ShaderFileReadLocked( shaderAtomLocationPrefix[to_U32( typeIndex )], includeFile, true, foundAtomIDsInOut, wasParsed ).c_str();
1428 }
1429 if ( includeString.empty() )
1430 {
1431 Console::errorfn( LOCALE_STR( "ERROR_GLSL_NO_INCLUDE_FILE" ), name, lineNumber, includeFile );
1432 }
1433 if ( !wasParsed )
1434 {
1435 PreprocessIncludes( name, includeString, level + 1, foundAtomIDsInOut, lock );
1436 }
1437
1438 output.append( includeString );
1439 }
1440 }
1441
1442 if (!skip)
1443 {
1444 output.append( line.c_str() );
1445 }
1446
1447 output.append( "\n" );
1448 ++lineNumber;
1449 }
1450
1451 sourceInOut = output;
1452 }
1453
// Reads (and optionally include-expands) a shader atom, memoizing both its source text
// (s_atoms) and its transitive include IDs (s_atomIncludes). Caller must hold s_atomLock.
// `wasParsed` reports a cache hit, letting callers skip re-expansion.
1455 const string& ShaderProgram::ShaderFileReadLocked( const ResourcePath& filePath, const std::string_view atomName, const bool recurse, eastl::set<U64>& foundAtomIDsInOut, bool& wasParsed )
1456 {
1457 const U64 atomNameHash = _ID( atomName );
1458 // See if the atom was previously loaded and still in cache
1459 const AtomMap::iterator it = s_atoms.find( atomNameHash );
1460
1461 // If that's the case, return the code from cache
1462 if ( it != std::cend( s_atoms ) )
1463 {
// Propagate the cached atom's transitive include set to the caller as well.
1464 const auto& atoms = s_atomIncludes[atomNameHash];
1465 for ( const auto& atom : atoms )
1466 {
1467 foundAtomIDsInOut.insert( atom );
1468 }
1469 wasParsed = true;
1470 return it->second;
1471 }
1472
1473 wasParsed = false;
1474 // If we forgot to specify an atom location, we have nothing to return
1475 assert( !filePath.empty() );
1476
1477 // Open the atom file and add the code to the atom cache for future reference
1478 string& output = s_atoms[atomNameHash];
1479 output.clear();
1480 eastl::set<U64>& atoms = s_atomIncludes[atomNameHash];
1481 atoms.clear();
1482
1483 if ( readFile( filePath, atomName, FileType::TEXT, output ) != FileError::NONE )
1484 {
// NOTE(review): source line 1485 is elided in this dump — presumably the read-failure
// handling (error report/abort); confirm against the repository source.
1486 }
1487
// Expansion uses lock=false since this function already runs under s_atomLock.
1488 if ( recurse )
1489 {
1490 PreprocessIncludes( atomName, output, 0, atoms, false );
1491 }
1492
1493 for ( const auto& atom : atoms )
1494 {
1495 foundAtomIDsInOut.insert( atom );
1496 }
1497
1498 // Return the source code
1499 return output;
1500 }
1501
// Persists one flavor of shader cache (GLSL text, SPIR-V binary, or reflection data)
// for a compiled stage. On any failure the corresponding cache entry is deleted so a
// stale/partial file is never left behind. Returns true on a successful write.
// NOTE(review): the switch's `case` labels (source lines 1510, 1532, 1552, 1561) and the
// trailing writeFile arguments (1519, 1541) are elided in this dump; the branch bodies
// below identify them unambiguously as GLSL / SPIRV / REFLECTION — confirm against repo.
1502 bool ShaderProgram::SaveToCache( const LoadData::ShaderCacheType cache, const LoadData& dataIn, const eastl::set<U64>& atomIDsIn )
1503 {
1504 bool ret = false;
1505
1506 LockGuard<Mutex> rw_lock( g_cacheLock );
1507 FileError err = FileError::FILE_EMPTY;
1508 switch ( cache )
1509 {
// Branch 1: GLSL text cache (writes _sourceCodeGLSL).
1511 {
1512 if ( !dataIn._sourceCodeGLSL.empty() )
1513 {
1514 {
1515 err = writeFile( TxtCacheLocation(),
1516 dataIn._shaderName.c_str(),
1517 dataIn._sourceCodeGLSL.c_str(),
1518 dataIn._sourceCodeGLSL.length(),
1520 }
1521
1522 if ( err != FileError::NONE )
1523 {
1524 Console::errorfn( LOCALE_STR( "ERROR_SHADER_SAVE_TEXT_FAILED" ), dataIn._shaderName.c_str() );
1525 }
1526 else
1527 {
1528 ret = true;
1529 }
1530 }
1531 } break;
// Branch 2: SPIR-V binary cache (writes _sourceCodeSpirV as raw words).
1533 {
1534 if ( !dataIn._sourceCodeSpirV.empty() )
1535 {
1536 err = writeFile( SpvCacheLocation(),
1537 SpvTargetName( dataIn._shaderName ).string(),
1538 (bufferPtr)dataIn._sourceCodeSpirV.data(),
1539 dataIn._sourceCodeSpirV.size() * sizeof( SpvWord ),
1541
1542 if ( err != FileError::NONE )
1543 {
1544 Console::errorfn( LOCALE_STR( "ERROR_SHADER_SAVE_SPIRV_FAILED" ), dataIn._shaderName.c_str() );
1545 }
1546 else
1547 {
1548 ret = true;
1549 }
1550 }
1551 } break;
// Branch 3: reflection-data cache (includes the atom-ID dependency list).
1553 {
1554 ret = Reflection::SaveReflectionData( ReflCacheLocation(), ReflTargetName( dataIn._shaderName ), dataIn._reflectionData, atomIDsIn );
1555 if ( !ret )
1556 {
1557 Console::errorfn( LOCALE_STR( "ERROR_SHADER_SAVE_REFL_FAILED" ), dataIn._shaderName.c_str() );
1558 }
1559 } break;
1560
1562 }
1563
// Failed writes must not leave a partial cache file behind.
1564 if ( !ret )
1565 {
1566 if ( !DeleteCacheLocked( cache, dataIn._shaderName ) )
1567 {
1568 NOP();
1569 }
1570 }
1571
1572 return ret;
1573 }
1574
// Loads one flavor of shader cache (GLSL text, SPIR-V words, or reflection data) into
// dataInOut. Validates the cache entry first and deletes it when stale. Returns true
// only when the requested payload was fully read.
// NOTE(review): the switch's `case` labels and some readFile arguments (source lines
// 1596, 1600, 1604, 1610, 1639, 1644) are elided in this dump; branch bodies identify
// them as GLSL / SPIRV / REFLECTION — confirm against the repository source.
1575 bool ShaderProgram::LoadFromCache( const LoadData::ShaderCacheType cache, LoadData& dataInOut, eastl::set<U64>& atomIDsOut )
1576 {
1577 if ( !s_useShaderCache )
1578 {
1579 return false;
1580 }
1581
// A cache entry older than its source (or otherwise invalid) is purged and reported missing.
1582 LockGuard<Mutex> rw_lock( g_cacheLock );
1583 if ( !ValidateCacheLocked( cache, dataInOut._sourceFile, dataInOut._shaderName ) )
1584 {
1585 if ( !DeleteCacheLocked( cache, dataInOut._shaderName ) )
1586 {
1587 NOP();
1588 }
1589
1590 return false;
1591 }
1592
1593 FileError err = FileError::FILE_EMPTY;
1594 switch ( cache )
1595 {
// Branch 1: GLSL text cache -> _sourceCodeGLSL.
1597 {
1598 err = readFile( TxtCacheLocation(),
1599 dataInOut._shaderName.c_str(),
1601 dataInOut._sourceCodeGLSL );
1602 return err == FileError::NONE;
1603 }
// Branch 2: SPIR-V cache; streamed word-by-word into _sourceCodeSpirV.
1605 {
1606 std::ifstream tempData;
1607 {
1608 err = readFile( SpvCacheLocation(),
1609 SpvTargetName( dataInOut._shaderName ).string(),
1611 tempData );
1612 }
1613
1614 if ( err == FileError::NONE )
1615 {
// Pre-size from the file length, then read 4-byte SPIR-V words until EOF.
1616 tempData.seekg(0, std::ios::end);
1617 dataInOut._sourceCodeSpirV.reserve( tempData.tellg() / sizeof( SpvWord ) );
1618 tempData.seekg(0);
1619
1620 while (!tempData.eof())
1621 {
1622 SpvWord inWord;
1623 tempData.read((char *)&inWord, sizeof(inWord));
1624 if (!tempData.eof())
1625 {
1626 dataInOut._sourceCodeSpirV.push_back(inWord);
1627 if (tempData.fail())
1628 {
1629 return false;
1630 }
1631 }
1632 }
1633
1634 return true;
1635 }
1636
1637 return false;
1638 }
// Branch 3: reflection-data cache, including the atom dependency IDs.
1640 {
1641 return Reflection::LoadReflectionData( ReflCacheLocation(), ReflTargetName( dataInOut._shaderName ), dataInOut._reflectionData, atomIDsOut );
1642 }
1643
1645 }
1646
1647 return false;
1648 }
1649
// Core load path: groups this program's stage descriptors by source file, derives each
// stage's unique source/shader names (source.stage[.variant][.definesHash].ext), loads
// (or recompiles, when overwrite=true) every stage's code, then builds the uniform
// uploaders and per-draw descriptor layout per file. Returns false on the first stage
// that fails to produce source code.
1650 bool ShaderProgram::loadInternal( hashMap<U64, PerFileShaderData>& fileData, const bool overwrite )
1651 {
1652 // The context is thread_local so each call to this should be thread safe
1653 if ( overwrite )
1654 {
// NOTE(review): source line 1655 is elided in this dump — presumably resets the
// thread-local GLSW context before re-parsing; confirm against the repository source.
1656 }
1657 glswSetPath(( Paths::Shaders::GLSL::g_GLSLShaderLoc.string() + Paths::g_pathSeparator).c_str(), ".glsl" );
1658
1659 _usedAtomIDs.clear();
1660
// Bucket the modules by source file and fold the program-wide defines into each stage.
1661 for ( const ShaderModuleDescriptor& shaderDescriptor : _descriptor._modules )
1662 {
1663 const U64 fileHash = _ID( shaderDescriptor._sourceFile.data() );
1664 fileData[fileHash]._modules.push_back( shaderDescriptor );
1665 ShaderModuleDescriptor& newDescriptor = fileData[fileHash]._modules.back();
1666 newDescriptor._defines.insert( end( newDescriptor._defines ), begin( _descriptor._globalDefines ), end( _descriptor._globalDefines ) );
1667 _usedAtomIDs.insert( _ID( shaderDescriptor._sourceFile.c_str() ) );
1668 }
1669
1670 U8 blockOffset = 0u;
1671
1672 Reflection::UniformsSet previousUniforms;
1673
1674 _uniformBlockBuffers.clear();
1675 _setUsage.fill( false );
1676
1677 for ( auto& [fileHash, loadDataPerFile] : fileData )
1678 {
1679 for ( const ShaderModuleDescriptor& data : loadDataPerFile._modules )
1680 {
1681 const ShaderType type = data._moduleType;
1682 assert( type != ShaderType::COUNT );
1683
// Each stage slot must be written at most once per file.
1684 ShaderProgram::LoadData& stageData = loadDataPerFile._loadData[to_base( data._moduleType )];
1685 assert( stageData._type == ShaderType::COUNT );
1686
// Build "<file>.<stageName>[.<variant>]" as the GLSW effect key ...
1687 stageData._type = data._moduleType;
1688 stageData._sourceFile = data._sourceFile.c_str();
1689 stageData._sourceName = data._sourceFile.substr( 0, data._sourceFile.find_first_of( "." ) ).c_str();
1690 stageData._sourceName.append( "." );
1691 stageData._sourceName.append( Names::shaderTypes[to_U8( type )] );
1692 if ( !data._variant.empty() )
1693 {
1694 stageData._sourceName.append( ("." + data._variant).c_str() );
1695 }
// ... and "<sourceName>[.<definesHash>].<stageExt>" as the unique cache/shader name.
1696 stageData._definesHash = DefinesHash( data._defines );
1697 stageData._shaderName.append(stageData._sourceName );
1698 if ( stageData._definesHash != 0u )
1699 {
1700 stageData._shaderName.append( ("." + Util::to_string(stageData._definesHash)).c_str());
1701 }
1702 stageData._shaderName.append( ("." + shaderAtomExtensionName[to_U8(type)]).c_str() );
1703
1704 if ( !loadSourceCode( data._defines, overwrite, stageData, previousUniforms, blockOffset ) )
1705 {
1706 Console::errorfn(LOCALE_STR("ERROR_SHADER_LOAD_SOURCE_CODE_FAILED"), stageData._shaderName.c_str(), overwrite ? "TRUE" : "FALSE");
1707 return false;
1708 }
1709
// Program name is the '-'-joined list of its stage shader names.
1710 if ( !loadDataPerFile._programName.empty() )
1711 {
1712 loadDataPerFile._programName.append( "-" );
1713 }
1714 loadDataPerFile._programName.append( stageData._shaderName.c_str() );
1715 }
1716
1717 initUniformUploader( loadDataPerFile );
1718 initDrawDescriptorSetLayout( loadDataPerFile );
1719 }
1720
1721 return true;
1722 }
1723
1725 {
1726 const ShaderLoadData& programLoadData = loadData._loadData;
1727
1728 const auto SetVisibility = []( BindingsPerSet& binding, const Reflection::DataEntry& entry)
1729 {
1731 {
1733 }
1734
1735 binding._visibility |= entry._stageVisibility;
1736 };
1737
1738 for ( const LoadData& stageData : programLoadData )
1739 {
1740 const Reflection::Data& data = stageData._reflectionData;
1741 if ( stageData._type == ShaderType::FRAGMENT )
1742 {
1743 fragmentOutputs(data._fragmentOutputs);
1744 }
1745
1746 for ( const Reflection::ImageEntry& image : data._images )
1747 {
1748 _setUsage[image._bindingSet] = true;
1750 {
1751 continue;
1752 }
1753
1754 BindingsPerSet& binding = _perDrawDescriptorSetLayout[image._bindingSlot];
1755 SetVisibility( binding, image );
1756
1757 if ( image._combinedImageSampler )
1758 {
1761 }
1762 else
1763 {
1766 }
1767 }
1768
1769 for ( const Reflection::BufferEntry& buffer : data._buffers )
1770 {
1771 _setUsage[buffer._bindingSet] = true;
1772
1774 {
1775 continue;
1776 }
1777
1778 BindingsPerSet& binding = _perDrawDescriptorSetLayout[buffer._bindingSlot];
1779 SetVisibility( binding, buffer );
1780
1781 if ( buffer._uniformBuffer )
1782 {
1785 }
1786 else
1787 {
1790 }
1791 }
1792 }
1793 }
1794
1796 {
1797 const ShaderLoadData& programLoadData = loadData._loadData;
1798
1799 for ( const LoadData& stageData : programLoadData )
1800 {
1801 if ( stageData._type == ShaderType::COUNT )
1802 {
1803 continue;
1804 }
1805
1806 const Reflection::BufferEntry* uniformBlock = Reflection::FindUniformBlock( stageData._reflectionData );
1807
1808 if ( uniformBlock != nullptr )
1809 {
1810 bool found = false;
1812 {
1813 const Reflection::BufferEntry& uploaderBlock = block.uniformBlock();
1814 if ( uploaderBlock._bindingSet != stageData._reflectionData._uniformBlockBindingSet ||
1815 uploaderBlock._bindingSlot != stageData._reflectionData._uniformBlockBindingIndex )
1816 {
1817 continue;
1818 }
1819
1820 block.toggleStageVisibility( uniformBlock->_stageVisibility, true );
1821 found = true;
1822 break;
1823 }
1824
1825 if ( !found )
1826 {
1827 _uniformBlockBuffers.emplace_back( _context, loadData._programName.c_str(), *uniformBlock, uniformBlock->_stageVisibility );
1828 }
1829 }
1830 }
1831 }
1832
1834 {
1836
1837 bool ret = false;
1838 for ( auto& blockBuffer : _uniformBlockBuffers )
1839 {
1840 blockBuffer.uploadUniformData( data );
1841
1842 if ( blockBuffer.commit( set, memCmdInOut ) )
1843 {
1844 s_usedShaderPrograms.emplace_back(this );
1845 ret = true;
1846 }
1847 }
1848
1849 return ret;
1850 }
1851
// Produces GLSL and SPIR-V for one shader stage, preferring caches: try SPIR-V cache,
// fall back to GLSL cache, fall back to a full parse (loadAndParseGLSL), converting
// GLSL->SPIR-V when needed and (re)building reflection data last. Hot reloads
// (reloadExisting) bypass and delete all caches. Returns true when source code exists
// for the stage afterwards; successful loads merge the stage's atom IDs into _usedAtomIDs.
1852 bool ShaderProgram::loadSourceCode( const ModuleDefines& defines, bool reloadExisting, LoadData& loadDataInOut, Reflection::UniformsSet& previousUniformsInOut, U8& blockIndexInOut )
1853 {
1854 // Clear existing code
1855 loadDataInOut._sourceCodeGLSL.resize( 0 );
1856 loadDataInOut._sourceCodeSpirV.resize( 0 );
1857
1858 eastl::set<U64> atomIDs;
1859
// OpenGL always needs GLSL; Vulkan can work from cached SPIR-V alone.
1860 bool needGLSL = !s_targetVulkan;
1861 if ( reloadExisting )
1862 {
1863 // Hot reloading will always reparse GLSL source files so the best way to achieve that is to delete cache files
1864 needGLSL = true;
1865 if ( !DeleteCache( LoadData::ShaderCacheType::COUNT, loadDataInOut._shaderName ) )
1866 {
1867 // We should have cached the existing shader, so a failure here is NOT expected
// NOTE(review): source line 1868 is elided in this dump (presumably the unexpected-
// failure handler); confirm against the repository source.
1869 }
1870 }
1871
1872 // Load SPIRV code from cache (if needed)
1873 if ( reloadExisting || !useShaderCache() || !LoadFromCache(LoadData::ShaderCacheType::SPIRV, loadDataInOut, atomIDs) )
1874 {
1875 needGLSL = true;
1876 }
1877
1878 // We either have SPIRV code or we explicitly require GLSL code (e.g. for OpenGL)
1879 if ( needGLSL )
1880 {
1881 // Try and load GLSL code from cache (if needed)
1882 if ( reloadExisting || !useShaderCache() || !LoadFromCache( LoadData::ShaderCacheType::GLSL, loadDataInOut, atomIDs ) )
1883 {
1884 // That failed, so re-parse the code
1885 loadAndParseGLSL( defines, loadDataInOut, previousUniformsInOut, blockIndexInOut, atomIDs );
1886 if ( loadDataInOut._sourceCodeGLSL.empty() )
1887 {
1888 // That failed so we have no choice but to bail
1889 return false;
1890 }
1891 else
1892 {
1893 // That succeeded so save the new cache file for future use
1894 SaveToCache( LoadData::ShaderCacheType::GLSL, loadDataInOut, atomIDs );
1895 }
1896 }
1897
1898 // We MUST have GLSL code at this point so now we have too options.
1899 // We already have SPIRV code and can proceed or we failed loading SPIRV from cache so we must convert GLSL -> SPIRV
1900 if ( loadDataInOut._sourceCodeSpirV.empty() )
1901 {
1902 // We are in situation B: we need SPIRV code, so convert our GLSL code over
1903 DIVIDE_ASSERT( !loadDataInOut._sourceCodeGLSL.empty() );
1904 if ( !SpirvHelper::GLSLtoSPV( loadDataInOut._type, loadDataInOut._sourceCodeGLSL.c_str(), loadDataInOut._sourceCodeSpirV, s_targetVulkan ) )
1905 {
1906 Console::errorfn( LOCALE_STR( "ERROR_SHADER_CONVERSION_SPIRV_FAILED" ), loadDataInOut._shaderName.c_str() );
1907 // We may fail here for WHATEVER reason so bail
1908 if ( !DeleteCache( LoadData::ShaderCacheType::GLSL, loadDataInOut._shaderName ) )
1909 {
1910 NOP();
1911 }
1912 return false;
1913 }
1914 else
1915 {
1916 // We managed to generate good SPIRV so save it to the cache for future use
1917 SaveToCache( LoadData::ShaderCacheType::SPIRV, loadDataInOut, atomIDs );
1918 }
1919 }
1920 }
1921
1922 // Whatever the process to get here was, we need SPIRV to proceed
1923 DIVIDE_ASSERT( !loadDataInOut._sourceCodeSpirV.empty() );
1924 // Time to see if we have any cached reflection data, and, if not, build it
1925 if ( reloadExisting || !useShaderCache() || !LoadFromCache( LoadData::ShaderCacheType::REFLECTION, loadDataInOut, atomIDs ) )
1926 {
1927 // Well, we failed. Time to build our reflection data again
1928 if ( !SpirvHelper::BuildReflectionData( loadDataInOut._type, loadDataInOut._sourceCodeSpirV, s_targetVulkan, loadDataInOut._reflectionData ) )
1929 {
1930 Console::errorfn( LOCALE_STR( "ERROR_SHADER_REFLECTION_SPIRV_FAILED" ), loadDataInOut._shaderName.c_str() );
1931 return false;
1932 }
1933 // Save reflection data to cache for future use
1934 SaveToCache( LoadData::ShaderCacheType::REFLECTION, loadDataInOut, atomIDs );
1935 }
// NOTE(review): source line 1936 is elided in this dump — it is evidently the condition
// guarding the block-index update below (likely "reflection data has a uniform block");
// confirm against the repository source.
1937 {
1938 blockIndexInOut = loadDataInOut._reflectionData._uniformBlockBindingIndex - s_uboStartOffset;
1939 }
1940
1941 if ( !loadDataInOut._sourceCodeGLSL.empty() || !loadDataInOut._sourceCodeSpirV.empty() )
1942 {
1943 _usedAtomIDs.insert( begin( atomIDs ), end( atomIDs ) );
1944 return true;
1945 }
1946
1947 return false;
1948 }
1949
// ShaderProgram::loadAndParseGLSL — parameter-list continuation; the opening
// signature line (source line 1950, carrying the function name and the
// "const ModuleDefines& defines" parameter per the class declaration) was
// lost in this extraction.
// Pipeline: fetch the stage source via GLSW -> inject engine defines ->
// expand #include atoms -> run the external preprocessor -> harvest stage
// uniforms into a generated uniform block -> splice push-constant support
// text into the source.
// NOTE(review): the leading "19xx/20xx" numbers on each line are artifacts
// of the documentation extraction, not part of the compiled source.
1951 LoadData& loadDataInOut,
1952 Reflection::UniformsSet& previousUniformsInOut,
1953 U8& blockIndexInOut,
1954 eastl::set<U64>& atomIDsInOut )
1955 {
// The GLSL text is built in place inside the per-stage load data.
1956 auto& glslCodeOut = loadDataInOut._sourceCodeGLSL;
1957 glslCodeOut.resize( 0 );
1958
1959 // Use GLSW to read the appropriate part of the effect file
1960 // based on the specified stage and properties
1961 const char* sourceCodeStr = glswGetShader( loadDataInOut._sourceName.c_str() );
1962 if ( sourceCodeStr != nullptr )
1963 {
1964 glslCodeOut.append( sourceCodeStr );
1965 }
1966
1967 // GLSW may fail for various reasons (not a valid effect stage, invalid name, etc)
1968 if ( !glslCodeOut.empty() )
1969 {
1970
// Pass 1: emit the real define lines. "#define " dressing is prepended only
// when the entry asks for it; placeholder entries are skipped entirely.
1971 string header;
1972 for ( const auto& [defineString, appendPrefix] : defines )
1973 {
1974 // Placeholders are ignored
1975 if ( defineString == "DEFINE_PLACEHOLDER" )
1976 {
1977 continue;
1978 }
1979
1980 // We manually add define dressing if needed
1981 header.append( (appendPrefix ? "#define " : "") + defineString + '\n' );
1982 }
1983
// Pass 2: mirror each prefixed define as an /*Engine define*/ comment so the
// final (post-preprocess) source still documents what was set.
1984 for ( const auto& [defineString, appendPrefix] : defines )
1985 {
1986 // Placeholders are ignored
1987 if ( !appendPrefix || defineString == "DEFINE_PLACEHOLDER" )
1988 {
1989 continue;
1990 }
1991
1992 // We also add a comment so that we can check what defines we have set because
1993 // the shader preprocessor strips defines before sending the code to the GPU
1994 header.append( "/*Engine define: [ " + defineString + " ]*/\n" );
1995 }
1996 // And replace in place with our program's headers created earlier
1997 Util::ReplaceStringInPlace( glslCodeOut, "_CUSTOM_DEFINES__", header );
1998
// Resolve #include atoms (recording their hashes into atomIDsInOut), then
// macro-expand the whole source.
1999 PreprocessIncludes( resourceName(), glslCodeOut, 0, atomIDsInOut, true );
2000
2001 if (!Preprocessor::PreProcessMacros( loadDataInOut._shaderName, glslCodeOut ))
2002 {
// Preprocessing failure is deliberately tolerated: keep the source as-is.
2003 NOP();
2004 }
2005
// Strip loose uniform declarations out of the source and collect them.
2006 Reflection::PreProcessUniforms( glslCodeOut, loadDataInOut._uniforms );
2007 }
2008
2009 if ( !loadDataInOut._uniforms.empty() )
2010 {
// A new uniform block (and binding slot) is only needed when this stage's
// uniform set differs from the previous stage's; identical sets share one.
2011 if ( !previousUniformsInOut.empty() && previousUniformsInOut != loadDataInOut._uniforms )
2012 {
2013 ++blockIndexInOut;
2014
2015 DIVIDE_ASSERT(blockIndexInOut < 2, "ShaderProgram::load: We only support 2 uniform blocks per shader program at the moment. Batch uniforms from different stages together to reduce usage!");
2016 }
2017
2019 loadDataInOut._reflectionData._uniformBlockBindingIndex = s_uboStartOffset + blockIndexInOut;
2020
// Generate the UBO declaration text. "{}" are format placeholders and
// "{{" / "}}" are escaped literal braces: the finished string is itself used
// as a format string below (line 2047) to fill in binding index and name.
2021 string& uniformBlock = loadDataInOut._uniformBlock;
2022 uniformBlock = "layout( ";
2023 if ( _context.renderAPI() == RenderAPI::Vulkan )
2024 {
2025 uniformBlock.append( Util::StringFormat( "set = {}, ", to_base( DescriptorSetUsage::PER_DRAW ) ) );
2026 }
2027 uniformBlock.append( "binding = {}, std140 ) uniform {} {{" );
2028
2029 for ( const Reflection::UniformDeclaration& uniform : loadDataInOut._uniforms )
2030 {
2031 uniformBlock.append( Util::StringFormat( "\n {} {};", uniform._type.c_str(), uniform._name.c_str() ) );
2032 }
2033 uniformBlock.append( "\n}} ");
2034 uniformBlock.append(Util::StringFormat("{};", UNIFORM_BLOCK_NAME));
2035
// Re-route every uniform name through the block instance via #define so
// shader code can keep using the bare names (array suffix stripped first).
2036 for ( const Reflection::UniformDeclaration& uniform : loadDataInOut._uniforms )
2037 {
2038 const string rawName = uniform._name.substr( 0, uniform._name.find_first_of( "[" ) ).c_str();
2039 uniformBlock.append( Util::StringFormat( "\n#define {} {}.{}", rawName.c_str(), UNIFORM_BLOCK_NAME, rawName.c_str() ) );
2040 }
2041
// NOTE(review): source lines 2043-2045 (the branches of this ternary that
// pick the layout/binding index per render API) were lost in the extraction
// — confirm against the real file before editing this region.
2042 const U8 layoutIndex = _context.renderAPI() == RenderAPI::Vulkan
2046
2047 Util::StringFormat( uniformBlock, uniformBlock.c_str(), layoutIndex, Util::StringFormat( "dvd_UniformBlock_{}", blockIndexInOut ) );
2048
2049 previousUniformsInOut = loadDataInOut._uniforms;
2050 }
2051
// Push-constant emulation: Vulkan gets a real push_constant block; GL maps
// the same PushData0/PushData1 names onto a fixed-location mat4 pair.
2052 string pushConstantCodeBlock{};
2053 if ( _context.renderAPI() == RenderAPI::Vulkan )
2054 {
2055 pushConstantCodeBlock =
2056 "layout( push_constant ) uniform constants\n"
2057 "{\n"
2058 " mat4 data0;\n"
2059 " mat4 data1;\n"
2060 "} PushConstants;\n"
2061 "#define PushData0 PushConstants.data0\n"
2062 "#define PushData1 PushConstants.data1";
2063 }
2064 else
2065 {
2066 pushConstantCodeBlock =
2067 "layout(location = 18) uniform mat4 PushConstantData[2];\n"
2068 "#define PushData0 PushConstantData[0]\n"
2069 "#define PushData1 PushConstantData[1]";
2070 }
2071
// Splice the generated snippets into the well-known marker comments left in
// the effect source by the shader author.
2072 Util::ReplaceStringInPlace( loadDataInOut._sourceCodeGLSL, "//_PROGRAM_NAME_\\", Util::StringFormat("/*[ {} ]*/", loadDataInOut._shaderName.c_str()));
2073 Util::ReplaceStringInPlace( loadDataInOut._sourceCodeGLSL, "//_CUSTOM_UNIFORMS_\\", loadDataInOut._uniformBlock );
2074 Util::ReplaceStringInPlace( loadDataInOut._sourceCodeGLSL, "//_PUSH_CONSTANTS_DEFINE_\\", pushConstantCodeBlock );
2075 }
2076
2077 void ShaderProgram::EraseAtom( const U64 atomHash )
2078 {
2079 // Clear the atom from the cache
2080 LockGuard<Mutex> w_lock( s_atomLock );
2081 EraseAtomLocked(atomHash);
2082 }
2083
// ShaderProgram::EraseAtomLocked — body only; the signature line (source
// line 2084, "void ShaderProgram::EraseAtomLocked( const U64 atomHash )"
// per the class declaration) was lost in this extraction.
// Removes one atom from the source cache and the include map, then
// recursively evicts every atom whose include set referenced it, so all
// dependents are re-read from disk on next use.
// Precondition: caller holds s_atomLock (see EraseAtom / OnAtomChange).
2085 {
// Dependents cannot be erased while iterating the include map, so collect
// them here and recurse after the loop.
2086 eastl::fixed_vector<U64, 128, true> queuedDeletion;
2087
2088 s_atoms.erase( atomHash );
2089
2090 for ( auto it = s_atomIncludes.cbegin(); it != s_atomIncludes.cend(); )
2091 {
// Drop the erased atom's own include entry (erase returns the next node).
2092 if ( it->first == atomHash)
2093 {
2094 it = s_atomIncludes.erase( it );
2095 continue;
2096 }
2097
2098 if ( it->second.find( atomHash ) != it->second.cend() )
2099 {
2100 // Remove all atoms that included our target atom as well
2101 queuedDeletion.push_back( it->first );
2102 }
2103 ++it;
2104 }
2105
// Recursive eviction of every queued dependent.
2106 for (const U64 atom : queuedDeletion )
2107 {
2108 EraseAtomLocked(atom);
2109 }
2110 }
2111
// File-watcher callback: invalidates a cached shader atom when its source
// file changes on disk and queues every shader program that used that atom
// for recompilation.
// @param atomName file name of the changed atom (hashed via _ID below)
// @param evt      the file-system event (ADD / MODIFY / DELETE)
2112 void ShaderProgram::OnAtomChange( const std::string_view atomName, const FileUpdateEvent evt )
2113 {
// NOTE(review): source line 2114 was lost in this extraction; it presumably
// acquired s_atomLock, which EraseAtomLocked() below requires — confirm
// against the real file.
2115
2116 // Do nothing if the specified file is "deleted". We do not want to break running programs
2117 // ADD and MODIFY events should get processed as usual
2118 if ( evt == FileUpdateEvent::DELETE )
2119 {
2120 return;
2121 }
2122
// atomName is a view, so materialise a NUL-terminated copy before hashing.
2123 const U64 atomNameHash = _ID( string{ atomName }.c_str() );
2124 EraseAtomLocked(atomNameHash);
2125
2126 //Get list of shader programs that use the atom and rebuild all shaders in list;
// NOTE(review): source line 2127 was lost in this extraction; it presumably
// locked s_programLock before iterating s_shaderPrograms — confirm.
2128 for ( ShaderProgram* program : s_shaderPrograms )
2129 {
2130 DIVIDE_ASSERT( program != nullptr );
2131
2132 for ( const U64 atomID : program->_usedAtomIDs )
2133 {
2134 if ( atomID == atomNameHash )
2135 {
// Queue each affected program once, then stop scanning its atoms.
2136 s_recompileQueue.push( ShaderQueueEntry{ ._program = program } );
2137 break;
2138 }
2139 }
2140 }
2141 }
2142
2143};
#define LOCALE_STR(X)
Definition: Localization.h:91
#define DIVIDE_ASSERT(...)
#define NO_DESTROY
#define DIVIDE_UNEXPECTED_CALL()
#define NOP()
#define FORCE_INLINE
#define PROFILE_SCOPE_AUTO(CATEGORY)
Definition: Profiler.h:87
virtual bool postLoad()
Definition: Resource.cpp:60
virtual bool load(PlatformContext &context)
Loading and unloading interface.
Definition: Resource.cpp:55
static void deallocateWatcher(I64 fileWatcherGUID)
static FileWatcher & allocateWatcher()
Rough around the edges Adapter pattern abstracting the actual rendering API and access to the GPU.
Definition: GFXDevice.h:215
static U64 FrameCount() noexcept
Definition: GFXDevice.h:340
PerformanceMetrics & getPerformanceMetrics() noexcept
Definition: GFXDevice.inl:194
Utility class that adds basic GUID management to objects.
Definition: GUIDWrapper.h:44
FORCE_INLINE I64 getGUID() const noexcept
Definition: GUIDWrapper.h:51
GFXDevice & context() const noexcept
static constexpr F32 MAX_SHININESS
Definition: Material.h:129
PlatformContext & context() noexcept
Configuration & config() noexcept
ResourceState getState() const noexcept
Definition: Resource.cpp:17
virtual ~ShaderModule() override
void deregisterParent(ShaderProgram *parent)
static constexpr U32 MAX_FRAME_LIFETIME
Definition: ShaderProgram.h:78
static void Idle(bool fast)
eastl::fixed_vector< ShaderProgram *, 4, true > _parents
static void DestroyStaticData()
static ShaderModule * GetShader(const std::string_view name)
Returns a reference to an already loaded shader, null otherwise.
static void InitStaticData()
static ShaderModule * GetShaderLocked(const std::string_view name)
hashMap< U64, ShaderModule_uptr > ShaderMap
Definition: ShaderProgram.h:75
ShaderModule(GFXDevice &context, const std::string_view name, U32 generation)
void registerParent(ShaderProgram *parent)
static std::atomic_bool s_modulesRemoved
Shader cache.
static SharedMutex s_shaderNameLock
static ShaderMap s_shaderNameMap
static const string & ShaderFileReadLocked(const ResourcePath &filePath, std::string_view atomName, bool recurse, eastl::set< U64 > &foundAtomIDsInOut, bool &wasParsed)
Open the file found at 'filePath' matching 'atomName' and return it's source code.
static void OnEndFrame(GFXDevice &gfx)
std::array< BindingsPerSetArray, to_base(DescriptorSetUsage::COUNT)> BindingSetData
virtual ShaderResult validatePreBind(bool rebind=true)
bool loadSourceCode(const ModuleDefines &defines, bool reloadExisting, LoadData &loadDataInOut, Reflection::UniformsSet &previousUniformsInOut, U8 &blockIndexInOut)
static void EraseAtom(const U64 atomHash)
void initDrawDescriptorSetLayout(const PerFileShaderData &loadData)
static U64 shaderAtomExtensionHash[to_base(ShaderType::COUNT)+1]
void loadAndParseGLSL(const ModuleDefines &defines, LoadData &loadDataInOut, Reflection::UniformsSet &previousUniformsInOut, U8 &blockIndexInOut, eastl::set< U64 > &atomIDsInOut)
static ResourcePath shaderAtomLocationPrefix[to_base(ShaderType::COUNT)+1]
static std::pair< DescriptorSetUsage, U8 > GetDescriptorSlotForGLBinding(U8 binding, DescriptorSetBindingType type) noexcept
static SharedMutex s_programLock
static Str< 8 > shaderAtomExtensionName[to_base(ShaderType::COUNT)+1]
static void OnBeginFrame(GFXDevice &gfx)
static void DestroyStaticData()
static void RebuildAllShaders()
static const string & ShaderFileRead(const ResourcePath &filePath, std::string_view atomName, bool recurse, eastl::set< U64 > &foundAtomIDsInOut, bool &wasParsed)
static void RegisterShaderProgram(ShaderProgram *shaderProgram)
Add a shaderProgram to the program cache.
static U8 GetGLBindingForDescriptorSlot(DescriptorSetUsage usage, U8 slot) noexcept
static std::atomic_int s_shaderCount
static bool UnregisterShaderProgram(ShaderProgram *shaderProgram)
Remove a shaderProgram from the program cache.
virtual bool loadInternal(hashMap< U64, PerFileShaderData > &fileData, bool overwrite)
std::array< BindingsPerSet, MAX_BINDINGS_PER_DESCRIPTOR_SET > BindingsPerSetArray
bool uploadUniformData(const UniformData &data, DescriptorSet &set, GFX::MemoryBarrierCommand &memCmdInOut)
bool load(PlatformContext &context) override
Loading and unloading interface.
static ShaderQueue s_recompileQueue
static Mutex s_atomLock
Shaders loaded from files are kept as atoms.
eastl::stack< ShaderQueueEntry, vector< ShaderQueueEntry > > ShaderQueue
static constexpr const char * UNIFORM_BLOCK_NAME
ShaderProgram(PlatformContext &context, const ResourceDescriptor< ShaderProgram > &descriptor)
static void PreprocessIncludes(std::string_view name, string &sourceInOut, I32 level, eastl::set< U64 > &foundAtomIDsInOut, bool lock)
static ShaderProgramMap s_shaderPrograms
Shader program cache.
static AtomMap s_atoms
static LastRequestedShader s_lastRequestedShaderProgram
std::array< LoadData, to_base(ShaderType::COUNT)> ShaderLoadData
static eastl::fixed_vector< ShaderProgram *, U16_MAX, false > s_usedShaderPrograms
static ShaderQueue s_recompileFailedQueue
static void RegisterSetLayoutBinding(DescriptorSetUsage usage, U8 slot, DescriptorSetBindingType type, ShaderStageVisibility visibility)
static vector< ResourcePath > GetAllAtomLocations()
static AtomInclusionMap s_atomIncludes
static BindingSetData & GetBindingSetData() noexcept
const ShaderProgramDescriptor _descriptor
static void OnAtomChange(std::string_view atomName, FileUpdateEvent evt)
static void Idle(PlatformContext &platformContext, bool fast)
static ErrorCode OnStartup(PlatformContext &context)
static void EraseAtomLocked(const U64 atomHash)
static bool LoadFromCache(LoadData::ShaderCacheType cache, LoadData &dataInOut, eastl::set< U64 > &atomIDsOut)
static U32 GetBindingCount(DescriptorSetUsage usage, DescriptorSetBindingType type)
hashMap< U64, string > AtomMap
static BindingSetData s_bindingsPerSet
static void InitStaticData()
hashMap< U64, eastl::set< U64 > > AtomInclusionMap
static Mutex g_cacheLock
eastl::fixed_vector< ShaderProgram *, U16_MAX, true > ShaderProgramMap
void initUniformUploader(const PerFileShaderData &loadData)
static I64 s_shaderFileWatcherID
bool postLoad() override
static bool RecompileShaderProgram(const std::string_view name)
Queue a shaderProgram recompile request.
static ErrorCode SubmitSetLayouts(GFXDevice &gfx)
vector< UniformBlockUploader > _uniformBlockBuffers
static void OnThreadCreated(const GFXDevice &gfx, const std::thread::id &threadID, bool isMainRenderThread)
static bool SaveToCache(LoadData::ShaderCacheType cache, const LoadData &dataIn, const eastl::set< U64 > &atomIDsIn)
eastl::set< U64 > _usedAtomIDs
bool unload() override
static constexpr U8 WORLD_AO_LAYER_INDEX
Definition: ShadowMap.h:91
size_t totalBufferSize() const noexcept
void addIgnoredEndCharacter(char character)
void addIgnoredExtension(const char *extension)
int glswInit()
Definition: glsw.c:98
int glswSetPath(const char *pathPrefix, const char *pathSuffix)
Definition: glsw.c:133
const char * glswGetShader(const char *pEffectKey)
Definition: glsw.c:169
int glswAddDirectiveToken(const char *token, const char *directive)
Definition: glsw.c:403
glswContext * glswGetCurrentContext()
Definition: glsw.c:83
void glswClearCurrentContext()
Definition: glsw.c:93
int glswShutdown()
Definition: glsw.c:112
constexpr bool IS_SHIPPING_BUILD
Definition: config.h:60
constexpr bool IS_DEBUG_BUILD
Definition: config.h:55
constexpr bool IS_PROFILE_BUILD
Definition: config.h:56
constexpr U8 CLUSTERS_X
Controls compute shader dispatch. e.g. Dispatch Z count = CLUSTERS_Z / CLUSTERS_Z_THREADS.
Definition: config.h:170
constexpr U8 MAX_SHADOW_CASTING_POINT_LIGHTS
Definition: config.h:152
constexpr U8 MAX_SHADOW_CASTING_SPOT_LIGHTS
Definition: config.h:153
constexpr U16 MAX_ACTIVE_LIGHTS_PER_FRAME
Maximum number of lights we process per frame. We need this upper bound for pre-allocating arrays and...
Definition: config.h:162
constexpr U8 MAX_SHADOW_CASTING_LIGHTS
Maximum number of shadow casting lights processed per frame.
Definition: config.h:156
constexpr U8 MAX_CSM_SPLITS_PER_LIGHT
Used for CSM or PSSM to determine the maximum number of frustum splits.
Definition: config.h:159
constexpr U8 MAX_SHADOW_CASTING_DIRECTIONAL_LIGHTS
How many lights (in order as passed to the shader for the node) should cast shadows.
Definition: config.h:151
constexpr U16 TARGET_FRAME_RATE
Application desired framerate for physics and input simulations.
Definition: config.h:97
constexpr U16 MAX_CONCURRENT_MATERIALS
Estimated maximum number of materials used in a single frame by all passes combined.
Definition: config.h:123
constexpr U8 MAX_CULL_DISTANCES
Definition: config.h:131
constexpr U16 MAX_BONE_COUNT_PER_NODE
Maximum number of bones available per node.
Definition: config.h:117
constexpr float ALPHA_DISCARD_THRESHOLD
Definition: config.h:91
constexpr U16 MAX_VISIBLE_NODES
Estimated maximum number of visible objects per render pass (this includes debug primitives)
Definition: config.h:120
constexpr U8 MAX_CLIP_DISTANCES
Definition: config.h:127
static constexpr const char * shaderTypes[]
static constexpr const char * descriptorSetUsage[]
static void Error(void *userData, const char *format, va_list args)
static char * Input(char *buffer, const int size, void *userData) noexcept
static char * Scratch(const std::string_view fileName)
FORCE_INLINE void Output(const int ch, void *userData)
FORCE_INLINE void AddDependency(const char *file, void *userData)
NO_DESTROY static thread_local WorkData g_workData
static void OnThreadCreated()
constexpr U8 g_maxTagCount
NO_DESTROY static thread_local fppTag g_tags[g_maxTagCount]
static bool PreProcessMacros(const std::string_view fileName, string &sourceInOut)
NO_DESTROY static thread_local fppTag * g_tagHead
constexpr Optick::Category::Type Streaming
Definition: Profiler.h:65
constexpr Optick::Category::Type Graphics
Definition: Profiler.h:60
static constexpr U8 INVALID_BINDING_INDEX
void PreProcessUniforms(string &sourceInOut, UniformsSet &foundUniforms)
eastl::set< UniformDeclaration, UniformCompare > UniformsSet
bool SaveReflectionData(const ResourcePath &path, const ResourcePath &file, const Data &reflectionDataIn, const eastl::set< U64 > &atomIDsIn)
const Reflection::BufferEntry * FindUniformBlock(const Reflection::Data &data)
bool LoadReflectionData(const ResourcePath &path, const ResourcePath &file, Data &reflectionDataOut, eastl::set< U64 > &atomIDsOut)
DescriptorSetUsage StringToDescriptorSetUsage(const string &name)
const char * DescriptorSetUsageToString(DescriptorSetUsage setUsage) noexcept
const char * ShadingModeToString(ShadingMode shadingMode) noexcept
Definition: Material.cpp:82
const char * MaterialDebugFlagToString(const MaterialDebugFlag unitType) noexcept
Definition: Material.cpp:28
const char * TextureOperationToString(TextureOperation textureOp) noexcept
Definition: Material.cpp:100
bool GetLine(istringstream &input, T_str &line, char delimiter='\n')
Str StringFormat(const char *fmt, Args &&...args)
string to_string(GET_PASS_TYPE< T > value)
bool BeginsWith(std::string_view input, std::string_view compare, bool ignoreWhitespace)
T_str & Trim(T_str &s)
bool ReplaceStringInPlace(T_str &subject, std::span< const std::string_view > search, std::string_view replace, bool recursive=false)
T_str GetTrailingCharacters(const T_str &input, size_t count)
bool DeleteCache(const ShaderProgram::LoadData::ShaderCacheType type, const Str< 256 > &fileName)
ResourcePath ReflTargetName(const Str< 256 > &fileName)
ResourcePath SpvTargetName(const Str< 256 > &fileName)
bool ValidateCacheLocked(const ShaderProgram::LoadData::ShaderCacheType type, const Str< 256 > &sourceFileName, const Str< 256 > &fileName)
bool DeleteCacheLocked(const ShaderProgram::LoadData::ShaderCacheType type, const Str< 256 > &fileName)
U64 s_newestShaderAtomWriteTime
Used to detect modified shader atoms to validate/invalidate shader cache.
Handle console commands that start with a forward slash.
Definition: AIProcessor.cpp:7
std::lock_guard< mutex > LockGuard
Definition: SharedMutex.h:55
constexpr U32 nextPOW2(U32 n) noexcept
Definition: MathHelper.inl:207
constexpr U32 to_U32(const T value)
FileError writeFile(const ResourcePath &filePath, const std::string_view fileName, const char *content, const size_t length, const FileType fileType)
std::mutex Mutex
Definition: SharedMutex.h:40
FileError readFile(const ResourcePath &filePath, std::string_view fileName, FileType fileType, std::ifstream &streamOut)
static const vec3< F32 > WORLD_X_AXIS
Definition: MathVectors.h:1439
constexpr U16 to_U16(const T value)
int32_t I32
MaterialDebugFlag
Definition: MaterialEnums.h:37
uint8_t U8
constexpr U8 MAX_BINDINGS_PER_DESCRIPTOR_SET
@ RES_LOADED
The resource is available for usage.
static const vec3< F32 > WORLD_Z_AXIS
Definition: MathVectors.h:1441
static bool InitGLSW(const RenderAPI renderingAPI, const Configuration &config)
vector< ModuleDefine > ModuleDefines
U32 SpvWord
Definition: ShaderProgram.h:60
std::shared_mutex SharedMutex
Definition: SharedMutex.h:43
size_t DefinesHash(const ModuleDefines &defines) noexcept
TextureOperation
How should each texture be added.
constexpr I8 s_maxHeaderRecursionLevel
FileError createDirectory(const ResourcePath &path)
eastl::vector< Type > vector
Definition: Vector.h:42
hashAlg::unordered_map< K, V, HashFun, Predicate > hashMap
Definition: HashMap.h:55
DescriptorSetBindingType
std::shared_lock< mutex > SharedLock
Definition: SharedMutex.h:49
bool getAllFilesInDirectory(const ResourcePath &filePath, FileList &listInOut, const char *extensionNoDot)
Project & parent
Definition: DefaultScene.h:41
uint16_t U16
constexpr U64 _ID(const char *const str, const U64 value=val_64_const) noexcept
constexpr U16 GLOBAL_PROBE_COUNT
std::basic_istringstream< char, std::char_traits< char >, dvd_allocator< char > > istringstream
Definition: STLString.h:47
@ Vulkan
not supported yet
constexpr U8 U8_MAX
void efficient_clear(eastl::fixed_vector< T, nodeCount, bEnableOverflow, OverflowAllocator > &fixed_vector)
Definition: Vector.h:52
constexpr U8 to_U8(const T value)
::value constexpr T CLAMPED(T n, T min, T max) noexcept
Definition: MathHelper.inl:126
ShaderStageVisibility
constexpr size_t to_size(const T value)
ShaderType
Available shader stages.
std::basic_string< char, std::char_traits< char >, dvd_allocator< char > > string
Definition: STLString.h:41
FileError deleteFile(const ResourcePath &filePath, const std::string_view fileName)
constexpr U8 GLOBAL_WATER_BODIES_COUNT
DescriptorSetUsage
static NO_DESTROY UpdateListener g_sFileWatcherListener([](const std::string_view atomName, const FileUpdateEvent evt) { ShaderProgram::OnAtomChange(atomName, evt);})
bool dvd_erase_if(eastl::vector< T, A > &vec, Predicate &&pred)
Definition: Vector.h:109
int64_t I64
uint32_t U32
Project const SceneEntry & entry
Definition: DefaultScene.h:41
void * bufferPtr
uint64_t U64
FileError fileLastWriteTime(const ResourcePath &filePathAndName, U64 &timeOutSec)
static const vec3< F32 > WORLD_Y_AXIS
Definition: MathVectors.h:1440
constexpr auto to_base(const Type value) -> Type
struct Divide::Configuration::Debug::Cache cache
struct Divide::Configuration::Rendering rendering
struct Divide::Configuration::Debug debug
static NO_INLINE void d_printfn(const char *format, T &&... args)
static NO_INLINE void errorfn(const char *format, T &&... args)
static NO_INLINE void warnfn(const char *format, T &&... args)
static constexpr RTColourAttachmentSlot ALBEDO
Definition: GFXDevice.h:228
static constexpr RTColourAttachmentSlot NORMALS
Definition: GFXDevice.h:230
static constexpr RTColourAttachmentSlot MODULATE
Definition: GFXDevice.h:231
static constexpr RTColourAttachmentSlot VELOCITY
Definition: GFXDevice.h:229
static constexpr RTColourAttachmentSlot ACCUMULATION
Definition: GFXDevice.h:232
static constexpr RTColourAttachmentSlot REVEALAGE
Definition: GFXDevice.h:233
ShaderProgram::ShaderLoadData _loadData
size_t _uniformBufferVRAMUsage
Total VRAM used for shader uniform storage across all used shader programs.
std::array< char, 16<< 10 > _scratch
vector< ShaderModuleDescriptor > _modules
bool _uniformBuffer
U16 _stageVisibility
U8 _bindingSlot
U8 _bindingSet
vector< BufferEntry > _buffers
vector< ImageEntry > _images
std::array< bool, to_base(RTColourAttachmentSlot::COUNT)> _fragmentOutputs
bool _combinedImageSampler
StringReturnType< N > string() const noexcept
Definition: ResourcePath.h:64
bool empty() const noexcept
BaseType< ShaderStageVisibility > _visibility
DescriptorSetBindingType _type
Reflection::UniformsSet _uniforms
std::vector< SpvWord > _sourceCodeSpirV
ShaderProgram * _program
static void Init()
Definition: GLSLToSPIRV.cpp:35
static bool BuildReflectionData(Divide::ShaderType shader_type, const std::vector< unsigned int > &spirv, bool targetVulkan, Divide::Reflection::Data &reflectionDataInOut)
static bool GLSLtoSPV(Divide::ShaderType shader_type, const char *pshader, std::vector< unsigned int > &spirv, const bool targetVulkan)
static void Finalize()
Definition: GLSLToSPIRV.cpp:43