I followed the learnopengl.com IBL tutorial but implemented it on DirectX11, so every shader was converted from GLSL to HLSL. As a result, I get artifacts with some HDRI images, and since I use bloom, the highlights also appear too bright. For cube textures I use the R16G16B16A16_FLOAT format and R16G16_FLOAT for the BRDF texture.
Here are my IBL shaders:
RectToCube:
// RectToCube: projects an equirectangular (lat/long) HDR image onto the
// faces of a cubemap. One draw per cube face; the cube's object-space
// position doubles as the sampling direction.
#pragma pack_matrix( row_major )

cbuffer ObjectBuffer : register(b0)
{
    matrix modelMat;
    matrix viewMat;
    matrix projMat;
    matrix normalMat;
    matrix viewProjMatInv;
};

struct VertexInput
{
    float3 position : POSITION;
    float2 uv : TEXCOORD0;
    float3 normal : NORMAL;
    int4 boneIds : BONEIDS;
    float4 boneWeights : BONEWEIGHTS;
};

struct PixelInput
{
    float4 position : SV_POSITION;
    float3 localPos : TEXCOORD0;
};

// NOTE(review): state blocks inside a SamplerState declaration are only
// honored by the Effects (FX) framework. In plain D3D11 this is just a
// sampler slot; the actual filter/address modes come from the sampler the
// app binds -- confirm a linear-clamp sampler is bound on the CPU side.
SamplerState textureSampler
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Clamp;
    AddressV = Clamp;
};

Texture2D<float4> HDRMap : register(t0);

PixelInput VSMain(VertexInput input)
{
    PixelInput output;

    // Object-space position is the direction we sample the HDR map with.
    output.localPos = input.position.xyz;

    // Strip the translation row so the cube stays centered on the camera.
    float4x4 newView = viewMat;
    newView[3][0] = 0.0;
    newView[3][1] = 0.0;
    newView[3][2] = 0.0;

    // (removed: dead store `output.position.w = 1.f;` -- immediately
    // overwritten by the full assignment below -- and the no-op
    // `output.position = output.position.xyzw;`)
    output.position = mul(float4(input.position, 1.f), newView);
    output.position = mul(output.position, projMat);

    // Push depth to just inside the far plane so the cube renders behind
    // everything else (z/w ~= 1 after the perspective divide).
    output.position.z = output.position.w * 0.9999;
    return output;
}

// 1/(2*pi) and 1/pi -- remap spherical angles to [0,1] UVs.
static const float2 invAtan = float2(0.1591, 0.3183);

// Converts a unit direction vector to equirectangular UV coordinates.
float2 SampleSphericalMap(float3 v)
{
    float2 uv = float2(atan2(v.z, v.x), asin(v.y));
    uv *= invAtan;
    uv += 0.5;
    return uv;
}

float4 PSMain(PixelInput input) : SV_TARGET
{
    // make sure to normalize localPos
    float2 uv = SampleSphericalMap(normalize(input.localPos));
    float3 color = HDRMap.Sample(textureSampler, uv).rgb;
    return float4(color, 1.0);
}
Prefilter:
// Prefilter: convolves the environment cubemap with the GGX specular lobe
// for one roughness value (customData.x). Run once per target mip level of
// the prefiltered cubemap (split-sum approximation, learnopengl IBL).
#pragma pack_matrix( row_major )

cbuffer ObjectBuffer : register(b0)
{
    matrix worldMatrix;
    matrix viewMatrix;
    matrix projectionMatrix;
    float4 customData; // x = roughness of the mip being generated
};

struct VertexInputType
{
    float3 position : POSITION;
    float2 uv : TEXCOORD0;
    float3 normal : NORMAL;
    int4 boneIds : BONEIDS;
    float4 boneWeights : BONEWEIGHTS;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float3 localPos : TEXCOORD0;
};

TextureCube<float4> shaderTexture : register(t0);

// NOTE(review): this state block only takes effect under the FX framework.
// With plain D3D11, bind a TRILINEAR (MIN_MAG_MIP_LINEAR) clamp sampler
// from the app -- the SampleLevel fix below depends on mip filtering, and
// the source cubemap must have a full mip chain generated.
SamplerState textureSampler
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Clamp;
    AddressV = Clamp;
};

PixelInputType VSMain(VertexInputType input)
{
    PixelInputType output;
    output.localPos = input.position.xyz;

    // Camera-centered cube: drop the view translation.
    float4x4 newView = viewMatrix;
    newView[3][0] = 0.0;
    newView[3][1] = 0.0;
    newView[3][2] = 0.0;

    // (removed dead store to output.position.w and the no-op .xyzw copy)
    output.position = mul(float4(input.position, 1.f), newView);
    output.position = mul(output.position, projectionMatrix);
    // Force depth to ~far plane.
    output.position.z = output.position.w * 0.9999;
    return output;
}

static const float PI = 3.14159265359;

// Trowbridge-Reitz GGX normal distribution function.
float DistributionGGX(float3 N, float3 H, float roughness)
{
    float a = roughness * roughness;
    float a2 = a * a;
    float NdotH = max(dot(N, H), 0.0);
    float NdotH2 = NdotH * NdotH;

    float nom = a2;
    float denom = (NdotH2 * (a2 - 1.0) + 1.0);
    denom = PI * denom * denom;
    return nom / denom;
}

// Van der Corput radical inverse: base-2 bit reversal of the sample index.
float RadicalInverse_VdC(uint bits)
{
    bits = (bits << 16u) | (bits >> 16u);
    bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
    bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
    bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
    bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
    return float(bits) * 2.3283064365386963e-10; // / 0x100000000
}
// ----------------------------------------------------------------------------
// Low-discrepancy 2D point i of N (Hammersley sequence).
float2 Hammersley(uint i, uint N)
{
    return float2(float(i) / float(N), RadicalInverse_VdC(i));
}

// Returns a half-vector distributed around N according to the GGX lobe.
float3 ImportanceSampleGGX(float2 Xi, float3 N, float roughness)
{
    float a = roughness * roughness;

    float phi = 2.0 * PI * Xi.x;
    float cosTheta = sqrt((1.0 - Xi.y) / (1.0 + (a * a - 1.0) * Xi.y));
    float sinTheta = sqrt(1.0 - cosTheta * cosTheta);

    // from spherical coordinates to cartesian coordinates
    float3 H;
    H.x = cos(phi) * sinTheta;
    H.y = sin(phi) * sinTheta;
    H.z = cosTheta;

    // from tangent-space vector to world-space sample vector
    float3 up = abs(N.z) < 0.999 ? float3(0.0, 0.0, 1.0) : float3(1.0, 0.0, 0.0);
    float3 tangent = normalize(cross(up, N));
    float3 bitangent = cross(N, tangent);

    float3 sampleVec = tangent * H.x + bitangent * H.y + N * H.z;
    return normalize(sampleVec);
}

float4 PSMain(PixelInputType input) : SV_TARGET
{
    float roughness = customData.x;
    float3 N = normalize(input.localPos);
    // Split-sum simplification: view == reflection == normal.
    float3 R = N;
    float3 V = R;

    const uint SAMPLE_COUNT = 2048u;
    float totalWeight = 0.0;
    float3 prefilteredColor = float3(0.0, 0.0, 0.0);
    for (uint i = 0u; i < SAMPLE_COUNT; ++i)
    {
        float2 Xi = Hammersley(i, SAMPLE_COUNT);
        float3 H = ImportanceSampleGGX(Xi, N, roughness);
        float3 L = normalize(2.0 * dot(V, H) * H - V);

        float NdotL = max(dot(N, L), 0.0);
        if (NdotL > 0.0)
        {
            // sample from the environment's mip level based on roughness/pdf
            float D = DistributionGGX(N, H, roughness);
            float NdotH = max(dot(N, H), 0.0);
            float HdotV = max(dot(H, V), 0.0);
            float pdf = D * NdotH / (4.0 * HdotV) + 0.0001;

            float resolution = 1024.0; // resolution of source cubemap (per face)
            float saTexel = 4.0 * PI / (6.0 * resolution * resolution);
            float saSample = 1.0 / (float(SAMPLE_COUNT) * pdf + 0.0001);

            float mipLevel = roughness == 0.0 ? 0.0 : 0.5 * log2(saSample / saTexel);

            // FIX: use SampleLevel(mipLevel) instead of Sample().
            // The original called Sample(), so mipLevel was computed but
            // never used: every tap hit mip 0, and a single very bright HDR
            // texel under-sampled by 2048 taps produces fireflies /
            // over-bright specular dots (further amplified by bloom).
            prefilteredColor += shaderTexture.SampleLevel(textureSampler, L, mipLevel).rgb * NdotL;
            totalWeight += NdotL;
        }
    }
    prefilteredColor = prefilteredColor / totalWeight;

    return float4(prefilteredColor, 1.0);
}
Irradiance:
// Irradiance convolution: for every cubemap direction, integrates the
// cosine-weighted incoming radiance over the hemisphere to produce the
// diffuse IBL cubemap sampled by the PBR shader.
#pragma pack_matrix( row_major )

cbuffer ObjectBuffer : register(b0)
{
    matrix worldMatrix;
    matrix viewMatrix;
    matrix projectionMatrix;
    matrix normalMat;
    matrix viewProjMatInv;
};

struct VertexInputType
{
    float3 position : POSITION;
    float2 uv : TEXCOORD0;
    float3 normal : NORMAL;
    int4 boneIds : BONEIDS;
    float4 boneWeights : BONEWEIGHTS;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float3 localPos : TEXCOORD0;
};

SamplerState textureSampler
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Clamp;
    AddressV = Clamp;
};

TextureCube<float4> environmentMap : register(t0);

PixelInputType VSMain(VertexInputType input)
{
    PixelInputType output;
    // Object-space position is the convolution direction.
    output.localPos = input.position.xyz;

    // Keep only the rotational part of the view so the cube follows the camera.
    float4x4 rotationOnlyView = viewMatrix;
    rotationOnlyView[3][0] = 0.0;
    rotationOnlyView[3][1] = 0.0;
    rotationOnlyView[3][2] = 0.0;

    output.position = mul(float4(input.position, 1.f), rotationOnlyView);
    output.position = mul(output.position, projectionMatrix);
    // Pin depth just inside the far plane.
    output.position.z = output.position.w * 0.9999;
    return output;
}

static const float PI = 3.14159265359;

float4 PSMain(PixelInputType input) : SV_TARGET
{
    // The world vector acts as the normal of a tangent surface at the
    // origin; the summed radiance over the hemisphere around it is the
    // irradiance the PBR shader later looks up by surface normal.
    float3 normal = normalize(input.localPos);

    // Orthonormal basis around the normal (tangent-space frame).
    float3 worldUp = float3(0.0, 1.0, 0.0);
    float3 tangentRight = normalize(cross(worldUp, normal));
    float3 tangentUp = normalize(cross(normal, tangentRight));

    const float sampleDelta = 0.025;
    float sampleCount = 0.0;
    float3 accumulated = float3(0, 0, 0);

    for (float phi = 0.0; phi < 2.0 * PI; phi += sampleDelta)
    {
        for (float theta = 0.0; theta < 0.5 * PI; theta += sampleDelta)
        {
            // Spherical angles -> cartesian direction in tangent space.
            float3 tangentDir = float3(sin(theta) * cos(phi),
                                       sin(theta) * sin(phi),
                                       cos(theta));
            // Tangent space -> world space.
            float3 worldDir = tangentDir.x * tangentRight
                            + tangentDir.y * tangentUp
                            + tangentDir.z * normal;

            // cos(theta) = Lambert weight; sin(theta) = solid-angle factor
            // of the spherical sample cell.
            accumulated += environmentMap.Sample(textureSampler, worldDir).rgb
                         * cos(theta) * sin(theta);
            sampleCount += 1.0;
        }
    }

    float3 irradiance = PI * accumulated * (1.0 / sampleCount);
    return float4(irradiance, 1.0);
}
BRDF:
// BRDF integration: precomputes the split-sum scale/bias LUT, indexed by
// (NdotV, roughness). Generated and sampled with the same UV convention,
// so the D3D v-flip cancels out.
#pragma pack_matrix( row_major )

cbuffer FrameBuffer
{
    matrix worldMatrix;
    matrix viewMatrix;
    matrix projectionMatrix;
    float4 customData;
};

struct VertexInputType
{
    float3 position : POSITION;
    float2 uv : TEXCOORD0;
    float3 normal : NORMAL;
    int4 boneIds : BONEIDS;
    float4 boneWeights : BONEWEIGHTS;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 uv : TEXCOORD0;
};

PixelInputType VSMain(VertexInputType input)
{
    PixelInputType output;
    output.uv = input.uv;

    float4x4 rotationOnlyView = viewMatrix;
    rotationOnlyView[3][0] = 0.0;
    rotationOnlyView[3][1] = 0.0;
    rotationOnlyView[3][2] = 0.0;

    output.position = mul(float4(input.position, 1.f), rotationOnlyView);
    output.position = mul(output.position, projectionMatrix);
    output.position.z = output.position.w * 0.9999;
    return output;
}

static const float PI = 3.14159265359;

// Base-2 Van der Corput radical inverse (bit reversal).
float RadicalInverse_VdC(uint bits)
{
    bits = (bits << 16u) | (bits >> 16u);
    bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
    bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
    bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
    bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
    return float(bits) * 2.3283064365386963e-10; // / 0x100000000
}

// Low-discrepancy sample point i of N.
float2 Hammersley(uint i, uint N)
{
    return float2(float(i) / float(N), RadicalInverse_VdC(i));
}

// GGX-importance-sampled half-vector around N for the given roughness.
float3 ImportanceSampleGGX(float2 Xi, float3 N, float roughness)
{
    float alpha = roughness * roughness;

    // Spherical angles of the sample in tangent space.
    float phi = 2.0 * PI * Xi.x;
    float cosTheta = sqrt((1.0 - Xi.y) / (1.0 + (alpha * alpha - 1.0) * Xi.y));
    float sinTheta = sqrt(1.0 - cosTheta * cosTheta);

    float3 halfVec = float3(cos(phi) * sinTheta,
                            sin(phi) * sinTheta,
                            cosTheta);

    // Tangent frame around N (pick a helper axis not parallel to N).
    float3 helperAxis = abs(N.z) < 0.999 ? float3(0.0, 0.0, 1.0) : float3(1.0, 0.0, 0.0);
    float3 tangentX = normalize(cross(helperAxis, N));
    float3 tangentY = cross(N, tangentX);

    return normalize(tangentX * halfVec.x + tangentY * halfVec.y + N * halfVec.z);
}

// Schlick-GGX geometry term; k = roughness^2 / 2 (IBL remapping).
float GeometrySchlickGGX(float NdotV, float roughness)
{
    float k = (roughness * roughness) / 2.0;
    return NdotV / (NdotV * (1.0 - k) + k);
}

// Smith's method: shadowing * masking.
float GeometrySmith(float3 N, float3 V, float3 L, float roughness)
{
    float cosView = max(dot(N, V), 0.0);
    float cosLight = max(dot(N, L), 0.0);
    return GeometrySchlickGGX(cosLight, roughness)
         * GeometrySchlickGGX(cosView, roughness);
}

// Monte-Carlo integration of the specular BRDF; returns (scale, bias) for F0.
float2 IntegrateBRDF(float NdotV, float roughness)
{
    // View vector in the tangent frame where N = +Z.
    float3 V = float3(sqrt(1.0 - NdotV * NdotV), 0.0, NdotV);
    float3 N = float3(0.0, 0.0, 1.0);

    float scale = 0.0; // "A" term: multiplies F0
    float bias = 0.0;  // "B" term: added to F0 contribution

    const uint SAMPLE_COUNT = 1024u;
    for (uint i = 0u; i < SAMPLE_COUNT; ++i)
    {
        float2 Xi = Hammersley(i, SAMPLE_COUNT);
        float3 H = ImportanceSampleGGX(Xi, N, roughness);
        float3 L = normalize(2.0 * dot(V, H) * H - V);

        float NdotL = max(L.z, 0.0);
        float NdotH = max(H.z, 0.0);
        float VdotH = max(dot(V, H), 0.0);

        if (NdotL > 0.0)
        {
            float G = GeometrySmith(N, V, L, roughness);
            float visibility = (G * VdotH) / (NdotH * NdotV);
            float fresnelTerm = pow(1.0 - VdotH, 5.0);
            scale += (1.0 - fresnelTerm) * visibility;
            bias += fresnelTerm * visibility;
        }
    }
    return float2(scale, bias) / float(SAMPLE_COUNT);
}

float4 PSMain(PixelInputType input) : SV_TARGET
{
    // u = NdotV, v = roughness.
    return float4(IntegrateBRDF(input.uv.x, input.uv.y), 0.0, 1.0);
}
This is how I perform the final IBL calculation in the main shader:
// ambient lighting (we now use IBL as the ambient term)
float NdotV = max(dot(N, V), 0.0);
float3 F = FresnelSchlickRoughness(NdotV, F0, roughness);
float3 kS = F;
float3 kD = 1.0 - kS;
kD *= 1.0 - metallic; // metals have no diffuse contribution

float3 irradiance = irradianceMap.Sample(linearClampSampler, N).rgb;
float3 diffuse = irradiance * albedo.rgb;

float3 R = reflect(-V, N);
// Sample both the pre-filter map and the BRDF LUT and combine them per the
// Split-Sum approximation to get the IBL specular part.
// MAX_REFLECTION_LOD must equal (prefilter mip count - 1) -- assumes 5 mips,
// TODO confirm against the prefilter pass setup.
const float MAX_REFLECTION_LOD = 4.0;
// FIX: sample the prefilter map with a trilinear (mip-linear) sampler. The
// previous MIP_POINT sampler snapped the fractional roughness*LOD to the
// nearest mip, producing visible banding between roughness levels.
// (assumes linearClampSampler is MIN_MAG_MIP_LINEAR + clamp -- confirm the
// sampler created on the app side)
float3 prefilteredColor = preFilterMap.SampleLevel(linearClampSampler, R, roughness * MAX_REFLECTION_LOD).rgb;
// FIX: the BRDF LUT needs linear filtering and CLAMP addressing. The
// previous point+WRAP sampler bled the opposite edge of the LUT at grazing
// angles (NdotV ~ 0) and at roughness 0/1, and point filtering quantized
// the response.
float2 brdf = brdfLUT.Sample(linearClampSampler, float2(NdotV, roughness)).rg;
float3 specular = prefilteredColor * (F * brdf.x + brdf.y);

float3 ambient = (kD * diffuse + specular);
if (enableHBAO)
    ambient = ambient * hbao;
float emission = albedoOnlyMode.y;
color = directLighting + ambient + emission * albedo.rgb;
