The Hardware Instancing, No Hardware Instancing & Shadows Shader all mixed together

I'm not going to have my a## sitting on top of this..

I know there are others out there who would like to see and have the Effect file that I've put together, I've blended Reimers Shadow Map with my Hardware Instancing Shader so that you can have both worlds.

Here's the complete Shader Effect file, I've added a bias for the floating point error called xBias and values between 0.001f - 0.006f will do good.
// Matrices
float4x4 World;     // The world matrix, where in 3D space the object will be placed.
float4x4 View;     // The camera view
float4x4 Projection;   // The frustum
float4x4 xLightsViewProjection; // The view of the light and its frustum, already multiplic together in the app
float3 xLightPos;    // The position of the light
float xLightPower;    // The power of the light
float xAmbient;     // The ambient surrounding light
float xBias;
// The texture sampler that will hold // the information about the Texture2D // that we send from the Application Texture xTexture; sampler TextureSampler = sampler_state { texture = <xTexture>; magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR; AddressU = mirror; AddressV = mirror; }; // The sadow Texture2D that will contain // the depth data image drawn from the // light source point of view Texture xShadowMap; sampler ShadowMapSampler = sampler_state { texture = <xShadowMap>; magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR; AddressU = clamp; AddressV = clamp; }; // The texture of the light // like if we want the light to cast // an image like if it's lit through // a churchwindow etc Texture xLightShape; sampler LightShapeSampler = sampler_state { texture = <xLightShape>; magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR; AddressU = clamp; AddressV = clamp; }; // -------------------------------------------------------- // Here's the shadow map part that we will draw first // to create the Texture2D image that the light view sees // It is a plain 2D image // -------------------------------------------------------- // This is the struct that we are using // to return data out from the Vertex Shader struct SMapVertexToPixel { float4 Position : POSITION; // The 2D position of a vertex point in the camera view float4 Position2D : TEXCOORD0; // The 2D position of a vertex in the light view }; // The color of the vertex in the // Shadow Map that we will pass on // to the Pixel Shader struct SMapPixelToFrame { float4 Color : COLOR0; }; // The Vertex Shader function, that will handle all the vertices of the game SMapVertexToPixel VertexShaderCommonShadowMap(float4 inPos : POSITION, float4x4 instanceTransform) { // Create an empty object SMapVertexToPixel Output = (SMapVertexToPixel)0; // Add the entities translations to the camera world matrix (inPos 2D) float4 temp1 = mul(inPos, instanceTransform); // Add it to the view of the light (still 2D) // but the position will also contain the Z cordinate // which is the distance from the light to the pixel Output.Position = mul(temp1, xLightsViewProjection); // Copy the position to the 2D position // that we are going to use in the Pixel Shader // the position will contain x,y,z,w Output.Position2D = Output.Position; return Output; } // The Hardware Instancing Function // instanceTransform contains the object translations SMapVertexToPixel HardwareInstancingVertexShaderShadowMap(float4 inPos : POSITION, float4x4 instanceTransform : BLENDWEIGHT) { // We must transpose the Matrices because they are // not built up the same way in the Monogame and in the HLSL // I'm writing about it here http://3dgamehistory.blogspot.se/2017/12/more-planets-more-artificial-suns.html return VertexShaderCommonShadowMap(inPos, mul(World, transpose(instanceTransform))); } // When instancing is disabled we take the // world transform just like it is // because it will already contain the object transform // since here we are only rendering one object SMapVertexToPixel NoInstancingVertexShaderShadowMap(float4 inPos : POSITION) { return VertexShaderCommonShadowMap(inPos, World); } // Let's create the image from the light view SMapPixelToFrame PixelShaderFunctionShadowMap(SMapVertexToPixel PSIn) { // Create the output object SMapPixelToFrame Output = (SMapPixelToFrame)0; // Divide the Z component with the homogeneous component w, // the result will be between 0 and 1, // where 0 corresponds to pixels at the near clipping plane // and 1 to pixels at the far clipping plane, // as defined in the creation of the Projection matrix. Output.Color = PSIn.Position2D.z / PSIn.Position2D.w; return Output; } // The Hardware instancing technique technique HardwareInstancingShadowMap { pass Pass0 { VertexShader = compile vs_3_0 HardwareInstancingVertexShaderShadowMap(); PixelShader = compile ps_3_0 PixelShaderFunctionShadowMap(); } } // The ordinary rendering technique technique NoInstancingShadowMap { pass Pass0 { VertexShader = compile vs_2_0 NoInstancingVertexShaderShadowMap(); PixelShader = compile ps_2_0 PixelShaderFunctionShadowMap(); } } // ---------------------------------- // End of the Shadow Map creation // ---------------------------------- // ------------------------------------- // Let's create the scene with shadows // ------------------------------------- // The input object that will hold // all the data needed. struct VertexShaderInput { float4 Position : POSITION0; float4 Pos2DAsSeenByLight : TEXCOORD1; float3 Normal : NORMAL0; float2 TextureCoordinate : TEXCOORD0; }; struct VertexShaderOutput { float4 Position : POSITION0; // The vertex position again float4 Position3D : TEXCOORD2; float4 Pos2DAsSeenByLight : TEXCOORD1; float2 TextureCoordinate : TEXCOORD0; float3 Normal : TEXCOORD3; float4 ParticleColor : COLOR0; }; struct SScenePixelToFrame { float4 Color : COLOR0; }; // Vertex shader helper function shared between the two techniques. VertexShaderOutput VertexShaderCommon(VertexShaderInput input, float4x4 instanceTransform, float4 instanceColor) { VertexShaderOutput output; // Apply the objects translation in the world // to the input.Position that contain the // X and Y values of the screen coordinate of the current pixel float4 worldPosition = mul(input.Position, instanceTransform); // Apply the camera view to it float4 viewPosition = mul(worldPosition, View); // And the projection frustum to become the camera screen position output.Position = mul(viewPosition, Projection); // And do the same for the light screen pixels output.Pos2DAsSeenByLight = mul(worldPosition, xLightsViewProjection); // Calculate the objects in the world vertex normals output.Normal = normalize(mul(input.Normal, (float3x3)instanceTransform)); // The objects 3D positions is stored output.Position3D = worldPosition; // Copy across the input texture coordinate. output.TextureCoordinate = input.TextureCoordinate; output.ParticleColor = instanceColor; return output; } // Hardware instancing reads the per-instance world transform from a secondary vertex stream. VertexShaderOutput HardwareInstancingVertexShader(VertexShaderInput input, float4x4 instanceTransform : BLENDWEIGHT, float4 instanceColor : COLOR0) { return VertexShaderCommon(input, mul(World, transpose(instanceTransform)), instanceColor); } // When instancing is disabled we take the world transform from an effect parameter. VertexShaderOutput NoInstancingVertexShader(VertexShaderInput input) { return VertexShaderCommon(input, World, 0); } float DotProduct(float3 lightPos, float3 pos3D, float3 normal) { float3 lightDir = normalize(pos3D - lightPos); return dot(-lightDir, normal); } // Both techniques share this same pixel shader. SScenePixelToFrame PixelShaderFunction(VertexShaderOutput input) : COLOR0 { // Create the output object that will hold the data SScenePixelToFrame Output = (SScenePixelToFrame)0; // Texture coordinates have to be between the [0, 1] region, // so we need a simple remap of point (-1,-1) which has // to become (0,0), while point (1,1) has to stay (1,1) float2 ProjectedTexCoords; ProjectedTexCoords[0] = input.Pos2DAsSeenByLight.x / input.Pos2DAsSeenByLight.w / 2.0f + 0.5f; ProjectedTexCoords[1] = -input.Pos2DAsSeenByLight.y / input.Pos2DAsSeenByLight.w / 2.0f + 0.5f; float diffuseLightingFactor = 0; if ((saturate(ProjectedTexCoords).x == ProjectedTexCoords.x) && (saturate(ProjectedTexCoords).y == ProjectedTexCoords.y)) { float depthStoredInShadowMap = tex2D(ShadowMapSampler, ProjectedTexCoords).r; float realDistance = input.Pos2DAsSeenByLight.z / input.Pos2DAsSeenByLight.w; if ((realDistance - 1.0f / 100.0f) + xBias <= depthStoredInShadowMap)
  {
   diffuseLightingFactor = DotProduct(xLightPos, input.Position3D, input.Normal);
   diffuseLightingFactor = saturate(diffuseLightingFactor);
   diffuseLightingFactor *= xLightPower;

   // The light texture that will be projected onto the objects
   float lightTextureFactor = tex2D(LightShapeSampler, ProjectedTexCoords).r;
   diffuseLightingFactor *= lightTextureFactor;
  }
 }

 // Apply the objects textures
 // and add the lightning color and ambient power to the pixel
 float4 baseColor = tex2D(TextureSampler, input.TextureCoordinate);
 Output.Color = baseColor * (diffuseLightingFactor + xAmbient);

 return Output;
}

// Hardware instancing technique.
technique HardwareInstancing
{
 pass Pass1
 {
  VertexShader = compile vs_3_0 HardwareInstancingVertexShader();
  PixelShader = compile ps_3_0 PixelShaderFunction();
 }
}

// For rendering without instancing.
technique NoInstancing
{
 pass Pass1
 {
  VertexShader = compile vs_2_0 NoInstancingVertexShader();
  PixelShader = compile ps_2_0 PixelShaderFunction();
 }
}

Here's an animated gif of the result, I'm using a large frustum so the shadow is kind of pixelated. This kind of shadow technique is intended for a much smaller frustum than the one I'm using.




Hope you will have use for it.

See you!

Kommentarer