To experiment with a certain technique, I'm implementing my own shadow mapping in Unity. I'm using a camera for the light "view", which is rendered to a RenderTexture after being post-processed with the following shader:
Shader "Custom/DepthGrayscale" {
SubShader {
Tags { "RenderType"="Opaque" }
Pass{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _CameraDepthTexture;
struct v2f {
float4 pos : SV_POSITION;
float4 scrPos:TEXCOORD1;
};
//Vertex Shader
v2f vert (appdata_base v){
v2f o;
o.pos = mul (UNITY_MATRIX_MVP, v.vertex);
o.scrPos=ComputeScreenPos(o.pos);
//for some reason, the y position of the depth texture comes out inverted
o.scrPos.y = 1 - o.scrPos.y;
return o;
}
//Fragment Shader
half4 frag (v2f i) : COLOR{
float depthValue = tex2Dproj(_CameraDepthTexture, UNITY_PROJ_COORD(i.scrPos)).r;
return fixed4(depthValue, 0.0, 0.0, 0.0);
}
ENDCG
}
}
FallBack "Diffuse"
}
The RenderTexture has a width and height of 2048 and uses the R Float color format for 32-bit precision.
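The light camera is set up along these lines. This is only a minimal sketch of the arrangement described above; the class and field names are placeholders, and the essential parts are enabling the camera's depth texture, rendering into the RFloat target, and blitting through the DepthGrayscale shader:

using UnityEngine;

// Sketch of the light-camera setup (class and field names are placeholders).
[RequireComponent(typeof(Camera))]
public class LightDepthCapture : MonoBehaviour
{
    public Material depthMaterial;          // material using the Custom/DepthGrayscale shader
    public RenderTexture lightDepthTexture; // 2048x2048, RenderTextureFormat.RFloat

    void Start()
    {
        Camera cam = GetComponent<Camera>();
        cam.depthTextureMode = DepthTextureMode.Depth; // makes _CameraDepthTexture available
        cam.targetTexture = lightDepthTexture;         // render the light's view into the texture
    }

    // Post-process the light camera's output through the depth shader.
    void OnRenderImage(RenderTexture src, RenderTexture dst)
    {
        Graphics.Blit(src, dst, depthMaterial);
    }
}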
The objects in the scene then have the following shader, which takes the projection * view matrix of the light and the "depth" texture as input (basic shadow mapping with a bias):
Shader "Tutorial/Display Normals" {
Properties {
_CameraTex ("Camera texture", 2D) = "white" {}
}
SubShader {
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform float4x4 lightMatrix;
struct v2f {
float4 pos : SV_POSITION;
float3 worldPos : COLOR0;
fixed3 color : COLOR1;
};
v2f vert (appdata_base v)
{
v2f o;
o.pos = mul (UNITY_MATRIX_MVP, v.vertex);
o.worldPos = v.vertex.xyz;
o.color = v.normal * 0.5 + 0.5;
return o;
}
uniform sampler2D _CameraTex;
fixed4 frag (v2f i) : SV_Target
{
fixed4 coords = mul(lightMatrix, fixed4(i.worldPos, 1.0));
coords /= coords.w;
coords.x = (coords.x + 1.0) / 2.0;
coords.y = (coords.y + 1.0) / 2.0;
coords.z = (coords.z + 1.0) / 2.0;
//return fixed4(coords.z, 0.0, 0.0, 1);
float lightDepth = tex2D(_CameraTex, fixed2(coords.x, 1.0 - coords.y)).r;
float depth = coords.z;
if (depth < lightDepth + 0.005) {
return fixed4(1.0, 1.0, 1.0, 1.0);
} else {
return fixed4(0.1, 0.1, 0.1, 1.0);
}
}
ENDCG
}
}
}
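Because lightMatrix is not declared as a shader property, it has to be set from a script with Material.SetMatrix. A minimal sketch of how the matrix and the depth texture could be fed to the material (class and field names are placeholders; Unity's Camera.projectionMatrix uses OpenGL conventions, which matches the [-1, 1] remapping in the fragment shader):

using UnityEngine;

// Sketch of feeding the light's projection * view matrix and depth map to the
// receiving material (class and field names are placeholders).
public class ShadowReceiverSetup : MonoBehaviour
{
    public Camera lightCamera;
    public RenderTexture lightDepthTexture; // output of the DepthGrayscale pass
    public Material receiverMaterial;       // material using the shader above

    void Update()
    {
        // Camera.projectionMatrix follows OpenGL conventions (NDC z in [-1, 1]),
        // which is what the shader's (z + 1) / 2 remap expects.
        Matrix4x4 lightMatrix = lightCamera.projectionMatrix * lightCamera.worldToCameraMatrix;

        receiverMaterial.SetMatrix("lightMatrix", lightMatrix);
        receiverMaterial.SetTexture("_CameraTex", lightDepthTexture);
    }
}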
The result of this is perfect with a plane and cube:
The red plane displays the shadow map and the camera represents the light.
However, when using a more complex model like Sibenik, the following happens:
When I move it slightly, the shadow on the plane is suddenly correct, but the shadow on the church model itself is still wrong:
What could be the problem?
Answer
I solved the problem by transforming the vertex with the model matrix, so that worldPos actually holds world-space coordinates:
o.worldPos = mul(_Object2World, v.vertex).xyz;
The original code passed object-space positions into lightMatrix, which expects world-space input; presumably the plane and cube only looked correct because their transforms were close to identity, while the transformed Sibenik model exposed the mistake.