The lowest-effort way to get what you describe:

far away objects are completely grey and close objects are in full color

...is to use fog, either directly through the Lighting settings for your scene (see the Fog entries at this link, no custom shaders required) or by using the legacy shader paths.

Here's an example of what it looks like with linear fog on a series of four identical red cubes at increasing distance:

Example of four cubes rendered with fog: farther cubes are rendered more grey in colour

This is also straightforward to incorporate into a Surface shader.

Same four cubes, rendered with a Surface shader with similar effects

To do this, we add a fog colour parameter in the shader Properties block:

_FogColor ("Fog Color", Color) = (0.5, 0.5, 0.5, 1)

and define a corresponding variable in our CGPROGRAM block: fixed4 _FogColor;

Then modify our Input struct to include the screenPos parameter, which signals to Unity that it should pass the screenspace position of the fragment to us:

struct Input {
    float2 uv_MainTex;
    float4 screenPos; // This we add.
};

Now we use the z value of this screenspace coordinate to blend between our texture colour and the fog colour, z being the depth or distance of the fragment from the camera:

void surf (Input IN, inout SurfaceOutputStandard o) {
    // Albedo comes from a texture tinted by color
    fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
    c = lerp(c, _FogColor, saturate(IN.screenPos.z * 0.05f));
...

Here the * 0.05f is just a magic number to get the fog to fall off over the example depth range I was using. In practice you might want to expose extra variables to control the fog density or falloff distance.
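For example, a quick sketch of a linear start/end falloff (the property names _FogStart and _FogEnd here are just placeholders, not part of the snippets above) could look like:

// In the Properties block:
_FogStart ("Fog Start", Float) = 5
_FogEnd ("Fog End", Float) = 50

// In the CGPROGRAM block:
float _FogStart;
float _FogEnd;

// In surf, a linear ramp between the two distances replaces the 0.05f factor:
float fogAmount = saturate((IN.screenPos.z - _FogStart) / (_FogEnd - _FogStart));
c = lerp(c, _FogColor, fogAmount);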

Note that applying it this way only affects the object's albedo, so lighting and shadows will still be applied on top of this colour. To apply the fog after lighting instead, we can do it in a finalcolor function:

#pragma surface surf Standard fullforwardshadows finalcolor:mycolor
...
void mycolor(Input IN, SurfaceOutputStandard o, inout fixed4 color)
{
    color = lerp(color, _FogColor, saturate(IN.screenPos.z * 0.05f - 0.5f));
}
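Assembled into a complete shader, that might look roughly like the sketch below, built on Unity's standard surface shader template (the shader name and the template's _Glossiness/_Metallic properties are incidental):

Shader "Custom/SurfaceFog"
{
    Properties
    {
        _Color ("Color", Color) = (1,1,1,1)
        _MainTex ("Albedo (RGB)", 2D) = "white" {}
        _Glossiness ("Smoothness", Range(0,1)) = 0.5
        _Metallic ("Metallic", Range(0,1)) = 0.0
        _FogColor ("Fog Color", Color) = (0.5, 0.5, 0.5, 1)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 200

        CGPROGRAM
        #pragma surface surf Standard fullforwardshadows finalcolor:mycolor
        #pragma target 3.0

        sampler2D _MainTex;

        struct Input
        {
            float2 uv_MainTex;
            float4 screenPos; // Unity fills this with the screenspace position.
        };

        half _Glossiness;
        half _Metallic;
        fixed4 _Color;
        fixed4 _FogColor;

        void surf (Input IN, inout SurfaceOutputStandard o)
        {
            // Albedo comes from a texture tinted by color.
            fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
            o.Albedo = c.rgb;
            o.Metallic = _Metallic;
            o.Smoothness = _Glossiness;
            o.Alpha = c.a;
        }

        void mycolor (Input IN, SurfaceOutputStandard o, inout fixed4 color)
        {
            // Blend toward the fog colour after lighting has been applied.
            color = lerp(color, _FogColor, saturate(IN.screenPos.z * 0.05f - 0.5f));
        }
        ENDCG
    }
    FallBack "Diffuse"
}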

Also note that the snippets above were written for a DirectX system, where screenspace z increases with distance. To make the shader work on both DirectX and OpenGL, you might need to add a bit like this:

#if (defined(SHADER_API_GLES) || defined(SHADER_API_GLES3)) && defined(SHADER_API_MOBILE)
    // multiply z by -1 before using it to compute fog amount.
#endif
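In the finalcolor version, that could look something like this (fogDepth is just an illustrative name):

void mycolor(Input IN, SurfaceOutputStandard o, inout fixed4 color)
{
    #if (defined(SHADER_API_GLES) || defined(SHADER_API_GLES3)) && defined(SHADER_API_MOBILE)
        // On these platforms, flip the sign so the value still grows with distance.
        float fogDepth = -IN.screenPos.z;
    #else
        float fogDepth = IN.screenPos.z;
    #endif
    color = lerp(color, _FogColor, saturate(fogDepth * 0.05f - 0.5f));
}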

Lastly we can do this in a vertex/fragment shader pair like so (using an unlit shader in this example for simplicity):

Same cubes, rendered with the unlit shader below

Shader "Unlit/UnlitManualFog"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _FogColor ("Fog Color", Color) = (0.5, 0.5, 0.5, 1.0)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                // Add a variable to carry depth information
                float depth : TEXCOORD1;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;
            fixed4 _FogColor;
            
            v2f vert (appdata v)
            {
                v2f o;
                // Transform the vertex into view space
                o.vertex = mul(UNITY_MATRIX_MV, v.vertex);
                // Copy the view-space z coordinate and call it depth
                o.depth = o.vertex.z;
                // Finish transforming the vertex by applying the projection matrix
                o.vertex = mul(UNITY_MATRIX_P, o.vertex);

                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }
            
            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 col = tex2D(_MainTex, i.uv);
                // Blend colour using the depth parameter we calculated in the vertex shader
                return lerp(col, _FogColor, saturate(-0.05f * i.depth));
            }
            ENDCG
        }
    }
}

To answer your "bonus question" about reading from the depth buffer to change behaviour based on what's already rendered, I'd recommend checking out the Unity presentation "Special Effects with Depth." It's five years old now, but the core techniques hold up, and it gives a solid primer on how this works in Unity, what you can use it to achieve, and what costs it comes with.
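As a rough sketch of the idea (not the presentation's own code), an unlit shader can sample _CameraDepthTexture and compare it against its own depth, fading out where it approaches existing geometry, much like Unity's built-in soft particles. This assumes the camera has been told to render a depth texture via Camera.depthTextureMode; the shader name and the fade math are placeholders:

Shader "Unlit/DepthCompareSketch"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    SubShader
    {
        Tags { "RenderType"="Transparent" "Queue"="Transparent" }

        Pass
        {
            Blend SrcAlpha OneMinusSrcAlpha
            ZWrite Off

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 projPos : TEXCOORD1;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_ST;
            // Filled in by Unity when the camera renders a depth texture.
            sampler2D _CameraDepthTexture;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                // Screen position for sampling the depth texture...
                o.projPos = ComputeScreenPos(o.vertex);
                // ...and this vertex's own eye-space depth, stored in projPos.z.
                COMPUTE_EYEDEPTH(o.projPos.z);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // Depth of whatever the camera has already rendered at this pixel.
                float sceneZ = LinearEyeDepth(
                    SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));
                // Fade out as this surface gets close to the existing geometry.
                float fade = saturate(sceneZ - i.projPos.z);

                fixed4 col = tex2D(_MainTex, i.uv);
                col.a *= fade;
                return col;
            }
            ENDCG
        }
    }
}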