1. Motion Blur Revisited
In an earlier post we implemented motion blur by accumulating saved render results. In the example below we implement it instead by reconstructing each pixel's world position from the depth texture (the steps are summarized in the equations right after this list):
- First, reconstruct the pixel's world position from the depth texture; the principle was covered in 【Unity Shader入门精要 第13章】使用深度和法线纹理(一)
- Then transform that world position with the view-projection (VP) matrix saved from the previous frame to obtain the previous frame's NDC coordinates
- Compute a per-pixel velocity from the NDC coordinates of the two frames
- Finally, blur the image along that velocity
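Written out, with $u, v$ the screen-space UV, $d$ the sampled depth, $M_{VP}$ the current view-projection matrix and $M_{VP}^{prev}$ the one saved from the previous frame (this just restates what the shader below computes):

$$p_{ndc} = \left(2u - 1,\; 2v - 1,\; 2d - 1,\; 1\right)$$

$$P_{world} = \frac{M_{VP}^{-1}\, p_{ndc}}{\left(M_{VP}^{-1}\, p_{ndc}\right)_w},\qquad p_{prev} = \frac{M_{VP}^{prev}\, P_{world}}{\left(M_{VP}^{prev}\, P_{world}\right)_w}$$

$$velocity = \frac{p_{ndc}.xy - p_{prev}.xy}{2}$$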
Since we need to operate on the camera, add the following property to the post-processing base class so the current camera is easy to get hold of:
private Camera mCamera;

// Lazily cache the Camera component attached to the same GameObject.
public Camera Camera
{
    get
    {
        if (null == mCamera) mCamera = gameObject.GetComponent<Camera>();
        return mCamera;
    }
}
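The scripts below also call a CheckShaderAndMaterial helper on this same base class, introduced in an earlier post of the series. For readers jumping in here, a minimal sketch of what such a helper might look like (hypothetical; the real implementation from the earlier post may differ, and would ideally cache the created material rather than recreating it every frame):

// Hypothetical sketch: return a usable material for the given shader,
// or null to signal that the effect should fall back to a plain Graphics.Blit.
protected Material CheckShaderAndMaterial(Shader shader, Material material)
{
    if (null == shader || !shader.isSupported) return null;
    // Reuse the material assigned in the Inspector if it already uses this shader.
    if (null != material && material.shader == shader) return material;
    // Otherwise create a material from the shader on the fly.
    return new Material(shader) { hideFlags = HideFlags.DontSave };
}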
In addition, the post-processing subclass that performs the motion blur needs to set the camera's depth texture mode:
private void OnEnable()
{
    Camera.depthTextureMode |= DepthTextureMode.Depth;
}
Test script:
using UnityEngine;
public class PostEffect_MotionBlur_NDC : PostEffectBase
{
    public Shader MotionBlurShader_NDC;
    public Material MotionBlurMat_NDC;

    [Range(0, 1)]
    public float BlurSize;
    [Range(1, 4)]
    public int BlurRound;

    // View-projection matrix saved from the previous frame.
    private Matrix4x4 mPreviousVP;

    private void OnEnable()
    {
        Camera.depthTextureMode |= DepthTextureMode.Depth;
        // Initialize with the current VP matrix so the first frame does not
        // blur against an all-zero matrix (the default value of Matrix4x4).
        mPreviousVP = Camera.projectionMatrix * Camera.worldToCameraMatrix;
    }

    private void OnRenderImage(RenderTexture src, RenderTexture dest)
    {
        Material _mat = CheckShaderAndMaterial(MotionBlurShader_NDC, MotionBlurMat_NDC);
        if (null == _mat) Graphics.Blit(src, dest);
        else
        {
            _mat.SetFloat("_BlurRound", BlurRound);
            _mat.SetFloat("_BlurSize", BlurSize);
            // Pass last frame's VP matrix and the inverse of this frame's VP matrix,
            // then remember this frame's VP matrix for the next frame.
            _mat.SetMatrix("_PreviousVP", mPreviousVP);
            Matrix4x4 _curVP = Camera.projectionMatrix * Camera.worldToCameraMatrix;
            _mat.SetMatrix("_InversVP", _curVP.inverse);
            mPreviousVP = _curVP;
            Graphics.Blit(src, dest, _mat);
        }
    }
}
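To try it out, attach the component to the scene camera, assign the shader in the Inspector, and adjust BlurSize and BlurRound while the camera is moving; larger values produce longer blur trails.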
Test shader:
Shader "MyShader/Chapter_13/Chapter_13_MotionBlur_NDC_Shader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
ZTest Always ZWrite Off Cull Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f
{
float4 pos : SV_POSITION;
float4 uv : TEXCOORD0;
};
sampler2D _MainTex;
float4 _MainTex_TexelSize;
sampler2D _CameraDepthTexture;
float4x4 _InversVP;
float4x4 _PreviousVP;
half _BlurRound;
half _BlurSize;
v2f vert(appdata_img v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv.xy = v.texcoord;
o.uv.zw = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
o.uv.w = 1 - o.uv.w;
#endif
return o;
}
fixed4 frag(v2f i) : SV_Target
{
fixed _d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv.zw);
fixed4 _curNDC = fixed4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, _d * 2 - 1, 1);
float4 _worldPos = mul(_InversVP, _curNDC);
_worldPos /= _worldPos.w;
float4 _preNDC = mul(_PreviousVP, _worldPos);
_preNDC /= _preNDC.w;
float2 _velocity = (_curNDC.xy - _preNDC.xy) * 0.5;
float2 _uv = i.uv.xy;
float4 _color = tex2D(_MainTex, i.uv);
for(int it = 1; it < _BlurRound; it++)
{
_uv += _velocity * _BlurSize;
_color += tex2D(_MainTex, _uv);
}
_color /= _BlurRound;
return fixed4(_color.rgb, 1);
}
ENDCG
}
}
}
Test result:
2. Global Fog
2.1 Unity's Built-in Fog
Unity's built-in fog produces distance-based linear or exponential fog. To support it in a shader you write yourself, you need to add the #pragma multi_compile_fog compile directive and use the UNITY_FOG_COORDS, UNITY_TRANSFER_FOG and UNITY_APPLY_FOG macros to compute the fog contribution (the workflow mirrors the shadow macros covered earlier; a minimal sketch of the macro usage follows right below rather than a full walkthrough).
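A minimal sketch of what that looks like in a simple forward-rendered unlit shader (the macro names come from UnityCG.cginc; the surrounding shader is just illustrative scaffolding and the shader name is made up):

Shader "Hidden/FogMacroExample"
{
    Properties { _MainTex ("Texture", 2D) = "white" {} }
    SubShader
    {
        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #pragma multi_compile_fog        // compile variants for the fog mode enabled in the Lighting settings
            #include "UnityCG.cginc"

            sampler2D _MainTex;

            struct v2f
            {
                float4 pos : SV_POSITION;
                float2 uv : TEXCOORD0;
                UNITY_FOG_COORDS(1)          // declares the fog factor in TEXCOORD1
            };

            v2f vert(appdata_img v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = v.texcoord;
                UNITY_TRANSFER_FOG(o, o.pos); // compute the per-vertex fog factor from the clip-space position
                return o;
            }

            fixed4 frag(v2f i) : SV_Target
            {
                fixed4 col = tex2D(_MainTex, i.uv);
                UNITY_APPLY_FOG(i.fogCoord, col); // blend the color toward the global fog color
                return col;
            }
            ENDCG
        }
    }
}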
This built-in fog has two main drawbacks:
- The code has to be added to every shader by hand, which makes it inconvenient to modify
- The implementation is fixed and cannot be customized; for example, it cannot produce height-based fog
2.2 Height Fog from the Depth Texture
In the example below we implement a height-based global fog using the depth texture. Each pixel's world position is reconstructed from the depth texture, this time with the ray-interpolation method, which was also covered in 【Unity Shader入门精要 第13章】使用深度和法线纹理(一). The fog density is then driven by the world-space height, and the original color is blended with the fog color. The key formulas are summarized below.
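Written out (the interpolated corner ray is scaled by $1/Near$ on the C# side, so its component along the camera's forward axis is 1, and multiplying it by the linear eye depth $d_{eye}$ gives the offset from the camera to the surface point):

$$P_{world} = P_{camera} + ray_{interp} \cdot d_{eye}$$

$$f = \mathrm{saturate}\!\left(\frac{FogTop - P_{world}.y}{FogTop - FogBottom} \cdot FogDensity\right)$$

$$color_{final} = \mathrm{lerp}\left(color_{scene},\; FogColor,\; f\right)$$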
Test script:
using UnityEngine;
public class PostEffect_HeightFog : PostEffectBase
{
    public Shader HeightFogShader;
    public Material HeightFogMat;

    public float FogBottom;
    public float FogTop;
    public float FogDensity;
    public Color FogColor;

    private Matrix4x4 mFrustumCornerRays;

    private void OnEnable()
    {
        // The shader samples _CameraDepthTexture, so make sure the camera generates it.
        Camera.depthTextureMode |= DepthTextureMode.Depth;
    }

    /// <summary>
    /// Compute the rays toward the four corners of the screen quad (one per matrix row).
    /// </summary>
    private void FillFrustumCornerRays()
    {
        //HalfHeight = | ToTop | = Near * Tangent(Fov / 2)
        //ToTop = Camera.Up * HalfHeight
        //ToRight = Camera.Right * HalfHeight * aspect
        float _halfHeight = Camera.nearClipPlane * Mathf.Tan(Camera.fieldOfView * 0.5f * Mathf.Deg2Rad);
        Vector3 _toTop = Camera.transform.up * _halfHeight;
        Vector3 _toRight = Camera.transform.right * _halfHeight * Camera.aspect;

        //Scale = 1 / Near
        //Scaled_O_LD = ( Camera.Forward * Near - ToRight - ToTop ) * Scale
        //Scaled_O_RD = ( Camera.Forward * Near + ToRight - ToTop ) * Scale
        //Scaled_O_RU = ( Camera.Forward * Near + ToRight + ToTop ) * Scale
        //Scaled_O_LU = ( Camera.Forward * Near - ToRight + ToTop ) * Scale
        Vector3 _forwardVec = Camera.transform.forward * Camera.nearClipPlane;
        float _scale = 1 / Camera.nearClipPlane;
        mFrustumCornerRays.SetRow(0, (_forwardVec - _toRight - _toTop) * _scale); //bottom-left
        mFrustumCornerRays.SetRow(1, (_forwardVec + _toRight - _toTop) * _scale); //bottom-right
        mFrustumCornerRays.SetRow(2, (_forwardVec + _toRight + _toTop) * _scale); //top-right
        mFrustumCornerRays.SetRow(3, (_forwardVec - _toRight + _toTop) * _scale); //top-left
    }

    private void OnRenderImage(RenderTexture src, RenderTexture dest)
    {
        Material _mat = CheckShaderAndMaterial(HeightFogShader, HeightFogMat);
        if (null == _mat) Graphics.Blit(src, dest);
        else
        {
            _mat.SetFloat("_FogBottom", FogBottom);
            _mat.SetFloat("_FogTop", FogTop);
            _mat.SetFloat("_FogDensity", FogDensity);
            _mat.SetColor("_FogColor", FogColor);
            FillFrustumCornerRays();
            _mat.SetMatrix("_FrustumCornersRay", mFrustumCornerRays);
            Graphics.Blit(src, dest, _mat);
        }
    }
}
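As with the motion blur script, attach this to the camera and assign the shader in the Inspector. FogTop should be set higher than FogBottom (otherwise the height ratio flips sign), and FogDensity scales the overall strength of the effect.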
Test shader:
Shader "MyShader/Chapter_13/Chapter_13_HeightFog_Shader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
ZTest Always ZWrite Off Cull Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f
{
float4 pos : SV_POSITION;
float4 uv : TEXCOORD0;
float3 scaledRay : TEXCOORD1;
};
sampler2D _MainTex;
float4 _MainTex_TexelSize;
sampler2D _CameraDepthTexture;
float4x4 _FrustumCornersRay;
float _FogBottom;
float _FogTop;
float _FogDensity;
fixed4 _FogColor;
v2f vert(appdata_img v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv.xy = v.texcoord;
o.uv.zw = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
o.uv.w = 1 - o.uv.w;
#endif
//判断当前处理的是四个顶点中的哪一个
//然后选择对应的射线放入插值寄存器
int _index;
if(o.uv.x < 0.5 && o.uv.y < 0.5)
_index = 0; //左下
else if(o.uv.x > 0.5 && o.uv.y < 0.5)
_index = 1; //右下
else if(o.uv.x > 0.5 && o.uv.y > 0.5)
_index = 2; //右上
else
_index = 3; //左上
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
_index = 3 - _index;
#endif
o.scaledRay = _FrustumCornersRay[_index];
return o;
}
fixed4 frag(v2f i) : SV_Target
{
fixed4 _samplerColor = tex2D(_MainTex, i.uv.xy);
fixed _d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv.zw);
float _linearEyeDepth = LinearEyeDepth(_d);
float3 _worldPos = _WorldSpaceCameraPos.xyz + i.scaledRay.xyz * _linearEyeDepth;
float _ratio = (_FogTop - _worldPos.y) / (_FogTop - _FogBottom);
float _fogDensity = saturate(_ratio * _FogDensity);
fixed4 _finalColor = lerp(_samplerColor, _FogColor, _fogDensity);
return fixed4(_finalColor.rgb, 1);
}
ENDCG
}
}
}
Test result: