Texture Blending in a Kinect Sandbox Game

Today I want to talk about the texture blending in a Kinect sandbox game. It was a company project from last year that I completed on my own over several months. Here I will only cover the principle of the depth-based texture blending; the rest of the project is not something I can go into. Let's look at the result first, although I haven't figured out how to post an animated GIF here.

1. First, get the depth data from the Kinect. The raw data is not an image but a one-dimensional array; we need to treat it as a two-dimensional map so that it is convenient for a shader to compute with. The technical term for this kind of work is GPGPU:

m_ComputeBuffer = new ComputeBuffer(m_DepthMapLimitWidth * m_DepthMapLimitHeight, sizeof(float));

void UpdateDepthMap()
{
    ushort[] ushortRes = m_KinectManager.GetRawDepthMap();
    int arrLength = ushortRes.Length;
    int curIndex = 0;
    for (int i = 0; i < arrLength; ++i)
    {
        int depthCoordX = i % m_KinectManager.GetDepthImageWidth();
        int depthCoordY = i / m_KinectManager.GetDepthImageWidth();
        // Only keep the pixels that fall inside the cropped region of the depth image.
        if (depthCoordX >= m_DepthMapOffsetX && depthCoordX < m_DepthMapLimitWidth + m_DepthMapOffsetX &&
            depthCoordY >= m_DepthMapOffsetY && depthCoordY < m_DepthMapLimitHeight + m_DepthMapOffsetY)
        {
            // A raw value of 0 means the sensor got no reading; treat it as the far limit (4500 mm).
            if (ushortRes[i] == 0)
            {
                ushortRes[i] = 4500;
            }
            // Exponential smoothing to damp the frame-to-frame noise of the raw depth stream.
            m_DepthMapBuffer[curIndex] = m_DepthMapBuffer[curIndex] * 0.8f + ushortRes[i] * 0.2f;
            ++curIndex;
        }
    }
    MapManager.Single.UpdateDepthData(m_DepthMapBuffer);
    m_ComputeBuffer.SetData(m_DepthMapBuffer);
    m_DepthBlendTextureMat.SetBuffer("_DepthBuffer", m_ComputeBuffer); // this material is explained below
}

What this code does is create a ComputeBuffer to store the depth data obtained from the Kinect, and then pass that buffer into the shader of a material.
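For completeness, here is a minimal sketch of the one-time setup that UpdateDepthMap() relies on. Only the field names that already appear in the snippet above come from the project; the Renderer lookup, the method names and the OnDestroy cleanup are my own assumptions about how it might be wired up.

void InitDepthMap()
{
    int count = m_DepthMapLimitWidth * m_DepthMapLimitHeight;

    // CPU-side array that accumulates the smoothed depth values.
    m_DepthMapBuffer = new float[count];

    // GPU buffer: one float per depth pixel of the cropped region.
    m_ComputeBuffer = new ComputeBuffer(count, sizeof(float));

    // Material carrying the "lj/DepthBlendTexture" shader shown below (assumed to sit on this object's Renderer).
    m_DepthBlendTextureMat = GetComponent<Renderer>().material;

    // The fragment shader indexes the flat buffer as a 2D map, so it needs the dimensions.
    m_DepthBlendTextureMat.SetInt("_DepthMapWidth", m_DepthMapLimitWidth);
    m_DepthBlendTextureMat.SetInt("_DepthMapHeight", m_DepthMapLimitHeight);
}

void OnDestroy()
{
    // ComputeBuffer wraps an unmanaged GPU resource and must be released explicitly.
    if (m_ComputeBuffer != null)
    {
        m_ComputeBuffer.Release();
    }
}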

2. Next, we need to define which texture corresponds to which depth value. For this project we prepared six images.

The shader on this material is covered in a moment. First, six depth values are defined, one for each image: when the depth value read from the Kinect equals one of them, the pixel of the corresponding image is shown directly; when the depth falls between the values of two images, the two textures are blended in proportion to how far the depth sits between them.
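As a rough sketch of what that could look like on the C# side (the threshold numbers below are placeholders in raw Kinect millimetres, and the array and method names are hypothetical, not the project's real values):

// Hypothetical values for illustration only; raw Kinect depth is in millimetres.
public Texture2D[] m_BlendTextures = new Texture2D[6];   // e.g. snow mountain, forest, ...
float[] depthValues = { 900f, 1200f, 1500f, 1800f, 2100f, 2400f };

void SetupBlendMaterial()
{
    for (int i = 0; i < 6; ++i)
    {
        // Matches the _BlendTexN / _DepthValueN names declared in the shader below.
        m_DepthBlendTextureMat.SetTexture("_BlendTex" + i, m_BlendTextures[i]);
        m_DepthBlendTextureMat.SetFloat("_DepthValue" + i, depthValues[i]);
    }
}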

3. Now for the key part. With the work above done, the depth data captured by the Kinect has been passed cleanly into the shader, and the six textures have been assigned as well. Everything is ready, so let's look at how the shader works:

// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'

/*
 * @authors: liangjian
 * @desc:	
*/
Shader "lj/DepthBlendTexture"
{
	Properties
	{
		_BlendTex0("Texture", 2D) = "white" {}
		_BlendTex1("Texture", 2D) = "white" {}
		_BlendTex2("Texture", 2D) = "white" {}
		_BlendTex3("Texture", 2D) = "white" {}
		_BlendTex4("Texture", 2D) = "white" {}
		_BlendTex5("Texture", 2D) = "white" {}
		_Color ("Color", Color) = (1,1,1,1)
	}
	SubShader
	{
		Tags
		{
			"Queue" = "Transparent-100"
			"RenderType" = "Transparent"
		}
		Pass
		{
			Blend SrcAlpha OneMinusSrcAlpha
			CGPROGRAM
			#pragma exclude_renderers d3d9
			// StructuredBuffer access from a fragment shader requires shader model 4.5+.
			#pragma target 4.5
			#pragma vertex vert
			#pragma fragment frag
			
			#include "UnityCG.cginc"

			struct appdata
			{
				float4 vertex : POSITION;
				float2 uv : TEXCOORD0;
			};

			struct v2f
			{
				float2 uv : TEXCOORD0;
				float4 vertex : SV_POSITION;
			};

			v2f vert (appdata v)
			{
				v2f o;
				o.vertex = UnityObjectToClipPos(v.vertex);
				o.uv = v.uv;
				return o;
			}
			
			sampler2D _BlendTex0;
			sampler2D _BlendTex1;
			sampler2D _BlendTex2;
			sampler2D _BlendTex3;
			sampler2D _BlendTex4;
			sampler2D _BlendTex5;
			fixed4 _Color;

			uniform float _DepthValue0;
			uniform float _DepthValue1;
			uniform float _DepthValue2;
			uniform float _DepthValue3;
			uniform float _DepthValue4;
			uniform float _DepthValue5;

			uniform float _UvAnimationOffsetX;
			uniform int _DepthMapWidth;
			uniform int _DepthMapHeight;

			uniform float _CloudDepthFlag_min;
			uniform float _CloudDepthFlag_max;


			StructuredBuffer<float> _DepthBuffer;

			fixed4 frag (v2f i) : SV_Target
			{
				int x = floor(i.uv.x * _DepthMapWidth);
				int y = floor(i.uv.y * _DepthMapHeight);

				float depthValue = _DepthBuffer[_DepthMapWidth* y + x];

				fixed4 blendCol0 = tex2D(_BlendTex0, i.uv);
				fixed4 blendCol1 = tex2D(_BlendTex1, i.uv);
				fixed4 blendCol2 = tex2D(_BlendTex2, i.uv);
				fixed4 blendCol3 = tex2D(_BlendTex3, i.uv);
				fixed4 blendCol4 = tex2D(_BlendTex4, i.uv);
				// The last texture scrolls horizontally via _UvAnimationOffsetX and is drawn semi-transparent.
				fixed4 blendCol5 = tex2D(_BlendTex5, float2(i.uv.x + _UvAnimationOffsetX, i.uv.y));
				blendCol5.a = 0.4f;

				float4 blendCol;
				float alpha = 0.05f;
				// _DepthValue0 is the snow mountain, which is closest to the Kinect and therefore has the smallest depth;
				// if depthValue is below it, use the snow-mountain texture as-is.
				if (depthValue < _DepthValue0)
				{
					blendCol = blendCol0;
				}
				// _DepthValue1 is the forest depth: between the snow-mountain and forest values, interpolate the two
				// textures by how far the depth sits between them. The remaining bands below follow the same pattern.
				else if (depthValue < _DepthValue1)
				{
					float offset01 = _DepthValue1 - _DepthValue0;
					float alpha1 = (depthValue - _DepthValue0) / offset01;
					blendCol = blendCol0*(1.0f - alpha1) + blendCol1*alpha1;
					alpha = alpha1;
				}
				else if (depthValue < _DepthValue2)
				{
					float offset12 = _DepthValue2 - _DepthValue1;
					float alpha2 = (depthValue - _DepthValue1) / offset12;
					blendCol = blendCol1*(1.0f - alpha2) + blendCol2*alpha2;
					alpha = alpha2;
				}
				else if (depthValue < _DepthValue3)
				{
					float offset23 = _DepthValue3 - _DepthValue2;
					float alpha3 = (depthValue - _DepthValue2) / offset23;
					blendCol = blendCol2*(1.0f - alpha3) + blendCol3*alpha3;
					alpha = alpha3;
				}
				else if (depthValue < _DepthValue4)
				{
					float offset34 = _DepthValue4 - _DepthValue3;
					float alpha4 = (depthValue - _DepthValue3) / offset34;
					blendCol = blendCol3*(1.0f - alpha4) + blendCol4*alpha4;
					alpha = alpha4;
				}
				else if (depthValue < _DepthValue5)
				{
					float offset45 = _DepthValue5 - _DepthValue4;
					float alpha5 = (depthValue - _DepthValue4) / offset45;
					blendCol = blendCol4*(1.0f - alpha5) + blendCol5*alpha5;
					alpha = alpha5;
				}
				else
				{
					blendCol = blendCol5;
				}
				// Pixels whose depth falls in this flagged band (past the ocean / above the sky) are painted black.
				if(depthValue > _CloudDepthFlag_min && depthValue < _CloudDepthFlag_max)
				{
					blendCol = fixed4(0.0f, 0.0f, 0.0f, 1.0f);
				}

				return blendCol;
			}
			ENDCG
		}
	}
	FallBack "Diffuse"
}

The fragment shader samples the pixel colours of the six textures described above. The line float depthValue = _DepthBuffer[_DepthMapWidth * y + x]; looks up the Kinect depth value at the current UV coordinate; the rest of the work is deciding which two thresholds that depth lies between and interpolating the corresponding textures.
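To make the interpolation concrete with made-up numbers (the real thresholds belong to the project and are not listed here): suppose _DepthValue0 (snow mountain) were 900 and _DepthValue1 (forest) were 1100, in raw Kinect millimetres. A sampled depth of 950 falls in that band, so alpha1 = (950 - 900) / (1100 - 900) = 0.25 and the output colour is blendCol0 * 0.75 + blendCol1 * 0.25, i.e. mostly snow mountain with a quarter of forest blended in. As the sand is dug deeper toward 1100, the forest texture takes over completely.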
