項目需要接入對話口型動作,所以將OVRLipSync集成到項目中。
1、下載OVRLipSync.unitypackage
2、導入完成後可以參看demo,我這邊主要是導入了模型動作的功能。我們主要關注的是OVRLipSync.cs,OVRLipSyncContext.cs,OVRLipSyncContextMorphTarget.cs類
3、OVRLipSync.cs 主要是導入 Dll 接口函數,需要預先初始化。可以在 Scene 中創建一個 GameObject 並綁上這個腳本。
4、我將OVRLipSyncContext.cs,OVRLipSyncContextMorphTarget.cs 合併爲一個.cs 腳本。
// Merged OVRLipSyncContext + OVRLipSyncContextMorphTarget:
// feeds AudioSource samples into the OVR lip-sync engine and drives
// blend-shape weights on a SkinnedMeshRenderer from the resulting visemes.
[RequireComponent(typeof(AudioSource))]
public class OVRLipSyncContextEx : MonoBehaviour
{
    public AudioSource audioSource = null;   // source whose samples are analyzed
    public float gain = 1.0f;                // input amplification before analysis
    public OVRLipSync.ovrLipSyncContextProvider provider = OVRLipSync.ovrLipSyncContextProvider.Main;
    public bool delayCompensate = false;
    // Smoothing amount sent to the engine (0-100). The original code referenced
    // this name without ever declaring it, which did not compile.
    public int SmoothAmount = 70;
    public SkinnedMeshRenderer skinnedMeshRenderer = null;

    // Blend-shape INDICES on skinnedMeshRenderer, one slot per viseme; -1 = no target.
    // The original code assigned string names into this int[] (compile error);
    // names are now resolved to indices in Start().
    public int[] VisemeToBlendTargets = new int[(int)OVRLipSync.ovrLipSyncViseme.Count];

    // 目標的模型動作名稱,這個美術給定 — test blend-shape names supplied by the artists,
    // resolved to indices in Start(). Last five visemes share "doubt" for now.
    private static readonly string[] TestBlendShapeNames =
    {
        "doubt", "smile", "anger", "surprise", "scare", "nervous", "upset",
        "tiresome", "bashful", "greedy", "doubt", "doubt", "doubt", "doubt", "doubt"
    };

    private OVRLipSync.ovrLipSyncFrame frame = new OVRLipSync.ovrLipSyncFrame(0);
    private uint context = 0;                           // 0 means "no native context"
    // Dedicated lock object — never lock(this); the instance is publicly reachable.
    // The lock matters because AudioFilterRead runs on Unity's audio thread.
    private readonly object contextLock = new object();

    void Awake()
    {
        // Cache the audio source we are going to be using to pump data to the SR.
        if (!audioSource) audioSource = GetComponent<AudioSource>();
    }

    void Start()
    {
        lock (contextLock)
        {
            if (context == 0)
            {
                if (OVRLipSync.CreateContext(ref context, provider) != OVRLipSync.ovrLipSyncSuccess)
                {
                    Debug.Log("OVRPhonemeContext.Start ERROR: Could not create Phoneme context.");
                    return;
                }
            }
            // AudioSourceListener forwards Unity's OnAudioFilterRead callback
            // (fires roughly every 20 ms, on the audio thread).
            AudioSourceListener listener = gameObject.GetComponent<AudioSourceListener>();
            if (listener != null)
            {
                listener.OnEventAudioFilterRead += AudioFilterRead;
            }
        }
        SendSignal(OVRLipSync.ovrLipSyncSignals.VisemeSmoothing, SmoothAmount, 0);

        // Resolve the artist-supplied blend-shape names to mesh indices.
        // GetBlendShapeIndex returns -1 for unknown names, which
        // SetVisemeToMorphTarget already treats as "no target".
        Mesh mesh = (skinnedMeshRenderer != null) ? skinnedMeshRenderer.sharedMesh : null;
        for (int i = 0; i < VisemeToBlendTargets.Length; i++)
        {
            if (mesh != null && i < TestBlendShapeNames.Length)
                VisemeToBlendTargets[i] = mesh.GetBlendShapeIndex(TestBlendShapeNames[i]);
            else
                VisemeToBlendTargets[i] = -1;
        }
    }

    void Update()
    {
        if (skinnedMeshRenderer != null)
        {
            if (GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
            {
                SetVisemeToMorphTarget();
            }
        }
    }

    void OnDestroy()
    {
        // Unhook from the listener first so the audio thread stops feeding a
        // context we are about to destroy (the original never unsubscribed).
        AudioSourceListener listener = gameObject.GetComponent<AudioSourceListener>();
        if (listener != null)
        {
            listener.OnEventAudioFilterRead -= AudioFilterRead;
        }
        lock (contextLock)
        {
            if (context != 0)
            {
                if (OVRLipSync.DestroyContext(context) != OVRLipSync.ovrLipSyncSuccess)
                {
                    Debug.Log("OVRPhonemeContext.OnDestroy ERROR: Could not delete Phoneme context.");
                }
                context = 0;
            }
        }
    }

    // Signature now matches DgtEventAudioFilterRead(float[], int, AudioObject);
    // the original (float[], int) overload could not subscribe to the delegate.
    // Runs on Unity's audio thread — keep it allocation-free.
    void AudioFilterRead(float[] data, int channels, AudioObject obj)
    {
        // Do nothing if the engine is not initialized or no source is attached.
        if ((OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess) || audioSource == null)
            return;
        // Boost the input to get a better signal for viseme detection.
        // NOTE(review): this also changes the audio the player hears, since
        // the buffer is modified in place — confirm that is intended.
        for (int i = 0; i < data.Length; ++i)
            data[i] = data[i] * gain;
        // Send data into the phoneme context for processing (if context exists).
        lock (contextLock)
        {
            if (context != 0)
            {
                OVRLipSync.ovrLipSyncFlag flags = 0;
                if (delayCompensate == true)
                    flags |= OVRLipSync.ovrLipSyncFlag.DelayCompensateAudio;
                OVRLipSync.ProcessFrameInterleaved(context, data, flags, ref frame);
            }
        }
    }

    // Copies the latest viseme frame (written by the audio thread) into inFrame.
    // Returns ovrLipSyncSuccess, or ovrLipSyncError.Unknown if uninitialized.
    public int GetCurrentPhonemeFrame(ref OVRLipSync.ovrLipSyncFrame inFrame)
    {
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
            return (int)OVRLipSync.ovrLipSyncError.Unknown;
        lock (contextLock)
        {
            inFrame.frameNumber = frame.frameNumber;
            inFrame.frameDelay = frame.frameDelay;
            for (int i = 0; i < inFrame.Visemes.Length; i++)
            {
                inFrame.Visemes[i] = frame.Visemes[i];
            }
        }
        return OVRLipSync.ovrLipSyncSuccess;
    }

    // Resets the native phoneme context (e.g. when switching clips).
    public int ResetContext()
    {
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
            return (int)OVRLipSync.ovrLipSyncError.Unknown;
        return OVRLipSync.ResetContext(context);
    }

    // Sends a tuning signal (e.g. VisemeSmoothing) to the native context.
    public int SendSignal(OVRLipSync.ovrLipSyncSignals signal, int arg1, int arg2)
    {
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
            return (int)OVRLipSync.ovrLipSyncError.Unknown;
        return OVRLipSync.SendSignal(context, signal, arg1, arg2);
    }

    // Applies the current viseme weights (0..1) as blend-shape weights (0..100).
    void SetVisemeToMorphTarget()
    {
        for (int i = 0; i < VisemeToBlendTargets.Length; i++)
        {
            if (VisemeToBlendTargets[i] != -1)
            {
                skinnedMeshRenderer.SetBlendShapeWeight(VisemeToBlendTargets[i], frame.Visemes[i] * 100.0f);
            }
        }
    }
}
// 以上是聲音綁定在模型上播放的做法。由於很多聲音的播放相對獨立,可以改為實現以下函數。
// Accumulates frame time so viseme sampling matches the ~23 ms cadence of
// OnAudioFilterRead instead of running every rendered frame.
float m_detaTime = 0;
// Polls the lip-sync engine while speech is active and pushes the latest
// viseme frame onto the morph targets. Written with guard clauses instead
// of the original nested ifs; behavior is unchanged.
void Update()
{
    if (!m_bSpeak)
        return;

    // speakKey identifies the clip being spoken; nothing to do if unknown.
    AudioItem item = AudioController.GetAudioItem(speakKey);
    if (item == null)
        return;

    // m_obj is the GameObject playing the voice (supplied externally).
    if (!m_obj)
        return;

    AudioSource ads = m_obj.GetComponent<AudioSource>();
    if (!ads)
        return;

    // Unity's OnAudioFilterRead fires roughly every 23 ms (≈ 2048 samples at
    // 48 kHz stereo). Sampling a viseme frame more often than new data arrives
    // caused visible lip jitter in the demo, so throttle to that cadence.
    if (m_detaTime < 0.023f)
    {
        m_detaTime += Time.deltaTime;
        return;
    }

    // Fetch the current viseme frame and apply it to the blend shapes.
    if (GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
    {
        SetVisemeToMorphTarget();
    }
    m_detaTime = 0;
}
增加一個 delegate 回調類,掛在帶有 AudioSource 的 GameObject 下:
// Relay component placed next to an AudioSource: forwards Unity's
// OnAudioFilterRead audio-thread callback to external subscribers,
// together with the AudioObject that owns this source.
public class AudioSourceListener : MonoBehaviour
{
    public delegate void DgtEventAudioFilterRead(float[] data, int channels, AudioObject obj);
    public DgtEventAudioFilterRead OnEventAudioFilterRead;

    private AudioObject m_audio_obj;

    public void Start()
    {
        // Cache the owning AudioObject once on the main thread;
        // GetComponent must not be called from the audio thread.
        m_audio_obj = this.gameObject.GetComponent<AudioObject>();
    }

    // Called by Unity on the audio thread (~every 20-23 ms).
    // (The original empty Update() was removed: Unity invokes empty magic
    // methods via reflection every frame, which is pure overhead.)
    void OnAudioFilterRead(float[] data, int channels)
    {
        // Snapshot the delegate so a concurrent unsubscribe on the main
        // thread cannot null it between the check and the invocation.
        DgtEventAudioFilterRead handler = OnEventAudioFilterRead;
        if (handler != null)
        {
            handler(data, channels, m_audio_obj);
        }
    }
}