Diffstat (limited to 'Runtime/Video')
-rw-r--r--  Runtime/Video/BaseVideoTexture.cpp                          517
-rw-r--r--  Runtime/Video/BaseVideoTexture.h                            110
-rw-r--r--  Runtime/Video/MoviePlayback.cpp                             879
-rw-r--r--  Runtime/Video/MoviePlayback.h                               212
-rw-r--r--  Runtime/Video/MovieTexture.cpp                              262
-rw-r--r--  Runtime/Video/MovieTexture.h                                 77
-rw-r--r--  Runtime/Video/ScriptBindings/MovieTextureBindings.txt        92
-rw-r--r--  Runtime/Video/ScriptBindings/UnityEngineWebCamTexture.txt   200
-rw-r--r--  Runtime/Video/VideoTexture.h                                252
9 files changed, 2601 insertions, 0 deletions
diff --git a/Runtime/Video/BaseVideoTexture.cpp b/Runtime/Video/BaseVideoTexture.cpp
new file mode 100644
index 0000000..414c776
--- /dev/null
+++ b/Runtime/Video/BaseVideoTexture.cpp
@@ -0,0 +1,517 @@
+#include "UnityPrefix.h"
+#include "BaseVideoTexture.h"
+#include "Runtime/Utilities/BitUtility.h"
+#include "Runtime/Graphics/Image.h"
+#include "Runtime/GfxDevice/GfxDevice.h"
+
+using std::vector;
+
+extern void GenerateLookupTables ();
+extern bool IsNPOTTextureAllowed(bool hasMipMap);
+
+void BaseVideoTexture::UploadTextureData() //Upload image buffer to texture memory
+{
+ if(m_ImageBuffer)
+ {
+ // pad to assure clamping works
+ if (m_PaddedHeight > m_VideoHeight && m_PaddedHeight > 1)
+ for (int x = 0;x < m_PaddedWidth;x++)
+ m_ImageBuffer[m_VideoHeight * m_PaddedWidth + x] = m_ImageBuffer[(m_VideoHeight - 1)*m_PaddedWidth + x];
+ if (m_PaddedWidth > m_VideoWidth && m_PaddedWidth > 1)
+ for (int y = 0;y < m_PaddedHeight;y++)
+ m_ImageBuffer[y * m_PaddedWidth + m_VideoWidth] = m_ImageBuffer[y * m_PaddedWidth + m_VideoWidth - 1];
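+		// (Duplicating the last real row/column into the pad texels keeps
+		// bilinear sampling with clamp wrapping from reading garbage at the
+		// video's right/bottom edge.)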
+
+ // Image buffer is 32 bits per pixel
+ int dataSize = m_PaddedWidth * m_PaddedHeight * sizeof(UInt32);
+ GetGfxDevice().UploadTextureSubData2D( GetTextureID(), (UInt8*)m_ImageBuffer, dataSize, 0, 0, 0, m_PaddedWidth, m_PaddedHeight, GetBufferTextureFormat(), GetActiveTextureColorSpace() );
+ }
+
+ m_DidUpdateThisFrame = m_ImageBuffer || !m_IsReadable;
+}
+
+///@TODO: THIS IS NOT THREAD SAFE
+
+typedef UNITY_VECTOR(kMemTexture, BaseVideoTexture*) VideoList;
+VideoList gVideoList;
+
+void BaseVideoTexture::UpdateVideoTextures()
+{
+ for(VideoList::iterator i=gVideoList.begin();i!=gVideoList.end();++i)
+ {
+ (**i).m_DidUpdateThisFrame = false;
+ if((**i).m_EnableUpdates)
+ (**i).Update();
+ }
+}
+
+void BaseVideoTexture::PauseVideoTextures()
+{
+ for(VideoList::iterator i=gVideoList.begin();i!=gVideoList.end();++i)
+ (**i).Pause();
+}
+
+void BaseVideoTexture::StopVideoTextures()
+{
+ for(VideoList::iterator i=gVideoList.begin();i!=gVideoList.end();++i)
+ {
+ (**i).Stop();
+
+ // Call this to reset video texture frame contents to black.
+ (**i).UnloadFromGfxDevice(false);
+ (**i).UploadToGfxDevice();
+ }
+}
+
+void BaseVideoTexture::SuspendVideoTextures()
+{
+ for(VideoList::iterator i=gVideoList.begin();i!=gVideoList.end();++i)
+ (**i).Suspend();
+}
+
+void BaseVideoTexture::ResumeVideoTextures()
+{
+ for(VideoList::iterator i=gVideoList.begin();i!=gVideoList.end();++i)
+ (**i).Resume();
+}
+
+void BaseVideoTexture::InitVideoMemory(int width, int height)
+{
+ m_VideoWidth = width;
+ m_VideoHeight = height;
+
+ m_TextureWidth = IsNPOTTextureAllowed(false) ? m_VideoWidth : NextPowerOfTwo(m_VideoWidth);
+ m_TextureHeight = IsNPOTTextureAllowed(false) ? m_VideoHeight : NextPowerOfTwo(m_VideoHeight);
+
+ m_PaddedHeight = m_VideoHeight + 1;
+ m_PaddedWidth = m_VideoWidth + 1;
+
+ if( m_PaddedHeight > m_TextureHeight )
+ m_PaddedHeight = m_TextureHeight;
+ if( m_PaddedWidth > m_TextureWidth )
+ m_PaddedWidth = m_TextureWidth;
+
+ if(m_IsReadable)
+ {
+ // The allocated buffer for one frame has extra line before the pointer
+ // we use in all operations. YUV decoding code operates two lines at a time,
+ // goes backwards and thus needs extra line before the buffer in case of odd
+ // movie sizes.
+
+
+ if (m_PaddedHeight+1 < m_PaddedHeight)
+ {
+ ErrorString("integer overflow in addition");
+ return;
+ }
+
+ int tmp = m_PaddedWidth * (m_PaddedHeight+1);
+ if ((m_PaddedHeight+1) != tmp / m_PaddedWidth)
+ {
+ ErrorString("integer overflow in multiplication");
+ return;
+ }
+
+ int tmp2 = tmp * sizeof(UInt32);
+
+ if (tmp != tmp2/sizeof(UInt32))
+ {
+ ErrorString("integer overflow in multiplication");
+ return;
+ }
+
+ UInt32* realBuffer = (UInt32*)UNITY_MALLOC(GetMemoryLabel(), m_PaddedWidth * (m_PaddedHeight+1) * sizeof(UInt32));
+ m_ImageBuffer = realBuffer + m_PaddedWidth;
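+		// Layout: realBuffer -> [ guard line ][ row 0 ][ row 1 ] ...
+		//                       m_ImageBuffer points at row 0, one padded
+		//                       row past the start of the allocation.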
+ // Make sure to set the alpha in the image buffer, as it is not updated from the movie data.
+ for(int i=0; i<m_PaddedWidth * m_PaddedHeight;i++)
+ #if UNITY_LITTLE_ENDIAN
+ m_ImageBuffer[i]=0x000000ff;
+ #else
+ m_ImageBuffer[i]=0xff000000;
+ #endif
+ }
+
+ CreateGfxTextureAndUploadData(false);
+}
+
+void BaseVideoTexture::UploadGfxTextureBuffer(UInt32* imgBuf)
+{
+ TextureID texName = GetTextureID();
+
+ int const dataSize = m_TextureWidth * m_TextureHeight * 4;
+ GetGfxDevice().UploadTexture2D( texName, kTexDim2D, reinterpret_cast<UInt8*>(imgBuf), dataSize,
+ m_TextureWidth, m_TextureHeight, GetBufferTextureFormat(), 1, GfxDevice::kUploadTextureDontUseSubImage, 0, kTexUsageNone, GetActiveTextureColorSpace() );
+ Texture::s_TextureIDMap.insert (std::make_pair(texName,this));
+}
+
+void BaseVideoTexture::CreateGfxTextureAndUploadData(bool uploadCurrentBuffer)
+{
+ if(m_IsReadable)
+ {
+ if (m_TextureWidth == m_PaddedWidth && m_TextureHeight == m_PaddedHeight)
+ {
+ // Simply upload the buffer that we currently have; its size is the same
+ // as the size of the temporary buffer that we would create.
+ Assert(m_ImageBuffer != NULL);
+
+ UploadGfxTextureBuffer(m_ImageBuffer);
+
+ // The image buffer already uploaded, no need to duplicate the work
+ uploadCurrentBuffer = false;
+ }
+ else // image buffer size differs from texture size
+ {
+			// Since we are using a buffer smaller than the actual texture for continuous updates,
+ // we need a bigger temp buffer once to initialize the texture contents.
+ UInt32* tmpBuffer;
+ ALLOC_TEMP_ALIGNED (tmpBuffer, UInt32, m_TextureWidth * m_TextureHeight, sizeof(UInt32));
+
+ // Don't upload garbage texture.
+ for(int i=0; i<m_TextureWidth * m_TextureHeight;i++)
+ #if UNITY_LITTLE_ENDIAN
+ tmpBuffer[i]=0x000000ff;
+ #else
+ tmpBuffer[i]=0xff000000;
+ #endif
+
+ UploadGfxTextureBuffer(tmpBuffer);
+ }
+
+ if (uploadCurrentBuffer)
+ UploadTextureData(); // Upload the buffer to the created texture
+
+ m_DidUpdateThisFrame = true;
+ }
+
+ GetGfxDevice().SetTextureParams( GetTextureID(), kTexDim2D, kTexFilterBilinear, kTexWrapClamp, 1, false, GetActiveTextureColorSpace() );
+ // uvScale, so we can use the texture as if it was a normal power-of-two texture
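+	// e.g. a 320x240 movie inside a 512x256 power-of-two texture gets
+	// uvScale (0.625, 0.9375).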
+ SetUVScale( m_VideoWidth/(float)m_TextureWidth, m_VideoHeight/(float)m_TextureHeight );
+}
+
+void BaseVideoTexture::ReleaseVideoMemory()
+{
+ if( m_ImageBuffer )
+ {
+ // The allocated buffer for one frame has extra line before the pointer
+ // we use in all operations. YUV decoding code operates two lines at a time,
+ // goes backwards and thus needs extra line before the buffer in case of odd
+ // movie sizes.
+ UInt32* realBuffer = m_ImageBuffer - m_PaddedWidth;
+ UNITY_FREE(GetMemoryLabel(), realBuffer);
+
+ m_ImageBuffer = NULL;
+ }
+}
+
+BaseVideoTexture::BaseVideoTexture(MemLabelId label, ObjectCreationMode mode)
+: Texture(label, mode)
+{
+ m_VideoWidth = m_VideoHeight = 16;
+ m_TextureWidth = m_TextureHeight = 16;
+ m_PaddedWidth = m_PaddedHeight = 0;
+ m_DidUpdateThisFrame = false;
+
+ m_ImageBuffer = NULL;
+
+ m_EnableUpdates = false;
+ m_IsReadable = true;
+
+ {
+ SET_ALLOC_OWNER(NULL);
+ gVideoList.push_back(this);
+ GenerateLookupTables ();
+ }
+}
+
+BaseVideoTexture::~BaseVideoTexture()
+{
+ ReleaseVideoMemory ();
+
+ for(VideoList::iterator i=gVideoList.begin();i!=gVideoList.end();i++)
+ {
+ if(*i==this)
+ {
+ gVideoList.erase(i);
+ break;
+ }
+ }
+
+ GetGfxDevice().DeleteTexture( GetTextureID() );
+}
+
+void BaseVideoTexture::SetReadable(bool readable)
+{
+ if(CanSetReadable(readable))
+ m_IsReadable = readable;
+}
+
+bool BaseVideoTexture::ExtractImage (ImageReference* image, int imageIndex) const
+{
+ if(m_ImageBuffer)
+ {
+ ImageReference source (m_VideoWidth, m_VideoHeight, m_PaddedWidth*4,GetBufferTextureFormat(),m_ImageBuffer);
+ image->BlitImage( source, ImageReference::BLIT_COPY );
+
+ return true;
+ }
+ else
+ return false;
+}
+
+static int sAdjCrr[256];
+static int sAdjCrg[256];
+static int sAdjCbg[256];
+static int sAdjCbb[256];
+static int sAdjY[256];
+static UInt8 sClampBuff[1024];
+static UInt8* sClamp = sClampBuff + 384;
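+// sClamp points into the middle of sClampBuff so it can be indexed with
+// negative values; GenerateLookupTables() fills the valid range [-384, 640)
+// so that e.g. sClamp[-5] == 0 and sClamp[300] == 255.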
+
+
+#define PROFILE_YUV_CONVERSION 0
+
+#if PROFILE_YUV_CONVERSION
+static __int64 GetCpuTicks ()
+{
+#if defined(GEKKO)
+ return OSGetTick ();
+#else
+ __asm rdtsc;
+ // eax/edx returned
+#endif
+}
+#endif
+
+// precalculate adjusted YUV values for faster RGB conversion
+void GenerateLookupTables ()
+{
+ static bool generated = false;
+ if (generated)
+ return;
+ int i;
+
+ for (i = 0; i < 256; i++)
+ {
+ sAdjCrr[i] = (409 * (i - 128) + 128) >> 8;
+ sAdjCrg[i] = (208 * (i - 128) + 128) >> 8;
+ sAdjCbg[i] = (100 * (i - 128) + 128) >> 8;
+ sAdjCbb[i] = (516 * (i - 128) + 128) >> 8;
+ sAdjY[i] = (298 * (i - 16)) >> 8;
+ }
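+	// e.g. sAdjY[16] == 0 and sAdjY[235] == (298 * 219) >> 8 == 254, so the
+	// nominal 16..235 luma range expands to nearly the full 0..255 output range.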
+
+ // and setup LUT clamp range
+ for (i = -384; i < 0; i++)
+ sClamp[i] = 0;
+ for (i = 0; i < 256; i++)
+ sClamp[i] = i;
+ for (i = 256; i < 640; i++)
+ sClamp[i] = 255;
+ generated = true;
+}
+
+void BaseVideoTexture::YuvToRgb (const YuvFrame *yuv)
+{
+ #if PROFILE_YUV_CONVERSION
+ __int64 time0 = GetCpuTicks();
+ #endif
+ UInt8 *rgbBuffer = (UInt8*)GetImageBuffer ();
+ int const rowBytes = GetRowBytesFromWidthAndFormat(GetPaddedWidth(), GetBufferTextureFormat());
+
+	// For reasons not fully understood, an audio track placed into an audio
+	// source in the scene with play on load checked can cause the first decoded
+	// frame to be garbage (with yuv->u set to NULL).
+ if ( yuv->u == NULL ) {
+ return;
+ }
+
+ // NOTE: this code goes backwards in lines, two lines at a time. Thus for
+ // odd image sizes it can under-run rgbBuffer. BaseVideoTexture code makes
+ // sure there's one line worth of allocated memory before the passed
+ // rgbBuffer.
+
+ // get destination buffer (and 1 row offset)
+ UInt8* dst0 = rgbBuffer + (yuv->height - 1)*rowBytes;
+ UInt8 *dst1 = dst0 - rowBytes;
+
+ // find picture offset
+ int yOffset = yuv->y_stride * yuv->offset_y + yuv->offset_x;
+ int uvOffset = yuv->uv_stride * (yuv->offset_y / 2) + (yuv->offset_x / 2);
+ const int uvStep = yuv->uv_step;
+
+ for ( int y = 0; y < yuv->height; y += 2 )
+ {
+ UInt8 *lineStart = dst1;
+
+ // set pointers into yuv buffers (2 lines for y)
+ const UInt8 *pY0 = yuv->y + yOffset + y * (yuv->y_stride);
+ const UInt8 *pY1 = yuv->y + yOffset + (y | 1) * (yuv->y_stride);
+ const UInt8 *pU = yuv->u + uvOffset + ((y * (yuv->uv_stride)) >> 1);
+ const UInt8 *pV = yuv->v + uvOffset + ((y * (yuv->uv_stride)) >> 1);
+
+ for (int x = 0; x < yuv->width; x += 2)
+ {
+ // convert a 2x2 block over
+ const int yy00 = sAdjY[pY0[0]];
+ const int yy10 = sAdjY[pY0[1]];
+ const int yy01 = sAdjY[pY1[0]];
+ const int yy11 = sAdjY[pY1[1]];
+
+ // Compute RGB offsets
+ const int vv = *pV;
+ const int uu = *pU;
+ const int R = sAdjCrr[vv];
+ const int G = sAdjCrg[vv] + sAdjCbg[uu];
+ const int B = sAdjCbb[uu];
+
+ // pixel 0x0
+ dst0++;
+ *dst0++ = sClamp[yy00 + R];
+ *dst0++ = sClamp[yy00 - G];
+ *dst0++ = sClamp[yy00 + B];
+
+ // pixel 1x0
+ dst0++;
+ *dst0++ = sClamp[yy10 + R];
+ *dst0++ = sClamp[yy10 - G];
+ *dst0++ = sClamp[yy10 + B];
+
+ // pixel 0x1
+ dst1++;
+ *dst1++ = sClamp[yy01 + R];
+ *dst1++ = sClamp[yy01 - G];
+ *dst1++ = sClamp[yy01 + B];
+
+ // pixel 1x1
+ dst1++;
+ *dst1++ = sClamp[yy11 + R];
+ *dst1++ = sClamp[yy11 - G];
+ *dst1++ = sClamp[yy11 + B];
+
+
+ pY0 += 2;
+ pY1 += 2;
+ pV += uvStep;
+ pU += uvStep;
+ }
+
+ // shift the destination pointers a row (loop increments 2 at a time)
+ dst0 = lineStart - rowBytes;
+ dst1 = dst0 - rowBytes;
+ }
+
+ #if PROFILE_YUV_CONVERSION
+ __int64 time1 = GetCpuTicks();
+ {
+ __int64 deltaTime = (time1 - time0) / 1000;
+ static __int64 accumTime = 0;
+ static int counter = 0;
+ accumTime += deltaTime;
+ ++counter;
+ if ( counter == 20 )
+ {
+ printf_console( "YUV Kclocks per frame: %i\n", (int)(accumTime / counter) );
+ counter = 0;
+ accumTime = 0;
+ }
+ }
+ #endif
+}
+
+//// Math! (Reference implementation)
+//// http://en.wikipedia.org/wiki/YUV#Y.27UV422_to_RGB888_conversion
+//static inline UInt32 ConvertYUYVtoRGBImpl(int c, int d, int e)
+//{
+// int red = 0,
+// green = 0,
+// blue = 0;
+//
+// red = std::min (UCHAR_MAX, (298 * c + 409 * e + 128) >> 8);
+// green = std::min (UCHAR_MAX, (298 * c - 100 * d - 208 * e + 128) >> 8);
+// blue = std::min (UCHAR_MAX, (298 * c + 516 * d + 128) >> 8);
+//
+// return (red << 8) | (green << 16) | (blue << 24) | 0xff;
+//}
+//
+//static inline UInt32 ConvertYCrCbToRGB(int y, int u, int v)
+//{
+// return ConvertYUYVtoRGBImpl(y - 16, u - 128, v - 128);
+//}
+
+// LUT-based implementation
+void BaseVideoTexture::YUYVToRGBA (UInt16 *const src)
+{
+ YUYVToRGBA(src, GetPaddedWidth ());
+}
+
+void BaseVideoTexture::YUYVToRGBA (UInt16 *const src, int srcStride)
+{
+ #if PROFILE_YUV_CONVERSION
+ __int64 time0 = GetCpuTicks();
+ #endif
+
+ UInt16 *srcYUYV = src;
+ UInt8 *destRGBA = reinterpret_cast<UInt8*> (GetImageBuffer ());
+ int const destStride = GetRowBytesFromWidthAndFormat(GetPaddedWidth(), GetBufferTextureFormat());
+ int const widthInPixels = GetDataWidth ();
+ int const heightInPixels = GetDataHeight ();
+ int y0;
+ int u;
+ int y1;
+ int v;
+ int red;
+ int green;
+ int blue;
+
+ destRGBA += (heightInPixels - 1) * destStride;
+
+ // Lines within the destination rectangle.
+ for (int y = 0; y < heightInPixels; ++y)
+ {
+ UInt8 *srcPixel = reinterpret_cast<UInt8*> (srcYUYV);
+
+ // Increment by widthInPixels does not necessarily
+ // mean that dstPixel is incremented by the stride,
+ // so we keep a separate pointer.
+ UInt8 *dstPixel = destRGBA;
+
+ for (int x = 0; (x + 1) < widthInPixels; x += 2)
+ {
+ // Byte order is Y0 U0 Y1 V0
+ // Each word is a byte pair (Y, U/V)
+ y0 = sAdjY[*srcPixel++];
+ u = *srcPixel++;
+ y1 = sAdjY[*srcPixel++];
+ v = *srcPixel++;
+ red = sAdjCrr[v];
+ green = sAdjCrg[v] + sAdjCbg[u];
+ blue = sAdjCbb[u];
+
+ *dstPixel++ = 0xFF;
+ *dstPixel++ = sClamp[y0 + red];
+ *dstPixel++ = sClamp[y0 - green];
+ *dstPixel++ = sClamp[y0 + blue];
+ *dstPixel++ = 0xFF;
+ *dstPixel++ = sClamp[y1 + red];
+ *dstPixel++ = sClamp[y1 - green];
+ *dstPixel++ = sClamp[y1 + blue];
+ }
+ destRGBA -= destStride;
+ srcYUYV += srcStride;
+ }
+
+ #if PROFILE_YUV_CONVERSION
+ __int64 time1 = GetCpuTicks();
+ {
+ __int64 deltaTime = (time1 - time0) / 1000;
+ static __int64 accumTime = 0;
+ static int counter = 0;
+ accumTime += deltaTime;
+ ++counter;
+ if ( counter == 20 )
+ {
+ printf_console( "YUYV Kclocks per frame: %i\n", (int)(accumTime / counter) );
+ counter = 0;
+ accumTime = 0;
+ }
+ }
+ #endif
+}
+
diff --git a/Runtime/Video/BaseVideoTexture.h b/Runtime/Video/BaseVideoTexture.h
new file mode 100644
index 0000000..378acbc
--- /dev/null
+++ b/Runtime/Video/BaseVideoTexture.h
@@ -0,0 +1,110 @@
+#ifndef VIDEO_TEXTURE
+#define VIDEO_TEXTURE
+
+#include "Runtime/Graphics/Texture.h"
+
+struct YuvFrame
+{
+ unsigned char* y;
+ unsigned char* u;
+ unsigned char* v;
+ int width;
+ int height;
+ int y_stride;
+ int uv_stride;
+ int offset_x;
+ int offset_y;
+ int uv_step;
+};
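+// y/u/v point into the decoder's plane buffers; y_stride/uv_stride are bytes
+// per row. uv_step is the pointer increment between horizontally adjacent
+// chroma samples: 1 for planar U/V (what Theora produces), larger for
+// interleaved chroma layouts.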
+
+class BaseVideoTexture: public Texture
+{
+protected:
+ virtual ~BaseVideoTexture();
+public:
+ BaseVideoTexture (MemLabelId label, ObjectCreationMode mode);
+
+ void InitVideoMemory(int width, int height);
+ void ReleaseVideoMemory ();
+
+ virtual void UnloadFromGfxDevice(bool forceUnloadAll) { }
+ virtual void UploadToGfxDevice() { }
+
+ UInt32 *GetImageBuffer() const {return m_ImageBuffer;}
+
+ virtual int GetDataWidth() const {return m_VideoWidth; }
+ virtual int GetDataHeight() const {return m_VideoHeight; }
+
+ int GetPaddedHeight() const {return m_PaddedHeight; }
+ int GetPaddedWidth() const {return m_PaddedWidth; }
+
+ int GetTextureHeight() const {return m_TextureHeight; }
+ int GetTextureWidth() const {return m_TextureWidth; }
+
+ virtual bool HasMipMap () const { return false; }
+ virtual int CountMipmaps() const { return 1; }
+
+ bool IsReadable() const { return m_IsReadable; }
+ void SetReadable(bool readable);
+
+ void UploadTextureData();
+
+ virtual void Update() = 0;
+ virtual void Play() { m_EnableUpdates = true; }
+ virtual void Pause() { m_EnableUpdates = false; }
+ virtual void Stop() { m_EnableUpdates = false; }
+ virtual bool IsPlaying() const { return m_EnableUpdates; }
+ virtual void Suspend() {}
+ virtual void Resume() {}
+
+ static void UpdateVideoTextures();
+ static void PauseVideoTextures();
+ static void StopVideoTextures();
+
+ // Useful for platforms like WinRT that can lose DX device on app switch
+ static void SuspendVideoTextures();
+ static void ResumeVideoTextures();
+
+ virtual TextureDimension GetDimension () const { return kTexDim2D; }
+
+ virtual int GetRuntimeMemorySize() const { return m_TextureWidth * m_TextureHeight * 4; }
+ #if UNITY_EDITOR
+ virtual int GetStorageMemorySize() const { return 0; }
+ virtual TextureFormat GetEditorUITextureFormat () const { return kTexFormatARGB32; }
+ #endif
+
+ virtual bool ExtractImage (ImageReference* image, int imageIndex = 0) const;
+
+ bool DidUpdateThisFrame () const { return m_DidUpdateThisFrame; };
+
+ void YuvToRgb (const YuvFrame *yuv);
+ void YUYVToRGBA (UInt16 *const src);
+ void YUYVToRGBA (UInt16 *const src, int srcStride);
+
+ virtual int GetVideoRotationAngle() const { return 0; }
+ virtual bool IsVideoVerticallyMirrored() const { return false; }
+
+private:
+ UInt32* m_ImageBuffer; //texture image buffer
+
+ int m_VideoWidth, m_VideoHeight; //height and width of video source
+ int m_TextureWidth, m_TextureHeight; //power-of-two texture dimensions
+ int m_PaddedWidth, m_PaddedHeight; //movie size padded by 1 if non-power-of-two in order to fix clamping
+
+ bool m_EnableUpdates;
+ bool m_DidUpdateThisFrame;
+ bool m_IsReadable;
+
+private:
+ void UploadGfxTextureBuffer(UInt32* imgBuf);
+
+protected:
+ void CreateGfxTextureAndUploadData(bool uploadCurrentBuffer);
+
+ virtual TextureFormat GetBufferTextureFormat() const { return kTexFormatARGB32; }
+ // by default we support only readable textures, as we create mem buffer anyway
+ virtual bool CanSetReadable(bool readable) const { return readable ? true : false; }
+};
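+// Subclasses (e.g. MovieTexture, WebCamTexture) are expected to call
+// InitVideoMemory() once the source size is known, decode into
+// GetImageBuffer() (e.g. via YuvToRgb/YUYVToRGBA) from their Update()
+// override, and then call UploadTextureData() to push the frame to the GPU.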
+
+
+#endif
diff --git a/Runtime/Video/MoviePlayback.cpp b/Runtime/Video/MoviePlayback.cpp
new file mode 100644
index 0000000..bae2372
--- /dev/null
+++ b/Runtime/Video/MoviePlayback.cpp
@@ -0,0 +1,879 @@
+#include "UnityPrefix.h"
+#include "MoviePlayback.h"
+
+#if ENABLE_MOVIES
+
+#include "MovieTexture.h"
+#include "Runtime/Input/TimeManager.h"
+#include "Runtime/Audio/AudioManager.h"
+#include "Runtime/Audio/AudioClip.h"
+#include "Runtime/Audio/AudioSource.h"
+#include "Runtime/Misc/ReproductionLog.h"
+
+#if UNITY_EDITOR
+//editor uses custom ogg, not the one from fmod, because it also needs the encoding functionality, which is not present in the fmod one.
+#include "../../External/Audio/libogg/include/ogg/ogg.h"
+#else
+#include <ogg/ogg.h> //rely on include directories to pick the ogg.h from fmod for this specific platform.
+#endif
+
+#include "Runtime/Utilities/Utility.h"
+
+#if !UNITY_EDITOR
+#include <vorbis/window.h>
+#define ogg_sync_init FMOD_ogg_sync_init
+#define ogg_sync_buffer FMOD_ogg_sync_buffer
+#define ogg_sync_wrote FMOD_ogg_sync_wrote
+#define ogg_stream_pagein FMOD_ogg_stream_pagein
+#define ogg_sync_pageout FMOD_ogg_sync_pageout
+#define ogg_page_bos FMOD_ogg_page_bos
+#define ogg_stream_init FMOD_ogg_stream_init
+#define ogg_stream_pagein FMOD_ogg_stream_pagein
+#define ogg_stream_packetout FMOD_ogg_stream_packetout
+#define vorbis_synthesis_headerin FMOD_vorbis_synthesis_headerin
+#define ogg_stream_clear FMOD_ogg_stream_clear
+#define vorbis_info_init FMOD_vorbis_info_init
+#define vorbis_comment_init FMOD_vorbis_comment_init
+#define ogg_stream_packetout FMOD_ogg_stream_packetout
+#define vorbis_synthesis_init FMOD_vorbis_synthesis_init
+#define vorbis_block_init FMOD_vorbis_block_init
+#define vorbis_info_clear FMOD_vorbis_info_clear
+#define vorbis_comment_clear FMOD_vorbis_comment_clear
+#define vorbis_synthesis_pcmout FMOD_vorbis_synthesis_pcmout
+#define vorbis_synthesis_read FMOD_vorbis_synthesis_read
+#define ogg_stream_packetout FMOD_ogg_stream_packetout
+#define vorbis_synthesis FMOD_vorbis_synthesis
+#define vorbis_synthesis_blockin FMOD_vorbis_synthesis_blockin
+#define vorbis_comment_clear FMOD_vorbis_comment_clear
+#define vorbis_info_clear FMOD_vorbis_info_clear
+#define ogg_stream_clear FMOD_ogg_stream_clear
+#define vorbis_block_clear FMOD_vorbis_block_clear
+#define vorbis_dsp_clear FMOD_vorbis_dsp_clear
+#define ogg_stream_clear FMOD_ogg_stream_clear
+#define ogg_sync_clear FMOD_ogg_sync_clear
+#define ogg_sync_reset FMOD_ogg_sync_reset
+#define ogg_page_serialno FMOD_ogg_page_serialno
+#define FMOD_OGG_PRE kFMOD_OGG_context,
+#else
+#define FMOD_OGG_PRE
+#endif
+
+#if UNITY_EDITOR
+//editor uses custom ogg, not the one from fmod, because it also needs the encoding functionality, which is not present in the fmod one.
+#include "../../External/Audio/libvorbis/include/vorbis/vorbisfile.h"
+#include "../../External/Audio/libvorbis/include/vorbis/codec.h"
+#else
+#include <vorbis/vorbisfile.h> //rely on include directories to pick vorbisfile.h from fmod for this specific platform.
+#endif
+
+#include "assert.h"
+
+#define DEBUG_MOVIES 0
+#define kAudioBufferSize (16 * 1024)
+
+void* kFMOD_OGG_context = NULL;
+
+#if !UNITY_EDITOR
+// FMOD doesn't implement this - and we need it to determine the duration of the movie
+char *vorbis_comment_query(vorbis_comment *vc, const char *tag, int count){
+ ogg_int32_t i;
+ int found = 0;
+ int taglen = strlen(tag)+1; /* +1 for the = we append */
+ char *fulltag = (char*)alloca(taglen+ 1);
+
+ strcpy(fulltag, tag);
+ strcat(fulltag, "=");
+
+ for(i=0;i<vc->comments;i++){
+ if(!strncmp(vc->user_comments[i], fulltag, taglen)){
+ if(count == found)
+ /* We return a pointer to the data, not a copy */
+ return vc->user_comments[i] + taglen;
+ else
+ found++;
+ }
+ }
+ return NULL; /* didn't find anything */
+ }
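+// e.g. vorbis_comment_query(&vc, "DURATION", 0) returns a pointer into the
+// value of the first "DURATION=..." user comment, or NULL if the tag is absent.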
+#endif // !UNITY_EDITOR
+
+//Init structures
+MoviePlayback::MoviePlayback()
+{
+ m_InitialisedLoad = false;
+ m_VorbisInitialised = false;
+ m_VorbisStateInitialised = false;
+ m_TheoraInitialised = false;
+ m_TheoraStateInitialised = false;
+
+ m_AudioBuffer = (ogg_int16_t*)UNITY_MALLOC(kMemAudioData, kAudioBufferSize);
+
+#if !UNITY_EDITOR
+ _FMOD_vorbis_window_init();
+#endif
+
+ /* start up Ogg stream synchronization layer */
+ ogg_sync_init(&m_OggSynchState);
+
+ m_StartTime = 0.0;
+ m_Texture = NULL;
+ m_IsPlaying = false;
+ m_Loop = false;
+ m_Duration = -1;
+ m_AudioChannel = NULL;
+ m_AudioClip = NULL;
+#if ENABLE_WWW
+ m_DataStream = NULL;
+#endif
+
+#if UNITY_EDITOR
+ //shut up gcc warnings.
+ UNUSED(OV_CALLBACKS_DEFAULT);
+ UNUSED(OV_CALLBACKS_NOCLOSE);
+ UNUSED(OV_CALLBACKS_STREAMONLY);
+ UNUSED(OV_CALLBACKS_STREAMONLY_NOCLOSE);
+#endif
+}
+
+// Read data from in into the ogg synch state. returns bytes read.
+#define kReadChunkSize 4096
+int MoviePlayback::ReadBufferIntoOggStream()
+{
+ char *buffer = ogg_sync_buffer(FMOD_OGG_PRE &m_OggSynchState, kReadChunkSize);
+ unsigned int read = m_Data.size - m_Data.position;
+ if (read > kReadChunkSize)
+ read = kReadChunkSize;
+ memcpy(buffer, m_Data.data + m_Data.position, read);
+ ogg_sync_wrote(&m_OggSynchState, read);
+ m_Data.position += read;
+ return read;
+}
+
+/* helper: push a page into the appropriate stream */
+/* this can be done blindly; a stream won't accept a page
+that doesn't belong to it */
+void MoviePlayback::QueueOggPageIntoStream()
+{
+ if (m_TheoraStateInitialised)
+ ogg_stream_pagein(FMOD_OGG_PRE &m_TheoraStreamState, &m_OggPage);
+ if (m_VorbisStateInitialised)
+ ogg_stream_pagein(FMOD_OGG_PRE &m_VorbisStreamState, &m_OggPage);
+}
+
+void MoviePlayback::ChangeMovieData(UInt8 *data, long size)
+{
+ m_Data.data = data;
+ m_Data.size = size;
+}
+
+bool MoviePlayback::InitStreams(int &theoraHeadersSeen, int &vorbisHeadersSeen)
+{
+ m_Data.position = 0;
+ ogg_packet op;
+
+ /* Ogg file open; parse the headers */
+ /* Only interested in Vorbis/Theora streams */
+ while (true)
+ {
+ if (ReadBufferIntoOggStream() == 0)
+ return false;
+ while (ogg_sync_pageout(&m_OggSynchState, &m_OggPage)>0)
+ {
+ ogg_stream_state test;
+
+ /* is this a mandated initial header? If not, stop parsing */
+ if (!ogg_page_bos(&m_OggPage))
+ {
+ /* don't leak the page; get it into the appropriate stream */
+ QueueOggPageIntoStream();
+ return true;
+ }
+
+ if (ogg_stream_init(FMOD_OGG_PRE &test, ogg_page_serialno(&m_OggPage)) != 0)
+ return false;
+ if (ogg_stream_pagein(FMOD_OGG_PRE &test, &m_OggPage) != 0)
+ return false;
+ if (ogg_stream_packetout(&test, &op) != 1)
+ return false;
+
+
+ /* identify the codec: try theora */
+ if (!m_TheoraStateInitialised && theora_decode_header(&m_TheoraInfo, &m_TheoraComment, &op) >= 0)
+ {
+ /* it is theora */
+ memcpy(&m_TheoraStreamState, &test, sizeof(test));
+ theoraHeadersSeen = 1;
+ m_TheoraStateInitialised = true;
+ }else if (!m_VorbisStateInitialised && vorbis_synthesis_headerin(FMOD_OGG_PRE &m_VorbisInfo, &m_VorbisComment, &op) >= 0)
+ {
+ /* it is vorbis */
+ memcpy(&m_VorbisStreamState, &test, sizeof(test));
+ vorbisHeadersSeen = 1;
+ m_VorbisStateInitialised = true;
+ }else{
+ /* whatever it is, we don't care about it */
+ ogg_stream_clear(FMOD_OGG_PRE &test);
+ }
+ }
+ /* fall through to non-bos page parsing */
+ }
+}
+
+bool MoviePlayback::LoadMovieData( UInt8 *data, long size )
+{
+ Cleanup();
+ // Should never happen, but better safe than crashing.
+ if ( !data )
+ {
+		ErrorString( "LoadMovieData got NULL!" );
+ return false;
+ }
+
+ theora_info_init(&m_TheoraInfo);
+ theora_comment_init(&m_TheoraComment);
+ vorbis_info_init(FMOD_OGG_PRE &m_VorbisInfo);
+ vorbis_comment_init(&m_VorbisComment);
+ m_InitialisedLoad = true; //Signify we have attempted a load.
+
+ ogg_packet op;
+
+ m_Data.data = data;
+ m_Data.size = size;
+
+ int theoraHeadersSeen = 0;
+ int vorbisHeadersSeen = 0;
+
+ if (!InitStreams(theoraHeadersSeen, vorbisHeadersSeen))
+ {
+ Cleanup();
+ return false;
+ }
+
+ /* we're expecting more header packets. */
+ while ((m_TheoraStateInitialised && theoraHeadersSeen < 3) || (m_VorbisStateInitialised && vorbisHeadersSeen < 3))
+ {
+ int ret;
+ /* look for further theora headers */
+ while (m_TheoraStateInitialised && (theoraHeadersSeen < 3) && (ret = ogg_stream_packetout(&m_TheoraStreamState, &op)))
+ {
+ if (ret < 0)
+ {
+ printf_console("Error parsing Theora stream headers; corrupt stream?\n");
+ Cleanup();
+ return false;
+ }
+ if (theora_decode_header(&m_TheoraInfo, &m_TheoraComment, &op))
+ {
+ printf_console("Error parsing Theora stream headers; corrupt stream?\n");
+ Cleanup();
+ return false;
+ }
+ theoraHeadersSeen++;
+ }
+
+ /* look for more vorbis header packets */
+ while (m_VorbisStateInitialised && (vorbisHeadersSeen < 3) && (ret = ogg_stream_packetout(&m_VorbisStreamState, &op)))
+ {
+ if (ret < 0)
+ {
+ printf_console("Error parsing Vorbis stream headers; corrupt stream?\n");
+ Cleanup();
+ return false;
+ }
+ if (vorbis_synthesis_headerin(FMOD_OGG_PRE &m_VorbisInfo, &m_VorbisComment, &op))
+ {
+ printf_console("Error parsing Vorbis stream headers; corrupt stream?\n");
+ Cleanup();
+ return false;
+ }
+ vorbisHeadersSeen++;
+ }
+
+ /* The header pages/packets will arrive before anything else we
+ care about, or the stream is not obeying spec */
+
+ if (ogg_sync_pageout(&m_OggSynchState, &m_OggPage)>0)
+ {
+ QueueOggPageIntoStream(); /* demux into the appropriate stream */
+ }
+ else
+ {
+ if (ReadBufferIntoOggStream() == 0)
+ {
+ fprintf(stderr, "End of file while searching for codec headers.\n");
+ Cleanup();
+ return false;
+ }
+ }
+ }
+
+ /* and now we have it all. initialize decoders */
+ if (m_TheoraStateInitialised)
+ {
+ theora_decode_init(&m_TheoraState, &m_TheoraInfo);
+ const char *duration = theora_comment_query(&m_TheoraComment, const_cast<char*> ("DURATION"), 0);
+ if (duration)
+ sscanf(duration, "%f", &m_Duration);
+ m_TheoraInitialised = true;
+
+ }else{
+ /* tear down the partial theora setup */
+ theora_info_clear(&m_TheoraInfo);
+ theora_comment_clear(&m_TheoraComment);
+ }
+ if (m_VorbisStateInitialised)
+ {
+ vorbis_synthesis_init(FMOD_OGG_PRE &m_VorbisState, &m_VorbisInfo);
+ vorbis_block_init(FMOD_OGG_PRE &m_VorbisState, &m_VorbisBlock);
+ const char *duration = vorbis_comment_query(&m_VorbisComment, const_cast<char*> ("DURATION"), 0);
+ if (duration)
+ sscanf(duration, "%f", &m_Duration);
+ m_VorbisInitialised = true;
+
+ }else{
+ /* tear down the partial vorbis setup */
+ vorbis_info_clear(FMOD_OGG_PRE &m_VorbisInfo);
+ vorbis_comment_clear(FMOD_OGG_PRE &m_VorbisComment);
+ }
+
+ m_CanStartPlaying = false;
+ m_VideoBufferReady = false;
+ m_AudioBufferReady = false;
+ m_AudioBufferFill = 0;
+ m_AudioBufferGranulePos = -1;
+ m_VideoBufferTime = 0;
+ m_NoMoreData = false;
+ m_LastSampleTime = 0;
+ m_AudioBufferTime = 0;
+
+ //setup audio
+ if (m_AudioClip && !m_AudioChannel)
+ {
+ m_AudioClip->SetMoviePlayback(this);
+ // queue
+ // if we have no audio channel ... use the ready audio buffer to init
+ return m_AudioClip->ReadyToPlay();
+ }
+
+ return true;
+}
+
+
+#if ENABLE_WWW
+bool MoviePlayback::LoadMovieData(WWW *stream)
+{
+ if (m_DataStream != stream)
+ {
+ if (m_DataStream)
+ m_DataStream->Release();
+
+ m_DataStream = stream;
+		m_DataStream->Retain(); // Make sure the WWW object doesn't disappear if its mono side is deleted before we are done.
+ }
+
+ stream->LockPartialData();
+ int size = stream->GetPartialSize();
+
+ //require headers before starting
+ if (size < 16 * 1024)
+ {
+ stream->UnlockPartialData();
+ return false;
+ }
+
+ bool accepted = LoadMovieData((UInt8*)stream->GetPartialData(), stream->GetPartialSize());
+ stream->UnlockPartialData();
+
+ return accepted;
+}
+#endif
+
+bool MoviePlayback::MovieHasAudio()
+{
+ return m_VorbisInitialised;
+}
+
+bool MoviePlayback::MovieHasVideo()
+{
+ return m_TheoraInitialised;
+}
+
+double MoviePlayback::GetMovieTime(bool useAudio)
+{
+ TimeManager& timeMgr = GetTimeManager();
+
+ double ret;
+ //use audio for timing if available
+ if (MovieHasAudio() && useAudio)
+ {
+ double curTime = timeMgr.GetRealtime();
+
+		double d = m_VorbisInfo.rate; // samples per second
+		// dQ: seconds of audio held in the queue (kAudioQueueSize bytes of 16-bit interleaved PCM)
+		double dQ = ((double)(kAudioQueueSize) / (d * 2 * m_VorbisInfo.channels));
+		// dG: time position of the last decoded sample, derived from the granule position
+		double dG = ((double)(m_AudioBufferGranulePos) / d);
+		dG = dG < 0 ? 0 : dG;
+		// sDiff: wall-clock time elapsed since the audio buffer was last filled
+		double sDiff = (curTime - m_AudioBufferTime);
+		// movie time = last decoded sample, minus what is still queued, plus elapsed real time
+		ret = (dG - dQ) + (sDiff);
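+		// e.g. at 44100 Hz stereo, a hypothetical 16 KB queue would give
+		// dQ = 16384 / (44100 * 2 * 2) ~= 0.093 s. (kAudioQueueSize itself is
+		// defined elsewhere.)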
+ }
+ else
+ {
+ //use real time if audio is not available; unless we run in capture timestep
+ double curTime = timeMgr.GetCaptureFramerate() > 0 ? timeMgr.GetCurTime() : timeMgr.GetRealtime();
+ curTime -= m_StartTime;
+
+ //if time is too far off, reset start time, to resume smooth playback from here
+ if (curTime > m_LastSampleTime + 0.1 || curTime < m_LastSampleTime)
+ {
+ double diff = curTime - m_LastSampleTime - 0.1;
+ m_StartTime += diff;
+ curTime -= diff;
+ }
+ ret = curTime;
+ }
+ m_LastSampleTime = ret;
+ return ret;
+}
+
+bool MoviePlayback::MovieStreamImage()
+{
+ //This may happen if WWW stream is cancelled before movie is stopped.
+ if (m_Data.data == NULL && !DidLoad())
+ return false;
+
+ //can we use audio for timing?
+ bool canPlayAudio = false;
+ if (m_AudioClip && m_AudioChannel)
+ m_AudioChannel->isPlaying(&canPlayAudio);
+
+ #if SUPPORT_REPRODUCE_LOG
+ if (RunningReproduction())
+ canPlayAudio = false;
+ #endif
+
+ bool didWriteBuffer = false;
+ while (!didWriteBuffer)
+ {
+ /* we want a video and audio frame ready to go at all times. If
+ we have to buffer incoming, buffer the compressed data (ie, let
+ ogg do the buffering) */
+ while (MovieHasAudio() && canPlayAudio && !m_AudioBufferReady)
+ {
+ int ret;
+ float **pcm;
+ /* if there's pending, decoded audio, grab it */
+ if ((ret = vorbis_synthesis_pcmout(&m_VorbisState, &pcm))>0)
+ {
+ int count = m_AudioBufferFill / 2;
+ int maxSamples = (kAudioBufferSize - m_AudioBufferFill) / 2 / m_VorbisInfo.channels;
+ maxSamples = std::min (maxSamples, ret);
+ for (int i = 0;i < maxSamples;i++)
+ {
+ for (int j = 0;j < m_VorbisInfo.channels;j++)
+ {
+ // TODO: implement fast RoundfToInt for intel!
+ int val = RoundfToInt(pcm[j][i]*32767.f);
+ if (val > 32767)
+ val = 32767;
+ if (val<-32768)
+ val = -32768;
+					assert (count < kAudioBufferSize / 2); // buffer holds kAudioBufferSize/2 16-bit samples
+ m_AudioBuffer[count++] = val;
+ }
+ }
+ vorbis_synthesis_read(&m_VorbisState, maxSamples);
+ m_AudioBufferFill += maxSamples * m_VorbisInfo.channels * 2;
+
+ if (m_AudioBufferFill == kAudioBufferSize)
+ m_AudioBufferReady = true;
+ if (m_VorbisState.granulepos >= 0)
+ m_AudioBufferGranulePos = m_VorbisState.granulepos - ret + maxSamples;
+ else
+ m_AudioBufferGranulePos += maxSamples;
+
+ m_AudioBufferTime = GetTimeManager().GetRealtime();
+ }
+ else
+ {
+ ogg_packet op;
+ /* no pending audio; is there a pending packet to decode? */
+ if (ogg_stream_packetout(&m_VorbisStreamState, &op)>0)
+ {
+ if (vorbis_synthesis(FMOD_OGG_PRE &m_VorbisBlock, &op) == 0) /* test for success! */
+ vorbis_synthesis_blockin(&m_VorbisState, &m_VorbisBlock);
+ }
+ else /* we need more data; break out to suck in another page */
+ break;
+ }
+ }
+
+ while (MovieHasVideo() && !m_VideoBufferReady)
+ {
+ ogg_packet op;
+ /* theora is one in, one out... */
+ if (ogg_stream_packetout(&m_TheoraStreamState, &op)>0)
+ {
+ int ret = theora_decode_packetin(&m_TheoraState, &op);
+ AssertIf (ret != 0);
+ ogg_int64_t videobuf_granulepos = m_TheoraState.granulepos;
+
+ m_VideoBufferTime = theora_granule_time(&m_TheoraState, videobuf_granulepos);
+
+ /* is it already too old to be useful? This is only actually
+ useful cosmetically after a SIGSTOP. Note that we have to
+ decode the frame even if we don't show it (for now) due to
+ keyframing. Soon enough libtheora will be able to deal
+ with non-keyframe seeks. */
+
+ if (ret == 0 && m_VideoBufferTime >= GetMovieTime(canPlayAudio))
+ m_VideoBufferReady = true;
+
+ }else
+ break;
+ }
+
+ if (!m_VideoBufferReady && (!m_AudioBufferReady || !canPlayAudio) && m_Data.position >= m_Data.size)
+ {
+ m_NoMoreData = true;
+ return false;
+ }
+
+ if ((!m_VideoBufferReady && MovieHasVideo()) || (!m_AudioBufferReady && MovieHasAudio() && canPlayAudio))
+ {
+ /* no data yet for somebody. Grab another page */
+ ReadBufferIntoOggStream();
+ while (ogg_sync_pageout(&m_OggSynchState, &m_OggPage)>0)
+ {
+ QueueOggPageIntoStream();
+ m_NoMoreData = false;
+ }
+ }
+
+ /* If playback has begun, top audio buffer off immediately. */
+ if (m_CanStartPlaying && MovieHasAudio() && canPlayAudio && m_AudioBufferReady)
+ {
+ if (m_AudioClip->QueueAudioData(m_AudioBuffer, kAudioBufferSize))
+ {
+ m_AudioBufferFill = 0;
+ m_AudioBufferReady = false;
+ }
+ }
+
+ /* are we at or past time for this video frame? */
+ if (m_CanStartPlaying && m_VideoBufferReady && m_VideoBufferTime <= GetMovieTime(canPlayAudio))
+ {
+ if (m_Texture && m_Texture->GetImageBuffer())
+ {
+ yuv_buffer yuv;
+ int ret = theora_decode_YUVout(&m_TheoraState, &yuv);
+ if (ret == 0)
+ {
+ YuvFrame yuvFrame;
+ yuvFrame.y = yuv.y;
+ yuvFrame.u = yuv.u;
+ yuvFrame.v = yuv.v;
+ yuvFrame.width = m_TheoraInfo.frame_width;
+ yuvFrame.height = m_TheoraInfo.frame_height;
+ yuvFrame.y_stride = yuv.y_stride;
+ yuvFrame.uv_stride = yuv.uv_stride;
+ yuvFrame.uv_step = 1; // non-interleaved UV data
+ yuvFrame.offset_x = m_TheoraInfo.offset_x;
+ yuvFrame.offset_y = m_TheoraInfo.offset_y;
+ m_Texture->YuvToRgb (&yuvFrame);
+ }
+ }
+ didWriteBuffer = true;
+ m_VideoBufferReady = false;
+ }
+
+ if (m_CanStartPlaying &&
+ (m_AudioBufferReady || !(MovieHasAudio() && canPlayAudio)) &&
+ (m_VideoBufferReady || !MovieHasVideo())
+ )
+ {
+		/* we have an audio frame ready (which means the audio buffer is
+		full) and it's not yet time to show a video frame, so return and
+		wait until the audio buffer drains or it's near time to show video */
+
+ return didWriteBuffer;
+ }
+
+ /* if our buffers either don't exist or are ready to go,
+ we can begin playback */
+ if ((!MovieHasVideo() || m_VideoBufferReady) && (!(MovieHasAudio() && canPlayAudio) || m_AudioBufferReady ))
+ m_CanStartPlaying = true;
+
+ /* same if we've run out of input */
+ if (m_Data.position >= m_Data.size)
+ m_CanStartPlaying = true;
+ }
+
+ return didWriteBuffer;
+}
+
+void MoviePlayback::Cleanup()
+{
+ if(!m_InitialisedLoad)
+ {
+ return;
+ }
+
+ if(m_VorbisInitialised)
+ {
+ vorbis_block_clear(FMOD_OGG_PRE &m_VorbisBlock);
+ vorbis_dsp_clear(FMOD_OGG_PRE &m_VorbisState);
+ }
+ if (m_VorbisStateInitialised)
+ {
+ ogg_stream_clear(FMOD_OGG_PRE &m_VorbisStreamState);
+ }
+
+ if (m_TheoraInitialised)
+ {
+ theora_clear(&m_TheoraState);
+ }
+ if (m_TheoraStateInitialised)
+ {
+ ogg_stream_clear(FMOD_OGG_PRE &m_TheoraStreamState);
+ }
+
+ vorbis_comment_clear(FMOD_OGG_PRE &m_VorbisComment);
+ vorbis_info_clear(FMOD_OGG_PRE &m_VorbisInfo);
+ theora_comment_clear(&m_TheoraComment);
+ theora_info_clear(&m_TheoraInfo);
+
+ m_InitialisedLoad = false;
+ m_VorbisInitialised = false;
+ m_VorbisStateInitialised = false;
+ m_TheoraInitialised = false;
+ m_TheoraStateInitialised = false;
+}
+
+MoviePlayback::~MoviePlayback()
+{
+ Cleanup();
+
+ if (m_AudioClip)
+ if (m_AudioClip->GetMoviePlayback() == this)
+ m_AudioClip->SetMoviePlayback(NULL);
+
+ UNITY_FREE(kMemAudioData, m_AudioBuffer);
+ ogg_sync_clear(FMOD_OGG_PRE &m_OggSynchState);
+
+#if ENABLE_WWW
+ if (m_DataStream)
+ m_DataStream->Release();
+#endif
+}
+
+int MoviePlayback::GetMovieBitrate()
+{
+ int bitrate = 0;
+ if (MovieHasVideo())
+ {
+ if (m_TheoraInfo.target_bitrate)
+ bitrate += m_TheoraInfo.target_bitrate;
+ else //find a good way to guess average bitrate of unknown encodings
+ bitrate += 500000;
+ }
+ if (MovieHasAudio())
+ {
+ if (m_VorbisInfo.bitrate_nominal > 0)
+ bitrate += m_VorbisInfo.bitrate_nominal;
+ else if (m_VorbisInfo.bitrate_upper > 0)
+ bitrate += m_VorbisInfo.bitrate_upper;
+ }
+ return bitrate;
+}
+
+int MoviePlayback::GetMovieWidth()
+{
+ if (MovieHasVideo())
+ return m_TheoraInfo.frame_width;
+ else
+ return 0;
+}
+
+int MoviePlayback::GetMovieHeight()
+{
+ if (MovieHasVideo())
+ return m_TheoraInfo.frame_height;
+ else
+ return 0;
+}
+
+int MoviePlayback::GetMovieAudioRate()
+{
+ if (MovieHasAudio())
+ return m_VorbisInfo.rate;
+ else
+ return 0;
+}
+
+int MoviePlayback::GetMovieAudioChannelCount()
+{
+ if (MovieHasAudio())
+ return m_VorbisInfo.channels;
+ else
+ return 0;
+}
+
+void MoviePlayback::Play()
+{
+ m_IsPlaying = true;
+ if (m_AudioClip && m_AudioChannel)
+ {
+ m_AudioChannel->setPaused(false);
+ }
+}
+
+void MoviePlayback::Stop()
+{
+ m_IsPlaying = false;
+ if (m_AudioClip && m_AudioChannel)
+ {
+ m_AudioClip->ClearQueue();
+ PauseAudio();
+ }
+ Rewind();
+}
+
+void MoviePlayback::Pause()
+{
+ m_IsPlaying = false;
+ PauseAudio();
+}
+
+void MoviePlayback::SetLoop (bool loop)
+{
+ m_Loop=loop;
+ if (m_AudioChannel)
+ {
+ FMOD_MODE mode;
+ m_AudioChannel->getMode(&mode);
+ mode = (mode & ~(FMOD_LOOP_NORMAL | FMOD_LOOP_OFF)) | (m_Loop ? FMOD_LOOP_NORMAL : FMOD_LOOP_OFF);
+ m_AudioChannel->setMode(mode);
+ }
+}
+
+void MoviePlayback::SetAudioChannel(FMOD::Channel* channel)
+{
+ m_AudioChannel = channel;
+ if(m_AudioChannel)
+ {
+ FMOD_MODE mode;
+ m_AudioChannel->getMode(&mode);
+ mode = (mode & ~(FMOD_LOOP_NORMAL | FMOD_LOOP_OFF | FMOD_3D)) | (m_Loop ? FMOD_LOOP_NORMAL : FMOD_LOOP_OFF) | FMOD_2D;
+ m_AudioChannel->setMode(mode);
+ }
+}
+
+void MoviePlayback::Rewind()
+{
+ // Destroy and recreate streams. Just setting file position to 0 may break, because we first have to parse
+ // the headers.
+ Cleanup();
+
+ ogg_sync_reset(&m_OggSynchState);
+
+#if ENABLE_WWW
+ if (m_DataStream)
+ {
+ //update download position
+ //lock stream, so it cannot reallocate
+ m_DataStream->LockPartialData();
+ ChangeMovieData((UInt8*)m_DataStream->GetPartialData(), m_DataStream->GetPartialSize());
+ }
+#endif
+
+ LoadMovieData(m_Data.data, m_Data.size);
+
+#if ENABLE_WWW
+ if (m_DataStream)
+ m_DataStream->UnlockPartialData();
+#endif
+
+ m_StartTime = GetCurTime();
+}
+
+bool MoviePlayback::Update()
+{
+ if (!MovieHasVideo() && !MovieHasAudio())
+ return false;
+
+ bool videoChanged = false;
+ if (m_IsPlaying)
+ {
+#if ENABLE_WWW
+ if (m_DataStream)
+ {
+ //update download position
+ //lock stream, so it cannot reallocate
+ m_DataStream->LockPartialData();
+ ChangeMovieData((UInt8*)m_DataStream->GetPartialData(), m_DataStream->GetPartialSize());
+ }
+#endif
+
+ if (MovieStreamImage())
+ {
+ if (m_Texture && m_Texture->GetImageBuffer())
+ videoChanged = true;
+ }
+
+ bool finished = m_NoMoreData;
+#if ENABLE_WWW
+ if (m_DataStream)
+ {
+ m_DataStream->UnlockPartialData();
+
+ // if we are still downloading, we probably aren't really at the end
+ finished &= m_DataStream->IsDone();
+ }
+#endif
+
+ // rewind if looping and movie is at end
+ if (finished)
+ {
+ if (m_Loop)
+ Rewind();
+ else
+ {
+ m_IsPlaying = false;
+ if (m_AudioChannel)
+ {
+ PauseAudio();
+ if (m_AudioClip)
+ m_AudioClip->ClearQueue();
+ }
+ }
+ }
+ }
+
+ return videoChanged;
+}
+
+void MoviePlayback::PauseAudio()
+{
+ // get AudioSource and pause the sound
+ if (m_AudioChannel)
+ {
+ AudioSource* audioSource;
+ m_AudioChannel->getUserData((void**) &audioSource);
+ if (audioSource)
+ audioSource->Pause();
+ else
+ m_AudioChannel->setPaused(true);
+ }
+}
+
+#else // ENABLE_MOVIES
+// dummy implementation coded in .h file
+#endif
+
+#if UNITY_EDITOR
+bool PlayFullScreenMovie (std::string const& path,
+ ColorRGBA32 const& backgroundColor,
+ unsigned long controlMode, unsigned long scalingMode)
+{
+ return true;
+}
+#endif
diff --git a/Runtime/Video/MoviePlayback.h b/Runtime/Video/MoviePlayback.h
new file mode 100644
index 0000000..49e4ac7
--- /dev/null
+++ b/Runtime/Video/MoviePlayback.h
@@ -0,0 +1,212 @@
+#ifndef MOVIE_PLAYBACK
+#define MOVIE_PLAYBACK
+
+#include "Configuration/UnityConfigure.h"
+#include "Runtime/Math/Color.h"
+
+bool PlayFullScreenMovie (std::string const& path,
+ ColorRGBA32 const& backgroundColor,
+ unsigned long controlMode, unsigned long scalingMode);
+
+#if ENABLE_MOVIES
+#include "Runtime/Audio/correct_fmod_includer.h"
+#include "External/theora/include/theora/theora.h"
+
+#if UNITY_EDITOR
+//editor uses custom ogg, not the one from fmod, because it also needs the encoding functionality, which is not present in the fmod one.
+#include "../../External/Audio/libvorbis/include/vorbis/codec.h"
+#else
+#include <vorbis/codec.h> //rely on include directories to pick codec.h from fmod for this specific platform.
+#endif
+
+#include "Runtime/Audio/AudioClip.h"
+
+#if ENABLE_WWW
+#include "Runtime/Export/WWW.h"
+#endif
+
+struct MovieDataStream {
+ UInt8 *data;
+ long size;
+ long position;
+};
+
+class MovieTexture;
+class AudioClip;
+class AudioSource;
+
+class MoviePlayback
+{
+private:
+
+ ogg_sync_state m_OggSynchState;
+ ogg_page m_OggPage;
+
+ ogg_stream_state m_TheoraStreamState;
+ theora_info m_TheoraInfo;
+ theora_comment m_TheoraComment;
+ theora_state m_TheoraState;
+
+ ogg_stream_state m_VorbisStreamState;
+ vorbis_info m_VorbisInfo;
+ vorbis_comment m_VorbisComment;
+ vorbis_dsp_state m_VorbisState;
+ vorbis_block m_VorbisBlock;
+
+ bool m_CanStartPlaying;
+ bool m_VideoBufferReady;
+ double m_VideoBufferTime;
+
+ int m_AudioBufferFill;
+ bool m_AudioBufferReady;
+ ogg_int16_t* m_AudioBuffer;
+ ogg_int64_t m_AudioBufferGranulePos; /* time position of last sample */
+ double m_AudioBufferTime; //Real time when the last audio buffer was filled
+
+ bool m_NoMoreData; //Are we finished playing?
+ MovieDataStream m_Data; //Data buffer and position of stream
+ double m_StartTime; //real time offset for start
+ double m_LastSampleTime; //last sample played
+ bool m_IsPlaying; //shall movie update in player loop?
+ bool m_Loop; //is movie looping?
+
+ bool m_InitialisedLoad; //Have we initialised the theora and vorbis codec structs
+ bool m_VorbisInitialised; //Vorbis headers are initialised and ready for data.
+ bool m_VorbisStateInitialised; //The vorbis state struct is primed, but not necessarily fully ready
+ bool m_TheoraInitialised; //Theora headers are initialised and ready for data.
+	bool m_TheoraStateInitialised;	//The theora state struct is primed, but not necessarily fully ready
+
+ float m_Duration; //duration if known
+
+ MovieTexture* m_Texture;
+ AudioClip* m_AudioClip;
+#if ENABLE_WWW
+ WWW* m_DataStream; //if != NULL, use this as data.
+#endif
+ FMOD::Channel* m_AudioChannel;
+
+ void QueueOggPageIntoStream();
+ double GetMovieTime(bool useAudio);
+ void ChangeMovieData(UInt8 *data,long size);
+ bool MovieStreamImage();
+	bool InitStreams(int &theoraHeadersSeen, int &vorbisHeadersSeen);
+ void Cleanup();
+ void CleanupInfoStructures();
+ int ReadBufferIntoOggStream();
+ void PauseAudio();
+
+public:
+ MoviePlayback();
+ ~MoviePlayback();
+
+ int GetMovieWidth();
+ int GetMovieHeight();
+ int GetMovieAudioRate();
+ int GetMovieAudioChannelCount();
+ int GetMovieBitrate();
+ float GetMovieTotalDuration() const {return m_Duration;}
+
+ bool IsPlaying() {return m_IsPlaying;}
+
+ void SetLoop (bool l);
+ bool GetLoop () {return m_Loop;}
+
+ //Do we have a video and/or audio track?
+ bool MovieHasAudio();
+ bool MovieHasVideo();
+
+ //Load movie from a data ptr
+ bool LoadMovieData(UInt8 *data,long size);
+
+#if ENABLE_WWW
+ //Load movie from a web stream (and track data internally)
+ bool LoadMovieData(WWW *stream);
+#endif
+
+ bool DidLoad() { return m_VorbisInitialised || m_TheoraInitialised; }
+
+ void SetMovieTexture(MovieTexture *t) {m_Texture=t;}
+ void SetMovieAudioClip(AudioClip *c) {m_AudioClip=c;}
+ void SetAudioChannel(FMOD::Channel* channel);
+
+ bool GetAudioBuffer(void** buffer, unsigned* size);
+
+ void MoviePlaybackClose();
+
+ void Play();
+ void Pause();
+ void Stop ();
+ void Rewind();
+
+ bool Update();
+};
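+// Typical driving sequence (illustrative; MovieTexture is the real caller):
+//
+//   MoviePlayback playback;
+//   playback.SetMovieTexture(texture);        // sink for decoded frames
+//   if (playback.LoadMovieData(bytes, size))  // parse Ogg/Theora/Vorbis headers
+//       playback.Play();
+//   ...
+//   if (playback.Update())                    // once per frame
+//       texture->UploadTextureData();         // a new frame was decoded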
+
+#else // ENABLE_MOVIES
+
+class WWW;
+class MovieTexture;
+class AudioClip;
+class AudioSource;
+namespace FMOD
+{
+ class Channel;
+}
+
+// dummy implementation
+class MoviePlayback
+{
+public:
+ MoviePlayback() {}
+ ~MoviePlayback() {}
+
+ int GetMovieWidth() { return 320; }
+ int GetMovieHeight() { return 240; }
+ int GetMovieAudioRate() { return 22050; }
+ int GetMovieAudioChannelCount() { return 1; }
+ int GetMovieBitrate() { return 0; }
+ float GetMovieTotalDuration() {return 0;}
+
+ bool IsPlaying() {return false;}
+
+ void SetLoop (bool l) {}
+ bool GetLoop () {return false;}
+
+ //Do we have a video and/or audio track?
+ bool MovieHasAudio() { return false; }
+ bool MovieHasVideo() { return false; }
+
+ //Load movie from a data ptr
+ bool LoadMovieData(UInt8 *data,long size) {
+ return false;
+ }
+
+ //Load movie from a web stream (and track data internally)
+ bool LoadMovieData(WWW *stream) {
+ return false;
+ }
+
+ bool DidLoad() {return false; }
+
+ void SetMovieTexture(MovieTexture *t) {}
+ void SetMovieAudioClip(AudioClip *c) {}
+ void SetAudioChannel(FMOD::Channel* channel) {}
+
+ void MoviePlaybackClose() {}
+ void Play() {}
+ void Pause() {}
+ void Stop () {}
+ void Rewind() {}
+
+ void Update() {}
+};
+
+inline void AddToUpdateList(MoviePlayback *m) {}
+inline void RemoveFromUpdateList(MoviePlayback *m) {}
+
+inline void UpdateMovies() {}
+inline void ResetMovies() {}
+inline void PauseMovies() {}
+
+#endif
+
+#endif
diff --git a/Runtime/Video/MovieTexture.cpp b/Runtime/Video/MovieTexture.cpp
new file mode 100644
index 0000000..73534c9
--- /dev/null
+++ b/Runtime/Video/MovieTexture.cpp
@@ -0,0 +1,262 @@
+#include "UnityPrefix.h"
+#include "MovieTexture.h"
+
+#if ENABLE_MOVIES
+
+#include "Runtime/Serialize/TransferFunctions/SerializeTransfer.h"
+#include "Runtime/Graphics/Image.h"
+#include "Runtime/Utilities/BitUtility.h"
+#include "Runtime/GfxDevice/GfxDevice.h"
+#include "BaseVideoTexture.h"
+
+// --------------------------------------------------------------------------
+
+MovieTexture::MovieTexture (MemLabelId label, ObjectCreationMode mode, WWW *streamData)
+: BaseVideoTexture(label, mode)
+{
+ m_AudioClip = NULL;
+#if ENABLE_WWW
+ m_StreamData = NULL;
+#endif // ENABLE_WWW
+ m_MoviePlayback.SetMovieTexture( this );
+ m_MoviePlayback.SetMovieAudioClip( m_AudioClip );
+ m_TextureSettings.m_WrapMode = kTexWrapClamp;
+}
+
+#if ENABLE_WWW
+void MovieTexture::InitStream (WWW * streamData)
+{
+ AssertIf(m_AudioClip.GetInstanceID() != 0);
+ AssertIf(m_StreamData != NULL);
+ m_StreamData = streamData;
+ if(m_StreamData)
+ m_StreamData->Retain();
+
+ if (streamData != NULL)
+ {
+ m_AudioClip = NEW_OBJECT(AudioClip);
+ m_AudioClip->Reset();
+ m_AudioClip->InitStream(NULL, &m_MoviePlayback);
+ }
+ else
+ m_AudioClip = NULL;
+
+ m_MoviePlayback.SetMovieAudioClip( m_AudioClip );
+
+ // right now we're trying to load movie/sound in AwakeFromLoad (and do only that) - so we skip the call
+ HackSetAwakeWasCalled();
+}
+#endif // ENABLE_WWW
+
+MovieTexture::~MovieTexture ()
+{
+/* if( m_ImageBuffer )
+ {
+ // The allocated buffer for one frame has extra line before the pointer
+ // we use in all operations. YUV decoding code operates two lines at a time,
+ // goes backwards and thus needs extra line before the buffer in case of odd
+ // movie sizes.
+ UInt32* realBuffer = m_ImageBuffer - m_TextureWidth;
+ delete[] realBuffer;
+ }*/
+#if ENABLE_WWW
+ if(m_StreamData) m_StreamData->Release();
+#endif // ENABLE_WWW
+}
+
+void MovieTexture::SetMovieData(const UInt8* data,long size)
+{
+ m_MovieData.assign(data,data+size);
+
+ #if !UNITY_RELEASE
+ // right now we're trying to load movie/sound in AwakeFromLoad (and do only that) - so we skip the call
+ HackSetAwakeWasCalled();
+ #endif
+
+}
+
+template<class TransferFunction>
+void MovieTexture::Transfer (TransferFunction& transfer)
+{
+ Super::Transfer (transfer);
+ bool loop = GetLoop();
+ transfer.Transfer( loop, "m_Loop", kNoTransferFlags );
+ SetLoop(loop);
+ transfer.Align();
+ transfer.Transfer( m_AudioClip, "m_AudioClip", kNotEditableMask );
+ transfer.Transfer( m_MovieData, "m_MovieData", kHideInEditorMask );
+ transfer.Transfer( m_ColorSpace, "m_ColorSpace", kHideInEditorMask );
+}
+
+bool MovieTexture::ShouldIgnoreInGarbageDependencyTracking()
+{
+ return false;
+}
+
+bool MovieTexture::ReadyToPlay () //check whether enough of the movie has loaded to start playing
+{
+ if (!m_MoviePlayback.DidLoad())
+ TryLoadMovie();
+ if (m_MoviePlayback.DidLoad())
+ {
+#if ENABLE_WWW
+ if (m_StreamData)
+ {
+ // Get duration from comment tag
+ double totalDuration = m_MoviePlayback.GetMovieTotalDuration();
+			// If there's no duration comment (movie not encoded with Unity), estimate using bitrate
+ if (totalDuration <= 0)
+ {
+ m_StreamData->LockPartialData ();
+ int totalSize = m_StreamData->GetPartialSize() / m_StreamData->GetProgress();
+ totalDuration = totalSize/(m_MoviePlayback.GetMovieBitrate() / 8.0);
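+				// e.g. 5 MB fetched at 50% progress -> ~10 MB total; at a 1 Mbps
+				// estimated bitrate that is 10e6 / 125000 = 80 seconds of content.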
+ m_StreamData->UnlockPartialData ();
+ }
+
+			// Do we have enough to start? (will the rest download within ~110% of the playback time?)
+ if (m_StreamData->GetETA() < totalDuration*1.1F)
+ return true;
+ }
+ else
+#endif // ENABLE_WWW
+ return true; //if this isn't a web stream then we can always start
+ }
+ return false;
+}
+
+void MovieTexture::TryLoadMovie ()
+{
+#if ENABLE_WWW
+ if(m_StreamData)
+ m_MoviePlayback.LoadMovieData(m_StreamData);
+ else
+#endif // ENABLE_WWW
+ m_MoviePlayback.LoadMovieData(&*m_MovieData.begin(),m_MovieData.size());
+
+ if(m_MoviePlayback.DidLoad())
+ {
+ //if movie loaded, init texture structures
+ int width = m_MoviePlayback.GetMovieWidth();
+ int height = m_MoviePlayback.GetMovieHeight();
+
+ InitVideoMemory(width, height);
+ }
+
+ #if !UNITY_RELEASE
+ // right now we're trying to load movie/sound in AwakeFromLoad (and do only that) - so we skip the call
+ HackSetAwakeWasCalled();
+ #endif
+}
+
+void MovieTexture::Play ()
+{
+ if(!m_MoviePlayback.DidLoad())
+ TryLoadMovie();
+ if(m_MoviePlayback.DidLoad())
+ {
+ m_MoviePlayback.Play();
+ BaseVideoTexture::Play();
+ }
+}
+
+void MovieTexture::Pause ()
+{
+ if(m_MoviePlayback.DidLoad())
+ {
+ m_MoviePlayback.Pause();
+ BaseVideoTexture::Pause();
+ }
+}
+
+void MovieTexture::Stop ()
+{
+ if(m_MoviePlayback.DidLoad())
+ {
+ m_MoviePlayback.Stop();
+ BaseVideoTexture::Stop();
+ }
+}
+
+void MovieTexture::Rewind ()
+{
+ if(m_MoviePlayback.DidLoad())
+ m_MoviePlayback.Rewind();
+}
+
+void MovieTexture::Update()
+{
+ if(m_MoviePlayback.DidLoad())
+ {
+ if(m_MoviePlayback.Update())
+ UploadTextureData();
+ }
+}
+
+bool MovieTexture::IsPlaying ()
+{
+ if(m_MoviePlayback.DidLoad())
+ return m_MoviePlayback.IsPlaying();
+ return false;
+}
+
+void MovieTexture::AwakeFromLoad (AwakeFromLoadMode awakeMode)
+{
+ Super::AwakeFromLoad (awakeMode);
+ if (!m_AudioClip.IsNull())
+ {
+ m_AudioClip->SetMoviePlayback(&m_MoviePlayback);
+ m_MoviePlayback.SetMovieAudioClip( m_AudioClip );
+ }
+ TryLoadMovie();
+}
+
+void MovieTexture::SetMovieAudioClip(AudioClip *clip)
+{
+ m_AudioClip=clip;
+ m_AudioClip->SetMoviePlayback(&m_MoviePlayback);
+ m_MoviePlayback.SetMovieAudioClip( m_AudioClip );
+}
+
+void MovieTexture::UnloadFromGfxDevice (bool forceUnloadAll)
+{
+ if (!m_MoviePlayback.DidLoad())
+ return;
+
+ // Here we want to unload strictly GFX device-specific data,
+ // and since the image buffer is not GFX device specific, it should
+ // be left untouched. We do, however, delete the GFX texture.
+ GetGfxDevice().DeleteTexture(GetTextureID());
+}
+
+void MovieTexture::UploadToGfxDevice ()
+{
+ if (!m_MoviePlayback.DidLoad())
+ return;
+
+ int width = m_MoviePlayback.GetMovieWidth();
+ int height = m_MoviePlayback.GetMovieHeight();
+
+ int texwidth = GetDataWidth();
+ int texheight = GetDataHeight();
+
+ if (GetImageBuffer() == NULL || width != texwidth || height != texheight)
+ {
+ // This accommodates the change in width and height of the image buffer,
+ // since in such a case we want to recreate the image buffer.
+ ReleaseVideoMemory();
+ InitVideoMemory(width, height);
+ }
+ else
+ {
+ // Recreate the Gfx structure and upload the previously allocated image buffer back
+ CreateGfxTextureAndUploadData(false);
+ }
+
+ UploadTextureData();
+}
+
+
+IMPLEMENT_CLASS (MovieTexture)
+IMPLEMENT_OBJECT_SERIALIZE (MovieTexture)
+
+#endif
diff --git a/Runtime/Video/MovieTexture.h b/Runtime/Video/MovieTexture.h
new file mode 100644
index 0000000..401968f
--- /dev/null
+++ b/Runtime/Video/MovieTexture.h
@@ -0,0 +1,77 @@
+#pragma once
+#include "Configuration/UnityConfigure.h"
+
+#if ENABLE_MOVIES
+
+#include "BaseVideoTexture.h"
+#include "MoviePlayback.h"
+#include "Runtime/Audio/AudioClip.h"
+#if ENABLE_WWW
+#include "Runtime/Export/WWW.h"
+#endif
+#include <vector>
+
+class ColorRGBAf;
+class MoviePlayback;
+
+
+class MovieTexture: public BaseVideoTexture
+{
+private:
+
+ std::vector<UInt8> m_MovieData; //the raw Ogg movie data
+ MoviePlayback m_MoviePlayback; //class controlling playback state
+ PPtr<AudioClip> m_AudioClip; //attached AudioClip
+#if ENABLE_WWW
+ WWW *m_StreamData; //if != NULL, use this instead of m_MovieData.
+#else
+ typedef void WWW;
+#endif
+
+protected:
+// void DestroyTexture ();
+
+ void TryLoadMovie ();
+
+public:
+ REGISTER_DERIVED_CLASS (MovieTexture, Texture)
+ DECLARE_OBJECT_SERIALIZE (MovieTexture)
+
+#if ENABLE_WWW
+ // WARNING: don't call AwakeFromLoad if you use InitStream
+ void InitStream (WWW * streamData);
+#endif
+ void AwakeFromLoad (AwakeFromLoadMode awakeMode);
+
+ void Rewind();
+ virtual void Play();
+ virtual void Pause();
+ virtual void Stop ();
+ virtual void Update ();
+ virtual void UnloadFromGfxDevice(bool forceUnloadAll);
+ virtual void UploadToGfxDevice();
+
+ bool IsPlaying();
+ bool GetLoop () {return m_MoviePlayback.GetLoop();}
+ void SetLoop (bool l) {m_MoviePlayback.SetLoop(l);}
+
+ bool ReadyToPlay ();
+
+ virtual bool ShouldIgnoreInGarbageDependencyTracking ();
+ MovieTexture (MemLabelId label, ObjectCreationMode mode, WWW* streamData = NULL);
+
+ #if ENABLE_PROFILER || UNITY_EDITOR
+ virtual int GetStorageMemorySize() const { return m_MovieData.size(); }
+ #endif
+
+ // WARNING: don't call AwakeFromLoad if you use SetMovieData
+ void SetMovieData(const UInt8* data,long size);
+ std::vector<UInt8> *GetMovieData() { return &m_MovieData; }
+
+ AudioClip *GetMovieAudioClip() { return m_AudioClip; }
+ void SetMovieAudioClip(AudioClip *clip);
+
+ float GetMovieDuration() { return m_MoviePlayback.GetMovieTotalDuration(); }
+};
+
+#endif // ENABLE_MOVIES
diff --git a/Runtime/Video/ScriptBindings/MovieTextureBindings.txt b/Runtime/Video/ScriptBindings/MovieTextureBindings.txt
new file mode 100644
index 0000000..b3b07d3
--- /dev/null
+++ b/Runtime/Video/ScriptBindings/MovieTextureBindings.txt
@@ -0,0 +1,92 @@
+C++RAW
+
+
+#include "UnityPrefix.h"
+#include "Configuration/UnityConfigure.h"
+#include "Runtime/Mono/MonoManager.h"
+#include "Runtime/Graphics/Transform.h"
+#include "Runtime/Utilities/PathNameUtility.h"
+#include "Runtime/Profiler/ProfilerHistory.h"
+#include "Runtime/Misc/PlayerSettings.h"
+#include "Runtime/Allocator/MemoryManager.h"
+#include "Runtime/Audio/AudioClip.h"
+#if ENABLE_AUDIO
+#include "Runtime/Audio/AudioSource.h"
+#include "Runtime/Audio/AudioListener.h"
+#include "Runtime/Audio/AudioManager.h"
+#include "Runtime/Audio/AudioReverbZone.h"
+#include "Runtime/Audio/AudioReverbFilter.h"
+#include "Runtime/Audio/AudioHighPassFilter.h"
+#include "Runtime/Audio/AudioLowPassFilter.h"
+#include "Runtime/Audio/AudioChorusFilter.h"
+#include "Runtime/Audio/AudioDistortionFilter.h"
+#include "Runtime/Audio/AudioEchoFilter.h"
+#endif
+#include "Runtime/Animation/Animation.h"
+#include "Runtime/Video/MovieTexture.h"
+
+using namespace Unity;
+
+/*
+	Mono defines a bool as either 1 or 2 bytes.
+	On Windows, a bool on the C++ side needs to be 2 bytes.
+	We use the typemap to map bools to shorts: when you want to export a
+	boolean value from C++ to Mono, you have to use a short on the C++ side.
+*/
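+// For example (a sketch; the names below are placeholders, not real engine code):
+//   short IsSomethingPlaying () { return m_IsPlaying ? 1 : 0; }  // seen as bool in C#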
+
+
+void PauseEditor ();
+using namespace std;
+
+CSRAW
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Collections;
+using System.Collections.Generic;
+using UnityEngineInternal;
+
+namespace UnityEngine
+{
+
+
+// Movie Textures (Pro only) are textures onto which movies are played back.
+CONDITIONAL ENABLE_MOVIES
+CLASS MovieTexture : Texture
+
+ // Starts playing the movie.
+ AUTO void Play ();
+
+	// Stops playing the movie, and rewinds it to the beginning.
+ AUTO void Stop ();
+
+ // Pauses playing the movie.
+ AUTO void Pause ();
+
+ // Returns the [[AudioClip]] belonging to the MovieTexture.
+
+ CONDITIONAL ENABLE_AUDIO
+ AUTO_PTR_PROP AudioClip audioClip GetMovieAudioClip
+
+ // Set this to true to make the movie loop.
+
+ AUTO_PROP bool loop GetLoop SetLoop
+
+	// Returns whether the movie is currently playing.
+
+ AUTO_PROP bool isPlaying IsPlaying
+
+	// If the movie is being downloaded from a web site, this returns whether enough data has been downloaded for playback to start without interruptions.
+
+ AUTO_PROP bool isReadyToPlay ReadyToPlay
+
+ // The time, in seconds, that the movie takes to play back completely.
+ AUTO_PROP float duration GetMovieDuration
+
+END
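+
+CSRAW
+// Usage sketch (hypothetical caller code, assuming a material that has this
+// MovieTexture assigned): start playback once enough data is buffered.
+//
+//   MovieTexture movie = (MovieTexture)renderer.material.mainTexture;
+//   if (movie.isReadyToPlay && !movie.isPlaying)
+//       movie.Play ();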
+
+
+
+CSRAW }
+
diff --git a/Runtime/Video/ScriptBindings/UnityEngineWebCamTexture.txt b/Runtime/Video/ScriptBindings/UnityEngineWebCamTexture.txt
new file mode 100644
index 0000000..278d7dd
--- /dev/null
+++ b/Runtime/Video/ScriptBindings/UnityEngineWebCamTexture.txt
@@ -0,0 +1,200 @@
+C++RAW
+
+#include "UnityPrefix.h"
+#include "Configuration/UnityConfigure.h"
+#include "Runtime/Scripting/ScriptingManager.h"
+#include "Runtime/Scripting/ScriptingExportUtility.h"
+#include "Runtime/Mono/MonoBehaviour.h"
+#include "Runtime/Video/VideoTexture.h"
+
+CSRAW
+
+namespace UnityEngine
+{
+
+CONDITIONAL ENABLE_WEBCAM
+// *undocumented*
+ENUM WebCamFlags
+	// The camera faces the same direction as the screen.
+ FrontFacing = 1,
+END
+
+
+CONDITIONAL ENABLE_WEBCAM
+// A structure describing the webcam device.
+STRUCT WebCamDevice
+ // A human-readable name of the device. Varies across different systems.
+ CSRAW public string name { get { return m_Name; } }
+
+	// True if the camera faces the same direction as the screen, false otherwise.
+	CSRAW public bool isFrontFacing { get { return (m_Flags & ((int)WebCamFlags.FrontFacing)) != 0; } }
+
+ CSRAW internal string m_Name;
+ CSRAW internal int m_Flags;
+END
+
+
+CONDITIONAL ENABLE_WEBCAM
+// WebCam Textures are textures onto which the live video input is rendered.
+CLASS WebCamTexture : Texture
+
+ CUSTOM private static void Internal_CreateWebCamTexture ([Writable]WebCamTexture self, string device, int requestedWidth, int requestedHeight, int maxFramerate)
+ {
+ WebCamTexture* texture = NEW_OBJECT_MAIN_THREAD (WebCamTexture);
+ texture->Reset();
+ Scripting::ConnectScriptingWrapperToObject (self.GetScriptingObject(), texture);
+ texture->AwakeFromLoad(kInstantiateOrCreateFromCodeAwakeFromLoad);
+ texture->SetRequestedWidth (requestedWidth);
+ texture->SetRequestedHeight (requestedHeight);
+ texture->SetRequestedFPS (maxFramerate);
+ texture->SetDevice (device);
+ }
+
+ // Create a WebCamTexture
+
+ CSRAW public WebCamTexture (string deviceName, int requestedWidth, int requestedHeight, int requestedFPS)
+ {
+ Internal_CreateWebCamTexture (this, deviceName, requestedWidth, requestedHeight, requestedFPS);
+ }
+
+ ///*listonly*
+ CSRAW public WebCamTexture (string deviceName, int requestedWidth, int requestedHeight)
+ {
+ Internal_CreateWebCamTexture (this, deviceName, requestedWidth, requestedHeight, 0);
+ }
+
+ ///*listonly*
+ CSRAW public WebCamTexture (string deviceName)
+ {
+ Internal_CreateWebCamTexture (this, deviceName, 0, 0, 0);
+ }
+
+ ///*listonly*
+ CSRAW public WebCamTexture (int requestedWidth, int requestedHeight, int requestedFPS)
+ {
+ Internal_CreateWebCamTexture (this, "", requestedWidth, requestedHeight, requestedFPS);
+ }
+
+ ///*listonly*
+ CSRAW public WebCamTexture (int requestedWidth, int requestedHeight)
+ {
+ Internal_CreateWebCamTexture (this, "", requestedWidth, requestedHeight, 0);
+ }
+
+ ///*listonly*
+ CSRAW public WebCamTexture ()
+ {
+ Internal_CreateWebCamTexture (this, "", 0, 0, 0);
+ }
+
+	// Starts the camera.
+ AUTO void Play();
+
+ // Pauses the camera.
+ AUTO void Pause();
+
+	// Stops the camera.
+ AUTO void Stop();
+
+	// Returns whether the camera is currently playing.
+ AUTO_PROP bool isPlaying IsPlaying
+
+ // Set this to specify the name of the device to use.
+ CUSTOM_PROP string deviceName { return scripting_string_new(self->GetDevice ()); } { self->SetDevice (value); }
+
+ // Set the requested frame rate of the camera device (in frames per second).
+ AUTO_PROP float requestedFPS GetRequestedFPS SetRequestedFPS
+
+ // Set the requested width of the camera device.
+ AUTO_PROP int requestedWidth GetRequestedWidth SetRequestedWidth
+
+ // Set the requested height of the camera device.
+ AUTO_PROP int requestedHeight GetRequestedHeight SetRequestedHeight
+
+ CONDITIONAL UNITY_IPHONE_API
+ CUSTOM_PROP bool isReadable { return self->IsReadable(); }
+
+ CONDITIONAL UNITY_IPHONE_API
+ CUSTOM void MarkNonReadable() { self->SetReadable(false); }
+
+	// Returns a list of available devices.
+ CUSTOM_PROP static WebCamDevice[] devices
+ {
+ MonoWebCamDevices devs;
+ WebCamTexture::GetDeviceNames(devs, true);
+
+ ScriptingClassPtr klass = GetScriptingManager().GetCommonClasses().webCamDevice;
+ ScriptingArrayPtr array = CreateScriptingArray<MonoWebCamDevice>(klass, devs.size());
+
+ for (MonoWebCamDevices::size_type i = 0; i < devs.size(); ++i)
+ {
+ #if UNITY_WINRT
+ ScriptingObjectPtr dev = CreateScriptingObjectFromNativeStruct(klass, devs[i]);
+ Scripting::SetScriptingArrayElement<ScriptingObjectPtr>(array, i, dev);
+ #else
+ Scripting::SetScriptingArrayElement<MonoWebCamDevice>(array, i, devs[i]);
+ #endif
+ }
+
+ return array;
+ }
+
+ // Returns pixel color at coordinates (x, y).
+ CUSTOM Color GetPixel (int x, int y) {
+ return self->GetPixel (x, y);
+ }
+
+ // Get a block of pixel colors.
+ CSRAW public Color[] GetPixels()
+ {
+ return GetPixels( 0, 0, width, height );
+ }
+
+ // Get a block of pixel colors.
+ CUSTOM Color[] GetPixels(int x, int y, int blockWidth, int blockHeight)
+ {
+ int res = blockWidth * blockHeight;
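+		// Guard against integer overflow in blockWidth * blockHeight: if the
+		// product wrapped around, the division below won't reproduce blockHeight.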
+ if (blockWidth != 0 && blockHeight != res / blockWidth) {
+ return SCRIPTING_NULL;
+ }
+ ScriptingArrayPtr colors = CreateScriptingArray<ColorRGBAf>(GetScriptingManager().GetCommonClasses().color, res );
+ self->GetPixels( x, y, blockWidth, blockHeight, &Scripting::GetScriptingArrayElement<ColorRGBAf>(colors, 0));
+ return colors;
+ }
+
+	// Returns the pixel data in raw format.
+
+ CUSTOM public Color32[] GetPixels32(Color32[] colors = null)
+ {
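+		// If the caller passed an array, validate its length and reuse it so that
+		// per-frame polling doesn't allocate; otherwise allocate a new w*h array.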
+ int w = self->GetDataWidth();
+ int h = self->GetDataHeight();
+ if (colors != SCRIPTING_NULL)
+ {
+ int size = GetScriptingArraySize(colors);
+ if (size != w * h)
+ {
+ ErrorStringMsg ("Input color array length needs to match width * height, but %d != %d * %d", size, w, h);
+ return SCRIPTING_NULL;
+ }
+ }
+ else
+ colors = CreateScriptingArray<ColorRGBA32>(GetScriptingManager().GetCommonClasses().color32, w * h);
+ self->GetPixels(kTexFormatRGBA32, &Scripting::GetScriptingArrayElement<ColorRGBA32>(colors, 0), GetScriptingArraySize(colors) * 4);
+ return colors;
+ }
+
+	// Returns a clockwise angle (in degrees) which can be used to rotate a polygon so the camera contents are shown in the correct orientation.
+ CUSTOM_PROP int videoRotationAngle { return self->GetVideoRotationAngle(); }
+
+ CUSTOM_PROP bool videoVerticallyMirrored
+ {
+ return self->IsVideoVerticallyMirrored();
+ }
+
+ // Did the video buffer update this frame?
+ AUTO_PROP bool didUpdateThisFrame DidUpdateThisFrame
+
+END
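+
+CSRAW
+// Usage sketch (hypothetical MonoBehaviour code): prefer a front-facing camera
+// when one is present, otherwise fall back to the default device.
+//
+//   string deviceName = "";
+//   foreach (WebCamDevice d in WebCamTexture.devices)
+//       if (d.isFrontFacing) { deviceName = d.name; break; }
+//   WebCamTexture cam = new WebCamTexture (deviceName);
+//   renderer.material.mainTexture = cam;
+//   cam.Play ();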
+
+CSRAW
+}
diff --git a/Runtime/Video/VideoTexture.h b/Runtime/Video/VideoTexture.h
new file mode 100644
index 0000000..ffa4dd4
--- /dev/null
+++ b/Runtime/Video/VideoTexture.h
@@ -0,0 +1,252 @@
+#ifndef LIVE_VIDEO_TEXTURE
+#define LIVE_VIDEO_TEXTURE
+
+#if ENABLE_WEBCAM
+
+#include "BaseVideoTexture.h"
+#include "Runtime/Graphics/Image.h"
+#include "Runtime/Math/Color.h"
+#include "Runtime/Scripting/ScriptingUtility.h"
+
+#if UNITY_WINRT
+#include <windows.foundation.h>
+#endif
+
+struct PlatformDependentWebCamTextureData;
+
+enum WebCamFlags
+{
+ kWebCamFrontFacing = 1,
+};
+
+struct MonoWebCamDevice
+{
+ ScriptingStringPtr name;
+ int flags;
+
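+	// Lets std::find search a MonoWebCamDevices vector by plain device name
+	// (used by WebCamTexture::EnsureUniqueName below).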
+ bool operator== (std::string const &other) const
+ {
+ std::string cppStr = scripting_cpp_string_for (name);
+ return cppStr == other;
+ }
+};
+
+typedef UNITY_VECTOR(kMemWebCam, MonoWebCamDevice) MonoWebCamDevices;
+typedef MonoWebCamDevices::iterator MonoWebCamDevicesIter;
+
+class WebCamTexture: public BaseVideoTexture
+{
+public:
+ REGISTER_DERIVED_CLASS (WebCamTexture, Texture)
+
+ WebCamTexture (MemLabelId label, ObjectCreationMode mode = kCreateObjectDefault)
+ : BaseVideoTexture(label, mode)
+ {
+ m_RequestedFPS = 0.0f;
+ m_RequestedWidth = 0;
+ m_RequestedHeight = 0;
+ m_IsCreated = false;
+ m_VT = NULL;
+
+#if UNITY_WINRT
+ RunExactlyOnce();
+#endif
+ }
+
+ static void InitializeClass ();
+ static void CleanupClass ();
+
+ virtual void Play();
+ virtual void Pause();
+ virtual void Stop ();
+ virtual void Update ();
+
+#if UNITY_WP8
+ virtual void Suspend();
+ virtual void Resume();
+#endif
+
+ void SetRequestedWidth (int width) { m_RequestedWidth = width; SetDirty(); }
+ int GetRequestedWidth () const { return m_RequestedWidth; }
+
+	void SetRequestedHeight (int height) { m_RequestedHeight = height; SetDirty(); }
+ int GetRequestedHeight () const { return m_RequestedHeight; }
+
+ void SetRequestedFPS (float fps) { m_RequestedFPS = fps; SetDirty(); }
+ float GetRequestedFPS () const { return m_RequestedFPS; }
+
+ void SetDevice (const std::string &name) { m_DeviceName = name; SetDirty(); }
+ std::string GetDevice() const;
+
+ static void GetDeviceNames (MonoWebCamDevices &devices, bool forceUpdate);
+
+ #if ENABLE_PROFILER || UNITY_EDITOR
+ virtual int GetStorageMemorySize() const { return 0; }
+ #endif
+
+ ColorRGBAf GetPixel (int x, int y) const;
+ bool GetPixels (int x, int y, int width, int height, ColorRGBAf* data) const;
+ bool GetPixels (int dstFormat, void *dstData, size_t dstSize) const;
+
+#if UNITY_IPHONE || UNITY_ANDROID || UNITY_BLACKBERRY || UNITY_TIZEN
+ virtual int GetVideoRotationAngle() const;
+#endif
+#if UNITY_IPHONE || UNITY_BLACKBERRY
+ virtual bool IsVideoVerticallyMirrored() const;
+#endif
+
+private:
+#if UNITY_WINRT
+	// Constructor helper. It ensures that certain invariants hold from the
+	// moment the object is created, so we don't have to re-check state later.
+#endif
+
+ void Create();
+ void Cleanup ();
+
+public:
+ static void EnsureUniqueName (MonoWebCamDevice &device,
+ const MonoWebCamDevices &devs);
+private:
+ int m_RequestedWidth;
+ int m_RequestedHeight;
+ float m_RequestedFPS;
+ std::string m_DeviceName;
+ bool m_IsCreated;
+
+ static void InitDeviceList();
+ int GetDeviceIdFromDeviceList(const std::string& name) const;
+
+ PlatformDependentWebCamTextureData *m_VT;
+
+protected:
+#if UNITY_IPHONE
+ virtual TextureFormat GetBufferTextureFormat() const { return kTexFormatBGRA32; }
+ virtual bool CanSetReadable(bool readable) const;
+#endif
+
+#if UNITY_BLACKBERRY || UNITY_TIZEN || UNITY_WP8
+ virtual TextureFormat GetBufferTextureFormat() const { return kTexFormatBGRA32; }
+#endif
+};
+
+inline ColorRGBAf WebCamTexture::GetPixel (int x, int y) const
+{
+ if (!m_IsCreated)
+ {
+ ErrorString ("Cannot get pixels when webcam is not running");
+ return ColorRGBAf(0,0,0,0);
+ }
+ if (!IsReadable())
+ {
+ ErrorString ("Cannot get pixels when webcam is non-readable");
+ return ColorRGBAf(0,0,0,0);
+ }
+
+ return GetImagePixel ((UInt8*)GetImageBuffer(), GetPaddedWidth(), GetPaddedHeight(), GetBufferTextureFormat(), static_cast<TextureWrapMode>(GetSettings().m_WrapMode), x, y);
+}
+
+inline bool WebCamTexture::GetPixels( int x, int y, int width, int height, ColorRGBAf* colors ) const
+{
+ if (width == 0 || height == 0)
+ return true; // nothing to do
+
+ if (!m_IsCreated)
+ {
+ ErrorString ("Cannot get pixels when webcam is not running");
+ return false;
+ }
+ if (!IsReadable())
+ {
+ ErrorString ("Cannot get pixels when webcam is non-readable");
+ return false;
+ }
+
+ return GetImagePixelBlock ((UInt8*)GetImageBuffer(), GetPaddedWidth(), GetPaddedHeight(), GetBufferTextureFormat(), x, y, width, height, colors);
+}
+
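+// Copies the image buffer into dstData, converting from the native buffer format
+// to dstFormat with a row-by-row blit. dstSize must be at least the tightly
+// packed size of the destination image.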
+inline bool WebCamTexture::GetPixels (int dstFormat, void *dstData, size_t dstSize) const
+{
+ size_t srcRowBytes = GetRowBytesFromWidthAndFormat(GetPaddedWidth(), GetBufferTextureFormat());
+ size_t dstRowBytes = GetRowBytesFromWidthAndFormat(GetDataWidth(), dstFormat);
+ if (dstSize < dstRowBytes * GetDataHeight())
+ {
+ ErrorString ("Buffer is too small to get image data");
+ return false;
+ }
+
+ ImageReference src (GetDataWidth(), GetDataHeight(), srcRowBytes, GetBufferTextureFormat(), (UInt8*)GetImageBuffer());
+ ImageReference dst (GetDataWidth(), GetDataHeight(), dstRowBytes, dstFormat, dstData);
+ dst.BlitImage( src );
+ return true;
+}
+
+inline int WebCamTexture::GetDeviceIdFromDeviceList(const std::string& name) const
+{
+ MonoWebCamDevices names;
+ GetDeviceNames(names, false);
+ if(!name.empty())
+ {
+		for (MonoWebCamDevices::size_type i = 0; i < names.size(); i++)
+		{
+			if (names[i] == name)
+				return (int)i;
+		}
+ }
+ ErrorString ("Cannot find webcam device "+name+".");
+ return -1;
+ }
+ else
+ {
+ // Return camera 0 as default
+ if(!names.empty())
+ return 0;
+ else
+ {
+			ErrorString ("No available webcams were found. Either there is no webcam connected, or they are all in use by other applications (such as Skype).");
+ return -1;
+ }
+ }
+}
+
+inline std::string WebCamTexture::GetDevice() const
+{
+ if(m_DeviceName.size() > 0)
+ {
+ return m_DeviceName;
+ }
+ else
+ {
+ MonoWebCamDevices names;
+ GetDeviceNames(names, false);
+
+ if(names.size() > 0)
+ return scripting_cpp_string_for(names[0].name);
+ else
+ return "no camera available.";
+ }
+}
+
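+// If several devices report the same name, number the duplicates until each name
+// is unique within devs, e.g. "USB Camera", "USB Camera 1", "USB Camera 2".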
+inline void WebCamTexture::EnsureUniqueName (MonoWebCamDevice &device,
+											 MonoWebCamDevices const &devs)
+{
+	int num = 0;
+	std::string basename = scripting_cpp_string_for (device.name);
+	std::string testname = basename;
+
+	while (true)
+	{
+		// Rebuild the candidate from the base name each iteration; appending to
+		// the previous candidate would produce names like "Camera 1 2 3".
+		if (num > 0)
+			testname = basename + Format (" %d", num);
+
+		if (std::find (devs.begin (), devs.end (), testname) == devs.end ())
+		{
+			device.name = scripting_string_new (testname.c_str ());
+			break;
+		}
+
+		num++;
+	}
+}
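+
+// Usage sketch (hypothetical platform enumeration code; rawDeviceName and
+// isFront are placeholders):
+//
+//   MonoWebCamDevice dev;
+//   dev.name  = scripting_string_new (rawDeviceName);
+//   dev.flags = isFront ? kWebCamFrontFacing : 0;
+//   WebCamTexture::EnsureUniqueName (dev, devices);
+//   devices.push_back (dev);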
+
+#endif
+#endif