Files
video-v1/vav2/platforms/windows/applications/vav2player/Vav2Player/VideoPlayerControl.xaml.cpp

1672 lines
64 KiB
C++

#include "pch.h"
#include "VideoPlayerControl.xaml.h"
#if __has_include("VideoPlayerControl.g.cpp")
#include "VideoPlayerControl.g.cpp"
#endif
// Note: VideoTypes.h not included due to VavCore migration guard
#include <winrt/Microsoft.UI.Dispatching.h>
#include <winrt/Windows.Storage.Streams.h>
#include <winrt/Windows.Storage.h>
#include <windows.storage.streams.h>
#include <chrono>
#include <algorithm>
#include <cstring>
#include <cassert>
// D3D11 for GPU surface decoding
#include <d3d11.h>
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;
// Include log manager for logging
#include "src/Logger/LogManager.h"
// Using alias to avoid namespace conflicts
using LogMgr = Vav2Player::LogManager;
using namespace winrt;
using namespace winrt::Microsoft::UI::Xaml;
using namespace winrt::Microsoft::UI::Xaml::Controls;
using namespace winrt::Microsoft::UI::Dispatching;
namespace winrt::Vav2Player::implementation
{
// Constructs the control: loads persisted decoder settings, performs the
// process-wide VavCore library initialization exactly once, and creates
// this control's own VavCore player instance (destroyed in the dtor).
VideoPlayerControl::VideoPlayerControl()
: m_useHardwareRendering(true) // Default to GPU rendering
, m_vavCorePlayer(nullptr)
, m_memoryPool(std::make_unique<MemoryPool>())
, m_performanceMonitor(std::make_unique<AdvancedPerformanceMonitor>())
{
    InitializeComponent();
    // Load decoder settings from Windows.Storage.ApplicationData
    LoadDecoderSettings();
    // Initialize VavCore library only once for the whole process.
    // A magic-static initializer is thread-safe (C++11), unlike the previous
    // unsynchronized check-and-set pattern, and lets us log a failure.
    static const bool vavCoreInitialized = (vavcore_initialize() == VAVCORE_SUCCESS);
    if (!vavCoreInitialized) {
        LogMgr::GetInstance().LogError(L"vavcore_initialize failed", L"VideoPlayerControl");
    }
    // Create VavCore player
    m_vavCorePlayer = vavcore_create_player();
}
// Destructor: stops playback and tears down resources.
//
// BUGFIX: the timing thread is joined FIRST. The old order destroyed
// m_vavCorePlayer and shut down m_gpuRenderer before joining, while the
// timing thread may still be decoding with both — a use-after-free window.
VideoPlayerControl::~VideoPlayerControl()
{
    // Signal the decode/timing loops to stop.
    m_isPlaying = false;
    m_shouldStopTiming = true;

    // Join the timing thread before releasing anything it can touch.
    if (m_timingThread && m_timingThread->joinable()) {
        m_timingThread->join();
        m_timingThread.reset();
    }

    // Now it is safe to destroy the VavCore player ...
    if (m_vavCorePlayer) {
        vavcore_destroy_player(m_vavCorePlayer);
        m_vavCorePlayer = nullptr;
    }
    // ... release the D3D11 device ...
    ReleaseD3D11Device();
    // ... and shut down the GPU renderer.
    if (m_gpuRenderer) {
        m_gpuRenderer->Shutdown();
        m_gpuRenderer.reset();
    }
}
// Event Handlers
// Loaded handler: marks the control ready, kicks off any pre-assigned
// source, and wires up AspectFit recalculation on container resize.
void VideoPlayerControl::UserControl_Loaded(winrt::Windows::Foundation::IInspectable const&, winrt::Microsoft::UI::Xaml::RoutedEventArgs const&)
{
    try
    {
        m_isInitialized = true;
        UpdateStatus(L"Ready");

        // A source assigned before the control loaded is picked up now.
        if (!m_videoSource.empty())
        {
            LoadVideo(m_videoSource);
        }

        // Keep AspectFit in sync whenever the display container resizes.
        VideoDisplayArea().SizeChanged([this](auto&&, auto&&) {
            ApplyAspectFitIfReady();
        });
    }
    catch (...)
    {
        UpdateStatus(L"Error during initialization");
    }
}
// Unloaded handler: full teardown of playback state and resources.
// Teardown order mirrors the destructor: stop flags, join the timing
// thread, then release timer, renderer, player, and bitmap.
void VideoPlayerControl::UserControl_Unloaded(winrt::Windows::Foundation::IInspectable const&, winrt::Microsoft::UI::Xaml::RoutedEventArgs const&)
{
    try
    {
        // Halt playback without seeking (seeking here risked a deadlock).
        m_isPlaying = false;
        m_isLoaded = false;
        m_isInitialized = false;

        // Join the timing thread before releasing anything it may use.
        m_shouldStopTiming = true;
        if (m_timingThread && m_timingThread->joinable()) {
            m_timingThread->join();
            m_timingThread.reset();
        }

        // Release the UI-thread playback timer.
        if (m_playbackTimer)
        {
            m_playbackTimer.Stop();
            m_playbackTimer = nullptr;
        }

        // Shut down GPU rendering resources.
        if (m_gpuRenderer)
        {
            m_gpuRenderer->Shutdown();
            m_gpuRenderer.reset();
        }

        // Destroy the native player (performs its own internal cleanup).
        if (m_vavCorePlayer) {
            vavcore_destroy_player(m_vavCorePlayer);
            m_vavCorePlayer = nullptr;
        }

        m_renderBitmap = nullptr;
        UpdateStatus(L"Unloaded");
    }
    catch (...)
    {
        // Swallow teardown errors; the control is going away anyway.
    }
}
// SizeChanged handler: re-applies AspectFit and, if hardware rendering is
// wanted but GPU init was previously skipped due to a zero-sized
// container, retries the GPU renderer now that a real size is available.
void VideoPlayerControl::UserControl_SizeChanged(winrt::Windows::Foundation::IInspectable const&, winrt::Microsoft::UI::Xaml::SizeChangedEventArgs const&)
{
    // Keep the letterboxed layout in sync with the new container size.
    if (m_hasValidVideoSize && m_videoWidth > 0 && m_videoHeight > 0) {
        UpdateVideoImageAspectFit(m_videoWidth, m_videoHeight);
    }

    // Retry GPU initialization: the swap-chain panel still being collapsed
    // means we fell back to CPU rendering earlier.
    if (m_useHardwareRendering && m_isLoaded) {
        if (auto container = VideoDisplayArea()) {
            const double containerWidth = container.ActualWidth();
            const double containerHeight = container.ActualHeight();
            if (containerWidth > 0 && containerHeight > 0 &&
                VideoSwapChainPanel().Visibility() == winrt::Microsoft::UI::Xaml::Visibility::Collapsed) {
                InitializeVideoRenderer();
            }
        }
    }
}
void VideoPlayerControl::HoverDetector_PointerEntered(winrt::Windows::Foundation::IInspectable const&, winrt::Microsoft::UI::Xaml::Input::PointerRoutedEventArgs const&)
{
    // Intentionally empty: on-hover transport controls are disabled for now.
}
void VideoPlayerControl::HoverDetector_PointerExited(winrt::Windows::Foundation::IInspectable const&, winrt::Microsoft::UI::Xaml::Input::PointerRoutedEventArgs const&)
{
    // Intentionally empty: on-hover transport controls are disabled for now.
}
// Public Properties
// ---- Simple properties --------------------------------------------------

// Path/URI of the current video source.
winrt::hstring VideoPlayerControl::VideoSource()
{
    return m_videoSource;
}

// Setting a new, non-empty source on an initialized control loads it
// immediately; re-assigning the same value is a no-op.
void VideoPlayerControl::VideoSource(winrt::hstring const& value)
{
    if (m_videoSource == value)
    {
        return; // unchanged
    }
    m_videoSource = value;
    if (m_isInitialized && !value.empty())
    {
        LoadVideo(value);
    }
}

// Whether transport controls should be visible (stored flag only for now).
bool VideoPlayerControl::ShowControls()
{
    return m_showControls;
}

void VideoPlayerControl::ShowControls(bool value)
{
    m_showControls = value;
    if (m_isInitialized)
    {
        // Placeholder: controls-visibility update not implemented yet.
    }
}

// Whether playback auto-starts after a successful LoadVideo.
bool VideoPlayerControl::AutoPlay()
{
    return m_autoPlay;
}

void VideoPlayerControl::AutoPlay(bool value)
{
    m_autoPlay = value;
}
// Maps the internal VavCore decoder enum onto the projected WinRT enum.
Vav2Player::VideoDecoderType VideoPlayerControl::DecoderType()
{
    switch (m_decoderType)
    {
    case VAVCORE_DECODER_DAV1D:
        return Vav2Player::VideoDecoderType::Software;
    case VAVCORE_DECODER_NVDEC:
        // No projected NVDEC value yet; surfaced as Software temporarily.
        return Vav2Player::VideoDecoderType::Software;
    case VAVCORE_DECODER_MEDIA_FOUNDATION:
        return Vav2Player::VideoDecoderType::HardwareMF;
    case VAVCORE_DECODER_AUTO:
    default:
        return Vav2Player::VideoDecoderType::Auto;
    }
}
// Translates the projected enum back to VavCore's decoder enum and applies
// it via SetInternalDecoderType (which also updates an active player).
void VideoPlayerControl::DecoderType(Vav2Player::VideoDecoderType value)
{
    VavCoreDecoderType mapped = VAVCORE_DECODER_AUTO;
    switch (value)
    {
    case Vav2Player::VideoDecoderType::Software:
        mapped = VAVCORE_DECODER_DAV1D;
        break;
    case Vav2Player::VideoDecoderType::HardwareMF:
        mapped = VAVCORE_DECODER_MEDIA_FOUNDATION;
        break;
    // HardwareNV (VAVCORE_DECODER_NVDEC) intentionally not exposed yet.
    case Vav2Player::VideoDecoderType::Auto:
    default:
        break; // stays VAVCORE_DECODER_AUTO
    }
    SetInternalDecoderType(mapped);
}
bool VideoPlayerControl::UseHardwareRendering()
{
return m_useHardwareRendering;
}
void VideoPlayerControl::UseHardwareRendering(bool value)
{
if (m_useHardwareRendering != value)
{
m_useHardwareRendering = value;
// Reinitialize renderer if video is already loaded
if (m_isLoaded && m_vavCorePlayer)
{
InitializeVideoRenderer();
}
else
{
// Just switch visibility for now
if (value)
{
VideoSwapChainPanel().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Visible);
VideoImage().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Collapsed);
}
else
{
VideoSwapChainPanel().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Collapsed);
VideoImage().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Visible);
}
}
}
}
// Internal (non-projected) decoder-type accessors.
VavCoreDecoderType VideoPlayerControl::GetInternalDecoderType()
{
    return m_decoderType;
}

void VideoPlayerControl::SetInternalDecoderType(VavCoreDecoderType value)
{
    if (m_decoderType == value)
    {
        return; // nothing to change
    }
    m_decoderType = value;
    // Propagate immediately to the active player, if any.
    if (m_isLoaded && m_vavCorePlayer)
    {
        vavcore_set_decoder_type(m_vavCorePlayer, value);
    }
}
// Public Methods
// Loads a video file and prepares it for playback.
//
// Sequencing here is deliberate and load-bearing:
//   1. The decoder type is applied to the player first.
//   2. With hardware rendering on, the GPU renderer is created and its
//      D3D12 device handed to VavCore BEFORE vavcore_open_file, so the
//      decoder can be created with D3D12 interop from the start.
//   3. After the file opens, metadata populates size/fps/duration state,
//      the renderer is (re)initialized, and an NV12 texture sized to the
//      video is created for zero-copy decode.
//
// @param filePath  Path of the video file to open.
void VideoPlayerControl::LoadVideo(winrt::hstring const& filePath)
{
std::string filePathStr = winrt::to_string(filePath);
UpdateStatus(L"Loading video...");
LoadingRing().IsActive(true);
// Log video load attempt
LogMgr::GetInstance().LogInfo(L"Attempting to load video: " + std::wstring(filePath), L"VideoPlayerControl");
// Reset playback position/flags and cached video dimensions
ResetVideoState();
if (!m_vavCorePlayer) {
UpdateStatus(L"VavCore player not initialized");
LoadingRing().IsActive(false);
LogMgr::GetInstance().LogError(L"VavCore player not initialized", L"VideoPlayerControl");
return;
}
// Set decoder type before opening file
vavcore_set_decoder_type(m_vavCorePlayer, m_decoderType);
// Log decoder type selection (display names only; no behavior)
std::wstring decoderName = L"Unknown";
switch (m_decoderType) {
case VAVCORE_DECODER_AUTO: decoderName = L"Auto"; break;
case VAVCORE_DECODER_DAV1D: decoderName = L"Software (dav1d)"; break;
case VAVCORE_DECODER_MEDIA_FOUNDATION: decoderName = L"Hardware (Media Foundation)"; break;
case VAVCORE_DECODER_NVDEC: decoderName = L"Hardware (NVDEC)"; break;
case VAVCORE_DECODER_VPL: decoderName = L"Hardware (Intel VPL)"; break;
case VAVCORE_DECODER_AMF: decoderName = L"Hardware (AMD AMF)"; break;
}
LogMgr::GetInstance().LogDecoderInfo(decoderName, L"Decoder type selected");
// Initialize GPU renderer and set D3D12 device BEFORE opening file
// This ensures the decoder is created with D3D12 interop from the start
if (m_useHardwareRendering) {
// Create GPU renderer early
if (!m_gpuRenderer) {
m_gpuRenderer = std::make_unique<SimpleGPURenderer>();
}
// Get container dimensions
auto container = VideoDisplayArea();
uint32_t width = static_cast<uint32_t>(container.ActualWidth());
uint32_t height = static_cast<uint32_t>(container.ActualHeight());
// If container has valid dimensions, initialize GPU renderer now.
// (Zero-sized containers are retried later from UserControl_SizeChanged.)
if (width > 0 && height > 0) {
HRESULT hr = m_gpuRenderer->InitializeWithSwapChain(VideoSwapChainPanel(), width, height);
if (SUCCEEDED(hr)) {
// Pass D3D12 device to VavCore BEFORE decoder initialization
auto* gpuRenderer = dynamic_cast<SimpleGPURenderer*>(m_gpuRenderer.get());
if (gpuRenderer) {
ID3D12Device* d3d12Device = gpuRenderer->GetD3D12Device();
if (d3d12Device) {
vavcore_set_d3d_device(m_vavCorePlayer, d3d12Device, VAVCORE_SURFACE_D3D12_RESOURCE);
LogMgr::GetInstance().LogInfo(L"D3D12 device set before decoder initialization", L"VideoPlayerControl");
}
}
}
}
}
// Open video file using VavCore API
VavCoreResult result = vavcore_open_file(m_vavCorePlayer, filePathStr.c_str());
if (result != VAVCORE_SUCCESS) {
UpdateStatus(L"Failed to open video file");
LoadingRing().IsActive(false);
LogMgr::GetInstance().LogVideoError(L"Failed to open file", std::wstring(filePath));
return;
}
// Log actual codec name after decoder is initialized
const char* codecName = vavcore_get_codec_name(m_vavCorePlayer);
if (codecName) {
// Element-wise char -> wchar_t widening; assumes ASCII codec names
std::wstring codecNameW = std::wstring(codecName, codecName + strlen(codecName));
LogMgr::GetInstance().LogDecoderInfo(codecNameW, L"Actual decoder initialized");
}
// Get video metadata from VavCore
VavCoreVideoMetadata metadata;
result = vavcore_get_metadata(m_vavCorePlayer, &metadata);
if (result != VAVCORE_SUCCESS) {
UpdateStatus(L"Failed to get video metadata");
LoadingRing().IsActive(false);
LogMgr::GetInstance().LogVideoError(L"Failed to get metadata", std::wstring(filePath));
return;
}
// Set up video properties. Non-positive frame rates fall back to 30 fps,
// which also protects the duration division below from divide-by-zero.
m_videoWidth = metadata.width;
m_videoHeight = metadata.height;
m_frameRate = metadata.frame_rate > 0 ? metadata.frame_rate : 30.0;
m_totalFrames = metadata.total_frames;
m_duration = metadata.total_frames / m_frameRate;
// Initialize D3D surface support if hardware rendering is enabled
if (m_useHardwareRendering) {
InitializeD3DSurfaceSupport();
}
// Log video info
std::wstring videoInfo = L"Resolution: " + std::to_wstring(m_videoWidth) + L"x" + std::to_wstring(m_videoHeight) +
L", FPS: " + std::to_wstring(static_cast<int>(m_frameRate)) +
L", Frames: " + std::to_wstring(m_totalFrames) +
L", Duration: " + std::to_wstring(static_cast<int>(m_duration)) + L"s";
LogMgr::GetInstance().LogInfo(videoInfo, L"VideoPlayerControl");
InitializeVideoRenderer();
// Create NV12 texture for zero-copy decode AFTER we know video dimensions
if (m_gpuRenderer && m_useHardwareRendering) {
auto* gpuRenderer = dynamic_cast<SimpleGPURenderer*>(m_gpuRenderer.get());
if (gpuRenderer) {
// Create NV12 texture for CUDA-D3D12 interop
HRESULT hr = gpuRenderer->CreateNV12TextureR8Layout(m_videoWidth, m_videoHeight);
if (SUCCEEDED(hr)) {
LogMgr::GetInstance().LogInfo(L"NV12 texture created for NVDEC zero-copy decode", L"VideoPlayerControl");
} else {
LogMgr::GetInstance().LogError(L"Failed to create NV12 texture", L"VideoPlayerControl");
}
}
}
m_hasValidVideoSize = true;
m_isLoaded = true;
ApplyAspectFitIfReady();
LoadingRing().IsActive(false);
UpdateStatus(L"Video loaded");
LogMgr::GetInstance().LogVideoLoad(std::wstring(filePath), true);
if (m_autoPlay) {
LogMgr::GetInstance().LogInfo(L"Auto-play enabled, starting playback", L"VideoPlayerControl");
Play();
}
}
// Starts playback by spawning a background timing thread that decodes at
// the video frame rate and enqueues render/present work to the UI thread.
//
// Threading model (load-bearing):
//  - decode (vavcore_decode_to_surface) runs on the timing thread;
//  - render/present runs on the UI thread via DispatcherQueue;
//  - m_frameProcessing is held from decode start until the UI-thread
//    render completes, so at most one frame is in flight (prevents NVDEC
//    surface-queue overflow).
// The thread holds only a weak reference to the control, so it exits
// cleanly if the control is destroyed.
void VideoPlayerControl::Play()
{
if (!m_isLoaded || m_isPlaying) {
if (!m_isLoaded) {
LogMgr::GetInstance().LogWarning(L"Cannot play: Video not loaded", L"VideoPlayerControl");
}
return;
}
m_isPlaying = true;
UpdateStatus(L"Playing");
LogMgr::GetInstance().LogVideoPlay(std::wstring(m_videoSource));
// Record playback start time for accurate speed measurement
m_playbackStartTime = std::chrono::high_resolution_clock::now();
// Stop any existing timer/thread before starting fresh ones
if (m_playbackTimer)
{
m_playbackTimer.Stop();
m_playbackTimer = nullptr;
}
if (m_timingThread && m_timingThread->joinable()) {
m_shouldStopTiming = true;
m_timingThread->join();
m_timingThread.reset();
}
// Start high-resolution timing thread
m_shouldStopTiming = false;
auto weakThis = get_weak();
// Frame pacing interval in milliseconds (m_frameRate is > 0 after load)
double targetIntervalMs = 1000.0 / m_frameRate;
m_timingThread = std::make_unique<std::thread>([weakThis, targetIntervalMs]() {
auto start = std::chrono::high_resolution_clock::now();
while (true) {
if (auto strongThis = weakThis.get()) {
if (strongThis->m_shouldStopTiming || !strongThis->m_isPlaying) {
break;
}
// CRITICAL: Decode on background thread, but Present on UI thread
// This prevents UI blocking while maintaining D3D12 thread safety
bool expected = false;
// Acquire the single-frame-in-flight flag; on failure the previous
// frame is still being rendered and this tick is skipped.
if (strongThis->m_frameProcessing.compare_exchange_strong(expected, true)) {
// Decode on current background thread (heavy CUDA/NVDEC work)
if (strongThis->m_isPlaying && strongThis->m_isLoaded && strongThis->m_gpuRenderer) {
auto* gpuRenderer = dynamic_cast<SimpleGPURenderer*>(strongThis->m_gpuRenderer.get());
if (gpuRenderer) {
ID3D12Resource* nv12Texture = gpuRenderer->GetNV12TextureForCUDAInterop();
if (nv12Texture) {
VavCoreVideoFrame vavFrame;
VavCoreResult result = vavcore_decode_to_surface(
strongThis->m_vavCorePlayer,
VAVCORE_SURFACE_D3D12_RESOURCE,
nv12Texture,
&vavFrame
);
if (result == VAVCORE_SUCCESS) {
OutputDebugStringA("[VideoPlayerControl] Decode SUCCESS, enqueuing render...\n");
// Render + Present on UI thread (lightweight, thread-safe)
// CRITICAL: Keep m_frameProcessing = true until render completes
// to prevent NVDEC surface queue overflow
auto enqueued = strongThis->DispatcherQueue().TryEnqueue([strongThis, gpuRenderer]() {
OutputDebugStringA("[VideoPlayerControl] Render callback executing...\n");
if (strongThis->m_isPlaying) {
HRESULT hr = gpuRenderer->RenderNV12TextureToBackBuffer();
if (SUCCEEDED(hr)) {
OutputDebugStringA("[VideoPlayerControl] Render SUCCESS\n");
} else {
char buf[256];
sprintf_s(buf, "[VideoPlayerControl] Render FAILED: 0x%08X\n", hr);
OutputDebugStringA(buf);
}
}
// Mark frame processing complete AFTER render
strongThis->m_frameProcessing.store(false);
});
if (!enqueued) {
OutputDebugStringA("[VideoPlayerControl] WARNING: Failed to enqueue render!\n");
// If enqueue failed, release flag immediately
strongThis->m_frameProcessing.store(false);
}
} else if (result == VAVCORE_END_OF_STREAM) {
strongThis->m_isPlaying = false;
strongThis->m_frameProcessing.store(false);
OutputDebugStringA("[VideoPlayerControl] End of stream\n");
} else {
char buf[256];
sprintf_s(buf, "[VideoPlayerControl] Decode failed: %d\n", result);
OutputDebugStringA(buf);
strongThis->m_frameProcessing.store(false);
}
}
}
}
} else {
// Previous frame still processing, skip this frame
}
// High-precision sleep until next frame; advancing `start` by the
// fixed interval keeps long-term pacing drift-free.
auto nextFrame = start + std::chrono::microseconds(
static_cast<long long>(targetIntervalMs * 1000));
std::this_thread::sleep_until(nextFrame);
start = nextFrame;
} else {
break; // Object was destroyed
}
}
});
// Decode/present the first frame synchronously so playback starts
// immediately rather than after the first timer interval.
ProcessSingleFrame();
}
// Pauses playback: signals the loops to stop, halts the UI timer, and
// blocks until the timing thread has fully exited.
void VideoPlayerControl::Pause()
{
    // Signal both the decode loop and the timing thread.
    m_isPlaying = false;
    m_shouldStopTiming = true;

    if (m_playbackTimer)
    {
        m_playbackTimer.Stop();
    }

    // Wait for the timing thread to finish its current iteration.
    if (m_timingThread && m_timingThread->joinable()) {
        m_timingThread->join();
        m_timingThread.reset();
    }

    UpdateStatus(L"Paused");
    LogMgr::GetInstance().LogVideoPause(std::wstring(m_videoSource));
}
void VideoPlayerControl::Stop()
{
m_isPlaying = false;
m_shouldStopTiming = true;
// Properly cleanup timer and thread to prevent resource leaks
if (m_playbackTimer)
{
m_playbackTimer.Stop();
m_playbackTimer = nullptr; // Release timer completely
}
if (m_timingThread && m_timingThread->joinable()) {
m_timingThread->join();
m_timingThread.reset();
}
m_currentFrame = 0;
m_currentTime = 0.0;
// Reset VavCore player to beginning for next playback
if (m_vavCorePlayer && m_isLoaded) {
VavCoreResult result = vavcore_reset(m_vavCorePlayer);
if (result != VAVCORE_SUCCESS) {
UpdateStatus(L"Stop - Reset failed");
LogMgr::GetInstance().LogError(L"Failed to reset VavCore player", L"VideoPlayerControl");
} else {
LogMgr::GetInstance().LogInfo(L"VavCore player reset to beginning", L"VideoPlayerControl");
}
}
UpdateStatus(L"Stopped - Ready to play from beginning");
LogMgr::GetInstance().LogVideoStop(std::wstring(m_videoSource));
}
// Decodes and renders exactly one frame on the calling thread.
//
// Path selection: tries GPU zero-copy decode straight into the renderer's
// NV12 texture first; if that fails (or hardware rendering is off) it
// falls back to CPU decode + software YUV->BGRA conversion. Also feeds
// the performance monitor and emits aggregate stats every 60 frames.
void VideoPlayerControl::ProcessSingleFrame()
{
// Simple validation
if (!m_isPlaying || !m_vavCorePlayer) {
return;
}
// Phase 2 Optimization: Start frame timing
m_performanceMonitor->RecordFrameStart();
// Phase 2 Optimization: Start decode timing
m_performanceMonitor->RecordDecodeStart();
// GPU zero-copy path: Decode directly to D3D12 NV12 texture (R8 layout for CUDA interop)
if (m_gpuRenderer && m_useHardwareRendering) {
auto* gpuRenderer = dynamic_cast<SimpleGPURenderer*>(m_gpuRenderer.get());
if (gpuRenderer) {
// Get NV12 texture for CUDA interop
ID3D12Resource* nv12Texture = gpuRenderer->GetNV12TextureForCUDAInterop();
if (nv12Texture) {
VavCoreVideoFrame vavFrame;
VavCoreResult result = vavcore_decode_to_surface(
m_vavCorePlayer,
VAVCORE_SURFACE_D3D12_RESOURCE,
nv12Texture,
&vavFrame
);
m_performanceMonitor->RecordDecodeEnd();
if (result == VAVCORE_END_OF_STREAM) {
m_isPlaying = false;
if (m_playbackTimer) m_playbackTimer.Stop();
UpdateStatus(L"Playback completed");
LogMgr::GetInstance().LogInfo(L"Playback completed - End of stream reached", L"VideoPlayerControl");
return;
}
if (result == VAVCORE_SUCCESS) {
// NV12 texture updated by NVDEC, render to back buffer
m_performanceMonitor->RecordRenderStart();
// CRITICAL: Add small sleep to ensure GPU-GPU synchronization
// cudaDeviceSynchronize() ensures CUDA completion on CPU side,
// but D3D12 GPU queue may still need time to see the writes
// This is a temporary workaround until proper D3D12-CUDA sync is implemented
std::this_thread::sleep_for(std::chrono::milliseconds(1));
// Render NV12 texture to back buffer (YUV to RGB conversion)
// NOTE: RenderNV12TextureToBackBuffer() internally executes command list,
// signals fence, and advances frame index - no separate Present() needed
HRESULT renderHr = gpuRenderer->RenderNV12TextureToBackBuffer();
if (FAILED(renderHr)) {
LogMgr::GetInstance().LogError(L"Failed to render NV12 texture to back buffer", L"VideoPlayerControl");
}
m_performanceMonitor->RecordRenderEnd();
m_currentFrame++;
m_currentTime = m_currentFrame / m_frameRate;
m_performanceMonitor->RecordFrameEnd();
// Note: No need to call vavcore_free_frame for DecodeToSurface
// The frame data is written directly to the D3D12 surface
return;
} else {
// GPU decode failed, fall through to CPU path
m_framesDecodeErrors++;
}
}
}
}
// CPU fallback path: Use traditional CPU decode
VavCoreVideoFrame vavFrame;
VavCoreResult result = vavcore_decode_next_frame(m_vavCorePlayer, &vavFrame);
// Phase 2 Optimization: End decode timing
m_performanceMonitor->RecordDecodeEnd();
if (result == VAVCORE_END_OF_STREAM) {
// End of video - stop playback
m_isPlaying = false;
if (m_playbackTimer) m_playbackTimer.Stop();
UpdateStatus(L"Playback completed");
LogMgr::GetInstance().LogInfo(L"Playback completed - End of stream reached", L"VideoPlayerControl");
return;
}
if (result != VAVCORE_SUCCESS) {
// Decode error - count the failure but keep advancing the position so
// playback continues past corrupt frames
m_framesDecodeErrors++;
m_currentFrame++;
m_currentTime = m_currentFrame / m_frameRate;
// Log decode error occasionally (every 10th error, starting at the 1st)
if (m_framesDecodeErrors % 10 == 1) {
LogMgr::GetInstance().LogError(L"Decode error count: " + std::to_wstring(m_framesDecodeErrors), L"VideoPlayerControl");
wchar_t errorMsg[256];
swprintf_s(errorMsg, L"VavCore decode error #%llu at frame %llu", m_framesDecodeErrors, m_currentFrame);
OutputDebugStringW(errorMsg);
OutputDebugStringW(L"\n");
}
return;
}
// Phase 2 Optimization: Start render timing
m_performanceMonitor->RecordRenderStart();
// Render frame (GPU TryRenderFrame first, then software fallback)
RenderFrameToScreen(vavFrame);
// Phase 2 Optimization: End render timing
m_performanceMonitor->RecordRenderEnd();
// Update counters
m_currentFrame++;
m_currentTime = m_currentFrame / m_frameRate;
// Phase 2 Optimization: End frame timing
m_performanceMonitor->RecordFrameEnd();
// Phase 2 Optimization: Enhanced performance logging every 60 frames
if (m_currentFrame % 60 == 0) {
auto stats = m_performanceMonitor->GetStats();
// Check for adaptive quality adjustment
if (m_performanceMonitor->ShouldReduceQuality()) {
LogMgr::GetInstance().LogDebug(L"VavPlayer: QUALITY REDUCTION triggered - FPS: " +
std::to_wstring(stats.CurrentFPS), L"VideoPlayerControl");
} else if (m_performanceMonitor->ShouldRestoreQuality()) {
LogMgr::GetInstance().LogDebug(L"VavPlayer: QUALITY RESTORATION triggered - FPS: " +
std::to_wstring(stats.CurrentFPS), L"VideoPlayerControl");
}
// Enhanced performance output (substr(0, 4) truncates for readability)
std::wstring perfInfo = L"VavPlayer: PERFORMANCE STATS\n" +
std::wstring(L" FPS: ") + std::to_wstring(stats.CurrentFPS).substr(0, 4) +
L" | Decode: " + std::to_wstring(stats.AverageDecodeTime).substr(0, 4) + L"ms" +
L" | Render: " + std::to_wstring(stats.AverageRenderTime).substr(0, 4) + L"ms\n" +
L" Total: " + std::to_wstring(stats.AverageTotalTime).substr(0, 4) + L"ms" +
L" | Quality Reduction: " + (stats.QualityReductionActive ? L"True" : L"False");
LogMgr::GetInstance().LogDebug(perfInfo, L"VideoPlayerControl");
// Memory Pool Statistics
m_memoryPool->PrintStats();
// Also output to debug console for analysis
std::wstring shortStatus = L"Frame " + std::to_wstring(m_currentFrame) +
L" - FPS: " + std::to_wstring(stats.CurrentFPS).substr(0, 4) +
L", Decode: " + std::to_wstring(stats.AverageDecodeTime).substr(0, 4) + L"ms" +
L", Render: " + std::to_wstring(stats.AverageRenderTime).substr(0, 4) + L"ms";
UpdateStatus(shortStatus.c_str());
OutputDebugStringW((shortStatus + L"\n").c_str());
}
}
// Deprecated entry point kept for source compatibility; forwards to
// ProcessSingleFrame().
void VideoPlayerControl::ProcessSingleFrameLegacy()
{
    ProcessSingleFrame();
}
// Renders one decoded frame: GPU renderer first when enabled, otherwise
// (or on GPU refusal) the software path, with periodic CPU-cost logging.
void VideoPlayerControl::RenderFrameToScreen(const VavCoreVideoFrame& frame)
{
    // Preferred path: hand the frame straight to the GPU renderer.
    if (m_gpuRenderer && m_useHardwareRendering && m_gpuRenderer->TryRenderFrame(frame)) {
        return; // GPU handled it
    }

    // Software fallback (user preference, or GPU declined the frame).
    const auto t0 = std::chrono::high_resolution_clock::now();
    RenderFrameSoftware(frame);
    const auto t1 = std::chrono::high_resolution_clock::now();
    const double elapsedMs = std::chrono::duration<double, std::milli>(t1 - t0).count();

    // Report CPU render cost every 60 frames for debugging.
    if (m_currentFrame % 60 == 0) {
        wchar_t msg[256];
        swprintf_s(msg, L"CPU render time: %.2fms", elapsedMs);
        OutputDebugStringW(msg);
        OutputDebugStringW(L"\n");
    }
}
// CPU fallback renderer: converts one decoded YUV frame into the
// WriteableBitmap displayed by VideoImage.
//
// Bitmaps are recycled through m_memoryPool and only re-created when the
// frame dimensions change. Conversion writes directly into the bitmap's
// pixel buffer, assumed to be BGRA8 tightly packed at width*4 bytes/row.
void VideoPlayerControl::RenderFrameSoftware(const VavCoreVideoFrame& frame)
{
// Reject frames without a luma plane or with degenerate dimensions
if (!frame.y_plane || frame.width == 0 || frame.height == 0) return;
try {
// Phase 2 Optimization: Check if bitmap needs recreation using Memory Pool
bool needNewBitmap = !m_renderBitmap ||
m_lastFrameWidth != static_cast<uint32_t>(frame.width) ||
m_lastFrameHeight != static_cast<uint32_t>(frame.height);
if (needNewBitmap) {
// Phase 2 Optimization: Return old bitmap to pool if exists
if (m_renderBitmap) {
m_memoryPool->ReturnBitmap(m_renderBitmap);
}
// Phase 2 Optimization: Get bitmap from Memory Pool
m_renderBitmap = m_memoryPool->GetBitmap(frame.width, frame.height);
VideoImage().Source(m_renderBitmap);
// Cache dimensions to avoid repeated checks
m_lastFrameWidth = static_cast<uint32_t>(frame.width);
m_lastFrameHeight = static_cast<uint32_t>(frame.height);
// Update video dimensions and apply AspectFit when the stream's
// size differs from what LoadVideo reported
if (m_videoWidth != static_cast<uint32_t>(frame.width) || m_videoHeight != static_cast<uint32_t>(frame.height)) {
m_videoWidth = static_cast<uint32_t>(frame.width);
m_videoHeight = static_cast<uint32_t>(frame.height);
m_hasValidVideoSize = true;
UpdateVideoImageAspectFit(frame.width, frame.height);
}
VideoImage().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Visible);
}
// Fast path: direct conversion into the bitmap's native buffer via
// IBufferByteAccess (avoids an intermediate staging copy)
auto buffer = m_renderBitmap.PixelBuffer();
auto bufferByteAccess = buffer.as<::Windows::Storage::Streams::IBufferByteAccess>();
uint8_t* bufferData = nullptr;
winrt::check_hresult(bufferByteAccess->Buffer(&bufferData));
// Optimized YUV to BGRA conversion (direct to target buffer)
ConvertYUVToBGRA(frame, bufferData, frame.width, frame.height);
buffer.Length(frame.width * frame.height * 4);
// Minimal UI update
m_renderBitmap.Invalidate();
} catch (...) {
// Ignore render errors to maintain playback
}
}
// Converts one planar YUV 4:2:0 frame to packed 8-bit BGRA.
//
// NOTE(review): the fixed-point coefficients below (298/409/100/208/516,
// i.e. 1.164/1.596/0.391/0.813/2.018 in /256 units) are the standard
// limited-range BT.601 matrix, NOT BT.709 as the original comments
// claimed. Confirm which color space the decoder actually emits — HD
// content is normally BT.709 (1.793/0.533/0.213/2.112).
//
// @param yuv_frame   Source frame; y/u/v planes with per-plane strides.
// @param bgra_buffer Destination; assumed tightly packed, width*4 bytes/row.
// @param width       Output width in pixels.
// @param height      Output height in pixels.
void VideoPlayerControl::ConvertYUVToBGRA(const VavCoreVideoFrame& yuv_frame, uint8_t* bgra_buffer, uint32_t width, uint32_t height)
{
const uint8_t* y_plane = yuv_frame.y_plane;
const uint8_t* u_plane = yuv_frame.u_plane;
const uint8_t* v_plane = yuv_frame.v_plane;
// All three planes are required; bail out on partially-filled frames
if (!y_plane || !u_plane || !v_plane) {
return;
}
const uint32_t y_stride = yuv_frame.y_stride;
const uint32_t u_stride = yuv_frame.u_stride;
const uint32_t v_stride = yuv_frame.v_stride;
for (uint32_t y = 0; y < height; y++) {
const uint8_t* y_row = y_plane + y * y_stride;
// 4:2:0 vertical subsampling: one chroma row covers two luma rows
const uint8_t* u_row = u_plane + (y / 2) * u_stride;
const uint8_t* v_row = v_plane + (y / 2) * v_stride;
uint8_t* bgra_row = bgra_buffer + y * width * 4;
for (uint32_t x = 0; x < width; x++) {
const uint8_t Y = y_row[x];
// Horizontal 2:1 subsampling: one U/V sample per two pixels
const uint8_t U = u_row[x / 2];
const uint8_t V = v_row[x / 2];
// Limited-range YUV -> RGB in 8.8 fixed point (+128 rounds)
const int C = Y - 16;
const int D = U - 128;
const int E = V - 128;
int R = (298 * C + 409 * E + 128) >> 8;
int G = (298 * C - 100 * D - 208 * E + 128) >> 8;
int B = (298 * C + 516 * D + 128) >> 8;
// Clamp to [0, 255]
R = std::max(0, std::min(255, R));
G = std::max(0, std::min(255, G));
B = std::max(0, std::min(255, B));
// Store as BGRA
bgra_row[x * 4 + 0] = static_cast<uint8_t>(B); // Blue
bgra_row[x * 4 + 1] = static_cast<uint8_t>(G); // Green
bgra_row[x * 4 + 2] = static_cast<uint8_t>(R); // Red
bgra_row[x * 4 + 3] = 255; // Alpha
}
}
}
// Records the latest human-readable status string (read back via Status()).
void VideoPlayerControl::UpdateStatus(winrt::hstring const& message)
{
    m_status = message;
}
// Picks and activates the rendering path: GPU when the user requested it
// AND the swap-chain renderer initializes; software otherwise.
void VideoPlayerControl::InitializeVideoRenderer()
{
    const bool useGPU = m_useHardwareRendering && TryInitializeGPURenderer();
    // SetRenderingMode(false) already collapses the swap-chain panel and
    // shows VideoImage, so the old extra `if (!useGPU)` fallback branch
    // was an exact duplicate and has been removed.
    SetRenderingMode(useGPU);
}
// GPU rendering methods re-enabled for VavCore
bool VideoPlayerControl::TryInitializeGPURenderer()
{
// Create GPU renderer if needed
if (!m_gpuRenderer) {
m_gpuRenderer = std::make_unique<SimpleGPURenderer>();
}
// Get container dimensions
auto container = VideoDisplayArea();
uint32_t width = static_cast<uint32_t>(container.ActualWidth());
uint32_t height = static_cast<uint32_t>(container.ActualHeight());
// Container must be ready with valid dimensions
if (width == 0 || height == 0) {
return false;
}
// Initialize GPU renderer
HRESULT hr = m_gpuRenderer->InitializeWithSwapChain(VideoSwapChainPanel(), width, height);
if (FAILED(hr)) {
return false;
}
// Pass D3D12 device to VavCore for zero-copy GPU pipeline
if (m_vavCorePlayer) {
auto* gpuRenderer = dynamic_cast<SimpleGPURenderer*>(m_gpuRenderer.get());
if (gpuRenderer) {
ID3D12Device* d3d12Device = gpuRenderer->GetD3D12Device();
if (d3d12Device) {
vavcore_set_d3d_device(m_vavCorePlayer, d3d12Device, VAVCORE_SURFACE_D3D12_RESOURCE);
OutputDebugStringW(L"[VideoPlayerControl] D3D12 device passed to VavCore\n");
}
}
}
return true;
}
void VideoPlayerControl::SetRenderingMode(bool useGPU)
{
if (useGPU) {
VideoSwapChainPanel().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Visible);
VideoImage().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Collapsed);
} else {
VideoSwapChainPanel().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Collapsed);
VideoImage().Visibility(winrt::Microsoft::UI::Xaml::Visibility::Visible);
}
}
// Returns playback bookkeeping to a pristine, pre-load state.
void VideoPlayerControl::ResetVideoState()
{
    // Position and playback flags.
    m_currentFrame = 0;
    m_currentTime = 0.0;
    m_isLoaded = false;
    m_isPlaying = false;

    // Invalidate cached dimensions used by AspectFit.
    m_hasValidVideoSize = false;
    m_videoWidth = 0;
    m_videoHeight = 0;

    // Halt the UI playback timer if one is running.
    if (m_playbackTimer)
    {
        m_playbackTimer.Stop();
    }
}
// Applies AspectFit sizing, but only once a video is loaded with known
// dimensions AND the container has been laid out with a real size.
void VideoPlayerControl::ApplyAspectFitIfReady()
{
    if (!m_hasValidVideoSize || !m_isLoaded) {
        return;
    }
    auto container = VideoDisplayArea();
    if (!container) {
        return;
    }
    const double cw = container.ActualWidth();
    const double ch = container.ActualHeight();
    if (cw <= 0 || ch <= 0) {
        return; // layout pass hasn't produced a size yet
    }
    UpdateVideoImageAspectFit(m_videoWidth, m_videoHeight);
}
// Sizes both output surfaces (Image + SwapChainPanel) to the largest
// letterboxed rectangle with the video's aspect ratio that fits the
// container ("AspectFit").
//
// @param videoWidth  Video width in pixels (must be > 0).
// @param videoHeight Video height in pixels (must be > 0).
void VideoPlayerControl::UpdateVideoImageAspectFit(int videoWidth, int videoHeight)
{
    // Robustness fix: guard the aspect-ratio division below — the old code
    // divided by videoHeight unconditionally (UB for 0).
    if (videoWidth <= 0 || videoHeight <= 0) {
        return;
    }
    // Store video dimensions for future use
    m_videoWidth = static_cast<uint32_t>(videoWidth);
    m_videoHeight = static_cast<uint32_t>(videoHeight);
    m_hasValidVideoSize = true;

    // The container must exist and have a laid-out, positive size.
    auto container = VideoDisplayArea();
    if (!container) {
        return;
    }
    double containerWidth = container.ActualWidth();
    double containerHeight = container.ActualHeight();
    if (containerWidth <= 0 || containerHeight <= 0) {
        return;
    }

    const double videoAspectRatio = static_cast<double>(videoWidth) / videoHeight;
    const double containerAspectRatio = containerWidth / containerHeight;
    double displayWidth, displayHeight;
    if (videoAspectRatio > containerAspectRatio) {
        // Video is wider - fit to container width
        displayWidth = containerWidth;
        displayHeight = containerWidth / videoAspectRatio;
    } else {
        // Video is taller - fit to container height
        displayHeight = containerHeight;
        displayWidth = containerHeight * videoAspectRatio;
    }

    // Apply AspectFit to both CPU and GPU rendering controls
    VideoImage().Width(displayWidth);
    VideoImage().Height(displayHeight);
    VideoImage().MaxWidth(displayWidth);
    VideoImage().MaxHeight(displayHeight);
    // Also apply to GPU rendering SwapChainPanel
    VideoSwapChainPanel().Width(displayWidth);
    VideoSwapChainPanel().Height(displayHeight);
}
// Seeks to the given time (seconds). Playback is paused during the seek
// and resumed afterwards if it was running; when paused, the frame at the
// seek target is still decoded and displayed.
//
// @param timeSeconds Target position in seconds.
void VideoPlayerControl::Seek(double timeSeconds)
{
    if (!m_isLoaded || !m_vavCorePlayer) return;

    // Suspend playback while seeking (Pause joins the timing thread).
    const bool wasPlaying = m_isPlaying;
    if (m_isPlaying) {
        Pause();
    }

    VavCoreResult result = vavcore_seek_to_time(m_vavCorePlayer, timeSeconds);
    if (result == VAVCORE_SUCCESS) {
        m_currentTime = timeSeconds;
        m_currentFrame = static_cast<uint64_t>(timeSeconds * m_frameRate);
        if (wasPlaying) {
            // Play() decodes/presents the first frame itself.
            Play();
        } else {
            // BUGFIX: ProcessSingleFrame() early-returns when m_isPlaying is
            // false, so after Pause() the old code never refreshed the
            // display. Briefly flip the flag so the seeked frame is shown
            // while staying paused; the timing thread is already joined, so
            // nothing else observes the transient value.
            m_isPlaying = true;
            ProcessSingleFrame();
            m_isPlaying = false;
        }
        UpdateStatus(L"Seeked");
    } else {
        UpdateStatus(L"Seek failed");
    }
}
// Whether playback is currently active.
bool VideoPlayerControl::IsVideoPlaying() { return m_isPlaying; }
// Whether a video has been successfully loaded.
bool VideoPlayerControl::IsVideoLoaded() { return m_isLoaded; }
// Current playback position, in seconds.
double VideoPlayerControl::CurrentTime() { return m_currentTime; }
// Media duration in seconds (as recorded at load time).
double VideoPlayerControl::Duration() { return m_duration; }
// Last status text set via UpdateStatus().
winrt::hstring VideoPlayerControl::Status() { return m_status; }
void VideoPlayerControl::LoadDecoderSettings()
{
try {
// Load from Windows.Storage.ApplicationData.Current.LocalSettings
auto localSettings = winrt::Windows::Storage::ApplicationData::Current().LocalSettings();
auto values = localSettings.Values();
// Load decoder type (default: AUTO)
if (values.HasKey(L"DecoderType")) {
auto decoderValue = values.Lookup(L"DecoderType");
if (decoderValue) {
int32_t decoderInt = winrt::unbox_value<int32_t>(decoderValue);
m_decoderType = static_cast<VavCoreDecoderType>(decoderInt);
// Log loaded decoder setting
std::wstring decoderName = L"Unknown";
switch (m_decoderType) {
case VAVCORE_DECODER_AUTO: decoderName = L"Auto"; break;
case VAVCORE_DECODER_DAV1D: decoderName = L"Software (dav1d)"; break;
case VAVCORE_DECODER_MEDIA_FOUNDATION: decoderName = L"Hardware (Media Foundation)"; break;
case VAVCORE_DECODER_NVDEC: decoderName = L"Hardware (NVDEC)"; break;
case VAVCORE_DECODER_VPL: decoderName = L"Hardware (Intel VPL)"; break;
case VAVCORE_DECODER_AMF: decoderName = L"Hardware (AMD AMF)"; break;
}
LogMgr::GetInstance().LogInfo(L"Loaded decoder setting: " + decoderName, L"VideoPlayerControl");
}
} else {
m_decoderType = VAVCORE_DECODER_AUTO;
LogMgr::GetInstance().LogInfo(L"Using default decoder: Auto", L"VideoPlayerControl");
}
} catch (...) {
// If settings loading fails, use default
m_decoderType = VAVCORE_DECODER_AUTO;
LogMgr::GetInstance().LogWarning(L"Failed to load decoder settings, using default: Auto", L"VideoPlayerControl");
}
}
// Re-reads the persisted decoder preference and, when a video is loaded,
// applies the (possibly changed) choice to the live VavCore player.
void VideoPlayerControl::RefreshDecoderSettings()
{
    LoadDecoderSettings();
    // Nothing to apply unless a player exists and a clip is loaded.
    if (!m_vavCorePlayer || !m_isLoaded) {
        return;
    }
    vavcore_set_decoder_type(m_vavCorePlayer, m_decoderType);
    // Resolve a display name for the applied decoder.
    std::wstring decoderName;
    switch (m_decoderType) {
    case VAVCORE_DECODER_AUTO: decoderName = L"Auto"; break;
    case VAVCORE_DECODER_DAV1D: decoderName = L"Software (dav1d)"; break;
    case VAVCORE_DECODER_MEDIA_FOUNDATION: decoderName = L"Hardware (Media Foundation)"; break;
    case VAVCORE_DECODER_NVDEC: decoderName = L"Hardware (NVDEC)"; break;
    case VAVCORE_DECODER_VPL: decoderName = L"Hardware (Intel VPL)"; break;
    case VAVCORE_DECODER_AMF: decoderName = L"Hardware (AMD AMF)"; break;
    default: decoderName = L"Unknown"; break;
    }
    LogMgr::GetInstance().LogInfo(L"Applied new decoder setting: " + decoderName, L"VideoPlayerControl");
}
// D3D Surface Support Methods
// Probes VavCore for GPU surface support and, when available, wires a D3D
// device into the decoder for direct-to-surface output.
// @return true when a GPU surface path was enabled; false means the CPU
//         decode output path will be used (decode itself may still be
//         hardware accelerated).
bool VideoPlayerControl::InitializeD3DSurfaceSupport()
{
    try {
        // Reset to the CPU default first so a repeated call cannot observe a
        // stale surface type from a previous initialization attempt. The
        // original code relied on the member's prior value being
        // VAVCORE_SURFACE_CPU for the "nothing supported" check below.
        m_supportedSurfaceType = VAVCORE_SURFACE_CPU;
        // Probe in priority order: CUDA (NVIDIA) > D3D12 > AMF (AMD) > D3D11.
        VavCoreSurfaceType supportedTypes[] = {
            VAVCORE_SURFACE_CUDA_DEVICE,    // CUDA device memory (NVIDIA NVDEC)
            VAVCORE_SURFACE_D3D12_RESOURCE, // D3D12 resource
            VAVCORE_SURFACE_AMF_SURFACE,    // AMD AMF surface
            VAVCORE_SURFACE_D3D11_TEXTURE   // D3D11 texture (fallback)
        };
        for (auto surfaceType : supportedTypes) {
            if (vavcore_supports_surface_type(m_vavCorePlayer, surfaceType)) {
                m_supportedSurfaceType = surfaceType;
                break;
            }
        }
        if (m_supportedSurfaceType == VAVCORE_SURFACE_CPU) {
            LogMgr::GetInstance().LogInfo(L"No D3D surface types supported, using CPU decoding", L"VideoPlayerControl");
            return false;
        }
        // Map the selected surface type to a display name for logging.
        std::wstring surfaceTypeName;
        switch (m_supportedSurfaceType) {
        case VAVCORE_SURFACE_D3D11_TEXTURE: surfaceTypeName = L"D3D11"; break;
        case VAVCORE_SURFACE_D3D12_RESOURCE: surfaceTypeName = L"D3D12"; break;
        case VAVCORE_SURFACE_CUDA_DEVICE: surfaceTypeName = L"CUDA"; break;
        case VAVCORE_SURFACE_AMF_SURFACE: surfaceTypeName = L"AMF"; break;
        default: surfaceTypeName = L"Unknown"; break;
        }
        LogMgr::GetInstance().LogInfo(
            L"Initializing D3D surface support (" + surfaceTypeName + L")...",
            L"VideoPlayerControl"
        );
        // Only the D3D11 path is wired up today; CUDA/D3D12/AMF fall through
        // to the CPU decode path below.
        if (m_supportedSurfaceType == VAVCORE_SURFACE_D3D11_TEXTURE) {
            if (CreateD3D11Device()) {
                VavCoreResult result = vavcore_set_d3d_device(m_vavCorePlayer, m_d3dDevice, m_supportedSurfaceType);
                if (result == VAVCORE_SUCCESS) {
                    m_useD3DSurfaces = true;
                    LogMgr::GetInstance().LogInfo(L"D3D11 surface decoding enabled successfully", L"VideoPlayerControl");
                    return true;
                } else {
                    LogMgr::GetInstance().LogWarning(L"Failed to set D3D11 device to VavCore", L"VideoPlayerControl");
                }
            } else {
                LogMgr::GetInstance().LogWarning(L"Failed to create D3D11 device", L"VideoPlayerControl");
            }
        }
        // Fall back to the CPU decode output path.
        LogMgr::GetInstance().LogInfo(
            L"D3D surface support not initialized - using CPU decode path",
            L"VideoPlayerControl"
        );
        LogMgr::GetInstance().LogInfo(
            L"Note: CPU decode path still provides full hardware acceleration (NVDEC/VPL/AMF), only final output uses CPU memory",
            L"VideoPlayerControl"
        );
        return false;
    }
    catch (...) {
        LogMgr::GetInstance().LogError(L"Exception during D3D surface initialization", L"VideoPlayerControl");
        return false;
    }
}
void VideoPlayerControl::ProcessSingleFrameWithSurfaces()
{
try {
// Simple validation
if (!m_isPlaying || !m_vavCorePlayer) {
return;
}
auto totalStart = std::chrono::high_resolution_clock::now();
// Create or reuse D3D texture for this frame
void* d3dTexture = nullptr;
if (!CreateD3DTexture(m_videoWidth, m_videoHeight, &d3dTexture)) {
LogMgr::GetInstance().LogError(L"Failed to create D3D texture", L"VideoPlayerControl");
return;
}
// Decode directly to D3D surface
VavCoreVideoFrame vavFrame;
VavCoreResult result = vavcore_decode_to_surface(m_vavCorePlayer, m_supportedSurfaceType, d3dTexture, &vavFrame);
if (result == VAVCORE_END_OF_STREAM) {
// End of video - stop playback
m_isPlaying = false;
if (m_playbackTimer) m_playbackTimer.Stop();
UpdateStatus(L"Playback completed");
LogMgr::GetInstance().LogInfo(L"Playback completed - End of stream reached", L"VideoPlayerControl");
return;
}
if (result != VAVCORE_SUCCESS) {
// Decode error - count but continue processing
m_framesDecodeErrors++;
m_currentFrame++;
m_currentTime = m_currentFrame / m_frameRate;
// Log decode error occasionally
if (m_framesDecodeErrors % 10 == 1) {
LogMgr::GetInstance().LogError(L"D3D surface decode error count: " + std::to_wstring(m_framesDecodeErrors), L"VideoPlayerControl");
}
return;
}
// Render D3D surface directly to screen
RenderD3DSurfaceToScreen(d3dTexture, vavFrame);
// Update counters
m_currentFrame++;
m_currentTime = m_currentFrame / m_frameRate;
// Free VavCore frame (surface data remains in d3dTexture)
vavcore_free_frame(&vavFrame);
}
catch (...) {
LogMgr::GetInstance().LogError(L"Exception in ProcessSingleFrameWithSurfaces", L"VideoPlayerControl");
}
}
// Creates a hardware D3D11 device for GPU surface decoding and stores an
// owning reference in m_d3dDevice (released by ReleaseD3D11Device()).
// @return true on success; false on creation failure or exception.
bool VideoPlayerControl::CreateD3D11Device()
{
    try {
        // Formats a 32-bit value as fixed-width uppercase hex. The original
        // logged std::to_wstring(hr) (decimal) behind a "0x" prefix, which
        // produced misleading HRESULT values in the log.
        auto toHex = [](unsigned long value) {
            std::wstring text(8, L'0');
            for (int i = 7; i >= 0; --i, value >>= 4) {
                text[i] = L"0123456789ABCDEF"[value & 0xF];
            }
            return text;
        };
        ComPtr<ID3D11Device> d3d11Device;
        ComPtr<ID3D11DeviceContext> d3d11Context;
        D3D_FEATURE_LEVEL featureLevel;
        // Request the highest available feature level first.
        D3D_FEATURE_LEVEL featureLevels[] = {
            D3D_FEATURE_LEVEL_11_1,
            D3D_FEATURE_LEVEL_11_0,
            D3D_FEATURE_LEVEL_10_1,
            D3D_FEATURE_LEVEL_10_0
        };
        UINT createDeviceFlags = 0;
#ifdef _DEBUG
        createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
        HRESULT hr = D3D11CreateDevice(
            nullptr,                    // Default adapter
            D3D_DRIVER_TYPE_HARDWARE,   // Hardware acceleration
            nullptr,                    // No software rasterizer
            createDeviceFlags,
            featureLevels,
            ARRAYSIZE(featureLevels),
            D3D11_SDK_VERSION,
            &d3d11Device,
            &featureLevel,
            &d3d11Context
        );
        if (FAILED(hr)) {
            LogMgr::GetInstance().LogError(
                L"D3D11CreateDevice failed with HRESULT: 0x" + toHex(static_cast<unsigned long>(hr)),
                L"VideoPlayerControl"
            );
            return false;
        }
        // Transfer ownership of the device reference into m_d3dDevice.
        // Detach() is equivalent to the original Get() + AddRef() pair but
        // cannot accidentally leave an extra reference count behind.
        m_d3dDevice = d3d11Device.Detach();
        LogMgr::GetInstance().LogInfo(
            L"D3D11 device created successfully with feature level: 0x" + toHex(static_cast<unsigned long>(featureLevel)),
            L"VideoPlayerControl"
        );
        return true;
    }
    catch (...) {
        LogMgr::GetInstance().LogError(L"Exception during D3D11 device creation", L"VideoPlayerControl");
        return false;
    }
}
// Releases the owning reference stored in m_d3dDevice (if any) and clears it.
void VideoPlayerControl::ReleaseD3D11Device()
{
    if (!m_d3dDevice) {
        return;
    }
    // m_d3dDevice holds one reference taken at creation time; drop it.
    static_cast<ID3D11Device*>(m_d3dDevice)->Release();
    m_d3dDevice = nullptr;
    LogMgr::GetInstance().LogInfo(L"D3D11 device released", L"VideoPlayerControl");
}
// Creates an NV12 D3D11 texture for hardware-decoder output and returns an
// AddRef'd pointer through *texture (caller must Release it).
// @param width   luma width in pixels (must be even for NV12)
// @param height  luma height in pixels (must be even for NV12)
// @param texture receives the new ID3D11Texture2D*; untouched on failure
// @return true on success; false on invalid args or creation failure.
bool VideoPlayerControl::CreateD3DTexture(uint32_t width, uint32_t height, void** texture)
{
    if (!m_d3dDevice || !texture) {
        return false;
    }
    try {
        auto* d3d11Device = static_cast<ID3D11Device*>(m_d3dDevice);
        // DXGI_FORMAT_NV12 requires even width and height.
        if (width == 0 || height == 0 || (width % 2) != 0 || (height % 2) != 0) {
            LogMgr::GetInstance().LogError(
                L"Invalid NV12 texture dimensions: " + std::to_wstring(width) + L"x" + std::to_wstring(height),
                L"VideoPlayerControl"
            );
            return false;
        }
        // For DXGI_FORMAT_NV12 the texture is described with the luma (Y)
        // plane height only; the driver allocates the interleaved UV plane
        // implicitly. The original code passed height * 1.5, describing a
        // taller NV12 surface than decoders write to.
        // NOTE(review): confirm VavCore's decode-to-surface path expects the
        // standard NV12 texture layout rather than the oversized one.
        D3D11_TEXTURE2D_DESC texDesc = {};
        texDesc.Width = width;
        texDesc.Height = height;
        texDesc.MipLevels = 1;
        texDesc.ArraySize = 1;
        texDesc.Format = DXGI_FORMAT_NV12; // Standard format for video decoding
        texDesc.SampleDesc.Count = 1;
        texDesc.SampleDesc.Quality = 0;
        texDesc.Usage = D3D11_USAGE_DEFAULT;
        texDesc.BindFlags = D3D11_BIND_DECODER | D3D11_BIND_SHADER_RESOURCE;
        texDesc.CPUAccessFlags = 0;
        texDesc.MiscFlags = 0;
        ComPtr<ID3D11Texture2D> d3d11Texture;
        HRESULT hr = d3d11Device->CreateTexture2D(&texDesc, nullptr, &d3d11Texture);
        if (FAILED(hr)) {
            LogMgr::GetInstance().LogError(
                L"Failed to create D3D11 texture: HRESULT 0x" + std::to_wstring(hr),
                L"VideoPlayerControl"
            );
            return false;
        }
        // Hand the caller an owning reference (caller must Release).
        *texture = d3d11Texture.Detach();
        return true;
    }
    catch (...) {
        LogMgr::GetInstance().LogError(L"Exception during D3D11 texture creation", L"VideoPlayerControl");
        return false;
    }
}
// Stub: presenting a decoded GPU surface is not implemented yet; logs an
// error so callers notice the missing pipeline.
void VideoPlayerControl::RenderD3DSurfaceToScreen(void* d3dTexture, const VavCoreVideoFrame& frame)
{
    // TODO: Implement zero-copy CUDA → D3D12 pipeline
    // 1. NVDEC decodes to CUDA device memory
    // 2. cuGraphicsD3D12RegisterResource registers D3D12 texture with CUDA
    // 3. Direct CUDA to D3D12 copy (no CPU involvement)
    // 4. SimpleGPURenderer renders NV12 texture
    // Explicitly mark the parameters as unused until the pipeline exists,
    // so /W4 builds do not emit unreferenced-parameter warnings here.
    (void)d3dTexture;
    (void)frame;
    LogMgr::GetInstance().LogError(L"Zero-copy D3D12 pipeline not yet implemented", L"VideoPlayerControl");
}
// ===============================
// Phase 2 Optimization: Memory Pool Implementation
// ===============================
// Returns a pooled WriteableBitmap of the requested size when one is
// available; otherwise allocates a new one. Thread-safe via _poolMutex.
winrt::Microsoft::UI::Xaml::Media::Imaging::WriteableBitmap VideoPlayerControl::MemoryPool::GetBitmap(uint32_t width, uint32_t height)
{
    std::lock_guard<std::mutex> lock(_poolMutex);
    if (!_bitmapPool.empty()) {
        auto candidate = _bitmapPool.front();
        _bitmapPool.pop();
        const bool sizeMatches =
            candidate.PixelWidth() == static_cast<int32_t>(width) &&
            candidate.PixelHeight() == static_cast<int32_t>(height);
        if (sizeMatches) {
            _bitmapPoolHits++;
            return candidate;
        }
        // Wrong size: drop the pooled bitmap and fall through to allocate.
    }
    _bitmapPoolMisses++;
    return winrt::Microsoft::UI::Xaml::Media::Imaging::WriteableBitmap(width, height);
}
// Returns a bitmap to the pool for reuse. Full pool or a null bitmap means
// the object is simply dropped and reclaimed by the runtime.
void VideoPlayerControl::MemoryPool::ReturnBitmap(winrt::Microsoft::UI::Xaml::Media::Imaging::WriteableBitmap bitmap)
{
    std::lock_guard<std::mutex> lock(_poolMutex);
    const bool hasRoom = _bitmapPool.size() < MAX_POOL_SIZE;
    if (hasRoom && bitmap) {
        _bitmapPool.push(bitmap);
    }
}
// Returns a byte buffer of exactly `size` bytes, recycling a pooled buffer's
// capacity when a large-enough one is available. Thread-safe via _poolMutex.
std::vector<uint8_t> VideoPlayerControl::MemoryPool::GetBuffer(size_t size)
{
    std::lock_guard<std::mutex> lock(_poolMutex);
    if (!_bufferPool.empty()) {
        // Move the pooled buffer out instead of copying it. The original
        // `auto buffer = _bufferPool.front();` copied the entire vector
        // before popping, defeating the purpose of the pool.
        auto buffer = std::move(_bufferPool.front());
        _bufferPool.pop();
        if (buffer.size() >= size) {
            _bufferPoolHits++;
            buffer.resize(size); // Shrink to the exact size needed
            return buffer;
        }
        // Too small: drop it and fall through to a fresh allocation.
    }
    _bufferPoolMisses++;
    return std::vector<uint8_t>(size);
}
// Returns a buffer to the pool so its capacity can be reused. When the pool
// is full the vector simply goes out of scope and is destroyed.
void VideoPlayerControl::MemoryPool::ReturnBuffer(std::vector<uint8_t> buffer)
{
    std::lock_guard<std::mutex> lock(_poolMutex);
    if (_bufferPool.size() >= MAX_POOL_SIZE) {
        return;
    }
    _bufferPool.push(std::move(buffer));
}
// Logs hit-rate statistics for the bitmap and buffer pools (debug level).
// Pools with no requests yet are skipped.
void VideoPlayerControl::MemoryPool::PrintStats()
{
    std::lock_guard<std::mutex> lock(_poolMutex);
    // Shared formatter for both pools; skips pools that saw no requests.
    auto logHitRate = [](const std::wstring& label, int hits, int misses) {
        const int total = hits + misses;
        if (total <= 0) {
            return;
        }
        const double hitRate = (static_cast<double>(hits) / total) * 100.0;
        LogMgr::GetInstance().LogDebug(
            L"Memory Pool Stats - " + label + L": " + std::to_wstring(hitRate) +
            L"% hit rate (" + std::to_wstring(hits) + L"/" + std::to_wstring(total) + L")",
            L"VideoPlayerControl");
    };
    logHitRate(L"Bitmap", _bitmapPoolHits, _bitmapPoolMisses);
    logHitRate(L"Buffer", _bufferPoolHits, _bufferPoolMisses);
}
// ===============================
// Phase 2 Optimization: Advanced Performance Monitor Implementation
// ===============================
// Stamps the start of a full frame cycle; paired with RecordFrameEnd().
void VideoPlayerControl::AdvancedPerformanceMonitor::RecordFrameStart()
{
_frameStartTime = std::chrono::high_resolution_clock::now();
}
// Stamps the start of the decode phase; paired with RecordDecodeEnd().
void VideoPlayerControl::AdvancedPerformanceMonitor::RecordDecodeStart()
{
_decodeStartTime = std::chrono::high_resolution_clock::now();
}
void VideoPlayerControl::AdvancedPerformanceMonitor::RecordDecodeEnd()
{
auto decodeEndTime = std::chrono::high_resolution_clock::now();
auto decodeTime = std::chrono::duration<double, std::milli>(decodeEndTime - _decodeStartTime).count();
_decodingTimes.push(decodeTime);
if (_decodingTimes.size() > SAMPLE_SIZE) {
_decodingTimes.pop();
}
}
// Stamps the start of the render phase; paired with RecordRenderEnd().
void VideoPlayerControl::AdvancedPerformanceMonitor::RecordRenderStart()
{
_renderStartTime = std::chrono::high_resolution_clock::now();
}
void VideoPlayerControl::AdvancedPerformanceMonitor::RecordRenderEnd()
{
auto renderEndTime = std::chrono::high_resolution_clock::now();
auto renderTime = std::chrono::duration<double, std::milli>(renderEndTime - _renderStartTime).count();
_renderingTimes.push(renderTime);
if (_renderingTimes.size() > SAMPLE_SIZE) {
_renderingTimes.pop();
}
}
// Records the total wall-clock time for the frame (ms) and feeds the
// adaptive-quality state machine with the measurement.
void VideoPlayerControl::AdvancedPerformanceMonitor::RecordFrameEnd()
{
    const auto now = std::chrono::high_resolution_clock::now();
    const double elapsedMs = std::chrono::duration<double, std::milli>(now - _frameStartTime).count();
    _totalFrameTimes.push(elapsedMs);
    // Bound the rolling window to SAMPLE_SIZE entries.
    while (_totalFrameTimes.size() > SAMPLE_SIZE) {
        _totalFrameTimes.pop();
    }
    CheckForQualityAdjustment(elapsedMs);
}
// Builds a snapshot of the rolling performance averages.
// Averages are 0.0 for sample windows that are still empty; CurrentFPS is
// derived from the average total frame time.
VideoPlayerControl::AdvancedPerformanceMonitor::PerformanceStats VideoPlayerControl::AdvancedPerformanceMonitor::GetStats()
{
    // Arithmetic mean of a sample queue. std::queue has no iterators, so the
    // queue is copied and drained. Returns 0.0 for an empty queue.
    // (Replaces three copy-pasted averaging loops in the original.)
    auto averageOf = [](const std::queue<double>& samples) -> double {
        if (samples.empty()) {
            return 0.0;
        }
        double sum = 0.0;
        std::queue<double> copy = samples;
        while (!copy.empty()) {
            sum += copy.front();
            copy.pop();
        }
        return sum / samples.size();
    };
    PerformanceStats stats = {};
    stats.AverageDecodeTime = averageOf(_decodingTimes);
    stats.AverageRenderTime = averageOf(_renderingTimes);
    stats.AverageTotalTime = averageOf(_totalFrameTimes);
    // Guard the division so a zero average (no samples / sub-resolution
    // timings) yields FPS 0 instead of infinity.
    if (stats.AverageTotalTime > 0.0) {
        stats.CurrentFPS = 1000.0 / stats.AverageTotalTime;
    }
    stats.QualityReductionActive = _qualityReductionActive;
    return stats;
}
// Quality should drop once SLOW_FRAME_THRESHOLD consecutive slow frames have
// been observed while reduction is not already active.
bool VideoPlayerControl::AdvancedPerformanceMonitor::ShouldReduceQuality()
{
return (_consecutiveSlowFrames >= SLOW_FRAME_THRESHOLD && !_qualityReductionActive);
}
// Quality should be restored once FAST_FRAME_THRESHOLD consecutive fast
// frames have been observed while reduction is active.
bool VideoPlayerControl::AdvancedPerformanceMonitor::ShouldRestoreQuality()
{
return (_consecutiveFastFrames >= FAST_FRAME_THRESHOLD && _qualityReductionActive);
}
// Classifies one frame time against the slow/fast thresholds, maintains the
// consecutive-frame streak counters, and toggles quality reduction when a
// streak crosses its threshold.
void VideoPlayerControl::AdvancedPerformanceMonitor::CheckForQualityAdjustment(double frameTime)
{
    // Frame-time thresholds in milliseconds.
    constexpr double SLOW_THRESHOLD = 40.0; // slower than 25 fps
    constexpr double FAST_THRESHOLD = 25.0; // faster than 40 fps
    if (frameTime > SLOW_THRESHOLD) {
        ++_consecutiveSlowFrames;
        _consecutiveFastFrames = 0;
    } else if (frameTime < FAST_THRESHOLD) {
        ++_consecutiveFastFrames;
        _consecutiveSlowFrames = 0;
    } else {
        // Moderate frame time: break both streaks.
        _consecutiveSlowFrames = 0;
        _consecutiveFastFrames = 0;
    }
    // Flip quality reduction on/off once a streak crosses its threshold.
    if (ShouldReduceQuality()) {
        _qualityReductionActive = true;
        LogMgr::GetInstance().LogDebug(L"QUALITY REDUCTION triggered - Frame time: " + std::to_wstring(frameTime) + L"ms", L"VideoPlayerControl");
    } else if (ShouldRestoreQuality()) {
        _qualityReductionActive = false;
        LogMgr::GetInstance().LogDebug(L"QUALITY RESTORATION triggered - Frame time: " + std::to_wstring(frameTime) + L"ms", L"VideoPlayerControl");
    }
}
}