Clean up logging
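The diff below replaces ad-hoc OutputDebugStringA / sprintf_s / std::cout logging with the printf-style LOGF_* macros from ../Common/VavCoreLogger.h. That header itself is not part of this commit, so as a reading aid here is a minimal sketch of what such a macro layer could look like; only the LOGF_DEBUG/LOGF_INFO/LOGF_WARNING/LOGF_ERROR names come from the diff, while LogLevel, LogMessage, and the stderr sink are assumptions.

// Illustrative sketch only: the real VavCoreLogger.h is not shown in this commit.
// LOGF_* names match the diff; LogLevel, LogMessage, and the stderr sink are assumed.
#pragma once
#include <cstdarg>
#include <cstdio>

namespace VavCore {

enum class LogLevel { Debug, Info, Warning, Error };

// Hypothetical printf-style sink; a real implementation could also route to
// OutputDebugStringA, a file, or apply level-based filtering.
inline void LogMessage(LogLevel level, const char* fmt, ...) {
    (void)level; // filtering by level omitted in this sketch
    va_list args;
    va_start(args, fmt);
    std::vfprintf(stderr, fmt, args);
    std::fputc('\n', stderr);
    va_end(args);
}

} // namespace VavCore

#define LOGF_DEBUG(...)   ::VavCore::LogMessage(::VavCore::LogLevel::Debug,   __VA_ARGS__)
#define LOGF_INFO(...)    ::VavCore::LogMessage(::VavCore::LogLevel::Info,    __VA_ARGS__)
#define LOGF_WARNING(...) ::VavCore::LogMessage(::VavCore::LogLevel::Warning, __VA_ARGS__)
#define LOGF_ERROR(...)   ::VavCore::LogMessage(::VavCore::LogLevel::Error,   __VA_ARGS__)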
@@ -1,7 +1,7 @@
#include "pch.h"
#include "AV1Decoder.h"
#include "VideoDecoderFactory.h"
#include <iostream>
#include "../Common/VavCoreLogger.h"
#include <cstring>
#include <thread>
#include <chrono>
@@ -56,10 +56,10 @@ bool AV1Decoder::Initialize(const VideoMetadata& metadata) {
m_initialized = true;
std::cout << "[AV1Decoder_Headless] Initialized successfully" << std::endl;
std::cout << "  Resolution: " << metadata.width << "x" << metadata.height << std::endl;
std::cout << "  Frame rate: " << metadata.frame_rate << " fps" << std::endl;
std::cout << "  Threads: " << settings.n_threads << std::endl;
LOGF_INFO("[AV1Decoder] Initialized successfully");
LOGF_DEBUG("[AV1Decoder] Resolution: %dx%d", metadata.width, metadata.height);
LOGF_DEBUG("[AV1Decoder] Frame rate: %.2f fps", metadata.frame_rate);
LOGF_DEBUG("[AV1Decoder] Threads: %d", settings.n_threads);
return true;
}
@@ -257,7 +257,7 @@ void AV1Decoder::UpdateDecodingStats(double decode_time_ms, size_t packet_size)
}
void AV1Decoder::LogError(const std::string& message) {
std::cout << "[AV1Decoder_Headless ERROR] " << message << std::endl;
LOGF_ERROR("[AV1Decoder] %s", message.c_str());
}
void AV1Decoder::ApplyOptimalSettingsForResolution(uint32_t width, uint32_t height) {
@@ -280,8 +280,8 @@ void AV1Decoder::ApplyOptimalSettingsForResolution(uint32_t width, uint32_t heig
SetAV1Settings(settings);
std::cout << "[AV1Decoder_Headless] Applied optimal settings for " << width << "x" << height
<< " (threads=" << settings.num_threads << ", delay=" << settings.max_frame_delay << ")" << std::endl;
LOGF_INFO("[AV1Decoder] Applied optimal settings for %dx%d (threads=%d, delay=%d)",
width, height, settings.num_threads, settings.max_frame_delay);
}
} // namespace VavCore
@@ -2,6 +2,7 @@
#include "AdaptiveAV1Decoder.h"
#include <algorithm>
#include <cmath>
#include "../Common/VavCoreLogger.h"
namespace VavCore {
@@ -28,7 +29,7 @@ bool AdaptiveAV1Decoder::Initialize(const VideoMetadata& metadata) {
// Initialize the base dav1d decoder
bool result = AV1Decoder::Initialize(metadata);
if (result) {
OutputDebugStringA("[AdaptiveAV1Decoder] Initialized with adaptive quality control (post-decode scaling)\n");
LOGF_DEBUG("[AdaptiveAV1Decoder] Initialized with adaptive quality control (post-decode scaling)");
}
return result;
@@ -67,7 +68,7 @@ bool AdaptiveAV1Decoder::DecodeFrame(const uint8_t* packet_data, size_t packet_s
if (!scale_success) {
// Fallback: use full resolution frame
output_frame = std::move(full_resolution_frame);
OutputDebugStringA("[AdaptiveAV1Decoder] Scaling failed, using full resolution\n");
LOGF_ERROR("[AdaptiveAV1Decoder] Scaling failed, using full resolution");
}
} else {
// ULTRA quality: use full resolution frame directly
@@ -1,6 +1,7 @@
// Example usage and integration guide for AdaptiveNVDECDecoder
#include "pch.h"
#include "AdaptiveNVDECDecoder.h"
#include "../Common/VavCoreLogger.h"
namespace VavCore {
@@ -23,7 +24,7 @@ public:
bool result = m_adaptiveDecoder->Initialize(metadata, config);
if (result) {
OutputDebugStringA("[AdaptiveVideoPlayer] Adaptive decoding initialized successfully\n");
LOGF_DEBUG("[AdaptiveVideoPlayer] Adaptive decoding initialized successfully");
// Start performance monitoring thread
StartPerformanceMonitoring();
@@ -74,7 +75,7 @@ public:
void EnableAutoQuality() {
m_adaptiveDecoder->EnableAdaptiveMode(true);
m_adaptiveDecoder->ForceQualityAdjustment(); // Immediate analysis
OutputDebugStringA("[AdaptiveVideoPlayer] Auto quality control re-enabled\n");
LOGF_DEBUG("[AdaptiveVideoPlayer] Auto quality control re-enabled");
}
// Example: Performance-based target adjustment
@@ -84,12 +85,12 @@ public:
// If system is heavily loaded, reduce target FPS
if (metrics.cpu_usage_percent > 85.0 || metrics.gpu_usage_percent > 90.0) {
m_adaptiveDecoder->SetTargetFrameRate(24.0); // Reduce to 24 FPS
OutputDebugStringA("[AdaptiveVideoPlayer] Reduced target FPS due to high system load\n");
LOGF_DEBUG("[AdaptiveVideoPlayer] Reduced target FPS due to high system load");
}
// If system has headroom, try to increase FPS
else if (metrics.cpu_usage_percent < 50.0 && metrics.gpu_usage_percent < 60.0) {
m_adaptiveDecoder->SetTargetFrameRate(60.0); // Increase to 60 FPS
OutputDebugStringA("[AdaptiveVideoPlayer] Increased target FPS due to available resources\n");
LOGF_DEBUG("[AdaptiveVideoPlayer] Increased target FPS due to available resources");
}
}
@@ -15,6 +15,7 @@
// 3rd: User defined
#include "AdaptiveNVDECDecoder.h"
#include "../Common/VavCoreLogger.h"
namespace VavCore {
@@ -46,7 +47,7 @@ bool AdaptiveNVDECDecoder::Initialize(const VideoMetadata& metadata) {
bool result = NVDECAV1Decoder::Initialize(adaptiveMetadata);
if (result) {
OutputDebugStringA("[AdaptiveNVDECDecoder] Initialized with adaptive quality control\n");
LOGF_DEBUG("[AdaptiveNVDECDecoder] Initialized with adaptive quality control");
}
return result;
@@ -1,6 +1,7 @@
#include "pch.h"
#include "D3D12SurfaceHandler.h"
#include "ExternalMemoryCache.h"
#include "../Common/VavCoreLogger.h"
#include <stdio.h>
namespace VavCore {
@@ -10,12 +11,12 @@ D3D12SurfaceHandler::D3D12SurfaceHandler(ID3D12Device* device, CUcontext cuda_co
, m_cudaContext(cuda_context)
, m_cache(std::make_unique<ExternalMemoryCache>(device, cuda_context))
{
OutputDebugStringA("[D3D12SurfaceHandler] Created\n");
LOGF_DEBUG("[D3D12SurfaceHandler] Created");
}
D3D12SurfaceHandler::~D3D12SurfaceHandler()
{
OutputDebugStringA("[D3D12SurfaceHandler] Destroyed\n");
LOGF_DEBUG("[D3D12SurfaceHandler] Destroyed");
}
bool D3D12SurfaceHandler::CopyNV12Frame(CUdeviceptr src_frame,
@@ -27,7 +28,7 @@ bool D3D12SurfaceHandler::CopyNV12Frame(CUdeviceptr src_frame,
// Get CUDA pointer for D3D12 resource
CUdeviceptr dst_ptr = 0;
if (!GetD3D12CUDAPointer(dst_texture, &dst_ptr)) {
OutputDebugStringA("[D3D12SurfaceHandler] Failed to get CUDA pointer for D3D12 resource\n");
LOGF_ERROR("[D3D12SurfaceHandler] Failed to get CUDA pointer for D3D12 resource");
return false;
}
@@ -51,23 +52,19 @@ bool D3D12SurfaceHandler::CopyNV12Frame(CUdeviceptr src_frame,
uint32_t y_rows = num_rows[0];
uint32_t uv_rows = num_rows[1];
char buf[512];
sprintf_s(buf, "[D3D12SurfaceHandler] Texture descriptor: allocated height=%u (logical video height=%u)\n",
LOGF_DEBUG("[D3D12SurfaceHandler] Texture descriptor: allocated height=%u (logical video height=%u)",
(uint32_t)desc.Height, height);
OutputDebugStringA(buf);
sprintf_s(buf, "[D3D12SurfaceHandler] Y plane: width=%u, height=%u, srcPitch=%u, dstPitch=%u, rows=%u\n",
LOGF_DEBUG("[D3D12SurfaceHandler] Y plane: width=%u, height=%u, srcPitch=%u, dstPitch=%u, rows=%u",
width, height, src_pitch, y_dst_pitch, y_rows);
OutputDebugStringA(buf);
sprintf_s(buf, "[D3D12SurfaceHandler] UV plane: width=%u, height=%u, srcPitch=%u, dstPitch=%u, rows=%u, offset=%llu\n",
LOGF_DEBUG("[D3D12SurfaceHandler] UV plane: width=%u, height=%u, srcPitch=%u, dstPitch=%u, rows=%u, offset=%llu",
width, height / 2, src_pitch, uv_dst_pitch, uv_rows, uv_offset);
OutputDebugStringA(buf);
// Copy Y plane
if (!CopyYPlane(src_frame, src_pitch,
dst_ptr, y_dst_pitch,
width, height)) {
width, y_rows)) {
return false;
}
@@ -75,20 +72,23 @@ bool D3D12SurfaceHandler::CopyNV12Frame(CUdeviceptr src_frame,
CUdeviceptr src_uv = src_frame + (static_cast<UINT64>(src_pitch) * height);
CUdeviceptr dst_uv = dst_ptr + uv_offset;
LOGF_DEBUG("[D3D12SurfaceHandler] UV copy params: src_uv=0x%llX, dst_uv=0x%llX, src_pitch=%u, dst_pitch=%u, width=%u, rows=%u",
src_uv, dst_uv, src_pitch, uv_dst_pitch, width, uv_rows);
if (!CopyUVPlane(src_uv, src_pitch,
dst_uv, uv_dst_pitch,
width, height / 2)) {
width, uv_rows)) {
return false;
}
OutputDebugStringA("[D3D12SurfaceHandler] NV12 frame copied successfully\n");
LOGF_DEBUG("[D3D12SurfaceHandler] NV12 frame copied successfully");
return true;
}
bool D3D12SurfaceHandler::SignalD3D12Fence(uint64_t fence_value)
{
// TODO: Implement fence signaling
OutputDebugStringA("[D3D12SurfaceHandler] Fence signaling not implemented\n");
LOGF_DEBUG("[D3D12SurfaceHandler] Fence signaling not implemented");
return true;
}
@@ -111,14 +111,12 @@ bool D3D12SurfaceHandler::CopyYPlane(CUdeviceptr src, uint32_t src_pitch,
);
if (err != cudaSuccess) {
char buf[256];
sprintf_s(buf, "[D3D12SurfaceHandler] Y plane copy failed: %d (%s)\n",
LOGF_ERROR("[D3D12SurfaceHandler] Y plane copy failed: %d (%s)",
err, cudaGetErrorString(err));
OutputDebugStringA(buf);
return false;
}
OutputDebugStringA("[D3D12SurfaceHandler] Y plane copied\n");
LOGF_DEBUG("[D3D12SurfaceHandler] Y plane copied");
return true;
}
@@ -137,14 +135,12 @@ bool D3D12SurfaceHandler::CopyUVPlane(CUdeviceptr src, uint32_t src_pitch,
);
if (err != cudaSuccess) {
char buf[256];
sprintf_s(buf, "[D3D12SurfaceHandler] UV plane copy failed: %d (%s)\n",
LOGF_ERROR("[D3D12SurfaceHandler] UV plane copy failed: %d (%s)",
err, cudaGetErrorString(err));
OutputDebugStringA(buf);
return false;
}
OutputDebugStringA("[D3D12SurfaceHandler] UV plane copied\n");
LOGF_DEBUG("[D3D12SurfaceHandler] UV plane copied");
return true;
}
@@ -2,6 +2,7 @@
#include "ExternalMemoryCache.h"
#include <windows.h>
#include <stdio.h>
#include "../Common/VavCoreLogger.h"
namespace VavCore {
@@ -46,7 +47,7 @@ bool ExternalMemoryCache::GetOrCreateExternalMemory(ID3D12Resource* resource, CU
m_cache[resource] = entry;
*out_ptr = device_ptr;
OutputDebugStringA("[ExternalMemoryCache] New resource cached\n");
LOGF_DEBUG("[ExternalMemoryCache] New resource cached");
return true;
}
@@ -61,7 +62,7 @@ void ExternalMemoryCache::Release(ID3D12Resource* resource)
cudaDestroyExternalMemory(it->second.external_memory);
m_cache.erase(it);
OutputDebugStringA("[ExternalMemoryCache] Resource released\n");
LOGF_DEBUG("[ExternalMemoryCache] Resource released");
}
void ExternalMemoryCache::ReleaseAll()
@@ -71,7 +72,7 @@ void ExternalMemoryCache::ReleaseAll()
}
m_cache.clear();
OutputDebugStringA("[ExternalMemoryCache] All resources released\n");
LOGF_DEBUG("[ExternalMemoryCache] All resources released");
}
bool ExternalMemoryCache::ImportD3D12Resource(ID3D12Resource* resource,
@@ -83,9 +84,7 @@ bool ExternalMemoryCache::ImportD3D12Resource(ID3D12Resource* resource,
HRESULT hr = m_device->CreateSharedHandle(resource, nullptr, GENERIC_ALL, nullptr, &shared_handle);
if (FAILED(hr)) {
char buf[256];
sprintf_s(buf, "[ExternalMemoryCache] CreateSharedHandle failed: 0x%08X\n", hr);
OutputDebugStringA(buf);
LOGF_ERROR("[ExternalMemoryCache] CreateSharedHandle failed: 0x%08X", hr);
return false;
}
@@ -107,12 +106,10 @@ bool ExternalMemoryCache::ImportD3D12Resource(ID3D12Resource* resource,
CloseHandle(shared_handle);
if (err != cudaSuccess) {
char buf[512];
sprintf_s(buf, "[ExternalMemoryCache] cudaImportExternalMemory failed: %d (%s)\n"
"  type=%d, handle=%p, size=%llu, flags=%u\n",
LOGF_DEBUG("[ExternalMemoryCache] cudaImportExternalMemory failed: %d (%s)"
"  type=%d, handle=%p, size=%llu, flags=%u",
err, cudaGetErrorString(err),
mem_desc.type, mem_desc.handle.win32.handle, mem_desc.size, mem_desc.flags);
OutputDebugStringA(buf);
return false;
}
@@ -124,9 +121,7 @@ bool ExternalMemoryCache::ImportD3D12Resource(ID3D12Resource* resource,
err = cudaExternalMemoryGetMappedBuffer((void**)&device_ptr, external_memory, &buffer_desc);
if (err != cudaSuccess) {
char buf[256];
sprintf_s(buf, "[ExternalMemoryCache] cudaExternalMemoryGetMappedBuffer failed: %d\n", err);
OutputDebugStringA(buf);
LOGF_ERROR("[ExternalMemoryCache] cudaExternalMemoryGetMappedBuffer failed: %d", err);
cudaDestroyExternalMemory(external_memory);
return false;
}
@@ -129,10 +129,8 @@ bool NVDECAV1Decoder::Initialize(const VideoMetadata& metadata) {
// Check NVDEC availability with ColorSpace compatibility check
if (!IsNVDECAvailable(metadata.width, metadata.height, metadata.color_space)) {
char debug_buf[256];
sprintf_s(debug_buf, "NVDEC not available for this video format (%dx%d, ColorSpace=%d)",
LOGF_ERROR("NVDEC not available for this video format (%dx%d, ColorSpace=%d)",
metadata.width, metadata.height, static_cast<int>(metadata.color_space));
LogError(debug_buf);
return false;
}
@@ -164,7 +162,6 @@ bool NVDECAV1Decoder::Initialize(const VideoMetadata& metadata) {
// Parse av1C box to extract OBUs
if (metadata.codec_private_data && metadata.codec_private_size > 0) {
char debug_buf[256];
// av1C box structure (ISO/IEC 14496-15):
// [0]: marker (1 bit) + version (7 bits)
@@ -192,9 +189,7 @@ bool NVDECAV1Decoder::Initialize(const VideoMetadata& metadata) {
obu_length = obu_length & 0x7F;
}
sprintf_s(debug_buf, "[Initialize] av1C: obu_length=%zu (leb128: 0x%02X)\n", obu_length, ptr[0]);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
LOGF_DEBUG("[Initialize] av1C: obu_length=%zu (leb128: 0x%02X)", obu_length, ptr[0]);
// Skip leb128 length field
const uint8_t* obu_data = ptr + length_bytes;
@@ -208,34 +203,24 @@ bool NVDECAV1Decoder::Initialize(const VideoMetadata& metadata) {
m_codecPrivateSize = obu_size;
m_firstFrameSent = false;
sprintf_s(debug_buf, "[Initialize] Extracted %zu bytes of OBU data from av1C (declared length: %zu)\n",
m_codecPrivateSize, obu_length);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
LOGF_DEBUG("[Initialize] Extracted %zu bytes of OBU data from av1C (declared length: %zu)", m_codecPrivateSize, obu_length);
// Debug: print first few bytes
if (obu_size >= 8) {
sprintf_s(debug_buf, "[Initialize] OBU (first 8 bytes): %02X %02X %02X %02X %02X %02X %02X %02X\n",
obu_data[0], obu_data[1], obu_data[2], obu_data[3],
LOGF_DEBUG("[Initialize] OBU (first 8 bytes): %02X %02X %02X %02X %02X %02X %02X %02X", obu_data[0], obu_data[1], obu_data[2], obu_data[3],
obu_data[4], obu_data[5], obu_data[6], obu_data[7]);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
}
} else {
OutputDebugStringA("[Initialize] WARNING: No OBU data available after leb128\n");
printf("[Initialize] WARNING: No OBU data available after leb128\n");
LOGF_WARNING("[Initialize] WARNING: No OBU data available after leb128");
}
} else {
OutputDebugStringA("[Initialize] WARNING: No configOBUs data in av1C box\n");
printf("[Initialize] WARNING: No configOBUs data in av1C box\n");
LOGF_WARNING("[Initialize] WARNING: No configOBUs data in av1C box");
}
} else {
OutputDebugStringA("[Initialize] WARNING: av1C box too small (< 5 bytes)\n");
printf("[Initialize] WARNING: av1C box too small (< 5 bytes)\n");
LOGF_WARNING("[Initialize] WARNING: av1C box too small (< 5 bytes)");
}
} else {
OutputDebugStringA("[Initialize] WARNING: No codec private data available\n");
printf("[Initialize] WARNING: No codec private data available\n");
LOGF_WARNING("[Initialize] WARNING: No codec private data available");
}
// Load the PTX module for the deinterleave kernel
@@ -260,9 +245,9 @@ bool NVDECAV1Decoder::Initialize(const VideoMetadata& metadata) {
m_pollingRunning = true;
m_pollingThread = std::thread(&NVDECAV1Decoder::PollingThreadFunc, this);
std::cout << "[NVDECAV1Decoder] Initialized successfully" << std::endl;
std::cout << "  Resolution: " << m_width << "x" << m_height << std::endl;
std::cout << "  Max Resolution: " << m_maxWidth << "x" << m_maxHeight << std::endl;
LOGF_INFO("[NVDECAV1Decoder] Initialized successfully");
LOGF_DEBUG("[NVDECAV1Decoder] Resolution: %dx%d", m_width, m_height);
LOGF_DEBUG("[NVDECAV1Decoder] Max Resolution: %dx%d", m_maxWidth, m_maxHeight);
return true;
}
@@ -274,12 +259,12 @@ void NVDECAV1Decoder::Cleanup() {
if (m_pollingThread.joinable()) {
m_pollingThread.join();
}
OutputDebugStringA("[NVDECAV1Decoder::Cleanup] Polling thread stopped\n");
LOGF_DEBUG("[NVDECAV1Decoder::Cleanup] Polling thread stopped");
}
// Clean up D3D12 surface handler (releases all cached external memory)
m_d3d12Handler.reset();
OutputDebugStringA("[NVDECAV1Decoder::Cleanup] D3D12SurfaceHandler released\n");
LOGF_DEBUG("[NVDECAV1Decoder::Cleanup] D3D12SurfaceHandler released");
// Clean up D3D12 synchronization objects
if (m_cudaSemaphore != nullptr) {
@@ -528,59 +513,53 @@ std::string NVDECAV1Decoder::GetVersion() const {
}
bool NVDECAV1Decoder::IsNVDECAvailable(uint32_t width, uint32_t height, ColorSpace color_space) const {
char debug_buf[256];
// Check if CUDA driver is available
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 1: Initializing CUDA driver...\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 1: Initializing CUDA driver...");
CUresult result = cuInit(0);
if (result != CUDA_SUCCESS) {
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuInit returned %d\n", result);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuInit returned %d", result);
return false;
}
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 1: CUDA driver initialized successfully\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 1: CUDA driver initialized successfully");
// Check device count
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 2: Checking device count...\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 2: Checking device count...");
int device_count = 0;
result = cuDeviceGetCount(&device_count);
if (result != CUDA_SUCCESS) {
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuDeviceGetCount returned %d\n", result);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuDeviceGetCount returned %d", result);
return false;
}
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] Step 2: Found %d CUDA device(s)\n", device_count);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 2: Found %d CUDA device(s)", device_count);
if (device_count == 0) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: No CUDA devices found\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: No CUDA devices found");
return false;
}
// Check decode capabilities for AV1
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 3: Getting device handle...\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 3: Getting device handle...");
CUdevice device;
result = cuDeviceGet(&device, 0);
if (result != CUDA_SUCCESS) {
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuDeviceGet returned %d\n", result);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuDeviceGet returned %d", result);
return false;
}
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 3: Device handle obtained\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 3: Device handle obtained");
// Create CUDA context (required for cuvidGetDecoderCaps to work)
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 4: Creating CUDA context...\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 4: Creating CUDA context...");
CUcontext cuContext;
CUctxCreateParams createParams = {};
createParams.execAffinityParams = nullptr;
result = cuCtxCreate_v4(&cuContext, &createParams, 0, device);
if (result != CUDA_SUCCESS) {
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuCtxCreate returned %d\n", result);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuCtxCreate returned %d", result);
return false;
}
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] Step 4: CUDA context created\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 4: CUDA context created");
// Map ColorSpace to NVDEC ChromaFormat
cudaVideoChromaFormat chroma_format;
@@ -599,14 +578,12 @@ bool NVDECAV1Decoder::IsNVDECAvailable(uint32_t width, uint32_t height, ColorSpa
color_space_name = "YUV444";
break;
default:
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: Unsupported ColorSpace %d\n", static_cast<int>(color_space));
OutputDebugStringA(debug_buf);
LOGF_ERROR("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: Unsupported ColorSpace %d", static_cast<int>(color_space));
cuCtxDestroy(cuContext);
return false;
}
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] Step 5: Checking %s decode capabilities...\n", color_space_name);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 5: Checking %s decode capabilities...", color_space_name);
CUVIDDECODECAPS decode_caps;
memset(&decode_caps, 0, sizeof(decode_caps));
@@ -619,68 +596,62 @@ bool NVDECAV1Decoder::IsNVDECAvailable(uint32_t width, uint32_t height, ColorSpa
// Cleanup context
cuCtxDestroy(cuContext);
if (result != CUDA_SUCCESS) {
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuvidGetDecoderCaps returned %d\n", result);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: cuvidGetDecoderCaps returned %d", result);
return false;
}
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] Step 5: AV1 %s decode caps - bIsSupported=%d, nMaxWidth=%d, nMaxHeight=%d\n",
color_space_name, decode_caps.bIsSupported, decode_caps.nMaxWidth, decode_caps.nMaxHeight);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 5: AV1 %s decode caps - bIsSupported=%d, nMaxWidth=%d, nMaxHeight=%d", color_space_name, decode_caps.bIsSupported, decode_caps.nMaxWidth, decode_caps.nMaxHeight);
if (!decode_caps.bIsSupported) {
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] FAILED: AV1 %s decoding is NOT supported by this GPU\n", color_space_name);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: AV1 %s decoding is NOT supported by this GPU", color_space_name);
return false;
}
// Check supported output formats
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] Step 6: Output format mask = 0x%X\n", decode_caps.nOutputFormatMask);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] Step 6: Output format mask = 0x%X", decode_caps.nOutputFormatMask);
bool has_supported_output = false;
if (decode_caps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_NV12)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable]   - NV12 supported\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable]   - NV12 supported");
has_supported_output = true;
}
if (decode_caps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_P016)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable]   - P016 (10-bit 4:2:0) supported\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable]   - P016 (10-bit 4:2:0) supported");
has_supported_output = true;
}
if (decode_caps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_YUV444)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable]   - YUV444 (8-bit 4:4:4) supported\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable]   - YUV444 (8-bit 4:4:4) supported");
has_supported_output = true;
}
if (decode_caps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_YUV444_16Bit)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable]   - YUV444_16Bit (10-bit 4:4:4) supported\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable]   - YUV444_16Bit (10-bit 4:4:4) supported");
has_supported_output = true;
}
if (decode_caps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_NV16)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable]   - NV16 (8-bit 4:2:2) supported\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable]   - NV16 (8-bit 4:2:2) supported");
has_supported_output = true;
}
if (decode_caps.nOutputFormatMask & (1 << cudaVideoSurfaceFormat_P216)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable]   - P216 (10-bit 4:2:2) supported\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable]   - P216 (10-bit 4:2:2) supported");
has_supported_output = true;
}
if (!has_supported_output) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: No supported output format found\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: No supported output format found");
return false;
}
if ((width > decode_caps.nMaxWidth) || (height > decode_caps.nMaxHeight)) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: Resolution not supported on this GPU\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: Resolution not supported on this GPU");
return false;
}
if ((width >> 4) * (height >> 4) > decode_caps.nMaxMBCount) {
OutputDebugStringA("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: MBCount not supported on this GPU\n");
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] FAILED: MBCount not supported on this GPU");
return false;
}
sprintf_s(debug_buf, "[NVDECAV1Decoder::IsNVDECAvailable] SUCCESS: NVDEC AV1 %s is available!\n", color_space_name);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder::IsNVDECAvailable] SUCCESS: NVDEC AV1 %s is available!", color_space_name);
return true;
}
@@ -701,7 +672,7 @@ bool NVDECAV1Decoder::InitializeCUDA() {
}
// Create CUDA context - use correct API signature for CUDA 13.0
OutputDebugStringA("[InitializeCUDA] Creating CUDA context...\n");
LOGF_DEBUG("[InitializeCUDA] Creating CUDA context...");
CUctxCreateParams createParams = {};
createParams.execAffinityParams = nullptr;
result = cuCtxCreate_v4(&m_cuContext, &createParams, 0, device);
@@ -712,12 +683,11 @@ bool NVDECAV1Decoder::InitializeCUDA() {
// If D3D device is set, enable D3D interop on this context
if (m_d3d12Device) {
OutputDebugStringA("[InitializeCUDA] D3D12 device detected, CUDA-D3D12 interop will be enabled\n");
char debug_buf[256];
sprintf_s(debug_buf, "[InitializeCUDA] D3D12 device: %p, CUDA context: %p\n", m_d3d12Device, m_cuContext);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[InitializeCUDA] D3D12 device detected, CUDA-D3D12 interop will be enabled");
LOGF_DEBUG("[InitializeCUDA] D3D12 device: %p, CUDA context: %p", m_d3d12Device, m_cuContext);
} else {
OutputDebugStringA("[InitializeCUDA] No D3D device, using CPU decoding path\n");
LOGF_DEBUG("[InitializeCUDA] No D3D device, using CPU decoding path");
}
// Create stream
@@ -745,7 +715,7 @@ bool NVDECAV1Decoder::CheckCUDACapability() {
return false;
}
std::cout << "[NVDECAV1Decoder] CUDA Compute Capability: " << major << "." << minor << std::endl;
LOGF_INFO("[NVDECAV1Decoder] CUDA Compute Capability: %d.%d", major, minor);
// NVDEC requires compute capability 3.0 or higher
return (major >= 3);
@@ -759,7 +729,7 @@ bool NVDECAV1Decoder::CreateDecoder() {
LogCUDAError(ctxResult, "cuCtxSetCurrent in CreateDecoder");
return false;
}
OutputDebugStringA("[CreateDecoder] CUDA context set as current\n");
LOGF_DEBUG("[CreateDecoder] CUDA context set as current");
}
memset(&m_createInfo, 0, sizeof(m_createInfo));
@@ -788,13 +758,13 @@ bool NVDECAV1Decoder::CreateDecoder() {
return false;
}
OutputDebugStringA("[CreateDecoder] NVDEC decoder created successfully\n");
LOGF_DEBUG("[CreateDecoder] NVDEC decoder created successfully");
return true;
}
bool NVDECAV1Decoder::CreateParser() {
OutputDebugStringA("[CreateParser] Starting parser creation...\n");
LOGF_DEBUG("[CreateParser] Starting parser creation...");
memset(&m_parserParams, 0, sizeof(m_parserParams));
@@ -814,10 +784,7 @@ bool NVDECAV1Decoder::CreateParser() {
return false;
}
char debug_buf[256];
sprintf_s(debug_buf, "[CreateParser] Parser created successfully! m_parser=%p, ulMaxDisplayDelay=%d\n",
m_parser, m_parserParams.ulMaxDisplayDelay);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[CreateParser] Parser created successfully! m_parser=%p, ulMaxDisplayDelay=%d", m_parser, m_parserParams.ulMaxDisplayDelay);
return true;
}
@@ -846,21 +813,14 @@ int CUDAAPI NVDECAV1Decoder::HandleVideoSequence(void* user_data, CUVIDEOFORMAT*
return 0;
}
char debug_buf[512];
sprintf_s(debug_buf, "[HandleVideoSequence] Sequence: %dx%d ChromaFormat:%d BitDepth:%d min_num_decode_surfaces:%d\n",
format->coded_width, format->coded_height,
LOGF_DEBUG("[HandleVideoSequence] Sequence: %dx%d ChromaFormat:%d BitDepth:%d min_num_decode_surfaces:%d", format->coded_width, format->coded_height,
format->chroma_format, format->bit_depth_luma_minus8 + 8,
format->min_num_decode_surfaces);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
// Check if format is supported
if (format->chroma_format != cudaVideoChromaFormat_420 &&
format->chroma_format != cudaVideoChromaFormat_422) {
sprintf_s(debug_buf, "[HandleVideoSequence] ERROR: Unsupported ChromaFormat %d (NVDEC AV1 only supports 420/422)\n",
format->chroma_format);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
LOGF_ERROR("[HandleVideoSequence] ERROR: Unsupported ChromaFormat %d (NVDEC AV1 only supports 420/422)", format->chroma_format);
return 0;  // Fail - unsupported format
}
@@ -870,11 +830,8 @@ int CUDAAPI NVDECAV1Decoder::HandleVideoSequence(void* user_data, CUVIDEOFORMAT*
if (decoder->m_createInfo.ChromaFormat != format->chroma_format ||
decoder->m_createInfo.bitDepthMinus8 != format->bit_depth_luma_minus8) {
format_changed = true;
sprintf_s(debug_buf, "[HandleVideoSequence] WARNING: Format mismatch (ChromaFormat: %d->%d, BitDepth: %d->%d)\n",
decoder->m_createInfo.ChromaFormat, format->chroma_format,
LOGF_WARNING("[HandleVideoSequence] WARNING: Format mismatch (ChromaFormat: %d->%d, BitDepth: %d->%d)", decoder->m_createInfo.ChromaFormat, format->chroma_format,
decoder->m_createInfo.bitDepthMinus8 + 8, format->bit_depth_luma_minus8 + 8);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
// Note: Cannot recreate decoder mid-stream - would need to restart decoding
return 0;  // Fail - format change not supported
}
@@ -882,9 +839,7 @@ int CUDAAPI NVDECAV1Decoder::HandleVideoSequence(void* user_data, CUVIDEOFORMAT*
// Reconfigure decoder for resolution/surface count change
if (decoder->m_decoder) {
sprintf_s(debug_buf, "[HandleVideoSequence] Reconfiguring decoder with actual sequence parameters\n");
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
LOGF_DEBUG("[HandleVideoSequence] Reconfiguring decoder with actual sequence parameters");
CUVIDRECONFIGUREDECODERINFO reconfig = {};
reconfig.ulWidth = format->coded_width;
@@ -900,17 +855,12 @@ int CUDAAPI NVDECAV1Decoder::HandleVideoSequence(void* user_data, CUVIDEOFORMAT*
CUresult result = cuvidReconfigureDecoder(decoder->m_decoder, &reconfig);
if (result != CUDA_SUCCESS) {
sprintf_s(debug_buf, "[HandleVideoSequence] cuvidReconfigureDecoder failed with code %d\n", result);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
LOGF_ERROR("[HandleVideoSequence] cuvidReconfigureDecoder failed with code %d", result);
decoder->LogCUDAError(result, "cuvidReconfigureDecoder");
return 0;
}
sprintf_s(debug_buf, "[HandleVideoSequence] Decoder reconfigured: %dx%d with %d surfaces\n",
format->coded_width, format->coded_height, format->min_num_decode_surfaces);
OutputDebugStringA(debug_buf);
printf("%s", debug_buf);
LOGF_DEBUG("[HandleVideoSequence] Decoder reconfigured: %dx%d with %d surfaces", format->coded_width, format->coded_height, format->min_num_decode_surfaces);
// Update stored dimensions
decoder->m_width = format->coded_width;
@@ -1021,37 +971,34 @@ int CUDAAPI NVDECAV1Decoder::HandlePictureDisplay(void* user_data, CUVIDPARSERDI
void NVDECAV1Decoder::LogError(const std::string& message) const {
std::cerr << "[NVDECAV1Decoder] ERROR: " << message << std::endl;
LOGF_ERROR("[NVDECAV1Decoder] %s", message.c_str());
}
void NVDECAV1Decoder::LogCUDAError(CUresult result, const std::string& operation) const {
const char* error_string = nullptr;
cuGetErrorString(result, &error_string);
std::cerr << "[NVDECAV1Decoder] CUDA ERROR in " << operation << ": "
<< (error_string ? error_string : "Unknown error")
<< " (code: " << result << ")" << std::endl;
LOGF_ERROR("[NVDECAV1Decoder] CUDA ERROR in %s: %s (code: %d)",
operation.c_str(),
(error_string ? error_string : "Unknown error"),
result);
}
bool NVDECAV1Decoder::CheckNVDECAvailability() {
OutputDebugStringA("[NVDECAV1Decoder] Checking NVDEC availability...\n");
LOGF_DEBUG("[NVDECAV1Decoder] Checking NVDEC availability...");
try {
// Create temporary instance to check availability
NVDECAV1Decoder temp_decoder;
bool available = temp_decoder.IsNVDECAvailable(1920, 1080);
char debug_buf[128];
sprintf_s(debug_buf, "[NVDECAV1Decoder] Availability check result: %s\n",
available ? "AVAILABLE" : "NOT AVAILABLE");
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder] Availability check result: %s", available ? "AVAILABLE" : "NOT AVAILABLE");
return available;
} catch (const std::exception& e) {
char debug_buf[256];
sprintf_s(debug_buf, "[NVDECAV1Decoder] Exception during availability check: %s\n", e.what());
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[NVDECAV1Decoder] Exception during availability check: %s", e.what());
return false;
} catch (...) {
OutputDebugStringA("[NVDECAV1Decoder] Unknown exception during availability check\n");
LOGF_DEBUG("[NVDECAV1Decoder] Unknown exception during availability check");
return false;
}
}
@@ -1074,16 +1021,15 @@ bool NVDECAV1Decoder::SupportsSurfaceType(VavCoreSurfaceType type) const {
bool NVDECAV1Decoder::SetD3DDevice(void* d3d_device, VavCoreSurfaceType type) {
if (!d3d_device) {
OutputDebugStringA("[SetD3DDevice] ERROR: d3d_device is null\n");
LOGF_ERROR("[SetD3DDevice] ERROR: d3d_device is null");
return false;
}
try {
bool success = false;
if (type == VAVCORE_SURFACE_D3D12_RESOURCE) {
char debug_buf[256];
sprintf_s(debug_buf, "[SetD3DDevice] Setting D3D12 device: %p\n", d3d_device);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[SetD3DDevice] Setting D3D12 device: %p", d3d_device);
// Store D3D12 device
m_d3d12Device = d3d_device;
@@ -1091,18 +1037,16 @@ bool NVDECAV1Decoder::SetD3DDevice(void* d3d_device, VavCoreSurfaceType type) {
// Create synchronization objects for D3D12 interop if not already created
if (m_d3d12Fence == nullptr) {
OutputDebugStringA("[SetD3DDevice] Creating D3D12 Fence for GPU synchronization...\n");
LOGF_DEBUG("[SetD3DDevice] Creating D3D12 Fence for GPU synchronization...");
ID3D12Device* device = static_cast<ID3D12Device*>(m_d3d12Device);
HRESULT hr = device->CreateFence(0, D3D12_FENCE_FLAG_SHARED, IID_PPV_ARGS(reinterpret_cast<ID3D12Fence**>(&m_d3d12Fence)));
if (FAILED(hr)) {
LogError("Failed to create D3D12 Fence for CUDA interop");
OutputDebugStringA("[SetD3DDevice] ERROR: D3D12 Fence creation FAILED\n");
LOGF_ERROR("[SetD3DDevice] ERROR: D3D12 Fence creation FAILED");
return false;
}
char fence_buf[256];
sprintf_s(fence_buf, "[SetD3DDevice] D3D12 Fence created successfully: %p\n", m_d3d12Fence);
OutputDebugStringA(fence_buf);
LOGF_DEBUG("[SetD3DDevice] D3D12 Fence created successfully: %p", m_d3d12Fence);
// Create shared handle for the fence so CUDA can access it
// IMPORTANT: Must use SECURITY_ATTRIBUTES with bInheritHandle = FALSE for NT handles
@@ -1121,14 +1065,13 @@ bool NVDECAV1Decoder::SetD3DDevice(void* d3d_device, VavCoreSurfaceType type) {
);
if (FAILED(hr) || sharedHandle == nullptr) {
LogError("Failed to create shared handle for D3D12 Fence");
OutputDebugStringA("[SetD3DDevice] ERROR: D3D12 Fence shared handle creation FAILED\n");
LOGF_ERROR("[SetD3DDevice] ERROR: D3D12 Fence shared handle creation FAILED");
static_cast<ID3D12Fence*>(m_d3d12Fence)->Release();
m_d3d12Fence = nullptr;
return false;
}
sprintf_s(fence_buf, "[SetD3DDevice] D3D12 Fence shared handle created: %p\n", sharedHandle);
OutputDebugStringA(fence_buf);
LOGF_DEBUG("[SetD3DDevice] D3D12 Fence shared handle created: %p", sharedHandle);
cudaExternalSemaphoreHandleDesc semDesc = {};
semDesc.type = cudaExternalSemaphoreHandleTypeD3D12Fence;
@@ -1137,32 +1080,30 @@ bool NVDECAV1Decoder::SetD3DDevice(void* d3d_device, VavCoreSurfaceType type) {
cudaError_t cudaStatus = cudaImportExternalSemaphore(&m_cudaSemaphore, &semDesc);
if (cudaStatus != cudaSuccess) {
char cuda_err[256];
sprintf_s(cuda_err, "[SetD3DDevice] CUDA semaphore import failed with error: %d\n", cudaStatus);
OutputDebugStringA(cuda_err);
LOGF_ERROR("[SetD3DDevice] CUDA semaphore import failed with error: %d", cudaStatus);
LogError("Failed to import D3D12 Fence as CUDA semaphore");
CloseHandle(sharedHandle);
static_cast<ID3D12Fence*>(m_d3d12Fence)->Release();
m_d3d12Fence = nullptr;
return false;
}
OutputDebugStringA("[SetD3DDevice] CUDA semaphore imported successfully\n");
LOGF_DEBUG("[SetD3DDevice] CUDA semaphore imported successfully");
// Store the shared handle for cleanup later
m_d3d12FenceSharedHandle = sharedHandle;
} else {
char fence_buf[256];
sprintf_s(fence_buf, "[SetD3DDevice] D3D12 Fence already exists: %p\n", m_d3d12Fence);
OutputDebugStringA(fence_buf);
LOGF_DEBUG("[SetD3DDevice] D3D12 Fence already exists: %p", m_d3d12Fence);
}
// DON'T reinitialize if already initialized - this would invalidate existing decoded frames
if (m_initialized) {
OutputDebugStringA("[SetD3DDevice] Decoder already initialized, D3D12 device set for DecodeToSurface\n");
OutputDebugStringA("[SetD3DDevice] Note: D3D12 zero-copy will be available for DecodeToSurface calls\n");
LOGF_DEBUG("[SetD3DDevice] Decoder already initialized, D3D12 device set for DecodeToSurface");
LOGF_DEBUG("[SetD3DDevice] Note: D3D12 zero-copy will be available for DecodeToSurface calls");
} else {
OutputDebugStringA("[SetD3DDevice] D3D12 device set, will be used when Initialize() is called\n");
LOGF_DEBUG("[SetD3DDevice] D3D12 device set, will be used when Initialize() is called");
}
success = true;
@@ -1175,7 +1116,7 @@ bool NVDECAV1Decoder::SetD3DDevice(void* d3d_device, VavCoreSurfaceType type) {
return success;
}
catch (...) {
OutputDebugStringA("[SetD3DDevice] Exception caught\n");
LOGF_DEBUG("[SetD3DDevice] Exception caught");
LogError("Failed to set D3D device");
return false;
}
@@ -1461,19 +1402,17 @@ VavCoreSurfaceType NVDECAV1Decoder::GetOptimalSurfaceType() const {
bool NVDECAV1Decoder::SetupCUDAD3D12Interop(void* d3d_device) {
if (!m_cuContext || !d3d_device) {
OutputDebugStringA("[SetupCUDAD3D12Interop] ERROR: CUDA context or D3D12 device is null\n");
LOGF_ERROR("[SetupCUDAD3D12Interop] ERROR: CUDA context or D3D12 device is null");
return false;
}
OutputDebugStringA("[SetupCUDAD3D12Interop] Setting up CUDA-D3D12 interop...\n");
LOGF_DEBUG("[SetupCUDAD3D12Interop] Setting up CUDA-D3D12 interop...");
// For D3D12, we'll use cuGraphicsD3D12RegisterResource when needed
// Store the D3D12 device for later use
m_d3d12Device = d3d_device;
char debug_buf[256];
sprintf_s(debug_buf, "[SetupCUDAD3D12Interop] D3D12 device stored: %p\n", m_d3d12Device);
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[SetupCUDAD3D12Interop] D3D12 device stored: %p", m_d3d12Device);
return true;
}
@@ -1528,7 +1467,7 @@ void NVDECAV1Decoder::PollingThreadFunc() {
// Auto-registration function (outside namespace for C linkage)
extern "C" void RegisterNVDECDecoders() {
OutputDebugStringA("[RegisterNVDECDecoders] Registering NVDEC AV1 decoder...\n");
LOGF_DEBUG("[RegisterNVDECDecoders] Registering NVDEC AV1 decoder...");
VavCore::VideoDecoderFactory::RegisterAV1Decoder({
"nvdec",                                    // name
"Hardware AV1 decoder using NVIDIA NVDEC", // description
@@ -1538,5 +1477,5 @@ extern "C" void RegisterNVDECDecoders() {
},
[]() { return std::make_unique<VavCore::NVDECAV1Decoder>(); }  // creator function
});
OutputDebugStringA("[RegisterNVDECDecoders] NVDEC decoder registered\n");
LOGF_DEBUG("[RegisterNVDECDecoders] NVDEC decoder registered");
}
@@ -1,7 +1,7 @@
#include "pch.h"
#include "VideoDecoderFactory.h"
#include <algorithm>
#include <iostream>
#include "../Common/VavCoreLogger.h"
#ifdef ANDROID
// Forward declaration for Android MediaCodec registration
@@ -15,38 +15,32 @@ std::unique_ptr<IVideoDecoder> VideoDecoderFactory::CreateDecoder(VideoCodecType
auto& decoders = GetDecoderList(codec_type);
// Debug: Show how many decoders are registered
char debug_buf[256];
sprintf_s(debug_buf, "[VideoDecoderFactory] Total registered decoders for %s: %zu\n",
LOGF_DEBUG("[VideoDecoderFactory] Total registered decoders for %s: %zu",
GetCodecTypeString(codec_type).c_str(), decoders.size());
OutputDebugStringA(debug_buf);
// Debug: List all registered decoders
for (size_t i = 0; i < decoders.size(); ++i) {
sprintf_s(debug_buf, "[VideoDecoderFactory]   Decoder %zu: %s (priority %d)\n",
LOGF_DEBUG("[VideoDecoderFactory]   Decoder %zu: %s (priority %d)",
i, decoders[i].name.c_str(), decoders[i].priority);
OutputDebugStringA(debug_buf);
}
// Filter available decoders
std::vector<DecoderRegistration> available;
for (const auto& decoder : decoders) {
bool is_available = decoder.isAvailable();
sprintf_s(debug_buf, "[VideoDecoderFactory] Checking %s availability: %s\n",
LOGF_DEBUG("[VideoDecoderFactory] Checking %s availability: %s",
decoder.name.c_str(), is_available ? "AVAILABLE" : "NOT AVAILABLE");
OutputDebugStringA(debug_buf);
if (is_available) {
available.push_back(decoder);
}
}
sprintf_s(debug_buf, "[VideoDecoderFactory] Available decoders after filtering: %zu\n", available.size());
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[VideoDecoderFactory] Available decoders after filtering: %zu", available.size());
if (available.empty()) {
std::cerr << "[VideoDecoderFactory] No available decoders for codec type: "
<< GetCodecTypeString(codec_type) << std::endl;
OutputDebugStringA("[VideoDecoderFactory] ERROR: No available decoders!\n");
LOGF_ERROR("[VideoDecoderFactory] No available decoders for codec type: %s",
GetCodecTypeString(codec_type).c_str());
return nullptr;
}
@@ -57,17 +51,16 @@ std::unique_ptr<IVideoDecoder> VideoDecoderFactory::CreateDecoder(VideoCodecType
switch (decoder_type) {
case DecoderType::AUTO: {
std::cout << "[VideoDecoderFactory] AUTO mode: selecting best decoder: "
<< available[0].name << std::endl;
sprintf_s(debug_buf, "[VideoDecoderFactory] AUTO mode: attempting to create '%s' decoder\n",
LOGF_INFO("[VideoDecoderFactory] AUTO mode: selecting best decoder: %s",
available[0].name.c_str());
OutputDebugStringA(debug_buf);
LOGF_DEBUG("[VideoDecoderFactory] AUTO mode: attempting to create '%s' decoder",
available[0].name.c_str());
auto decoder = available[0].creator();
if (decoder) {
OutputDebugStringA("[VideoDecoderFactory] AUTO mode: decoder created successfully\n");
LOGF_DEBUG("[VideoDecoderFactory] AUTO mode: decoder created successfully");
} else {
OutputDebugStringA("[VideoDecoderFactory] AUTO mode: decoder creation returned nullptr!\n");
LOGF_DEBUG("[VideoDecoderFactory] AUTO mode: decoder creation returned nullptr!");
}
return decoder;
}
@@ -75,67 +68,67 @@ std::unique_ptr<IVideoDecoder> VideoDecoderFactory::CreateDecoder(VideoCodecType
case DecoderType::NVDEC:
for (const auto& decoder : available) {
if (decoder.name == "nvdec") {
std::cout << "[VideoDecoderFactory] NVDEC mode: selecting decoder: "
<< decoder.name << std::endl;
LOGF_INFO("[VideoDecoderFactory] NVDEC mode: selecting decoder: %s",
decoder.name.c_str());
return decoder.creator();
}
}
std::cerr << "[VideoDecoderFactory] NVDEC decoder not available" << std::endl;
LOGF_ERROR("[VideoDecoderFactory] NVDEC decoder not available");
break;
case DecoderType::VPL:
for (const auto& decoder : available) {
if (decoder.name == "vpl") {
std::cout << "[VideoDecoderFactory] VPL mode: selecting decoder: "
<< decoder.name << std::endl;
LOGF_INFO("[VideoDecoderFactory] VPL mode: selecting decoder: %s",
decoder.name.c_str());
return decoder.creator();
}
}
std::cerr << "[VideoDecoderFactory] VPL decoder not available" << std::endl;
LOGF_ERROR("[VideoDecoderFactory] VPL decoder not available");
break;
case DecoderType::AMF:
for (const auto& decoder : available) {
if (decoder.name == "amf") {
std::cout << "[VideoDecoderFactory] AMF mode: selecting decoder: "
<< decoder.name << std::endl;
LOGF_INFO("[VideoDecoderFactory] AMF mode: selecting decoder: %s",
decoder.name.c_str());
return decoder.creator();
}
}
std::cerr << "[VideoDecoderFactory] AMF decoder not available" << std::endl;
LOGF_ERROR("[VideoDecoderFactory] AMF decoder not available");
break;
case DecoderType::DAV1D:
for (const auto& decoder : available) {
if (decoder.name == "dav1d") {
std::cout << "[VideoDecoderFactory] DAV1D mode: selecting decoder: "
<< decoder.name << std::endl;
LOGF_INFO("[VideoDecoderFactory] DAV1D mode: selecting decoder: %s",
decoder.name.c_str());
return decoder.creator();
}
}
std::cerr << "[VideoDecoderFactory] DAV1D decoder not available" << std::endl;
LOGF_ERROR("[VideoDecoderFactory] DAV1D decoder not available");
break;
case DecoderType::MEDIA_FOUNDATION:
for (const auto& decoder : available) {
if (decoder.name == "media_foundation") {
std::cout << "[VideoDecoderFactory] MEDIA_FOUNDATION mode: selecting decoder: "
<< decoder.name << std::endl;
LOGF_INFO("[VideoDecoderFactory] MEDIA_FOUNDATION mode: selecting decoder: %s",
decoder.name.c_str());
return decoder.creator();
}
}
std::cerr << "[VideoDecoderFactory] MEDIA_FOUNDATION decoder not available" << std::endl;
LOGF_ERROR("[VideoDecoderFactory] MEDIA_FOUNDATION decoder not available");
break;
case DecoderType::MEDIACODEC:
for (const auto& decoder : available) {
if (decoder.name == "mediacodec") {
std::cout << "[VideoDecoderFactory] MEDIACODEC mode: selecting decoder: "
<< decoder.name << std::endl;
LOGF_INFO("[VideoDecoderFactory] MEDIACODEC mode: selecting decoder: %s",
decoder.name.c_str());
return decoder.creator();
}
}
std::cerr << "[VideoDecoderFactory] MEDIACODEC decoder not available" << std::endl;
LOGF_ERROR("[VideoDecoderFactory] MEDIACODEC decoder not available");
break;
}
@@ -156,13 +149,13 @@ std::unique_ptr<IVideoDecoder> VideoDecoderFactory::CreateDecoder(const std::str
for (const auto& decoder : decoders) {
if (decoder.name == decoder_name && decoder.isAvailable()) {
std::cout << "[VideoDecoderFactory] Creating specific decoder: " << decoder_name << std::endl;
LOGF_INFO("[VideoDecoderFactory] Creating specific decoder: %s", decoder_name.c_str());
return decoder.creator();
}
}
}
std::cerr << "[VideoDecoderFactory] Decoder not found or not available: " << decoder_name << std::endl;
LOGF_ERROR("[VideoDecoderFactory] Decoder not found or not available: %s", decoder_name.c_str());
return nullptr;
}
@@ -206,8 +199,8 @@ void VideoDecoderFactory::RegisterAV1Decoder(const DecoderRegistration& registra
return a.priority < b.priority;
});
std::cout << "[VideoDecoderFactory] Registered AV1 decoder: " << registration.name
<< " (priority: " << registration.priority << ")" << std::endl;
LOGF_DEBUG("[VideoDecoderFactory] Registered AV1 decoder: %s (priority: %d)",
registration.name.c_str(), registration.priority);
}
void VideoDecoderFactory::RegisterVP9Decoder(const DecoderRegistration& registration) {
@@ -219,8 +212,8 @@ void VideoDecoderFactory::RegisterVP9Decoder(const DecoderRegistration& registra
return a.priority < b.priority;
});
std::cout << "[VideoDecoderFactory] Registered VP9 decoder: " << registration.name
<< " (priority: " << registration.priority << ")" << std::endl;
LOGF_DEBUG("[VideoDecoderFactory] Registered VP9 decoder: %s (priority: %d)",
registration.name.c_str(), registration.priority);
}
bool VideoDecoderFactory::IsCodecSupported(VideoCodecType codec_type) {
@@ -250,13 +243,13 @@ std::string VideoDecoderFactory::GetDecoderDescription(const std::string& decode
}
void VideoDecoderFactory::InitializeFactory() {
std::cout << "[VideoDecoderFactory] Initializing simplified registration-based decoder factory..." << std::endl;
LOGF_INFO("[VideoDecoderFactory] Initializing simplified registration-based decoder factory...");
#ifdef ANDROID
// Explicitly register Android MediaCodec decoders
// This ensures registration happens even if static initialization order is unpredictable
RegisterMediaCodecDecoders();
std::cout << "[VideoDecoderFactory] Android MediaCodec decoders explicitly registered" << std::endl;
LOGF_INFO("[VideoDecoderFactory] Android MediaCodec decoders explicitly registered");
#endif
// The registry is populated automatically through static initialization
@@ -264,18 +257,18 @@ void VideoDecoderFactory::InitializeFactory() {
// Display registered decoders
auto av1_decoders = GetAvailableDecoders(VideoCodecType::AV1);
std::cout << "[VideoDecoderFactory] AV1 decoders: ";
std::string av1_list;
for (const auto& decoder : av1_decoders) {
std::cout << decoder << " ";
av1_list += decoder + " ";
}
std::cout << std::endl;
LOGF_INFO("[VideoDecoderFactory] AV1 decoders: %s", av1_list.c_str());
auto vp9_decoders = GetAvailableDecoders(VideoCodecType::VP9);
std::cout << "[VideoDecoderFactory] VP9 decoders: ";
std::string vp9_list;
for (const auto& decoder : vp9_decoders) {
std::cout << decoder << " ";
vp9_list += decoder + " ";
}
std::cout << std::endl;
LOGF_INFO("[VideoDecoderFactory] VP9 decoders: %s", vp9_list.c_str());
}
void VideoDecoderFactory::CleanupFactory() {
@@ -4,6 +4,7 @@
#include <fstream>
#include <algorithm>
#include <cassert>
#include "../Common/VavCoreLogger.h"
namespace VavCore {
@@ -22,11 +23,7 @@ public:
// Log error for debugging
char error_msg[256];
strerror_s(error_msg, sizeof(error_msg), err);
OutputDebugStringA("Failed to open file: ");
OutputDebugStringA(file_path.c_str());
OutputDebugStringA(" - errno: ");
OutputDebugStringA(error_msg);
OutputDebugStringA("\n");
LOGF_ERROR("Failed to open file: %s - errno: %s", file_path.c_str(), error_msg);
return false;
}
@@ -674,21 +671,21 @@ bool WebMFileReader::ExtractVideoMetadata() {
if (horz == 0 && vert == 0) {
// No subsampling = YUV444
meta.color_space = ColorSpace::YUV444P;
OutputDebugStringA("[WebMFileReader] Detected ColorSpace: YUV444P (4:4:4)\n");
LOGF_DEBUG("[WebMFileReader] Detected ColorSpace: YUV444P (4:4:4)");
} else if (horz == 1 && vert == 0) {
// Horizontal subsampling only = YUV422
meta.color_space = ColorSpace::YUV422P;
OutputDebugStringA("[WebMFileReader] Detected ColorSpace: YUV422P (4:2:2)\n");
LOGF_DEBUG("[WebMFileReader] Detected ColorSpace: YUV422P (4:2:2)");
} else if (horz == 1 && vert == 1) {
// Both horizontal and vertical subsampling = YUV420
meta.color_space = ColorSpace::YUV420P;
OutputDebugStringA("[WebMFileReader] Detected ColorSpace: YUV420P (4:2:0)\n");
LOGF_DEBUG("[WebMFileReader] Detected ColorSpace: YUV420P (4:2:0)");
} else {
// Unknown or unspecified - keep default YUV420P
OutputDebugStringA("[WebMFileReader] Unknown chroma subsampling, using default YUV420P\n");
LOGF_DEBUG("[WebMFileReader] Unknown chroma subsampling, using default YUV420P");
}
} else {
OutputDebugStringA("[WebMFileReader] No Colour element found, using default YUV420P\n");
LOGF_DEBUG("[WebMFileReader] No Colour element found, using default YUV420P");
}
}
}
@@ -724,13 +721,9 @@ bool WebMFileReader::ExtractVideoMetadata() {
|
||||
meta.codec_private_data = codec_private_data;
|
||||
meta.codec_private_size = codec_private_size;
|
||||
|
||||
char debug_buf[256];
|
||||
sprintf_s(debug_buf, "[WebMFileReader] Extracted codec private data: %zu bytes\n", codec_private_size);
|
||||
OutputDebugStringA(debug_buf);
|
||||
printf("%s", debug_buf);
|
||||
LOGF_DEBUG("[WebMFileReader] Extracted codec private data: %zu bytes", codec_private_size);
|
||||
} else {
|
||||
OutputDebugStringA("[WebMFileReader] WARNING: No codec private data found in WebM track\n");
|
||||
printf("[WebMFileReader] WARNING: No codec private data found in WebM track\n");
|
||||
LOGF_WARNING("[WebMFileReader] WARNING: No codec private data found in WebM track");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@
#include "Decoder/IVideoDecoder.h"
#include "Decoder/VideoDecoderFactory.h"
#include "FileIO/WebMFileReader.h"
#include "Common/VavCoreLogger.h"

#include <memory>
#include <string>
@@ -299,35 +300,30 @@ VAVCORE_API VavCoreResult vavcore_open_file(VavCorePlayer* player, const char* f

try {
// Debug log
OutputDebugStringA("[VavCore] Opening file: ");
OutputDebugStringA(filepath);
OutputDebugStringA("\n");
LOGF_DEBUG("[VavCore] Opening file: %s", filepath);

// Open file with WebM reader
if (!player->impl->fileReader->OpenFile(filepath)) {
OutputDebugStringA("[VavCore] OpenFile() returned false\n");
LOGF_DEBUG("[VavCore] OpenFile() returned false");
return VAVCORE_ERROR_FILE_NOT_FOUND;
}

OutputDebugStringA("[VavCore] OpenFile() succeeded\n");
LOGF_DEBUG("[VavCore] OpenFile() succeeded");

// Get video tracks and select the first AV1 track
auto tracks = player->impl->fileReader->GetVideoTracks();

char buf[256];
sprintf_s(buf, "[VavCore] Found %zu video tracks\n", tracks.size());
OutputDebugStringA(buf);
LOGF_DEBUG("[VavCore] Found %zu video tracks", tracks.size());

bool foundAV1 = false;
for (const auto& track : tracks) {
sprintf_s(buf, "[VavCore] Track %lld: codec_type=%d (AV1=%d)\n",
LOGF_DEBUG("[VavCore] Track %lld: codec_type=%d (AV1=%d)",
track.track_number, (int)track.codec_type, (int)VideoCodecType::AV1);
OutputDebugStringA(buf);

if (track.codec_type == VideoCodecType::AV1) {
OutputDebugStringA("[VavCore] AV1 track found! Selecting track...\n");
LOGF_DEBUG("[VavCore] AV1 track found! Selecting track...");
if (player->impl->fileReader->SelectVideoTrack(track.track_number)) {
OutputDebugStringA("[VavCore] Track selected successfully\n");
LOGF_DEBUG("[VavCore] Track selected successfully");
// Get full metadata from WebMFileReader (includes codec_private_data)
player->impl->metadata = player->impl->fileReader->GetVideoMetadata();
foundAV1 = true;
@@ -337,37 +333,33 @@ VAVCORE_API VavCoreResult vavcore_open_file(VavCorePlayer* player, const char* f
}

if (!foundAV1) {
OutputDebugStringA("[VavCore] No AV1 tracks found - returning VAVCORE_ERROR_NOT_SUPPORTED\n");
LOGF_ERROR("[VavCore] No AV1 tracks found - returning VAVCORE_ERROR_NOT_SUPPORTED");
player->impl->fileReader->CloseFile();
return VAVCORE_ERROR_NOT_SUPPORTED;
}

// Create appropriate decoder
OutputDebugStringA("[VavCore] Creating decoder...\n");
LOGF_DEBUG("[VavCore] Creating decoder...");
auto decoderType = to_decoder_type(player->impl->decoderType);

char decoder_type_buf[256];
sprintf_s(decoder_type_buf, "[VavCore] Decoder type requested: %d (0=AUTO, 1=NVDEC, 2=VPL, 3=AMF, 4=DAV1D, 5=MF, 6=MEDIACODEC)\n",
LOGF_DEBUG("[VavCore] Decoder type requested: %d (0=AUTO, 1=NVDEC, 2=VPL, 3=AMF, 4=DAV1D, 5=MF, 6=MEDIACODEC)",
static_cast<int>(decoderType));
OutputDebugStringA(decoder_type_buf);

player->impl->decoder = VavCore::VideoDecoderFactory::CreateDecoder(VavCore::VideoCodecType::AV1, decoderType);

if (!player->impl->decoder) {
OutputDebugStringA("[VavCore] Failed to create decoder - returning VAVCORE_ERROR_INIT_FAILED\n");
LOGF_ERROR("[VavCore] Failed to create decoder - returning VAVCORE_ERROR_INIT_FAILED");
player->impl->fileReader->CloseFile();
return VAVCORE_ERROR_INIT_FAILED;
}

OutputDebugStringA("[VavCore] Decoder created successfully.\n");
LOGF_DEBUG("[VavCore] Decoder created successfully.");

// Apply pending D3D device if it was set before decoder creation
if (player->impl->pendingD3DDevice) {
OutputDebugStringA("[VavCore] Applying pending D3D device before decoder initialization...\n");
char debug_buf[256];
sprintf_s(debug_buf, "[VavCore] Pending D3D device: %p, Type: %d\n",
LOGF_DEBUG("[VavCore] Applying pending D3D device before decoder initialization...");
LOGF_DEBUG("[VavCore] Pending D3D device: %p, Type: %d",
player->impl->pendingD3DDevice, static_cast<int>(player->impl->pendingD3DSurfaceType));
OutputDebugStringA(debug_buf);

player->impl->decoder->SetD3DDevice(player->impl->pendingD3DDevice, player->impl->pendingD3DSurfaceType);

@@ -376,17 +368,17 @@ VAVCORE_API VavCoreResult vavcore_open_file(VavCorePlayer* player, const char* f
player->impl->pendingD3DSurfaceType = VAVCORE_SURFACE_CPU;
}

OutputDebugStringA("[VavCore] Initializing decoder...\n");
LOGF_DEBUG("[VavCore] Initializing decoder...");

// Initialize decoder
if (!player->impl->decoder->Initialize(player->impl->metadata)) {
OutputDebugStringA("[VavCore] Decoder initialization failed - returning VAVCORE_ERROR_INIT_FAILED\n");
LOGF_ERROR("[VavCore] Decoder initialization failed - returning VAVCORE_ERROR_INIT_FAILED");
player->impl->decoder.reset();
player->impl->fileReader->CloseFile();
return VAVCORE_ERROR_INIT_FAILED;
}

OutputDebugStringA("[VavCore] Decoder initialized successfully!\n");
LOGF_DEBUG("[VavCore] Decoder initialized successfully!");

// Store the actual decoder name for later retrieval
player->impl->decoderName = player->impl->decoder->GetCodecName();
@@ -699,15 +691,15 @@ VAVCORE_API VavCoreResult vavcore_set_d3d_device(VavCorePlayer* player, void* d3
if (player->impl->decoder) {
bool success = player->impl->decoder->SetD3DDevice(d3d_device, type);
if (success) {
OutputDebugStringA("[vavcore_set_d3d_device] D3D device applied to existing decoder\n");
LOGF_DEBUG("[vavcore_set_d3d_device] D3D device applied to existing decoder");
return VAVCORE_SUCCESS;
} else {
OutputDebugStringA("[vavcore_set_d3d_device] WARNING: Failed to apply D3D device to existing decoder (will retry on next decode)\n");
LOGF_ERROR("[vavcore_set_d3d_device] WARNING: Failed to apply D3D device to existing decoder (will retry on next decode)");
// Still return success - device is stored for later use
return VAVCORE_SUCCESS;
}
} else {
OutputDebugStringA("[vavcore_set_d3d_device] Decoder not created yet, D3D device stored for later\n");
LOGF_DEBUG("[vavcore_set_d3d_device] Decoder not created yet, D3D device stored for later");
return VAVCORE_SUCCESS;
}
}
}
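
Note on the logging facility this diff migrates to: VavCoreLogger.h itself is not part of the hunks above, so the sketch below is only a guess at its shape, a printf-style variadic logger behind LOGF_DEBUG/LOGF_INFO/LOGF_WARNING/LOGF_ERROR macros. Every name, signature, and sink choice here is an assumption for illustration, not the project's actual header.

// Hypothetical sketch of ../Common/VavCoreLogger.h (assumed shape, not taken from this commit).
#pragma once
#include <cstdarg>
#include <cstdio>

namespace VavCore {
    enum class LogLevel { Debug, Info, Warning, Error };

    // Formats printf-style arguments into a buffer and writes one line to the sink (stderr here).
    inline void Log(LogLevel level, const char* fmt, ...) {
        static const char* names[] = { "DEBUG", "INFO", "WARN", "ERROR" };
        char buffer[1024];
        va_list args;
        va_start(args, fmt);
        std::vsnprintf(buffer, sizeof(buffer), fmt, args);
        va_end(args);
        std::fprintf(stderr, "[%s] %s\n", names[static_cast<int>(level)], buffer);
    }
}

// Printf-style convenience macros, matching how the call sites in the diff use them.
#define LOGF_DEBUG(fmt, ...)   VavCore::Log(VavCore::LogLevel::Debug,   fmt, ##__VA_ARGS__)
#define LOGF_INFO(fmt, ...)    VavCore::Log(VavCore::LogLevel::Info,    fmt, ##__VA_ARGS__)
#define LOGF_WARNING(fmt, ...) VavCore::Log(VavCore::LogLevel::Warning, fmt, ##__VA_ARGS__)
#define LOGF_ERROR(fmt, ...)   VavCore::Log(VavCore::LogLevel::Error,   fmt, ##__VA_ARGS__)

With a macro layer of roughly this form, a call such as LOGF_DEBUG("[VavCore] Found %zu video tracks", tracks.size()) expands to one formatted line routed through a single sink, which is what allows the commit to delete the paired OutputDebugStringA/printf/std::cout calls at each site.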