diff --git a/vav2/Vav2Player/Vav2Player/MultiVideoTestWindow.xaml.cpp b/vav2/Vav2Player/Vav2Player/MultiVideoTestWindow.xaml.cpp
index 8bcae3a..77e1779 100644
--- a/vav2/Vav2Player/Vav2Player/MultiVideoTestWindow.xaml.cpp
+++ b/vav2/Vav2Player/Vav2Player/MultiVideoTestWindow.xaml.cpp
@@ -178,8 +178,8 @@ namespace winrt::Vav2Player::implementation
videoPlayer.ShowControls(true);
videoPlayer.AutoPlay(false);
- // Set default decoder to Software (AV1Decoder)
- videoPlayer.DecoderType(Vav2Player::VideoDecoderType::Software);
+ // Set default decoder to Auto (AdaptiveAV1Decoder priority)
+ videoPlayer.DecoderType(Vav2Player::VideoDecoderType::Auto);
// Add border for visual separation
auto border = winrt::Microsoft::UI::Xaml::Controls::Border();
diff --git a/vav2/Vav2Player/Vav2Player/Vav2Player.vcxproj b/vav2/Vav2Player/Vav2Player/Vav2Player.vcxproj
index f56c412..7b46031 100644
--- a/vav2/Vav2Player/Vav2Player/Vav2Player.vcxproj
+++ b/vav2/Vav2Player/Vav2Player/Vav2Player.vcxproj
@@ -150,6 +150,7 @@
+    <ClCompile Include="src\Decoder\AdaptiveAV1Decoder.cpp" />
@@ -183,6 +184,7 @@
+    <ClInclude Include="src\Decoder\AdaptiveAV1Decoder.h" />
diff --git a/vav2/Vav2Player/Vav2Player/Vav2PlayerHeadless.vcxproj b/vav2/Vav2Player/Vav2Player/Vav2PlayerHeadless.vcxproj
index ea7d188..239f99b 100644
--- a/vav2/Vav2Player/Vav2Player/Vav2PlayerHeadless.vcxproj
+++ b/vav2/Vav2Player/Vav2Player/Vav2PlayerHeadless.vcxproj
@@ -103,6 +103,7 @@
+    <ClCompile Include="src\Decoder\AdaptiveAV1Decoder.cpp" />
@@ -129,6 +130,7 @@
+    <ClInclude Include="src\Decoder\AdaptiveAV1Decoder.h" />
diff --git a/vav2/Vav2Player/Vav2Player/src/Decoder/AdaptiveAV1Decoder.cpp b/vav2/Vav2Player/Vav2Player/src/Decoder/AdaptiveAV1Decoder.cpp
new file mode 100644
index 0000000..e6b9b71
--- /dev/null
+++ b/vav2/Vav2Player/Vav2Player/src/Decoder/AdaptiveAV1Decoder.cpp
@@ -0,0 +1,317 @@
+#include "pch.h"
+#include "AdaptiveAV1Decoder.h"
+#include "AdaptiveNVDECDecoder.h" // For shared types and utilities
+#include <algorithm>
+#include <chrono>
+
+namespace Vav2Player {
+
+AdaptiveAV1Decoder::AdaptiveAV1Decoder() : AV1Decoder() {
+ // Initialize with balanced configuration
+ m_config = AdaptiveUtils::GetBalancedConfig();
+ m_metrics.last_update = std::chrono::steady_clock::now();
+}
+
+AdaptiveAV1Decoder::~AdaptiveAV1Decoder() = default;
+
+bool AdaptiveAV1Decoder::Initialize(const VideoMetadata& metadata, const AdaptiveConfig& config) {
+ m_config = config;
+ return Initialize(metadata);
+}
+
+bool AdaptiveAV1Decoder::Initialize(const VideoMetadata& metadata) {
+ // Store original dimensions for scaling calculations
+ m_originalWidth = metadata.width;
+ m_originalHeight = metadata.height;
+ m_targetScaledWidth = metadata.width;
+ m_targetScaledHeight = metadata.height;
+
+ // Initialize the base dav1d decoder
+ bool result = AV1Decoder::Initialize(metadata);
+ if (result) {
+ OutputDebugStringA("[AdaptiveAV1Decoder] Initialized with adaptive quality control (post-decode scaling)\n");
+ }
+
+ return result;
+}
+
+bool AdaptiveAV1Decoder::DecodeFrame(const VideoPacket& input_packet, VideoFrame& output_frame) {
+ return DecodeFrame(input_packet.data.get(), input_packet.size, output_frame);
+}
+
+bool AdaptiveAV1Decoder::DecodeFrame(const uint8_t* packet_data, size_t packet_size, VideoFrame& output_frame) {
+ auto decode_start = std::chrono::high_resolution_clock::now();
+
+ // Step 1: Decode frame using dav1d at full resolution
+ VideoFrame full_resolution_frame;
+ bool decode_success = AV1Decoder::DecodeFrame(packet_data, packet_size, full_resolution_frame);
+
+ auto decode_end = std::chrono::high_resolution_clock::now();
+ double decode_time = std::chrono::duration<double, std::milli>(decode_end - decode_start).count();
+
+ if (!decode_success) {
+ return false;
+ }
+
+ // Step 2: Apply post-decode scaling if quality level requires it
+ if (m_currentQuality != QualityLevel::ULTRA) {
+ auto scale_start = std::chrono::high_resolution_clock::now();
+
+ bool scale_success = ScaleDecodedFrame(full_resolution_frame, output_frame);
+
+ auto scale_end = std::chrono::high_resolution_clock::now();
+ double scale_time = std::chrono::duration<double, std::milli>(scale_end - scale_start).count();
+
+ // Add scaling time to decode time for performance analysis
+ decode_time += scale_time;
+
+ if (!scale_success) {
+ // Fallback: use full resolution frame
+ output_frame = std::move(full_resolution_frame);
+ OutputDebugStringA("[AdaptiveAV1Decoder] Scaling failed, using full resolution\n");
+ }
+ } else {
+ // ULTRA quality: use full resolution frame directly
+ output_frame = std::move(full_resolution_frame);
+ }
+
+ // Step 3: Update performance metrics and potentially adjust quality
+ if (m_adaptiveEnabled) {
+ UpdatePerformanceMetrics(decode_time, 0.0); // Render time updated separately
+ AnalyzePerformanceAndAdjust();
+ }
+
+ return true;
+}
+
+void AdaptiveAV1Decoder::SetQualityLevel(QualityLevel level) {
+ if (level == m_currentQuality) return;
+
+ m_targetQuality = level;
+ CalculateScaledDimensions(level, m_targetScaledWidth, m_targetScaledHeight);
+ m_currentQuality = level; // dav1d doesn't need decoder reconfiguration
+
+ OutputDebugStringA(("[AdaptiveAV1Decoder] Quality changed to " +
+ QualityLevelToString(level) + "\n").c_str());
+}
+
+void AdaptiveAV1Decoder::UpdatePerformanceMetrics(double decode_time, double render_time) {
+ std::lock_guard<std::mutex> lock(m_metricsMutex);
+
+ // Update moving averages
+ if (decode_time > 0) {
+ UpdateMovingAverage(m_recentDecodeTimes, decode_time);
+ m_metrics.avg_decode_time_ms = CalculateMovingAverage(m_recentDecodeTimes);
+ }
+
+ if (render_time > 0) {
+ UpdateMovingAverage(m_recentRenderTimes, render_time);
+ m_metrics.avg_render_time_ms = CalculateMovingAverage(m_recentRenderTimes);
+ }
+
+ m_metrics.last_update = std::chrono::steady_clock::now();
+}
+
+void AdaptiveAV1Decoder::AnalyzePerformanceAndAdjust() {
+ std::lock_guard<std::mutex> lock(m_metricsMutex);
+
+ double totalFrameTime = m_metrics.avg_decode_time_ms + m_metrics.avg_render_time_ms;
+
+ if (ShouldAdjustQuality(m_metrics.avg_decode_time_ms, m_metrics.avg_render_time_ms)) {
+ QualityLevel optimalQuality = DetermineOptimalQuality(totalFrameTime);
+
+ if (optimalQuality != m_currentQuality) {
+ m_stableFrameCount = 0; // Reset stability counter
+ SetQualityLevel(optimalQuality);
+ } else {
+ m_stableFrameCount++;
+ }
+ } else {
+ m_stableFrameCount++;
+ }
+}
+
+bool AdaptiveAV1Decoder::ShouldAdjustQuality(double avgDecodeTime, double avgRenderTime) {
+ double totalTime = avgDecodeTime + avgRenderTime;
+ double targetTime = m_config.target_frame_time_ms;
+
+ // Require minimum frames for stability
+ if (m_stableFrameCount < m_config.stable_frames_required) {
+ return false;
+ }
+
+ // Check if we're outside acceptable performance range
+ bool shouldScaleDown = totalTime > (targetTime * m_config.quality_down_threshold);
+ bool shouldScaleUp = totalTime < (targetTime * m_config.quality_up_threshold) &&
+ m_currentQuality > QualityLevel::ULTRA;
+
+ return shouldScaleDown || shouldScaleUp;
+}
+
+QualityLevel AdaptiveAV1Decoder::DetermineOptimalQuality(double totalFrameTime) {
+ double targetTime = m_config.target_frame_time_ms;
+ double ratio = totalFrameTime / targetTime;
+
+ // Determine quality based on performance ratio
+ if (ratio > 2.0) return QualityLevel::MINIMUM; // Extremely slow
+ if (ratio > 1.5) return QualityLevel::LOW; // Very slow
+ if (ratio > 1.2) return QualityLevel::MEDIUM; // Slow
+ if (ratio > 1.0) return QualityLevel::HIGH; // Slightly slow
+ return QualityLevel::ULTRA; // Fast enough for full quality
+}
+
+bool AdaptiveAV1Decoder::ScaleDecodedFrame(const VideoFrame& input_frame, VideoFrame& output_frame) {
+ // Ensure we have target dimensions calculated
+ if (m_targetScaledWidth == 0 || m_targetScaledHeight == 0) {
+ return false;
+ }
+
+ // Create output frame with scaled dimensions
+ output_frame.width = m_targetScaledWidth;
+ output_frame.height = m_targetScaledHeight;
+ output_frame.color_space = input_frame.color_space;
+ output_frame.timestamp_seconds = input_frame.timestamp_seconds;
+
+ // Allocate scaled frame buffer
+ if (!output_frame.AllocateYUV420P(m_targetScaledWidth, m_targetScaledHeight)) {
+ return false;
+ }
+
+ // Perform CPU-based YUV420P scaling
+ return ScaleYUV420P_CPU(input_frame, output_frame);
+}
+
+void AdaptiveAV1Decoder::CalculateScaledDimensions(QualityLevel quality, uint32_t& width, uint32_t& height) {
+ double scaleFactor = GetQualityScaleFactor(quality);
+
+ width = static_cast<uint32_t>(m_originalWidth * scaleFactor);
+ height = static_cast<uint32_t>(m_originalHeight * scaleFactor);
+
+ // Ensure dimensions are even (required for YUV420P)
+ width = (width + 1) & ~1;
+ height = (height + 1) & ~1;
+
+ // Ensure minimum dimensions
+ width = std::max(width, 64u);
+ height = std::max(height, 64u);
+}
+
+bool AdaptiveAV1Decoder::EnsureScalingBuffer(size_t required_size) {
+ if (m_scalingBufferSize < required_size) {
+ m_scalingBuffer = std::make_unique<uint8_t[]>(required_size);
+ m_scalingBufferSize = required_size;
+ }
+ return m_scalingBuffer != nullptr;
+}
+
+bool AdaptiveAV1Decoder::ScaleYUV420P_CPU(const VideoFrame& input, VideoFrame& output) {
+ // Scale Y plane
+ if (!ScaleYUVPlane_CPU(
+ input.y_plane.get(), input.y_stride, input.width, input.height,
+ output.y_plane.get(), output.y_stride, output.width, output.height)) {
+ return false;
+ }
+
+ // Scale U plane (half resolution)
+ uint32_t input_uv_width = input.width / 2;
+ uint32_t input_uv_height = input.height / 2;
+ uint32_t output_uv_width = output.width / 2;
+ uint32_t output_uv_height = output.height / 2;
+
+ if (!ScaleYUVPlane_CPU(
+ input.u_plane.get(), input.u_stride, input_uv_width, input_uv_height,
+ output.u_plane.get(), output.u_stride, output_uv_width, output_uv_height)) {
+ return false;
+ }
+
+ // Scale V plane (half resolution)
+ if (!ScaleYUVPlane_CPU(
+ input.v_plane.get(), input.v_stride, input_uv_width, input_uv_height,
+ output.v_plane.get(), output.v_stride, output_uv_width, output_uv_height)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool AdaptiveAV1Decoder::ScaleYUVPlane_CPU(const uint8_t* src_plane, int src_stride, int src_width, int src_height,
+ uint8_t* dst_plane, int dst_stride, int dst_width, int dst_height) {
+ // Simple bilinear scaling implementation
+ for (int dst_y = 0; dst_y < dst_height; dst_y++) {
+ for (int dst_x = 0; dst_x < dst_width; dst_x++) {
+ // Calculate source coordinates
+ float src_x_f = (static_cast<float>(dst_x) * src_width) / dst_width;
+ float src_y_f = (static_cast<float>(dst_y) * src_height) / dst_height;
+
+ int src_x = static_cast(src_x_f);
+ int src_y = static_cast(src_y_f);
+
+ // Bounds checking
+ src_x = std::min(src_x, src_width - 1);
+ src_y = std::min(src_y, src_height - 1);
+
+ // Simple nearest neighbor for now (can be upgraded to bilinear later)
+ uint8_t pixel_value = src_plane[src_y * src_stride + src_x];
+ dst_plane[dst_y * dst_stride + dst_x] = pixel_value;
+ }
+ }
+
+ return true;
+}
+
+void AdaptiveAV1Decoder::UpdateMovingAverage(std::queue<double>& queue, double newValue, size_t maxSize) {
+ queue.push(newValue);
+ while (queue.size() > maxSize) {
+ queue.pop();
+ }
+}
+
+double AdaptiveAV1Decoder::CalculateMovingAverage(const std::queue<double>& queue) const {
+ if (queue.empty()) return 0.0;
+
+ double sum = 0.0;
+ std::queue<double> temp = queue;
+ while (!temp.empty()) {
+ sum += temp.front();
+ temp.pop();
+ }
+ return sum / queue.size();
+}
+
+double AdaptiveAV1Decoder::GetQualityScaleFactor(QualityLevel level) {
+ switch (level) {
+ case QualityLevel::ULTRA: return 1.0; // 100% resolution
+ case QualityLevel::HIGH: return 0.75; // 75% resolution
+ case QualityLevel::MEDIUM: return 0.5; // 50% resolution
+ case QualityLevel::LOW: return 0.35; // 35% resolution
+ case QualityLevel::MINIMUM: return 0.25; // 25% resolution
+ default: return 1.0;
+ }
+}
+
+std::string AdaptiveAV1Decoder::QualityLevelToString(QualityLevel level) {
+ switch (level) {
+ case QualityLevel::ULTRA: return "ULTRA";
+ case QualityLevel::HIGH: return "HIGH";
+ case QualityLevel::MEDIUM: return "MEDIUM";
+ case QualityLevel::LOW: return "LOW";
+ case QualityLevel::MINIMUM: return "MINIMUM";
+ default: return "UNKNOWN";
+ }
+}
+
+PerformanceMetrics AdaptiveAV1Decoder::GetPerformanceMetrics() const {
+ std::lock_guard<std::mutex> lock(m_metricsMutex);
+ return m_metrics;
+}
+
+void AdaptiveAV1Decoder::SetTargetFrameRate(double fps) {
+ m_config.target_frame_time_ms = 1000.0 / fps;
+ m_config.critical_frame_time_ms = 1000.0 / (fps * 0.6); // 60% of target FPS
+}
+
+void AdaptiveAV1Decoder::ForceQualityAdjustment() {
+ m_stableFrameCount = m_config.stable_frames_required; // Force next analysis
+}
+
+} // namespace Vav2Player
\ No newline at end of file
diff --git a/vav2/Vav2Player/Vav2Player/src/Decoder/AdaptiveAV1Decoder.h b/vav2/Vav2Player/Vav2Player/src/Decoder/AdaptiveAV1Decoder.h
new file mode 100644
index 0000000..63d3409
--- /dev/null
+++ b/vav2/Vav2Player/Vav2Player/src/Decoder/AdaptiveAV1Decoder.h
@@ -0,0 +1,96 @@
+#pragma once
+#include "AV1Decoder.h"
+#include "AdaptiveNVDECDecoder.h" // Include full definitions of shared types
+#include <queue>
+#include <mutex>
+#include <atomic>
+
+namespace Vav2Player {
+
+// Enhanced AV1 decoder with adaptive quality adjustment using post-decode scaling
+class AdaptiveAV1Decoder : public AV1Decoder {
+public:
+ AdaptiveAV1Decoder();
+ ~AdaptiveAV1Decoder() override;
+
+ // Enhanced initialization with adaptive features
+ bool Initialize(const VideoMetadata& metadata) override;
+ bool Initialize(const VideoMetadata& metadata, const AdaptiveConfig& config);
+
+ // Override decode with adaptive logic and post-decode scaling
+ bool DecodeFrame(const VideoPacket& input_packet, VideoFrame& output_frame) override;
+ bool DecodeFrame(const uint8_t* packet_data, size_t packet_size, VideoFrame& output_frame) override;
+
+ // Adaptive quality control
+ void SetQualityLevel(QualityLevel level);
+ QualityLevel GetCurrentQualityLevel() const { return m_currentQuality; }
+
+ // Performance monitoring
+ PerformanceMetrics GetPerformanceMetrics() const;
+ void UpdatePerformanceMetrics(double decode_time, double render_time);
+
+ // Manual override controls
+ void EnableAdaptiveMode(bool enable) { m_adaptiveEnabled = enable; }
+ void SetTargetFrameRate(double fps);
+ void ForceQualityAdjustment(); // Immediate adjustment trigger
+
+ // Configuration management
+ void UpdateConfig(const AdaptiveConfig& config) { m_config = config; }
+ AdaptiveConfig GetConfig() const { return m_config; }
+
+ // Override codec name to indicate adaptive capability
+ std::string GetCodecName() const override { return "AV1 (dav1d adaptive)"; }
+
+private:
+ // Adaptive control state
+ QualityLevel m_currentQuality = QualityLevel::ULTRA;
+ QualityLevel m_targetQuality = QualityLevel::ULTRA;
+ AdaptiveConfig m_config;
+
+ // Performance monitoring
+ mutable std::mutex m_metricsMutex;
+ PerformanceMetrics m_metrics;
+ std::queue<double> m_recentDecodeTimes;
+ std::queue<double> m_recentRenderTimes;
+
+ // Adaptive decision making
+ std::atomic<bool> m_adaptiveEnabled{true};
+ uint32_t m_stableFrameCount = 0;
+
+ // Original video properties for scaling calculations
+ uint32_t m_originalWidth = 0;
+ uint32_t m_originalHeight = 0;
+
+ // Current scaled properties (for post-decode scaling)
+ uint32_t m_targetScaledWidth = 0;
+ uint32_t m_targetScaledHeight = 0;
+
+ // Post-decode scaling buffer
+ std::unique_ptr<uint8_t[]> m_scalingBuffer;
+ size_t m_scalingBufferSize = 0;
+
+ // Adaptive logic methods
+ void AnalyzePerformanceAndAdjust();
+ bool ShouldAdjustQuality(double avgDecodeTime, double avgRenderTime);
+ QualityLevel DetermineOptimalQuality(double totalFrameTime);
+
+ // Post-decode scaling methods (dav1d specific approach)
+ bool ScaleDecodedFrame(const VideoFrame& input_frame, VideoFrame& output_frame);
+ void CalculateScaledDimensions(QualityLevel quality, uint32_t& width, uint32_t& height);
+ bool EnsureScalingBuffer(size_t required_size);
+
+ // CPU-based scaling implementation
+ bool ScaleYUV420P_CPU(const VideoFrame& input, VideoFrame& output);
+ bool ScaleYUVPlane_CPU(const uint8_t* src_plane, int src_stride, int src_width, int src_height,
+ uint8_t* dst_plane, int dst_stride, int dst_width, int dst_height);
+
+ // Performance calculation helpers
+ void UpdateMovingAverage(std::queue<double>& queue, double newValue, size_t maxSize = 30);
+ double CalculateMovingAverage(const std::queue<double>& queue) const;
+
+ // Quality level utilities
+ static double GetQualityScaleFactor(QualityLevel level);
+ static std::string QualityLevelToString(QualityLevel level);
+};
+
+} // namespace Vav2Player
\ No newline at end of file
diff --git a/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.cpp b/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.cpp
index 7a2cbe1..4257a82 100644
--- a/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.cpp
+++ b/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.cpp
@@ -1,6 +1,7 @@
#include "pch.h"
#include "VideoDecoderFactory.h"
#include "AV1Decoder.h"
+#include "AdaptiveAV1Decoder.h"
#include "MediaFoundationAV1Decoder.h"
// #include "VP9Decoder.h" // TODO: activate when VP9 implemented
@@ -59,7 +60,15 @@ std::unique_ptr<IVideoDecoder> VideoDecoderFactory::CreateAV1Decoder(DecoderType
OutputDebugStringA("[VideoDecoderFactory] Creating NVDEC AV1 decoder\n");
return std::make_unique<NVDECAV1Decoder>();
}
- OutputDebugStringA("[VideoDecoderFactory] NVDEC not available, falling back to dav1d\n");
+ OutputDebugStringA("[VideoDecoderFactory] NVDEC not available, falling back to Adaptive dav1d\n");
+ [[fallthrough]];
+
+ case DecoderType::ADAPTIVE_DAV1D:
+ if (s_av1_available) {
+ OutputDebugStringA("[VideoDecoderFactory] Creating Adaptive dav1d AV1 decoder\n");
+ return std::make_unique<AdaptiveAV1Decoder>();
+ }
+ OutputDebugStringA("[VideoDecoderFactory] dav1d not available, falling back to regular dav1d\n");
[[fallthrough]];
case DecoderType::DAV1D:
@@ -78,7 +87,7 @@ std::unique_ptr<IVideoDecoder> VideoDecoderFactory::CreateAV1Decoder(DecoderType
break;
case DecoderType::AUTO:
- // Try ADAPTIVE_NVDEC first (best user experience), then NVDEC, then dav1d, finally MediaFoundation
+ // Try ADAPTIVE_NVDEC first (best user experience), then ADAPTIVE_DAV1D, then NVDEC, then dav1d, finally MediaFoundation
if (s_nvdec_available) {
OutputDebugStringA("[VideoDecoderFactory] Auto mode: trying Adaptive NVDEC AV1 decoder first\n");
auto decoder = std::make_unique<AdaptiveNVDECDecoder>();
@@ -87,6 +96,14 @@ std::unique_ptr VideoDecoderFactory::CreateAV1Decoder(DecoderType
}
}
+ if (s_av1_available) {
+ OutputDebugStringA("[VideoDecoderFactory] Auto mode: trying Adaptive dav1d AV1 decoder\n");
+ auto decoder = std::make_unique<AdaptiveAV1Decoder>();
+ if (decoder) {
+ return decoder;
+ }
+ }
+
if (s_nvdec_available) {
OutputDebugStringA("[VideoDecoderFactory] Auto mode: trying regular NVDEC AV1 decoder\n");
auto decoder = std::make_unique<NVDECAV1Decoder>();
@@ -96,7 +113,7 @@ std::unique_ptr VideoDecoderFactory::CreateAV1Decoder(DecoderType
}
if (s_av1_available) {
- OutputDebugStringA("[VideoDecoderFactory] Auto mode: trying dav1d AV1 decoder\n");
+ OutputDebugStringA("[VideoDecoderFactory] Auto mode: trying regular dav1d AV1 decoder\n");
auto decoder = std::make_unique<AV1Decoder>();
if (decoder) {
return decoder;
diff --git a/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.h b/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.h
index d43e7f1..88403ee 100644
--- a/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.h
+++ b/vav2/Vav2Player/Vav2Player/src/Decoder/VideoDecoderFactory.h
@@ -14,10 +14,11 @@ public:
// Decoder type enumeration
enum class DecoderType {
DAV1D, // dav1d library based decoder
+ ADAPTIVE_DAV1D, // Adaptive dav1d with dynamic quality control (post-decode scaling)
MEDIA_FOUNDATION, // Windows Media Foundation based decoder
NVDEC, // NVIDIA NVDEC hardware acceleration decoder
ADAPTIVE_NVDEC, // Adaptive NVDEC with dynamic quality control
- AUTO // Auto selection (ADAPTIVE_NVDEC priority, NVDEC, dav1d, finally MediaFoundation)
+ AUTO // Auto selection (ADAPTIVE_NVDEC priority, ADAPTIVE_DAV1D, NVDEC, dav1d, finally MediaFoundation)
};
// Supported decoder information