Skip to content

Commit

Permalink
wip: inspired by nxtedition#16
Browse files Browse the repository at this point in the history
  • Loading branch information
Julusian committed Jun 5, 2024
1 parent 21ccd5e commit f0d5c9b
Show file tree
Hide file tree
Showing 5 changed files with 197 additions and 13 deletions.
2 changes: 2 additions & 0 deletions src/modules/ffmpeg/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ set(SOURCES
producer/av_producer.cpp
producer/av_input.cpp
util/av_util.cpp
util/audio_resampler.cpp
producer/ffmpeg_producer.cpp
consumer/ffmpeg_consumer.cpp

Expand All @@ -15,6 +16,7 @@ set(HEADERS
producer/av_producer.h
producer/av_input.h
util/av_util.h
util/audio_resampler.h
producer/ffmpeg_producer.h
consumer/ffmpeg_consumer.h

Expand Down
38 changes: 38 additions & 0 deletions src/modules/ffmpeg/util/audio_resampler.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#include "audio_resampler.h"
#include "av_assert.h"

extern "C" {
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}

namespace caspar::ffmpeg {

// Builds a swresample context converting audio in `in_sample_fmt` to packed
// signed 32-bit samples, keeping the 7.1 channel layout and sample rate.
AudioResampler::AudioResampler(int64_t sample_rate, AVSampleFormat in_sample_fmt)
{
    auto* raw = swr_alloc_set_opts(nullptr,
                                   AV_CH_LAYOUT_7POINT1, // output layout: 8 channels
                                   AV_SAMPLE_FMT_S32,    // output format: packed s32
                                   sample_rate,
                                   AV_CH_LAYOUT_7POINT1, // input layout: 8 channels
                                   in_sample_fmt,
                                   sample_rate,
                                   0,
                                   nullptr);

    // Custom deleter releases the context when the last owner goes away.
    ctx = std::shared_ptr<SwrContext>(raw, [](SwrContext* p) { swr_free(&p); });

    if (!ctx)
        FF_RET(AVERROR(ENOMEM), "swr_alloc_set_opts");

    FF_RET(swr_init(ctx.get()), "swr_init");
}

// Converts `frames` samples per channel from the configured input format into
// packed 32-bit samples. `src` follows swr_convert's extended-data layout (one
// pointer per plane for planar input). Returns a buffer sized for frames * 8
// interleaved int32 samples (8 channels = 7.1 layout).
// Throws (via FF_RET) if the conversion fails.
caspar::array<int32_t> AudioResampler::convert(int frames, const void** src)
{
    auto  result = caspar::array<int32_t>(frames * 8 * sizeof(int32_t));
    auto* out    = reinterpret_cast<uint8_t*>(result.data());

    // BUGFIX: the return value of swr_convert was previously stored in an
    // unused local and errors were silently ignored; FF_RET raises on a
    // negative AVERROR while letting the positive sample count through.
    FF_RET(swr_convert(ctx.get(), &out, frames, reinterpret_cast<const uint8_t**>(src), frames), "swr_convert");

    return result;
}

}; // namespace caspar::ffmpeg
25 changes: 25 additions & 0 deletions src/modules/ffmpeg/util/audio_resampler.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#pragma once
// BUGFIX: this header lacked an include guard; double inclusion would
// redefine the class and break the build.

#include <common/array.h>

#include <cstdint> // int64_t (previously relied on transitive includes)
#include <memory>

extern "C" {
#include <libavutil/samplefmt.h>
}

struct SwrContext; // opaque; defined by libswresample

namespace caspar::ffmpeg {

/// Wraps an FFmpeg SwrContext that converts 7.1 audio at a fixed sample rate
/// from `in_sample_fmt` into packed signed 32-bit samples.
class AudioResampler
{
    std::shared_ptr<SwrContext> ctx;

  public:
    AudioResampler(int64_t sample_rate, AVSampleFormat in_sample_fmt);

    // The swr context is stateful and effectively uniquely owned; disallow copies.
    AudioResampler(const AudioResampler&)            = delete;
    AudioResampler& operator=(const AudioResampler&) = delete;

    /// Converts `frames` samples per channel; `src` points at the source
    /// planes/buffer in the input format. Returns interleaved s32 samples.
    caspar::array<int32_t> convert(int frames, const void** src);
};

} // namespace caspar::ffmpeg
4 changes: 3 additions & 1 deletion src/modules/html/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ target_include_directories(html PRIVATE
..
../..
${CEF_INCLUDE_PATH}
)
${FFMPEG_INCLUDE_PATH}
)
target_link_libraries(html ffmpeg)

set_target_properties(html PROPERTIES FOLDER modules)
source_group(sources\\producer producer/*)
Expand Down
141 changes: 129 additions & 12 deletions src/modules/html/producer/html_producer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,13 +60,75 @@
#include <queue>
#include <utility>

#include <ffmpeg/util/audio_resampler.h>

#include "../html.h"

namespace caspar { namespace html {

// Wall-clock timestamp in milliseconds, used to stamp buffered frames.
inline std::int_least64_t now()
{
    using namespace std::chrono;
    const auto since_epoch = high_resolution_clock::now().time_since_epoch();
    return duration_cast<milliseconds>(since_epoch).count();
}

// One frame queued for presentation: video and/or audio merged into a single
// draw_frame, stamped with the wall-clock time (ms) it was created.
struct presentation_frame
{
    std::int_least64_t timestamp = now();
    core::draw_frame   frame     = core::draw_frame::empty();
    bool               has_video = false;
    bool               has_audio = false;

    explicit presentation_frame() {}

    // BUGFIX: the original move operations transferred only timestamp/frame,
    // leaving has_video/has_audio at their defaults (false). Because
    // std::queue::push(std::move(...)) moves the element, a queued frame that
    // already carried audio reported has_audio == false and would accept a
    // second audio payload. Transfer the flags as well.
    presentation_frame(presentation_frame&& other) noexcept
        : timestamp(other.timestamp)
        , frame(std::move(other.frame))
        , has_video(other.has_video)
        , has_audio(other.has_audio)
    {
    }

    presentation_frame(const presentation_frame&)            = delete;
    presentation_frame& operator=(const presentation_frame&) = delete;

    presentation_frame& operator=(presentation_frame&& rhs) noexcept
    {
        timestamp = rhs.timestamp;
        frame     = std::move(rhs.frame);
        has_video = rhs.has_video;
        has_audio = rhs.has_audio;
        return *this;
    }

    ~presentation_frame() {}

    // Attach audio; at most one audio payload per frame (extras are dropped).
    // Composites over any existing video so the result carries both.
    void add_audio(core::mutable_frame audio)
    {
        if (has_audio)
            return;
        has_audio = true;

        if (frame) {
            frame = core::draw_frame::over(frame, core::draw_frame(std::move(audio)));
        } else {
            frame = core::draw_frame(std::move(audio));
        }
    }

    // Attach video; at most one video payload per frame (extras are dropped).
    void add_video(core::draw_frame video)
    {
        if (has_video)
            return;
        has_video = true;

        if (frame) {
            frame = core::draw_frame::over(frame, std::move(video));
        } else {
            frame = std::move(video);
        }
    }
};


class html_client
: public CefClient
, public CefRenderHandler
, public CefAudioHandler
, public CefLifeSpanHandler
, public CefLoadHandler
, public CefDisplayHandler
Expand All @@ -85,11 +147,14 @@ class html_client
bool gpu_enabled_;
tbb::concurrent_queue<std::wstring> javascript_before_load_;
std::atomic<bool> loaded_;
std::queue<std::pair<std::int_least64_t, core::draw_frame>> frames_;
std::queue<presentation_frame> frames_;
core::draw_frame last_generated_frame_;
mutable std::mutex frames_mutex_;
const size_t frames_max_size_ = 4;
std::atomic<bool> closing_;

std::unique_ptr<ffmpeg::AudioResampler> audioResampler_;

core::draw_frame last_frame_;
std::int_least64_t last_frame_time_;

Expand Down Expand Up @@ -167,15 +232,15 @@ class html_client

// Check if the sole buffered frame is too young to have a partner field generated (with a tolerance)
auto time_per_frame = (1000 * 1.5) / format_desc_.fps;
auto front_frame_is_too_young = (now_time - frames_.front().first) < time_per_frame;
auto front_frame_is_too_young = (now_time - frames_.front().timestamp) < time_per_frame;

if (follows_gap_in_frames && front_frame_is_too_young) {
return false;
}
}

last_frame_time_ = frames_.front().first;
last_frame_ = std::move(frames_.front().second);
last_frame_time_ = frames_.front().timestamp;
last_frame_ = std::move(frames_.front().frame);
frames_.pop();

graph_->set_value("buffered-frames", (double)frames_.size() / frames_max_size_);
Expand Down Expand Up @@ -245,12 +310,7 @@ class html_client
}

private:
std::int_least64_t now()
{
return std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now().time_since_epoch())
.count();
}


void GetViewRect(CefRefPtr<CefBrowser> browser, CefRect& rect) override
{
Expand Down Expand Up @@ -302,7 +362,13 @@ class html_client
{
std::lock_guard<std::mutex> lock(frames_mutex_);

frames_.push(std::make_pair(now(), core::draw_frame(std::move(frame))));
core::draw_frame new_frame = core::draw_frame(std::move(frame));
last_generated_frame_ = new_frame;

presentation_frame wrapped_frame;
wrapped_frame.add_video(std::move(new_frame));

frames_.push(std::move(wrapped_frame));
while (frames_.size() > 4) {
frames_.pop();
graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
Expand Down Expand Up @@ -353,6 +419,8 @@ class html_client

// CefClient overrides: this client implements each handler interface itself,
// so every accessor hands CEF a reference back to this object.
CefRefPtr<CefRenderHandler> GetRenderHandler() override { return this; }

CefRefPtr<CefAudioHandler> GetAudioHandler() override { return this; }

CefRefPtr<CefLifeSpanHandler> GetLifeSpanHandler() override { return this; }

CefRefPtr<CefLoadHandler> GetLoadHandler() override { return this; }
Expand All @@ -378,7 +446,7 @@ class html_client

{
std::lock_guard<std::mutex> lock(frames_mutex_);
frames_.push(std::make_pair(now(), core::draw_frame::empty()));
frames_.push(presentation_frame());
}

{
Expand All @@ -399,6 +467,55 @@ class html_client
return false;
}

// CefAudioHandler: describe the capture format we want from CEF — 7.1 at the
// channel's sample rate, with one buffer per video frame. Returning false
// rejects audio capture entirely; we only support formats whose audio cadence
// is constant (a single entry), so e.g. 59.94 formats get no audio yet.
bool GetAudioParameters(CefRefPtr<CefBrowser> browser, CefAudioParameters& params) override
{
    const auto& cadence = format_desc_.audio_cadence;

    params.channel_layout    = CEF_CHANNEL_LAYOUT_7_1;
    params.sample_rate       = format_desc_.audio_sample_rate;
    params.frames_per_buffer = cadence[0];

    return cadence.size() == 1; // TODO - handle 59.94
}

// CefAudioHandler: set up a resampler from planar float (FLTP — the format
// this code assumes CEF delivers in OnAudioStreamPacket) to the packed s32
// the rest of the pipeline expects.
void OnAudioStreamStarted(CefRefPtr<CefBrowser> browser, const CefAudioParameters& params, int channels) override
{
    audioResampler_ = std::make_unique<ffmpeg::AudioResampler>(params.sample_rate, AV_SAMPLE_FMT_FLTP);
}
// CefAudioHandler: called with `samples` frames of audio (`data` is one
// pointer per channel plane). Resamples to packed s32, wraps the result in an
// audio-only mutable_frame, and merges it into the presentation queue:
// audio attaches to the newest queued frame that has no audio yet, otherwise
// a new presentation_frame is pushed.
void OnAudioStreamPacket(CefRefPtr<CefBrowser> browser, const float** data, int samples, int64_t pts) override
{
    // No resampler means no stream was started (or it was torn down) — drop.
    if (!audioResampler_) return;

    auto audio       = audioResampler_->convert(samples, reinterpret_cast<const void**>(data));
    // Audio-only frame: empty image data and a default pixel_format_desc.
    auto audio_frame = core::mutable_frame(this, {}, std::move(audio), core::pixel_format_desc());

    {
        std::lock_guard<std::mutex> lock(frames_mutex_);
        if (frames_.empty()) {
            // Nothing buffered: create a frame for this audio and, if we have
            // one, re-attach the last rendered video so output keeps picture.
            presentation_frame wrapped_frame;

            wrapped_frame.add_audio(std::move(audio_frame));
            if (last_generated_frame_) {
                wrapped_frame.add_video(last_generated_frame_);
            }

            frames_.push(std::move(wrapped_frame));
        } else {
            if (!frames_.back().has_audio) {
                // Pair this audio with the newest video-only frame.
                frames_.back().add_audio(std::move(audio_frame));
            } else {
                // Newest frame already carries audio: queue a fresh
                // audio-only frame (video may catch up later).
                presentation_frame wrapped_frame;
                wrapped_frame.add_audio(std::move(audio_frame));
                frames_.push(std::move(wrapped_frame));
            }
        }

    }
}
// CefAudioHandler: stream ended — drop the resampler; a new one is created
// by OnAudioStreamStarted if audio resumes.
void OnAudioStreamStopped(CefRefPtr<CefBrowser> browser) override { audioResampler_ = nullptr; }
// CefAudioHandler: audio capture failed — log and tear down the resampler so
// subsequent packets (if any) are ignored.
void OnAudioStreamError(CefRefPtr<CefBrowser> browser, const CefString& message) override
{
    // NOTE(review): logged at `info` severity although this is an error
    // callback — consider `warning`/`error`; confirm intended log level.
    CASPAR_LOG(info) << "[html_producer] OnAudioStreamError: \"" << message.ToString() << "\"";
    audioResampler_ = nullptr;
}

void do_execute_javascript(const std::wstring& javascript)
{
html::begin_invoke([=] {
Expand Down

0 comments on commit f0d5c9b

Please sign in to comment.